hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
13c2849ffaa2a86bc4338f6a16d3b702adc91198.hip | // !!! This is a file automatically generated by hipify!!!
#include <string>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "circleBoxTest.cu_inl"
#include "cudaRenderer.h"
#include "image.h"
#include "noise.h"
#include "sceneLoader.h"
#include "util.h"
// gpuErrchk(call) -- wrap a HIP runtime call; reports and (by default) exits on failure.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// gpuAssert -- host-side error reporter behind the gpuErrchk() macro.
// Prints the HIP error string plus the call site, then terminates the
// process (unless abort=false) so failures are never silently ignored.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
    if (code != hipSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
        // exit with the HIP error code so scripts can detect which failure occurred
        if (abort) exit(code);
    }
}
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
// Scene parameters and device-array pointers shared (read-only) by all
// kernels through the __constant__ copy declared below.
struct GlobalConstants {
    SceneName sceneName;   // which demo scene is being rendered
    int numCircles;        // number of circles in the scene

    float* position;       // device array: 3 floats (x, y, z) per circle
    float* velocity;       // device array: 3 floats per circle
    float* color;          // device array: 3 floats (r, g, b) per circle
    float* radius;         // device array: 1 float per circle

    int imageWidth;        // output image dimensions in pixels
    int imageHeight;
    float* imageData;      // device image buffer: 4 floats (RGBA) per pixel
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU. (we didn't talk
// about this type of memory in class, but constant memory is a fast
// place to put read-only variables).
__constant__ GlobalConstants cuConstRendererParams;
// read-only lookup tables used to quickly compute noise (needed by
// advanceAnimation for the snowflake scene)
__constant__ int cuConstNoiseYPermutationTable[256];
__constant__ int cuConstNoiseXPermutationTable[256];
__constant__ float cuConstNoise1DValueTable[256];
// color ramp table needed for the color ramp lookup shader
#define COLOR_MAP_SIZE 5
__constant__ float cuConstColorRamp[COLOR_MAP_SIZE][3];
// including parts of the CUDA code from external files to keep this
// file simpler and to separate code that should not be modified
#include "noiseCuda.cu_inl"
#include "lookupColor.cu_inl"
// kernelClearImageSnowflake -- (CUDA device code)
//
// Fill the image with the white-to-gray vertical gradation used as the
// background of the snowflake scene. One thread per pixel; threads in
// the grid tail beyond the image exit early.
__global__ void kernelClearImageSnowflake() {

    int px = blockIdx.x * blockDim.x + threadIdx.x;
    int py = blockIdx.y * blockDim.y + threadIdx.y;

    int w = cuConstRendererParams.imageWidth;
    int h = cuConstRendererParams.imageHeight;

    // guard the grid tail: the launch may overshoot the image bounds
    if (px >= w || py >= h)
        return;

    // shade decreases as py increases (row 0 is brightest)
    float gradient = .4f + .45f * static_cast<float>(h - py) / h;
    float4 pixel = make_float4(gradient, gradient, gradient, 1.f);

    // one float4 store is more efficient than four scalar fp32 stores
    float* dst = &cuConstRendererParams.imageData[4 * (py * w + px)];
    *reinterpret_cast<float4*>(dst) = pixel;
}
// kernelClearImage -- (CUDA device code)
//
// Set every pixel of the image to the specified RGBA color.
// One thread per pixel; threads past the image bounds exit early.
__global__ void kernelClearImage(float r, float g, float b, float a) {

    int px = blockIdx.x * blockDim.x + threadIdx.x;
    int py = blockIdx.y * blockDim.y + threadIdx.y;

    int w = cuConstRendererParams.imageWidth;
    int h = cuConstRendererParams.imageHeight;

    // guard the grid tail: the launch may overshoot the image bounds
    if (px >= w || py >= h)
        return;

    float4 pixel = make_float4(r, g, b, a);

    // one float4 store is more efficient than four scalar fp32 stores
    float* dst = &cuConstRendererParams.imageData[4 * (py * w + px)];
    *reinterpret_cast<float4*>(dst) = pixel;
}
// kernelAdvanceFireWorks
//
// Advance the fireworks animation one time step. One thread per circle:
// threads mapped to firework centers (indices 0..NUM_FIREWORKS-1) do
// nothing; every other thread moves one spark outward and respawns it on
// its firework's rim once it travels farther than maxDist.
__global__ void kernelAdvanceFireWorks() {
    const float dt = 1.f / 60.f;     // fixed 60 fps time step
    const float pi = 3.14159;
    const float maxDist = 0.25f;     // spark travel limit before respawn

    float* velocity = cuConstRendererParams.velocity;
    float* position = cuConstRendererParams.position;
    float* radius = cuConstRendererParams.radius;

    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= cuConstRendererParams.numCircles)
        return;

    if (0 <= index && index < NUM_FIREWORKS) { // firework center; no update
        return;
    }

    // determine the fire-work center/spark indices
    int fIdx = (index - NUM_FIREWORKS) / NUM_SPARKS;
    int sfIdx = (index - NUM_FIREWORKS) % NUM_SPARKS;

    int index3i = 3 * fIdx;   // float3 offset of this spark's firework center
    // NOTE(review): sIdx recomputes to the same value as 'index' itself
    int sIdx = NUM_FIREWORKS + fIdx * NUM_SPARKS + sfIdx;
    int index3j = 3 * sIdx;   // float3 offset of this spark

    float cx = position[index3i];
    float cy = position[index3i+1];

    // update position (x/y only; z is unused by the motion)
    position[index3j] += velocity[index3j] * dt;
    position[index3j+1] += velocity[index3j+1] * dt;

    // fire-work sparks
    float sx = position[index3j];
    float sy = position[index3j+1];

    // compute vector from firework center to spark
    float cxsx = sx - cx;
    float cysy = sy - cy;

    // compute distance from fire-work
    float dist = sqrt(cxsx * cxsx + cysy * cysy);
    if (dist > maxDist) { // restore to starting position
        // deterministic starting position on fire-work's rim,
        // evenly spaced by spark index
        float angle = (sfIdx * 2 * pi)/NUM_SPARKS;
        float sinA = sin(angle);
        float cosA = cos(angle);
        float x = cosA * radius[fIdx];
        float y = sinA * radius[fIdx];

        position[index3j] = position[index3i] + x;
        position[index3j+1] = position[index3i+1] + y;
        position[index3j+2] = 0.0f;

        // travel outward at 1/5 unit speed along the rim normal
        velocity[index3j] = cosA/5.0;
        velocity[index3j+1] = sinA/5.0;
        velocity[index3j+2] = 0.0f;
    }
}
// kernelAdvanceHypnosis
//
// Grow each circle's radius by 0.01 per step, snapping it back to a
// small radius once it passes the 0.5 threshold. One thread per circle.
__global__ void kernelAdvanceHypnosis() {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= cuConstRendererParams.numCircles)
        return;

    float* radius = cuConstRendererParams.radius;
    const float cutOff = 0.5f;

    // reset past the threshold, otherwise keep expanding
    radius[i] = (radius[i] > cutOff) ? 0.02f : radius[i] + 0.01f;
}
// kernelAdvanceBouncingBalls
//
// Advance the bouncing-balls animation one step: apply gravity and a
// drag-damped bounce along the y-axis, freezing a ball once it has
// settled on the floor. One thread per circle; only y components move.
__global__ void kernelAdvanceBouncingBalls() {
    const float dt = 1.f / 60.f;     // fixed 60 fps time step
    const float kGravity = -2.8f; // sorry Newton
    const float kDragCoeff = -0.8f;  // negative: a bounce reverses and damps velocity
    const float epsilon = 0.001f;    // "has settled" tolerance

    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= cuConstRendererParams.numCircles)
        return;

    float* velocity = cuConstRendererParams.velocity;
    float* position = cuConstRendererParams.position;

    int index3 = 3 * index;
    // snapshot the y components before integration for the stop test below
    float oldVelocity = velocity[index3+1];
    float oldPosition = position[index3+1];

    if (oldVelocity == 0.f && oldPosition == 0.f) { // stop-condition: ball already frozen
        return;
    }

    if (position[index3+1] < 0 && oldVelocity < 0.f) { // bounce ball off the floor
        velocity[index3+1] *= kDragCoeff;
    }

    // update velocity: v = u + at (only along y-axis)
    velocity[index3+1] += kGravity * dt;

    // update positions (only along y-axis)
    position[index3+1] += velocity[index3+1] * dt;

    // if both velocity and position have effectively stopped changing
    // below the floor, pin the ball at rest so it stops jittering
    if (fabsf(velocity[index3+1] - oldVelocity) < epsilon
        && oldPosition < 0.0f
        && fabsf(position[index3+1]-oldPosition) < epsilon) { // stop ball
        velocity[index3+1] = 0.f;
        position[index3+1] = 0.f;
    }
}
// kernelAdvanceSnowflake -- (CUDA device code)
//
// Move the snowflake animation forward one time step: integrate each
// circle's position and velocity under gravity, drag, and cell noise,
// and respawn flakes that leave the left/right/bottom of the screen at
// the top with a pseudorandom x position/velocity. One thread per circle.
__global__ void kernelAdvanceSnowflake() {

    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= cuConstRendererParams.numCircles)
        return;

    const float dt = 1.f / 60.f;   // fixed 60 fps time step
    const float kGravity = -1.8f;  // sorry Newton
    const float kDragCoeff = 2.f;

    int index3 = 3 * index;

    float* positionPtr = &cuConstRendererParams.position[index3];
    float* velocityPtr = &cuConstRendererParams.velocity[index3];

    // loads from global memory (one wide float3 access each)
    float3 position = *((float3*)positionPtr);
    float3 velocity = *((float3*)velocityPtr);

    // hack to make farther circles move more slowly, giving the
    // illusion of parallax (fminf/fmaxf: stay in fp32, no double promotion)
    float forceScaling = fminf(fmaxf(1.f - position.z, .1f), 1.f); // clamp

    // add some noise to the motion to make the snow flutter
    float3 noiseInput;
    noiseInput.x = 10.f * position.x;
    noiseInput.y = 10.f * position.y;
    noiseInput.z = 255.f * position.z;
    float2 noiseForce = cudaVec2CellNoise(noiseInput, index);
    noiseForce.x *= 7.5f;
    noiseForce.y *= 5.f;

    // drag opposes the current velocity, componentwise
    float2 dragForce;
    dragForce.x = -1.f * kDragCoeff * velocity.x;
    dragForce.y = -1.f * kDragCoeff * velocity.y;

    // update positions
    position.x += velocity.x * dt;
    position.y += velocity.y * dt;

    // update velocities
    // BUG FIX: the x update previously read dragForce.y while
    // dragForce.x was computed and never used -- a clear typo.
    velocity.x += forceScaling * (noiseForce.x + dragForce.x) * dt;
    velocity.y += forceScaling * (kGravity + noiseForce.y + dragForce.y) * dt;

    float radius = cuConstRendererParams.radius[index];

    // if the snowflake has moved off the left, right or bottom of
    // the screen, place it back at the top and give it a
    // pseudorandom x position and velocity.
    if ( (position.y + radius < 0.f) ||
         (position.x + radius) < 0.f ||
         (position.x - radius) > 1.f)
    {
        noiseInput.x = 255.f * position.x;
        noiseInput.y = 255.f * position.y;
        noiseInput.z = 255.f * position.z;
        noiseForce = cudaVec2CellNoise(noiseInput, index);

        position.x = .5f + .5f * noiseForce.x;
        position.y = 1.35f + radius;

        // restart from 0 vertical velocity. Choose a
        // pseudo-random horizontal velocity.
        velocity.x = 2.f * noiseForce.y;
        velocity.y = 0.f;
    }

    // store updated positions and velocities to global memory
    *((float3*)positionPtr) = position;
    *((float3*)velocityPtr) = velocity;
}
// shadePixel -- (CUDA device code)
//
// Compute a single circle's color contribution to one pixel and blend
// it into the image. Called from kernelRenderCircles().
//
// circleIndex  index of the contributing circle
// pixelCenter  pixel center in normalized [0,1] image coordinates
// p            circle center (x, y) and depth (z)
// imagePtr     pointer to the pixel's RGBA float4 in the image buffer
__device__ __inline__ void
shadePixel(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) {

    float diffX = p.x - pixelCenter.x;
    float diffY = p.y - pixelCenter.y;
    float pixelDist = diffX * diffX + diffY * diffY;

    float rad = cuConstRendererParams.radius[circleIndex];
    float maxDist = rad * rad;

    // circle does not contribute to the image
    if (pixelDist > maxDist)
        return;

    float3 rgb;
    float alpha;

    // There is a non-zero contribution. Now compute the shading value.
    // This conditional is in the inner loop, but it evaluates the same
    // way for all threads so its cost is not so bad.
    if (cuConstRendererParams.sceneName == SNOWFLAKES || cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) {
        // snowflakes: radial color-ramp lookup with distance falloff,
        // dimmed by depth so far-away flakes are fainter
        const float kCircleMaxAlpha = .5f;
        const float falloffScale = 4.f;

        // FIX: use the float overloads (sqrtf/expf) -- sqrt/exp on float
        // operands promoted to costly double-precision math on the GPU
        float normPixelDist = sqrtf(pixelDist) / rad;
        rgb = lookupColor(normPixelDist);

        float maxAlpha = .6f + .4f * (1.f - p.z);
        maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
        alpha = maxAlpha * expf(-1.f * falloffScale * normPixelDist * normPixelDist);

    } else {
        // simple: each circle has an assigned color
        int index3 = 3 * circleIndex;
        rgb = *(float3*)&(cuConstRendererParams.color[index3]);
        alpha = .5f;
    }

    float oneMinusAlpha = 1.f - alpha;

    // BEGIN SHOULD-BE-ATOMIC REGION
    // read-modify-write of the pixel is not atomic; concurrent
    // contributions from different threads can race (known limitation)
    float4 existingColor = *imagePtr;
    float4 newColor;
    newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x;
    newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y;
    newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z;
    newColor.w = alpha + existingColor.w;

    // global memory write
    *imagePtr = newColor;
    // END SHOULD-BE-ATOMIC REGION
}
// kernelRenderCircles -- (CUDA device code)
//
// Each thread block renders one rectangular region of the image: the
// block loops over all circles in order, culls circles that cannot
// overlap its region, and its threads cooperatively shade the region's
// pixels. Circle blend order is preserved because the circle loop is
// outermost and the block syncs between circles.
__global__ void kernelRenderCircles(int imageWidth, int imageHeight) {

    float invWidth = 1.f / imageWidth;
    float invHeight = 1.f / imageHeight;

    // this block's pixel region; the last row/column of blocks absorbs
    // the remainder so every pixel is covered even when the image size
    // is not divisible by the grid size (previously remainder pixels
    // were never shaded)
    int regionW = imageWidth / gridDim.x;
    int regionH = imageHeight / gridDim.y;
    int startX = blockIdx.x * regionW;
    int startY = blockIdx.y * regionH;
    int endX = (blockIdx.x == gridDim.x - 1) ? imageWidth  : startX + regionW;
    int endY = (blockIdx.y == gridDim.y - 1) ? imageHeight : startY + regionH;

    // this block's region in normalized [0,1] coordinates, for culling
    float boxL = static_cast<float>(blockIdx.x) / gridDim.x;
    float boxR = static_cast<float>(blockIdx.x + 1) / gridDim.x;
    float boxB = static_cast<float>(blockIdx.y) / gridDim.y;
    float boxT = static_cast<float>(blockIdx.y + 1) / gridDim.y;

    for (int index = 0; index < cuConstRendererParams.numCircles; index++) {
        int index3 = 3 * index;
        float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
        float rad = cuConstRendererParams.radius[index];

        // conservative overlap test; uniform across the block (depends
        // only on blockIdx), so all threads take the same branch
        int circleInBox = circleInBoxConservative(p.x, p.y, rad, boxL, boxR, boxT, boxB);

        // BUG FIX: this was 'return', which terminated the thread and
        // silently skipped every remaining circle for this region.
        // (The per-block debug printf spam was also removed.)
        if (circleInBox == 0)
            continue;

        // block-stride loops over the region's pixels
        for (int x = startX + threadIdx.x; x < endX; x += blockDim.x) {
            for (int y = startY + threadIdx.y; y < endY; y += blockDim.y) {
                float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (y * imageWidth + x)]);
                float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(x) + 0.5f),
                                                     invHeight * (static_cast<float>(y) + 0.5f));
                shadePixel(index, pixelCenterNorm, p, imgPtr);
            }
        }
        // keep the block in lockstep between circles so contributions
        // blend in circle order (safe: the cull branch above is uniform)
        __syncthreads();
    }
}
////////////////////////////////////////////////////////////////////////////////////////
// Constructor: zero all host and device pointers. Real allocation
// happens later in loadScene() / allocOutputImage() / setup().
CudaRenderer::CudaRenderer() {
    image = NULL;
    numCircles = 0;
    // host-side scene arrays (filled by the scene loader)
    position = NULL;
    velocity = NULL;
    color = NULL;
    radius = NULL;

    // device-side mirrors (allocated in setup())
    cudaDevicePosition = NULL;
    cudaDeviceVelocity = NULL;
    cudaDeviceColor = NULL;
    cudaDeviceRadius = NULL;
    cudaDeviceImageData = NULL;
}
// Destructor: release the output image, the host-side scene arrays,
// and the device allocations. The pointer checks guard against a
// renderer that was never fully set up.
CudaRenderer::~CudaRenderer() {

    if (image) {
        delete image;
    }

    if (position) {
        // host arrays are allocated together, so one check suffices
        delete [] position;
        delete [] velocity;
        delete [] color;
        delete [] radius;
    }

    if (cudaDevicePosition) {
        // device arrays are allocated together in setup()
        hipFree(cudaDevicePosition);
        hipFree(cudaDeviceVelocity);
        hipFree(cudaDeviceColor);
        hipFree(cudaDeviceRadius);
        hipFree(cudaDeviceImageData);
    }
}
// getImage --
//
// Copy the rendered image from device memory into the host-side Image
// object, then expose it to the caller.
const Image*
CudaRenderer::getImage() {

    printf("Copying image data from device\n");

    // use the file's gpuErrchk wrapper (consistent with render()) so a
    // failed copy is reported instead of silently returning stale data
    gpuErrchk(hipMemcpy(image->data,
                        cudaDeviceImageData,
                        sizeof(float) * 4 * image->width * image->height,
                        hipMemcpyDeviceToHost));

    return image;
}
// loadScene --
//
// Record the scene name and populate the host-side circle arrays
// (numCircles, position, velocity, color, radius) via the scene loader.
void
CudaRenderer::loadScene(SceneName scene) {
    sceneName = scene;
    loadCircleScene(sceneName, numCircles, position, velocity, color, radius);
}
void
CudaRenderer::setup() {
int deviceCount = 0;
std::string name;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Initializing CUDA for CudaRenderer\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
name = deviceProps.name;
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
// By this time the scene should be loaded. Now copy all the key
// data structures into device memory so they are accessible to
// CUDA kernels
//
// See the CUDA Programmer's Guide for descriptions of
// hipMalloc and hipMemcpy
hipMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceVelocity, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
hipMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);
hipMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceVelocity, velocity, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, hipMemcpyHostToDevice);
// Initialize parameters in constant memory. We didn't talk about
// constant memory in class, but the use of read-only constant
// memory here is an optimization over just sticking these values
// in device global memory. NVIDIA GPUs have a few special tricks
// for optimizing access to constant memory. Using global memory
// here would have worked just as well. See the Programmer's
// Guide for more information about constant memory.
GlobalConstants params;
params.sceneName = sceneName;
params.numCircles = numCircles;
params.imageWidth = image->width;
params.imageHeight = image->height;
params.position = cudaDevicePosition;
params.velocity = cudaDeviceVelocity;
params.color = cudaDeviceColor;
params.radius = cudaDeviceRadius;
params.imageData = cudaDeviceImageData;
hipMemcpyToSymbol(cuConstRendererParams, ¶ms, sizeof(GlobalConstants));
// also need to copy over the noise lookup tables, so we can
// implement noise on the GPU
int* permX;
int* permY;
float* value1D;
getNoiseTables(&permX, &permY, &value1D);
hipMemcpyToSymbol(cuConstNoiseXPermutationTable, permX, sizeof(int) * 256);
hipMemcpyToSymbol(cuConstNoiseYPermutationTable, permY, sizeof(int) * 256);
hipMemcpyToSymbol(cuConstNoise1DValueTable, value1D, sizeof(float) * 256);
// last, copy over the color table that's used by the shading
// function for circles in the snowflake demo
float lookupTable[COLOR_MAP_SIZE][3] = {
{1.f, 1.f, 1.f},
{1.f, 1.f, 1.f},
{.8f, .9f, 1.f},
{.8f, .9f, 1.f},
{.8f, 0.8f, 1.f},
};
hipMemcpyToSymbol(cuConstColorRamp, lookupTable, sizeof(float) * 3 * COLOR_MAP_SIZE);
}
// allocOutputImage --
//
// Allocate buffer the renderer will render into. Check status of
// image first to avoid memory leak.
void
CudaRenderer::allocOutputImage(int width, int height) {
if (image)
delete image;
image = new Image(width, height);
}
// clearImage --
//
// Clears the renderer's target image on the device. Snowflake scenes
// get the gradient background; all other scenes get solid white.
void
CudaRenderer::clearImage() {

    // 256 threads per block is a healthy number
    dim3 blockDim(16, 16, 1);
    // ceil-div so partial tiles at the image edges are still covered
    dim3 gridDim(
        (image->width + blockDim.x - 1) / blockDim.x,
        (image->height + blockDim.y - 1) / blockDim.y);

    if (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME) {
        hipLaunchKernelGGL(( kernelClearImageSnowflake), dim3(gridDim), dim3(blockDim), 0, 0, );
    } else {
        hipLaunchKernelGGL(( kernelClearImage), dim3(gridDim), dim3(blockDim), 0, 0, 1.f, 1.f, 1.f, 1.f);
    }
    // block until the clear finishes before the caller renders on top
    hipDeviceSynchronize();
}
// advanceAnimation --
//
// Advance the simulation one time step on the device, dispatching to
// the per-scene update kernel. Scenes with no animation are a no-op.
void
CudaRenderer::advanceAnimation() {
    // 256 threads per block is a healthy number
    dim3 blockDim(256, 1);
    // ceil-div: one thread per circle
    dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);

    // dispatch on scene: snowflakes, bouncing balls, hypnosis, fireworks
    if (sceneName == SNOWFLAKES) {
        hipLaunchKernelGGL(( kernelAdvanceSnowflake), dim3(gridDim), dim3(blockDim), 0, 0, );
    } else if (sceneName == BOUNCING_BALLS) {
        hipLaunchKernelGGL(( kernelAdvanceBouncingBalls), dim3(gridDim), dim3(blockDim), 0, 0, );
    } else if (sceneName == HYPNOSIS) {
        hipLaunchKernelGGL(( kernelAdvanceHypnosis), dim3(gridDim), dim3(blockDim), 0, 0, );
    } else if (sceneName == FIREWORKS) {
        hipLaunchKernelGGL(( kernelAdvanceFireWorks), dim3(gridDim), dim3(blockDim), 0, 0, );
    }
    // block until the update finishes before the caller renders
    hipDeviceSynchronize();
}
// render --
//
// Render all circles into the output image on the device. The image is
// partitioned into a fixed 2x2 grid of blocks, each block shading one
// quadrant (see kernelRenderCircles).
void
CudaRenderer::render() {

    // FIX: pass the dimensions as int -- the previous 'short' locals
    // silently truncated images wider/taller than 32767 pixels, and the
    // kernel parameters are already int (the in-kernel TODO noted this).
    int imageWidth = image->width;
    int imageHeight = image->height;

    dim3 blockDim(16, 16);
    // fixed 2x2 grid: each block works on one quarter of the image
    dim3 gridDim(2, 2);

    hipLaunchKernelGGL(( kernelRenderCircles), dim3(gridDim), dim3(blockDim), 0, 0, imageWidth, imageHeight);
    gpuErrchk(hipDeviceSynchronize());
}
| 13c2849ffaa2a86bc4338f6a16d3b702adc91198.cu | #include <string>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "circleBoxTest.cu_inl"
#include "cudaRenderer.h"
#include "image.h"
#include "noise.h"
#include "sceneLoader.h"
#include "util.h"
// gpuErrchk(call) -- wrap a CUDA runtime call; reports and (by default) exits on failure.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// gpuAssert -- host-side error reporter behind the gpuErrchk() macro.
// Prints the CUDA error string plus the call site, then terminates the
// process (unless abort=false) so failures are never silently ignored.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        // exit with the CUDA error code so scripts can detect which failure occurred
        if (abort) exit(code);
    }
}
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
// Scene parameters and device-array pointers shared (read-only) by all
// kernels through the __constant__ copy declared below.
struct GlobalConstants {
    SceneName sceneName;   // which demo scene is being rendered
    int numCircles;        // number of circles in the scene

    float* position;       // device array: 3 floats (x, y, z) per circle
    float* velocity;       // device array: 3 floats per circle
    float* color;          // device array: 3 floats (r, g, b) per circle
    float* radius;         // device array: 1 float per circle

    int imageWidth;        // output image dimensions in pixels
    int imageHeight;
    float* imageData;      // device image buffer: 4 floats (RGBA) per pixel
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU. (we didn't talk
// about this type of memory in class, but constant memory is a fast
// place to put read-only variables).
__constant__ GlobalConstants cuConstRendererParams;
// read-only lookup tables used to quickly compute noise (needed by
// advanceAnimation for the snowflake scene)
__constant__ int cuConstNoiseYPermutationTable[256];
__constant__ int cuConstNoiseXPermutationTable[256];
__constant__ float cuConstNoise1DValueTable[256];
// color ramp table needed for the color ramp lookup shader
#define COLOR_MAP_SIZE 5
__constant__ float cuConstColorRamp[COLOR_MAP_SIZE][3];
// including parts of the CUDA code from external files to keep this
// file simpler and to separate code that should not be modified
#include "noiseCuda.cu_inl"
#include "lookupColor.cu_inl"
// kernelClearImageSnowflake -- (CUDA device code)
//
// Clear the image to the white-gray vertical gradation used as the
// snowflake scene background. One thread per pixel.
__global__ void kernelClearImageSnowflake() {

    int imageX = blockIdx.x * blockDim.x + threadIdx.x;
    int imageY = blockIdx.y * blockDim.y + threadIdx.y;

    int width = cuConstRendererParams.imageWidth;
    int height = cuConstRendererParams.imageHeight;

    // guard the grid tail: the launch may overshoot the image bounds
    if (imageX >= width || imageY >= height)
        return;

    int offset = 4 * (imageY * width + imageX);
    // shade decreases as imageY increases (row 0 is brightest)
    float shade = .4f + .45f * static_cast<float>(height-imageY) / height;
    float4 value = make_float4(shade, shade, shade, 1.f);

    // write to global memory: a single float4 store results in more
    // efficient code than four separate fp32 stores
    *(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelClearImage -- (CUDA device code)
//
// Clear the image, setting all pixels to the specified RGBA color.
// One thread per pixel.
__global__ void kernelClearImage(float r, float g, float b, float a) {

    int imageX = blockIdx.x * blockDim.x + threadIdx.x;
    int imageY = blockIdx.y * blockDim.y + threadIdx.y;

    int width = cuConstRendererParams.imageWidth;
    int height = cuConstRendererParams.imageHeight;

    // guard the grid tail: the launch may overshoot the image bounds
    if (imageX >= width || imageY >= height)
        return;

    int offset = 4 * (imageY * width + imageX);
    float4 value = make_float4(r, g, b, a);

    // write to global memory: a single float4 store results in more
    // efficient code than four separate fp32 stores
    *(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelAdvanceFireWorks
//
// Advance the fireworks animation one time step. One thread per circle:
// threads mapped to firework centers (indices 0..NUM_FIREWORKS-1) do
// nothing; every other thread moves one spark outward and respawns it on
// its firework's rim once it travels farther than maxDist.
__global__ void kernelAdvanceFireWorks() {
    const float dt = 1.f / 60.f;     // fixed 60 fps time step
    const float pi = 3.14159;
    const float maxDist = 0.25f;     // spark travel limit before respawn

    float* velocity = cuConstRendererParams.velocity;
    float* position = cuConstRendererParams.position;
    float* radius = cuConstRendererParams.radius;

    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= cuConstRendererParams.numCircles)
        return;

    if (0 <= index && index < NUM_FIREWORKS) { // firework center; no update
        return;
    }

    // determine the fire-work center/spark indices
    int fIdx = (index - NUM_FIREWORKS) / NUM_SPARKS;
    int sfIdx = (index - NUM_FIREWORKS) % NUM_SPARKS;

    int index3i = 3 * fIdx;   // float3 offset of this spark's firework center
    // NOTE(review): sIdx recomputes to the same value as 'index' itself
    int sIdx = NUM_FIREWORKS + fIdx * NUM_SPARKS + sfIdx;
    int index3j = 3 * sIdx;   // float3 offset of this spark

    float cx = position[index3i];
    float cy = position[index3i+1];

    // update position (x/y only; z is unused by the motion)
    position[index3j] += velocity[index3j] * dt;
    position[index3j+1] += velocity[index3j+1] * dt;

    // fire-work sparks
    float sx = position[index3j];
    float sy = position[index3j+1];

    // compute vector from firework center to spark
    float cxsx = sx - cx;
    float cysy = sy - cy;

    // compute distance from fire-work
    float dist = sqrt(cxsx * cxsx + cysy * cysy);
    if (dist > maxDist) { // restore to starting position
        // deterministic starting position on fire-work's rim,
        // evenly spaced by spark index
        float angle = (sfIdx * 2 * pi)/NUM_SPARKS;
        float sinA = sin(angle);
        float cosA = cos(angle);
        float x = cosA * radius[fIdx];
        float y = sinA * radius[fIdx];

        position[index3j] = position[index3i] + x;
        position[index3j+1] = position[index3i+1] + y;
        position[index3j+2] = 0.0f;

        // travel outward at 1/5 unit speed along the rim normal
        velocity[index3j] = cosA/5.0;
        velocity[index3j+1] = sinA/5.0;
        velocity[index3j+2] = 0.0f;
    }
}
// kernelAdvanceHypnosis
//
// Grow each circle's radius by 0.01 per step, snapping it back to a
// small radius once it passes the 0.5 threshold. One thread per circle.
__global__ void kernelAdvanceHypnosis() {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= cuConstRendererParams.numCircles)
        return;

    float* radius = cuConstRendererParams.radius;

    float cutOff = 0.5f;
    // reset to a small circle after reaching the threshold radius
    if (radius[index] > cutOff) {
        radius[index] = 0.02f;
    } else {
        radius[index] += 0.01f;
    }
}
// kernelAdvanceBouncingBalls
//
// Advance the bouncing-balls animation one step: apply gravity and a
// drag-damped bounce along the y-axis, freezing a ball once it has
// settled on the floor. One thread per circle; only y components move.
__global__ void kernelAdvanceBouncingBalls() {
    const float dt = 1.f / 60.f;     // fixed 60 fps time step
    const float kGravity = -2.8f; // sorry Newton
    const float kDragCoeff = -0.8f;  // negative: a bounce reverses and damps velocity
    const float epsilon = 0.001f;    // "has settled" tolerance

    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= cuConstRendererParams.numCircles)
        return;

    float* velocity = cuConstRendererParams.velocity;
    float* position = cuConstRendererParams.position;

    int index3 = 3 * index;
    // snapshot the y components before integration for the stop test below
    float oldVelocity = velocity[index3+1];
    float oldPosition = position[index3+1];

    if (oldVelocity == 0.f && oldPosition == 0.f) { // stop-condition: ball already frozen
        return;
    }

    if (position[index3+1] < 0 && oldVelocity < 0.f) { // bounce ball off the floor
        velocity[index3+1] *= kDragCoeff;
    }

    // update velocity: v = u + at (only along y-axis)
    velocity[index3+1] += kGravity * dt;

    // update positions (only along y-axis)
    position[index3+1] += velocity[index3+1] * dt;

    // if both velocity and position have effectively stopped changing
    // below the floor, pin the ball at rest so it stops jittering
    if (fabsf(velocity[index3+1] - oldVelocity) < epsilon
        && oldPosition < 0.0f
        && fabsf(position[index3+1]-oldPosition) < epsilon) { // stop ball
        velocity[index3+1] = 0.f;
        position[index3+1] = 0.f;
    }
}
// kernelAdvanceSnowflake -- (CUDA device code)
//
// Move the snowflake animation forward one time step: integrate each
// circle's position and velocity under gravity, drag, and cell noise,
// and respawn flakes that leave the left/right/bottom of the screen at
// the top with a pseudorandom x position/velocity. One thread per circle.
__global__ void kernelAdvanceSnowflake() {

    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= cuConstRendererParams.numCircles)
        return;

    const float dt = 1.f / 60.f;   // fixed 60 fps time step
    const float kGravity = -1.8f;  // sorry Newton
    const float kDragCoeff = 2.f;

    int index3 = 3 * index;

    float* positionPtr = &cuConstRendererParams.position[index3];
    float* velocityPtr = &cuConstRendererParams.velocity[index3];

    // loads from global memory (one wide float3 access each)
    float3 position = *((float3*)positionPtr);
    float3 velocity = *((float3*)velocityPtr);

    // hack to make farther circles move more slowly, giving the
    // illusion of parallax (fminf/fmaxf: stay in fp32, no double promotion)
    float forceScaling = fminf(fmaxf(1.f - position.z, .1f), 1.f); // clamp

    // add some noise to the motion to make the snow flutter
    float3 noiseInput;
    noiseInput.x = 10.f * position.x;
    noiseInput.y = 10.f * position.y;
    noiseInput.z = 255.f * position.z;
    float2 noiseForce = cudaVec2CellNoise(noiseInput, index);
    noiseForce.x *= 7.5f;
    noiseForce.y *= 5.f;

    // drag opposes the current velocity, componentwise
    float2 dragForce;
    dragForce.x = -1.f * kDragCoeff * velocity.x;
    dragForce.y = -1.f * kDragCoeff * velocity.y;

    // update positions
    position.x += velocity.x * dt;
    position.y += velocity.y * dt;

    // update velocities
    // BUG FIX: the x update previously read dragForce.y while
    // dragForce.x was computed and never used -- a clear typo.
    velocity.x += forceScaling * (noiseForce.x + dragForce.x) * dt;
    velocity.y += forceScaling * (kGravity + noiseForce.y + dragForce.y) * dt;

    float radius = cuConstRendererParams.radius[index];

    // if the snowflake has moved off the left, right or bottom of
    // the screen, place it back at the top and give it a
    // pseudorandom x position and velocity.
    if ( (position.y + radius < 0.f) ||
         (position.x + radius) < 0.f ||
         (position.x - radius) > 1.f)
    {
        noiseInput.x = 255.f * position.x;
        noiseInput.y = 255.f * position.y;
        noiseInput.z = 255.f * position.z;
        noiseForce = cudaVec2CellNoise(noiseInput, index);

        position.x = .5f + .5f * noiseForce.x;
        position.y = 1.35f + radius;

        // restart from 0 vertical velocity. Choose a
        // pseudo-random horizontal velocity.
        velocity.x = 2.f * noiseForce.y;
        velocity.y = 0.f;
    }

    // store updated positions and velocities to global memory
    *((float3*)positionPtr) = position;
    *((float3*)velocityPtr) = velocity;
}
// shadePixel -- (CUDA device code)
//
// Compute a single circle's color contribution to one pixel and blend
// it into the image. Called from kernelRenderCircles().
//
// circleIndex  index of the contributing circle
// pixelCenter  pixel center in normalized [0,1] image coordinates
// p            circle center (x, y) and depth (z)
// imagePtr     pointer to the pixel's RGBA float4 in the image buffer
__device__ __inline__ void
shadePixel(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) {

    float diffX = p.x - pixelCenter.x;
    float diffY = p.y - pixelCenter.y;
    float pixelDist = diffX * diffX + diffY * diffY;

    float rad = cuConstRendererParams.radius[circleIndex];
    float maxDist = rad * rad;

    // circle does not contribute to the image
    if (pixelDist > maxDist)
        return;

    float3 rgb;
    float alpha;

    // There is a non-zero contribution. Now compute the shading value.
    // This conditional is in the inner loop, but it evaluates the same
    // way for all threads so its cost is not so bad.
    if (cuConstRendererParams.sceneName == SNOWFLAKES || cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) {
        // snowflakes: radial color-ramp lookup with distance falloff,
        // dimmed by depth so far-away flakes are fainter
        const float kCircleMaxAlpha = .5f;
        const float falloffScale = 4.f;

        // FIX: use the float overloads (sqrtf/expf) -- sqrt/exp on float
        // operands promoted to costly double-precision math on the GPU
        float normPixelDist = sqrtf(pixelDist) / rad;
        rgb = lookupColor(normPixelDist);

        float maxAlpha = .6f + .4f * (1.f - p.z);
        maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
        alpha = maxAlpha * expf(-1.f * falloffScale * normPixelDist * normPixelDist);

    } else {
        // simple: each circle has an assigned color
        int index3 = 3 * circleIndex;
        rgb = *(float3*)&(cuConstRendererParams.color[index3]);
        alpha = .5f;
    }

    float oneMinusAlpha = 1.f - alpha;

    // BEGIN SHOULD-BE-ATOMIC REGION
    // read-modify-write of the pixel is not atomic; concurrent
    // contributions from different threads can race (known limitation)
    float4 existingColor = *imagePtr;
    float4 newColor;
    newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x;
    newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y;
    newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z;
    newColor.w = alpha + existingColor.w;

    // global memory write
    *imagePtr = newColor;
    // END SHOULD-BE-ATOMIC REGION
}
// kernelRenderCircles -- (CUDA device code)
//
// Each thread block renders one rectangular region of the image: the
// block loops over all circles in order, culls circles that cannot
// overlap its region, and its threads cooperatively shade the region's
// pixels. Circle blend order is preserved because the circle loop is
// outermost and the block syncs between circles.
__global__ void kernelRenderCircles(int imageWidth, int imageHeight) {

    float invWidth = 1.f / imageWidth;
    float invHeight = 1.f / imageHeight;

    // this block's pixel region; the last row/column of blocks absorbs
    // the remainder so every pixel is covered even when the image size
    // is not divisible by the grid size (previously remainder pixels
    // were never shaded)
    int regionW = imageWidth / gridDim.x;
    int regionH = imageHeight / gridDim.y;
    int startX = blockIdx.x * regionW;
    int startY = blockIdx.y * regionH;
    int endX = (blockIdx.x == gridDim.x - 1) ? imageWidth  : startX + regionW;
    int endY = (blockIdx.y == gridDim.y - 1) ? imageHeight : startY + regionH;

    // this block's region in normalized [0,1] coordinates, for culling
    float boxL = static_cast<float>(blockIdx.x) / gridDim.x;
    float boxR = static_cast<float>(blockIdx.x + 1) / gridDim.x;
    float boxB = static_cast<float>(blockIdx.y) / gridDim.y;
    float boxT = static_cast<float>(blockIdx.y + 1) / gridDim.y;

    for (int index = 0; index < cuConstRendererParams.numCircles; index++) {
        int index3 = 3 * index;
        float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
        float rad = cuConstRendererParams.radius[index];

        // conservative overlap test; uniform across the block (depends
        // only on blockIdx), so all threads take the same branch
        int circleInBox = circleInBoxConservative(p.x, p.y, rad, boxL, boxR, boxT, boxB);

        // BUG FIX: this was 'return', which terminated the thread and
        // silently skipped every remaining circle for this region.
        // (The per-block debug printf spam was also removed.)
        if (circleInBox == 0)
            continue;

        // block-stride loops over the region's pixels
        for (int x = startX + threadIdx.x; x < endX; x += blockDim.x) {
            for (int y = startY + threadIdx.y; y < endY; y += blockDim.y) {
                float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (y * imageWidth + x)]);
                float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(x) + 0.5f),
                                                     invHeight * (static_cast<float>(y) + 0.5f));
                shadePixel(index, pixelCenterNorm, p, imgPtr);
            }
        }
        // keep the block in lockstep between circles so contributions
        // blend in circle order (safe: the cull branch above is uniform)
        __syncthreads();
    }
}
////////////////////////////////////////////////////////////////////////////////////////
// Construct an empty renderer: no scene loaded, no host arrays, and no
// device buffers.  loadScene() and setup() populate everything later.
CudaRenderer::CudaRenderer()
    : image(NULL),
      numCircles(0),
      position(NULL),
      velocity(NULL),
      color(NULL),
      radius(NULL),
      cudaDevicePosition(NULL),
      cudaDeviceVelocity(NULL),
      cudaDeviceColor(NULL),
      cudaDeviceRadius(NULL),
      cudaDeviceImageData(NULL) {
}
// Release the output image, the host-side scene arrays, and all device
// allocations.  delete, delete[], and cudaFree are all no-ops on null
// pointers, so no explicit guards are required and partially-initialized
// renderers are handled correctly.
CudaRenderer::~CudaRenderer() {
    delete image;
    delete [] position;
    delete [] velocity;
    delete [] color;
    delete [] radius;
    cudaFree(cudaDevicePosition);
    cudaFree(cudaDeviceVelocity);
    cudaFree(cudaDeviceColor);
    cudaFree(cudaDeviceRadius);
    cudaFree(cudaDeviceImageData);
}
// getImage --
//
// Copy the rendered frame out of device memory into the host-side Image
// object before exposing it to the caller.  The blocking cudaMemcpy also
// waits for any in-flight work on the default stream.
const Image*
CudaRenderer::getImage() {
    printf("Copying image data from device\n");
    const size_t imageBytes = sizeof(float) * 4 * image->width * image->height;
    cudaMemcpy(image->data, cudaDeviceImageData, imageBytes, cudaMemcpyDeviceToHost);
    return image;
}
void
CudaRenderer::loadScene(SceneName scene) {
// Record which scene is being rendered and populate the HOST-side circle
// arrays (position/velocity/color/radius) plus numCircles.  The device
// copies are made later, in setup().
sceneName = scene;
loadCircleScene(sceneName, numCircles, position, velocity, color, radius);
}
// setup --
//
// One-time GPU initialization: report the available devices, copy the
// host-side scene data into freshly-allocated device buffers, and publish
// the renderer parameters plus noise/color lookup tables into constant
// memory.  Must be called after loadScene() and allocOutputImage().
void
CudaRenderer::setup() {
    int deviceCount = 0;
    std::string name;
    cudaError_t err = cudaGetDeviceCount(&deviceCount);
    // BUG FIX: `err` was captured but never inspected; report a failure
    // instead of silently printing "Found 0 CUDA devices".
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
    }
    printf("---------------------------------------------------------\n");
    printf("Initializing CUDA for CudaRenderer\n");
    printf("Found %d CUDA devices\n", deviceCount);
    for (int i=0; i<deviceCount; i++) {
        cudaDeviceProp deviceProps;
        cudaGetDeviceProperties(&deviceProps, i);
        name = deviceProps.name;
        printf("Device %d: %s\n", i, deviceProps.name);
        printf(" SMs: %d\n", deviceProps.multiProcessorCount);
        printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
        printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
    }
    printf("---------------------------------------------------------\n");

    // By this time the scene should be loaded.  Now copy all the key
    // data structures into device memory so they are accessible to
    // CUDA kernels.
    cudaMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
    cudaMalloc(&cudaDeviceVelocity, sizeof(float) * 3 * numCircles);
    cudaMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
    cudaMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
    cudaMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);

    cudaMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
    cudaMemcpy(cudaDeviceVelocity, velocity, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
    cudaMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
    cudaMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, cudaMemcpyHostToDevice);

    // Publish all kernel parameters in constant memory (read-only, fast
    // broadcast access from every kernel in this file).
    GlobalConstants params;
    params.sceneName = sceneName;
    params.numCircles = numCircles;
    params.imageWidth = image->width;
    params.imageHeight = image->height;
    params.position = cudaDevicePosition;
    params.velocity = cudaDeviceVelocity;
    params.color = cudaDeviceColor;
    params.radius = cudaDeviceRadius;
    params.imageData = cudaDeviceImageData;
    // BUG FIX: the second argument had been mangled to "¶ms" (an HTML
    // entity artifact for "&params"); restore the address-of expression.
    cudaMemcpyToSymbol(cuConstRendererParams, &params, sizeof(GlobalConstants));

    // Copy over the noise lookup tables so noise can be evaluated on the
    // GPU (used by the snowflake animation).
    int* permX;
    int* permY;
    float* value1D;
    getNoiseTables(&permX, &permY, &value1D);
    cudaMemcpyToSymbol(cuConstNoiseXPermutationTable, permX, sizeof(int) * 256);
    cudaMemcpyToSymbol(cuConstNoiseYPermutationTable, permY, sizeof(int) * 256);
    cudaMemcpyToSymbol(cuConstNoise1DValueTable, value1D, sizeof(float) * 256);

    // Last, copy over the color ramp used by the snowflake shading path.
    float lookupTable[COLOR_MAP_SIZE][3] = {
        {1.f, 1.f, 1.f},
        {1.f, 1.f, 1.f},
        {.8f, .9f, 1.f},
        {.8f, .9f, 1.f},
        {.8f, 0.8f, 1.f},
    };
    cudaMemcpyToSymbol(cuConstColorRamp, lookupTable, sizeof(float) * 3 * COLOR_MAP_SIZE);
}
// allocOutputImage --
//
// Allocate the buffer the renderer will render into, first releasing any
// previously-allocated image (delete on a null pointer is a no-op, so no
// explicit guard is needed and no memory is leaked).
void
CudaRenderer::allocOutputImage(int width, int height) {
    delete image;
    image = new Image(width, height);
}
// clearImage --
//
// Clear's the renderer's target image. The state of the image after
// the clear depends on the scene being rendered.
void
CudaRenderer::clearImage() {
// Clear the device-side image with one thread per pixel.  The snowflake
// scenes clear to a background gradient; everything else clears to white.
// 256 threads per block is a healthy number
dim3 blockDim(16, 16, 1);
// Ceiling division so the grid covers images whose dimensions are not
// multiples of the block size (the clear kernels are expected to bounds
// check -- confirm in their definitions).
dim3 gridDim(
(image->width + blockDim.x - 1) / blockDim.x,
(image->height + blockDim.y - 1) / blockDim.y);
if (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME) {
kernelClearImageSnowflake<<<gridDim, blockDim>>>();
} else {
kernelClearImage<<<gridDim, blockDim>>>(1.f, 1.f, 1.f, 1.f);
}
// Block until the clear finishes so subsequent rendering starts from a
// fully-cleared frame.
cudaDeviceSynchronize();
}
// advanceAnimation --
//
// Advance the simulation one time step on the GPU, updating all circle
// positions and velocities.  Only some scenes are animated; for all
// others this is a no-op.  Blocks until the update kernel completes.
void
CudaRenderer::advanceAnimation() {
    // 256 threads per block is a healthy number
    dim3 blockDim(256, 1);
    dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);

    switch (sceneName) {
    case SNOWFLAKES:
        kernelAdvanceSnowflake<<<gridDim, blockDim>>>();
        break;
    case BOUNCING_BALLS:
        kernelAdvanceBouncingBalls<<<gridDim, blockDim>>>();
        break;
    case HYPNOSIS:
        kernelAdvanceHypnosis<<<gridDim, blockDim>>>();
        break;
    case FIREWORKS:
        kernelAdvanceFireWorks<<<gridDim, blockDim>>>();
        break;
    default:
        break; // static scenes have no animation
    }
    cudaDeviceSynchronize();
}
// render --
//
// Launch the per-tile rendering kernel.  The image is partitioned into a
// gridDim.x x gridDim.y grid of tiles, and each thread block renders one
// tile (see kernelRenderCircles).
void
CudaRenderer::render() {
    // BUG FIX (per the in-file TODO): use int rather than short for the
    // image dimensions.  The kernel takes ints, and short would silently
    // overflow for images wider/taller than 32767 pixels.
    int imageWidth = image->width;
    int imageHeight = image->height;

    dim3 blockDim(16, 16);
    // Each block works on one portion of the image.  NOTE(review): the
    // kernel's tiling assumes image dimensions divisible by the grid
    // dimensions; 2x2 is safe for even-sized images -- confirm inputs.
    dim3 gridDim(2, 2);
    kernelRenderCircles<<<gridDim, blockDim>>>(imageWidth, imageHeight);
    gpuErrchk(cudaDeviceSynchronize());
}
|
439b6e422ffaa0eb996f707b73f62d853fea155d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "layer_include/vision_layers.hpp"
#include "alternative/device_alternative.hpp"
// serial computions are splitted in parallel as
// num*channels*pooling_height*pooling_height units
// it is a highly efficient parallel splitted algorithm
// MaxPoolForward: one thread per OUTPUT element.  Each thread decomposes
// its flat output index into (n, c, ph, pw), scans the corresponding
// kernel window in the input map, and records both the max value and the
// flat (h*width + w) index where it occurred.  Exactly one of `mask`
// (int argmax buffer) / `top_mask` (Dtype argmax buffer, used when the
// mask is exposed as a top blob) is expected to be non-NULL.
// NOTE(review): CUDA_KERNEL_LOOP is assumed to iterate idx over [0, n)
// across the whole grid (Caffe-style macro) -- confirm its definition.
template<typename Dtype>
__global__ void MaxPoolForward(const int n, const Dtype* bottom_data, const int num, const int channels,
const int height, const int width, const int pooling_height, const int pooling_width,
const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* top_data, int* mask, Dtype* top_mask){
CUDA_KERNEL_LOOP(idx, n){
// Decompose the flat output index into (pn, pc, ph, pw).
const int pw = idx%pooling_width;
const int ph = (idx / pooling_width) % pooling_height;
const int pc = (idx / pooling_width / pooling_height) % channels;
const int pn = (idx / pooling_width / pooling_height / channels);
// Top-left corner of the pooling window in input coordinates (may be
// negative because of padding).
int start_h = ph*stride_h - pad_h;
int start_w = pw*stride_w - pad_w;
// clip the window to the valid input region
const int end_h = min(start_h + kernel_h, height);
const int end_w = min(start_w + kernel_w, width);
start_h = max(start_h, 0);
start_w = max(start_w, 0);
Dtype max_val = -FLT_MAX;
int max_idx = -1;
// base + offset(for num and channels)
// bottom_ptr pointer to a bottom map's base address
const Dtype* bottom_ptr = bottom_data + (pn*channels + pc)*height*width;
// scan for the max val
for (int h = start_h; h < end_h; h++){
for (int w = start_w; w < end_w; w++){
if (bottom_ptr[h*width+w] > max_val){
max_idx = h*width + w;
max_val = bottom_ptr[max_idx];
}
}
}
top_data[idx] = max_val;
// Store the argmax in whichever buffer was supplied (the top_mask
// branch stores the integer index as a Dtype, by implicit conversion).
if (mask) mask[idx] = max_idx;
else top_mask[idx] = max_idx;
}
}
// AvgPoolForward: one thread per OUTPUT element.  Each thread averages
// its kernel window.  Note that pooling_size is computed BEFORE clipping
// the window to the valid input region, so the divisor counts padded
// positions as well (Caffe-style average pooling with padding included).
template<typename Dtype>
__global__ void AvgPoolForward(const int n, const Dtype* bottom_data, const int num, const int channels,
const int height, const int width, const int pooling_height, const int pooling_width,
const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* top_data){
CUDA_KERNEL_LOOP(idx, n){
// Decompose the flat output index into (pn, pc, ph, pw).
const int pw = idx%pooling_width;
const int ph = (idx / pooling_width) % pooling_height;
const int pc = (idx / pooling_width / pooling_height) % channels;
const int pn = (idx / pooling_width / pooling_height / channels);
int start_h = ph*stride_h - pad_h;
int start_w = pw*stride_w - pad_w;
// Window end within the padded extent; the divisor is fixed here,
// before clipping, so it includes padded positions.
int end_h = min(start_h + kernel_h, height + pad_h);
int end_w = min(start_w + kernel_w, width + pad_w);
const int pooling_size = (end_h - start_h)*(end_w - start_w);
// clip the scan range to the valid input region
start_h = max(start_h, 0);
start_w = max(start_w, 0);
end_h = min(end_h, height);
end_w = min(end_w, width);
// base + offset(for num and channels)
// bottom_ptr pointer to a bottom map's base address
const Dtype* bottom_ptr = bottom_data + (pn*channels + pc)*height*width;
Dtype avg_val = 0;
// accumulate the window sum, then divide by the (padded) window size
for (int h = start_h; h < end_h; h++)
for (int w = start_w; w < end_w; w++)
avg_val+=bottom_ptr[h*width+w];
top_data[idx] = avg_val / pooling_size;
}
}
// forward_gpu: dispatch the pooling forward pass.  Launches one thread
// per top (output) element.  For MAX pooling the argmax indices are
// stored either in the second top blob (when the net requests the mask)
// or in the layer's internal max_idx buffer, for use in backward_gpu.
// Layer state (channels/height/width/kernel/stride/pad) is read from
// members set up elsewhere in the class.
template<typename Dtype>
void PoolingLayer<Dtype>::forward_gpu(const vector<Blob<Dtype>*> &bottom, const vector<Blob<Dtype>*> &top){
PoolingParameter pool_param = param.pooling_param();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int top_count = top[0]->count();
// A second top blob means the caller wants the argmax mask exposed.
const bool use_top_mask = top.size() > 1;
int *mask = NULL;
Dtype *top_mask = NULL;
switch (pool_param.method()){
case PoolingParameter_Method_MAX:
if (use_top_mask) top_mask = top[1]->mutable_gpu_data();
else mask = max_idx.mutable_gpu_data();
MaxPoolForward<Dtype> << <GET_BLOCKS(top_count), CUDA_NUM_THREADS >> >(
top_count, bottom_data, bottom[0]->num(), channels, height, width,
pooling_height, pooling_width, kernel_h, kernel_w, stride_h, stride_w,
pad_h, pad_w, top_data, mask, top_mask);
break;
case PoolingParameter_Method_AVG:
AvgPoolForward<Dtype> << <GET_BLOCKS(top_count), CUDA_NUM_THREADS >> >(
top_count, bottom_data, bottom[0]->num(), channels, height, width,
pooling_height, pooling_width, kernel_h, kernel_w, stride_h, stride_w,
pad_h, pad_w, top_data);
break;
case PoolingParameter_Method_STOCHASTIC:
NOT_IMPLEMENTED;
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
// Surface any launch/execution error from the kernels above.
CUDA_POST_KERNEL_CHECK;
}
// MaxPoolBackward: one thread per BOTTOM (input) element.  Each thread
// enumerates every pooling window that could contain its element
// (windows may overlap) and accumulates the top gradient for each window
// whose recorded argmax equals this element's flat index.  Exactly one
// of `mask` / `top_mask` is non-NULL (int vs Dtype argmax storage).
template<typename Dtype>
__global__ void MaxPoolBackward(const int n, const Dtype* top_diff, const int num, const int channels,
const int height, const int width, const int pooling_height, const int pooling_width,
const int kernel_h, const int kernel_w, const int stride_h, const int stride_w,
const int pad_h, const int pad_w, Dtype* bottom_diff, const int* mask, const Dtype* top_mask){
CUDA_KERNEL_LOOP(idx, n){
// Decompose the flat bottom index into (n, c, h, w).
const int w = idx%width;
const int h = (idx / width) % height;
const int c = (idx / width / height) % channels;
// NOTE(review): this local `n` shadows the kernel parameter `n`
// (the element count).  Legal, but worth renaming.
const int n = idx / width / height / channels;
// allow overlapping: range of windows (ph, pw) covering (h, w)
const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
const int start_pw = (w + pad_w<kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
// allow clip
const int end_ph = min((h + pad_h) / stride_h + 1, pooling_height);
const int end_pw = min((w + pad_w) / stride_w + 1, pooling_width);
Dtype diff = 0;
const int offset = (n*channels + c)*pooling_height*pooling_width;
const Dtype* top_ptr = top_diff + offset;
if (mask){
const int* mask_ptr = mask + offset;
for (int ph = start_ph; ph < end_ph; ph++)
for (int pw = start_pw; pw < end_pw; pw++)
// only windows whose max was exactly this element contribute
if (mask_ptr[ph*pooling_width + pw] == (h*width + w))
diff += top_ptr[ph*pooling_width + pw];
}else{
const Dtype* mask_ptr = top_mask + offset;
for (int ph = start_ph; ph < end_ph; ph++)
for (int pw = start_pw; pw < end_pw; pw++)
if (mask_ptr[ph*pooling_width + pw] == (h*width + w))
diff += top_ptr[ph*pooling_width + pw];
}
bottom_diff[idx] = diff;
}
}
// AvgPoolBackward: one thread per BOTTOM (input) element.  Each thread
// enumerates every pooling window containing its element and accumulates
// that window's top gradient divided by the window's (padding-inclusive)
// pooling size, mirroring AvgPoolForward's divisor.
template<typename Dtype>
__global__ void AvgPoolBackward(const int n, const Dtype* top_diff, const int num, const int channels,
const int height, const int width, const int pooling_height, const int pooling_width,
const int kernel_h, const int kernel_w, const int stride_h, const int stride_w,
const int pad_h, const int pad_w, Dtype* bottom_diff){
CUDA_KERNEL_LOOP(idx, n){
// Decompose the flat bottom index into (n, c, h, w).
const int w = idx%width;
const int h = (idx / width) % height;
const int c = (idx / width / height) % channels;
// NOTE(review): this local `n` shadows the kernel parameter `n`.
const int n = idx / width / height / channels;
// allow overlapping
const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
const int start_pw = (w + pad_w<kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
// allow clip
// note that use 'h / stride_h + 1' but not '(h + pad_h) / stride_h + 1'
// will ignore pad when average(???)
const int end_ph = min(h / stride_h + 1, pooling_height);
const int end_pw = min(w / stride_w + 1, pooling_width);
Dtype diff = 0;
const Dtype* top_ptr = top_diff+(n*channels + c)*pooling_height*pooling_width;
for (int ph = start_ph; ph < end_ph; ph++)
for (int pw = start_pw; pw < end_pw; pw++){
// must compute pooling size per window (same formula as forward:
// window end within the padded extent, before clipping)
int start_h = ph*stride_h - pad_h;
int start_w = pw*stride_w - pad_w;
int end_h = min(start_h + kernel_h, height + pad_h);
int end_w = min(start_w + kernel_w, width + pad_w);
int pooling_size = (end_h - start_h)*(end_w - start_w);
diff += (top_ptr[ph*pooling_width + pw] / pooling_size);
}
bottom_diff[idx] = diff;
}
}
// backward_gpu: dispatch the pooling backward pass.  Launches one thread
// per bottom (input) element.  For MAX pooling the argmax mask saved by
// forward_gpu (either the second top blob or the internal max_idx
// buffer) selects which elements receive gradient.
template<typename Dtype>
void PoolingLayer<Dtype>::backward_gpu(const vector<Blob<Dtype>*> &top,
const vector<bool> &data_need_bp, const vector<Blob<Dtype>*> &bottom){
// pooling layer only compute data_diff; no parameters to update
if (!data_need_bp[0]) return;
PoolingParameter pool_param = param.pooling_param();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_count = bottom[0]->count();
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
switch (pool_param.method()){
case PoolingParameter_Method_MAX:
if (use_top_mask) top_mask = top[1]->gpu_data();
else mask = max_idx.gpu_data();
MaxPoolBackward<Dtype> << <GET_BLOCKS(bottom_count), CUDA_NUM_THREADS >> >(
bottom_count, top_diff, bottom[0]->num(), channels, height, width,
pooling_height, pooling_width, kernel_h, kernel_w, stride_h, stride_w,
pad_h, pad_w, bottom_diff, mask, top_mask);
break;
case PoolingParameter_Method_AVG:
AvgPoolBackward<Dtype> << <GET_BLOCKS(bottom_count), CUDA_NUM_THREADS >> >(
bottom_count, top_diff, bottom[0]->num(), channels, height, width,
pooling_height, pooling_width, kernel_h, kernel_w, stride_h, stride_w,
pad_h, pad_w, bottom_diff);
break;
case PoolingParameter_Method_STOCHASTIC:
NOT_IMPLEMENTED;
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
// Surface any launch/execution error from the kernels above.
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); | 439b6e422ffaa0eb996f707b73f62d853fea155d.cu | #include "layer_include/vision_layers.hpp"
#include "alternative/device_alternative.hpp"
// serial computions are splitted in parallel as
// num*channels*pooling_height*pooling_height units
// it is a highly efficient parallel splitted algorithm
// MaxPoolForward: one thread per OUTPUT element.  Each thread decomposes
// its flat output index into (n, c, ph, pw), scans the corresponding
// kernel window in the input map, and records both the max value and the
// flat (h*width + w) index where it occurred.  Exactly one of `mask`
// (int argmax buffer) / `top_mask` (Dtype argmax buffer, used when the
// mask is exposed as a top blob) is expected to be non-NULL.
// NOTE(review): CUDA_KERNEL_LOOP is assumed to iterate idx over [0, n)
// across the whole grid (Caffe-style macro) -- confirm its definition.
template<typename Dtype>
__global__ void MaxPoolForward(const int n, const Dtype* bottom_data, const int num, const int channels,
const int height, const int width, const int pooling_height, const int pooling_width,
const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* top_data, int* mask, Dtype* top_mask){
CUDA_KERNEL_LOOP(idx, n){
// Decompose the flat output index into (pn, pc, ph, pw).
const int pw = idx%pooling_width;
const int ph = (idx / pooling_width) % pooling_height;
const int pc = (idx / pooling_width / pooling_height) % channels;
const int pn = (idx / pooling_width / pooling_height / channels);
// Top-left corner of the pooling window in input coordinates (may be
// negative because of padding).
int start_h = ph*stride_h - pad_h;
int start_w = pw*stride_w - pad_w;
// clip the window to the valid input region
const int end_h = min(start_h + kernel_h, height);
const int end_w = min(start_w + kernel_w, width);
start_h = max(start_h, 0);
start_w = max(start_w, 0);
Dtype max_val = -FLT_MAX;
int max_idx = -1;
// base + offset(for num and channels)
// bottom_ptr pointer to a bottom map's base address
const Dtype* bottom_ptr = bottom_data + (pn*channels + pc)*height*width;
// scan for the max val
for (int h = start_h; h < end_h; h++){
for (int w = start_w; w < end_w; w++){
if (bottom_ptr[h*width+w] > max_val){
max_idx = h*width + w;
max_val = bottom_ptr[max_idx];
}
}
}
top_data[idx] = max_val;
// Store the argmax in whichever buffer was supplied (the top_mask
// branch stores the integer index as a Dtype, by implicit conversion).
if (mask) mask[idx] = max_idx;
else top_mask[idx] = max_idx;
}
}
// AvgPoolForward: one thread per OUTPUT element.  Each thread averages
// its kernel window.  Note that pooling_size is computed BEFORE clipping
// the window to the valid input region, so the divisor counts padded
// positions as well (Caffe-style average pooling with padding included).
template<typename Dtype>
__global__ void AvgPoolForward(const int n, const Dtype* bottom_data, const int num, const int channels,
const int height, const int width, const int pooling_height, const int pooling_width,
const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* top_data){
CUDA_KERNEL_LOOP(idx, n){
// Decompose the flat output index into (pn, pc, ph, pw).
const int pw = idx%pooling_width;
const int ph = (idx / pooling_width) % pooling_height;
const int pc = (idx / pooling_width / pooling_height) % channels;
const int pn = (idx / pooling_width / pooling_height / channels);
int start_h = ph*stride_h - pad_h;
int start_w = pw*stride_w - pad_w;
// Window end within the padded extent; the divisor is fixed here,
// before clipping, so it includes padded positions.
int end_h = min(start_h + kernel_h, height + pad_h);
int end_w = min(start_w + kernel_w, width + pad_w);
const int pooling_size = (end_h - start_h)*(end_w - start_w);
// clip the scan range to the valid input region
start_h = max(start_h, 0);
start_w = max(start_w, 0);
end_h = min(end_h, height);
end_w = min(end_w, width);
// base + offset(for num and channels)
// bottom_ptr pointer to a bottom map's base address
const Dtype* bottom_ptr = bottom_data + (pn*channels + pc)*height*width;
Dtype avg_val = 0;
// accumulate the window sum, then divide by the (padded) window size
for (int h = start_h; h < end_h; h++)
for (int w = start_w; w < end_w; w++)
avg_val+=bottom_ptr[h*width+w];
top_data[idx] = avg_val / pooling_size;
}
}
// forward_gpu: dispatch the pooling forward pass.  Launches one thread
// per top (output) element.  For MAX pooling the argmax indices are
// stored either in the second top blob (when the net requests the mask)
// or in the layer's internal max_idx buffer, for use in backward_gpu.
// Layer state (channels/height/width/kernel/stride/pad) is read from
// members set up elsewhere in the class.
template<typename Dtype>
void PoolingLayer<Dtype>::forward_gpu(const vector<Blob<Dtype>*> &bottom, const vector<Blob<Dtype>*> &top){
PoolingParameter pool_param = param.pooling_param();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int top_count = top[0]->count();
// A second top blob means the caller wants the argmax mask exposed.
const bool use_top_mask = top.size() > 1;
int *mask = NULL;
Dtype *top_mask = NULL;
switch (pool_param.method()){
case PoolingParameter_Method_MAX:
if (use_top_mask) top_mask = top[1]->mutable_gpu_data();
else mask = max_idx.mutable_gpu_data();
MaxPoolForward<Dtype> << <GET_BLOCKS(top_count), CUDA_NUM_THREADS >> >(
top_count, bottom_data, bottom[0]->num(), channels, height, width,
pooling_height, pooling_width, kernel_h, kernel_w, stride_h, stride_w,
pad_h, pad_w, top_data, mask, top_mask);
break;
case PoolingParameter_Method_AVG:
AvgPoolForward<Dtype> << <GET_BLOCKS(top_count), CUDA_NUM_THREADS >> >(
top_count, bottom_data, bottom[0]->num(), channels, height, width,
pooling_height, pooling_width, kernel_h, kernel_w, stride_h, stride_w,
pad_h, pad_w, top_data);
break;
case PoolingParameter_Method_STOCHASTIC:
NOT_IMPLEMENTED;
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
// Surface any launch/execution error from the kernels above.
CUDA_POST_KERNEL_CHECK;
}
// MaxPoolBackward: one thread per BOTTOM (input) element.  Each thread
// enumerates every pooling window that could contain its element
// (windows may overlap) and accumulates the top gradient for each window
// whose recorded argmax equals this element's flat index.  Exactly one
// of `mask` / `top_mask` is non-NULL (int vs Dtype argmax storage).
template<typename Dtype>
__global__ void MaxPoolBackward(const int n, const Dtype* top_diff, const int num, const int channels,
const int height, const int width, const int pooling_height, const int pooling_width,
const int kernel_h, const int kernel_w, const int stride_h, const int stride_w,
const int pad_h, const int pad_w, Dtype* bottom_diff, const int* mask, const Dtype* top_mask){
CUDA_KERNEL_LOOP(idx, n){
// Decompose the flat bottom index into (n, c, h, w).
const int w = idx%width;
const int h = (idx / width) % height;
const int c = (idx / width / height) % channels;
// NOTE(review): this local `n` shadows the kernel parameter `n`
// (the element count).  Legal, but worth renaming.
const int n = idx / width / height / channels;
// allow overlapping: range of windows (ph, pw) covering (h, w)
const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
const int start_pw = (w + pad_w<kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
// allow clip
const int end_ph = min((h + pad_h) / stride_h + 1, pooling_height);
const int end_pw = min((w + pad_w) / stride_w + 1, pooling_width);
Dtype diff = 0;
const int offset = (n*channels + c)*pooling_height*pooling_width;
const Dtype* top_ptr = top_diff + offset;
if (mask){
const int* mask_ptr = mask + offset;
for (int ph = start_ph; ph < end_ph; ph++)
for (int pw = start_pw; pw < end_pw; pw++)
// only windows whose max was exactly this element contribute
if (mask_ptr[ph*pooling_width + pw] == (h*width + w))
diff += top_ptr[ph*pooling_width + pw];
}else{
const Dtype* mask_ptr = top_mask + offset;
for (int ph = start_ph; ph < end_ph; ph++)
for (int pw = start_pw; pw < end_pw; pw++)
if (mask_ptr[ph*pooling_width + pw] == (h*width + w))
diff += top_ptr[ph*pooling_width + pw];
}
bottom_diff[idx] = diff;
}
}
// AvgPoolBackward: one thread per BOTTOM (input) element.  Each thread
// enumerates every pooling window containing its element and accumulates
// that window's top gradient divided by the window's (padding-inclusive)
// pooling size, mirroring AvgPoolForward's divisor.
template<typename Dtype>
__global__ void AvgPoolBackward(const int n, const Dtype* top_diff, const int num, const int channels,
const int height, const int width, const int pooling_height, const int pooling_width,
const int kernel_h, const int kernel_w, const int stride_h, const int stride_w,
const int pad_h, const int pad_w, Dtype* bottom_diff){
CUDA_KERNEL_LOOP(idx, n){
// Decompose the flat bottom index into (n, c, h, w).
const int w = idx%width;
const int h = (idx / width) % height;
const int c = (idx / width / height) % channels;
// NOTE(review): this local `n` shadows the kernel parameter `n`.
const int n = idx / width / height / channels;
// allow overlapping
const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
const int start_pw = (w + pad_w<kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
// allow clip
// note that use 'h / stride_h + 1' but not '(h + pad_h) / stride_h + 1'
// will ignore pad when average(???)
const int end_ph = min(h / stride_h + 1, pooling_height);
const int end_pw = min(w / stride_w + 1, pooling_width);
Dtype diff = 0;
const Dtype* top_ptr = top_diff+(n*channels + c)*pooling_height*pooling_width;
for (int ph = start_ph; ph < end_ph; ph++)
for (int pw = start_pw; pw < end_pw; pw++){
// must compute pooling size per window (same formula as forward:
// window end within the padded extent, before clipping)
int start_h = ph*stride_h - pad_h;
int start_w = pw*stride_w - pad_w;
int end_h = min(start_h + kernel_h, height + pad_h);
int end_w = min(start_w + kernel_w, width + pad_w);
int pooling_size = (end_h - start_h)*(end_w - start_w);
diff += (top_ptr[ph*pooling_width + pw] / pooling_size);
}
bottom_diff[idx] = diff;
}
}
// backward_gpu: dispatch the pooling backward pass.  Launches one thread
// per bottom (input) element.  For MAX pooling the argmax mask saved by
// forward_gpu (either the second top blob or the internal max_idx
// buffer) selects which elements receive gradient.
template<typename Dtype>
void PoolingLayer<Dtype>::backward_gpu(const vector<Blob<Dtype>*> &top,
const vector<bool> &data_need_bp, const vector<Blob<Dtype>*> &bottom){
// pooling layer only compute data_diff; no parameters to update
if (!data_need_bp[0]) return;
PoolingParameter pool_param = param.pooling_param();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_count = bottom[0]->count();
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
switch (pool_param.method()){
case PoolingParameter_Method_MAX:
if (use_top_mask) top_mask = top[1]->gpu_data();
else mask = max_idx.gpu_data();
MaxPoolBackward<Dtype> << <GET_BLOCKS(bottom_count), CUDA_NUM_THREADS >> >(
bottom_count, top_diff, bottom[0]->num(), channels, height, width,
pooling_height, pooling_width, kernel_h, kernel_w, stride_h, stride_w,
pad_h, pad_w, bottom_diff, mask, top_mask);
break;
case PoolingParameter_Method_AVG:
AvgPoolBackward<Dtype> << <GET_BLOCKS(bottom_count), CUDA_NUM_THREADS >> >(
bottom_count, top_diff, bottom[0]->num(), channels, height, width,
pooling_height, pooling_width, kernel_h, kernel_w, stride_h, stride_w,
pad_h, pad_w, bottom_diff);
break;
case PoolingParameter_Method_STOCHASTIC:
NOT_IMPLEMENTED;
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
// Surface any launch/execution error from the kernels above.
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); |
2f926c3483f58efd41ae1725667bbdaf4a894805.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/native/hip/FlashNeuron.h>
#include <ATen/Context.h>
#define find(n) (32 * (unsigned int)(n / 1024) + (n % 32))
#define mask(n) (0x80000000 >> (unsigned int)((n % 1024) / 32))
#define FLAG_OFFLOAD (1U << 0)
#define FLAG_FP16 (1U << 1)
#define FLAG_CSR (1U << 2)
#define FLAG_SSD (1U << 3)
#define FLAG_TESLA (1U << 4)
#define FLAG_RAID0 (1U << 5)
#define FLAG_DEBUG (1U << 6)
// 7~11 bit will be used for arc_vm (device) cudamalloc size
#define DEVSIZE_MASK (0x00000F80)
// 12~16 bit will be used for arc_vm (p2p) cudamalloc size
#define TMPSIZE_MASK (0x0001F000)
#define FLAG_TIMER (1U << 17)
#define BLK_SIZE ((size_t)1 << 12)
namespace at {
namespace native {
using namespace at::cuda;
FN_manager FN_mngt;
// Construct an inert manager: all feature flags off, no memory arenas
// sized, and both ID counters at -1 ("none assigned yet").  Real
// configuration happens later in setting().
FN_manager::FN_manager():isTimer(false), isOffload(false), isFP16(false), isCSR(false),
isUsingSSD(false), isTesla(false), isDebug(false),
device_size(0), max_device(0), temporal_size(0), max_temporal(0),
global_tID(-1), global_oID(-1) {
}
// NOTE(review): the page tables allocated with new[] in setting()
// (device_table, device_page_map, temporal_table, temporal_page_map) are
// never released here -- confirm whether the global FN_mngt instance is
// intentionally process-lifetime only.
FN_manager::~FN_manager() {
}
// setting -- decode the packed configuration word and initialise the
// manager.  Bits 7-11 (DEVSIZE_MASK) encode the device arena size in GB
// and bits 12-16 (TMPSIZE_MASK) the temporal (p2p) arena size in GB; the
// remaining bits are feature flags.  Several feature flags (FP16 / CSR /
// SSD) also imply offloading.
void FN_manager::setting(int flags) {
    const uint64_t dev_gb = (flags & DEVSIZE_MASK) >> 7;
    device_size = dev_gb << 30;           // GB -> bytes
    max_device = device_size / BLK_SIZE;  // number of 4 KB pages

    const uint64_t tmp_gb = (flags & TMPSIZE_MASK) >> 12;
    temporal_size = tmp_gb << 30;
    max_temporal = temporal_size / BLK_SIZE;

    if (dev_gb > 0) {
        // Zeroed per-page occupancy table and page map for the device arena.
        device_table = new short[max_device];
        memset(device_table, 0, sizeof(short) * max_device);
        device_page_map = new unsigned int[max_device];
        memset(device_page_map, 0, sizeof(unsigned int) * max_device);
    }
    if (tmp_gb > 0) {
        // Same bookkeeping for the temporal (p2p) arena.
        temporal_table = new short[max_temporal];
        memset(temporal_table, 0, sizeof(short) * max_temporal);
        temporal_page_map = new unsigned int[max_temporal];
        memset(temporal_page_map, 0, sizeof(unsigned int) * max_temporal);
    }

    if (flags & FLAG_TIMER) {
        printf("Timer profiler set\n");
        isTimer = true;
    }
    if (flags & FLAG_OFFLOAD) {
        printf("Offload flag set\n");
        isOffload = true;
    }
    if (flags & FLAG_FP16) {
        printf("FP16 flag set\n");
        isOffload = true;
        isFP16 = true;
    }
    if (flags & FLAG_CSR) {
        printf("CSR flag set\n");
        isOffload = true;
        isCSR = true;
    }
    if (flags & FLAG_TESLA) {
        printf("Tesla GPU flag set\n");
        isTesla = true;
    }
    if (flags & FLAG_DEBUG) {
        printf("Debug mode on\n");
        isDebug = true;
    }
    if (flags & FLAG_SSD) {
        printf("SSD flag set\n");
        isOffload = true;
        isUsingSSD = true;
    }
}
// Operation ID assignment
// Operation ID counter: getOid() returns the most recently assigned id
// (-1 when none), setOid() advances it, resetOid() restarts numbering.
int FN_manager::getOid() { return global_oID; }
void FN_manager::setOid() { global_oID++; }
void FN_manager::resetOid() { global_oID = -1; }
// Tensor ID assignment
// Tensor ID counter: same scheme as the operation IDs above (-1 = none
// assigned yet).
int FN_manager::getTid() { return global_tID; }
void FN_manager::setTid() { global_tID++; }
void FN_manager::resetTid() { global_tID = -1; }
// Flag check functions
// Read-only accessors for the configuration flags decoded in setting().
// NOTE(review): isTesla has no accessor here -- confirm whether
// is_tesla() is declared/defined elsewhere.
bool FN_manager::is_timer() { return isTimer; }
bool FN_manager::is_offload() { return isOffload; }
bool FN_manager::is_fp16() { return isFP16; }
bool FN_manager::is_csr() { return isCSR; }
bool FN_manager::is_using_ssd() { return isUsingSSD; }
bool FN_manager::is_debug() { return isDebug; }
}} // at::native
| 2f926c3483f58efd41ae1725667bbdaf4a894805.cu | #include <ATen/native/cuda/FlashNeuron.h>
#include <ATen/Context.h>
#define find(n) (32 * (unsigned int)(n / 1024) + (n % 32))
#define mask(n) (0x80000000 >> (unsigned int)((n % 1024) / 32))
#define FLAG_OFFLOAD (1U << 0)
#define FLAG_FP16 (1U << 1)
#define FLAG_CSR (1U << 2)
#define FLAG_SSD (1U << 3)
#define FLAG_TESLA (1U << 4)
#define FLAG_RAID0 (1U << 5)
#define FLAG_DEBUG (1U << 6)
// 7~11 bit will be used for arc_vm (device) cudamalloc size
#define DEVSIZE_MASK (0x00000F80)
// 12~16 bit will be used for arc_vm (p2p) cudamalloc size
#define TMPSIZE_MASK (0x0001F000)
#define FLAG_TIMER (1U << 17)
#define BLK_SIZE ((size_t)1 << 12)
namespace at {
namespace native {
using namespace at::cuda;
FN_manager FN_mngt;
// Construct an inert manager: all feature flags off, no memory arenas
// sized, and both ID counters at -1 ("none assigned yet").  Real
// configuration happens later in setting().
FN_manager::FN_manager():isTimer(false), isOffload(false), isFP16(false), isCSR(false),
isUsingSSD(false), isTesla(false), isDebug(false),
device_size(0), max_device(0), temporal_size(0), max_temporal(0),
global_tID(-1), global_oID(-1) {
}
// NOTE(review): the page tables allocated with new[] in setting()
// (device_table, device_page_map, temporal_table, temporal_page_map) are
// never released here -- confirm whether the global FN_mngt instance is
// intentionally process-lifetime only.
FN_manager::~FN_manager() {
}
// setting -- decode the packed configuration word and initialise the
// manager.  Bits 7-11 (DEVSIZE_MASK) encode the device arena size in GB
// and bits 12-16 (TMPSIZE_MASK) the temporal (p2p) arena size in GB; the
// remaining bits are feature flags.  Several feature flags (FP16 / CSR /
// SSD) also imply offloading.
void FN_manager::setting(int flags) {
    const uint64_t dev_gb = (flags & DEVSIZE_MASK) >> 7;
    device_size = dev_gb << 30;           // GB -> bytes
    max_device = device_size / BLK_SIZE;  // number of 4 KB pages

    const uint64_t tmp_gb = (flags & TMPSIZE_MASK) >> 12;
    temporal_size = tmp_gb << 30;
    max_temporal = temporal_size / BLK_SIZE;

    if (dev_gb > 0) {
        // Zeroed per-page occupancy table and page map for the device arena.
        device_table = new short[max_device];
        memset(device_table, 0, sizeof(short) * max_device);
        device_page_map = new unsigned int[max_device];
        memset(device_page_map, 0, sizeof(unsigned int) * max_device);
    }
    if (tmp_gb > 0) {
        // Same bookkeeping for the temporal (p2p) arena.
        temporal_table = new short[max_temporal];
        memset(temporal_table, 0, sizeof(short) * max_temporal);
        temporal_page_map = new unsigned int[max_temporal];
        memset(temporal_page_map, 0, sizeof(unsigned int) * max_temporal);
    }

    if (flags & FLAG_TIMER) {
        printf("Timer profiler set\n");
        isTimer = true;
    }
    if (flags & FLAG_OFFLOAD) {
        printf("Offload flag set\n");
        isOffload = true;
    }
    if (flags & FLAG_FP16) {
        printf("FP16 flag set\n");
        isOffload = true;
        isFP16 = true;
    }
    if (flags & FLAG_CSR) {
        printf("CSR flag set\n");
        isOffload = true;
        isCSR = true;
    }
    if (flags & FLAG_TESLA) {
        printf("Tesla GPU flag set\n");
        isTesla = true;
    }
    if (flags & FLAG_DEBUG) {
        printf("Debug mode on\n");
        isDebug = true;
    }
    if (flags & FLAG_SSD) {
        printf("SSD flag set\n");
        isOffload = true;
        isUsingSSD = true;
    }
}
// Operation ID assignment
// Operation ID counter: getOid() returns the most recently assigned id
// (-1 when none), setOid() advances it, resetOid() restarts numbering.
int FN_manager::getOid() { return global_oID; }
void FN_manager::setOid() { global_oID++; }
void FN_manager::resetOid() { global_oID = -1; }
// Tensor ID assignment
// Tensor ID counter: same scheme as the operation IDs above (-1 = none
// assigned yet).
int FN_manager::getTid() { return global_tID; }
void FN_manager::setTid() { global_tID++; }
void FN_manager::resetTid() { global_tID = -1; }
// Flag check functions
// Read-only accessors for the configuration flags decoded in setting().
// NOTE(review): isTesla has no accessor here -- confirm whether
// is_tesla() is declared/defined elsewhere.
bool FN_manager::is_timer() { return isTimer; }
bool FN_manager::is_offload() { return isOffload; }
bool FN_manager::is_fp16() { return isFP16; }
bool FN_manager::is_csr() { return isCSR; }
bool FN_manager::is_using_ssd() { return isUsingSSD; }
bool FN_manager::is_debug() { return isDebug; }
}} // at::native
|
0e14c527617e3290f26423315e084f8ed2f98aa5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 3
#define M 2
// Row-wise matrix addition: c = a + b for M x N row-major matrices.
// One thread per row; each thread sums all N elements of its row.
__global__ void add(int *a, int *b, int *c)
{
    int tid = threadIdx.x;
    // Bounds guard: main() launches N (=3) threads but there are only
    // M (=2) rows, so without this check thread 2 writes past c[M*N-1].
    if (tid < M)
        for (int i = 0; i < N; i++)
            c[tid * N + i] = a[tid * N + i] + b[tid * N + i];
}
// Reads two M x N integer matrices from stdin, adds them on the GPU,
// and prints the element-wise sums.
int main()
{
    // Host matrices (M rows x N cols), stored row-major.
    int a[M * N], b[M * N], c[M * N];
    // Device copies of a, b and c.
    int *d_a, *d_b, *d_c;
    int size = sizeof(int) * M * N;

    // Allocate device buffers.
    hipMalloc((void**)&d_a, size);
    hipMalloc((void**)&d_b, size);
    hipMalloc((void**)&d_c, size);

    // Read the two input matrices.
    printf("Enter values for a: ");
    for (int i = 0; i < M; i++)
        for (int j = 0; j < N; j++)
            scanf("%d", &a[i * N + j]);
    printf("Enter values for b: ");
    for (int i = 0; i < M; i++)
        for (int j = 0; j < N; j++)
            scanf("%d", &b[i * N + j]);

    // Copy inputs to the device (arrays decay to pointers; '&a' was redundant).
    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);

    // Launch one thread per ROW: M threads, not N. The kernel indexes rows
    // by threadIdx.x, so launching N (=3) threads over M (=2) rows made the
    // extra thread write out of bounds.
    hipLaunchKernelGGL(add, dim3(1), dim3(M), 0, 0, d_a, d_b, d_c);

    // Blocking copy of the result back to the host (synchronizes the kernel).
    hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);

    // Print the element-wise sums.
    for (int i = 0; i < M; i++)
    {
        for (int j = 0; j < N; j++)
            printf("%d + %d = %d\n", a[i * N + j], b[i * N + j], c[i * N + j]);
    }

    // Cleanup.
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    return 0;
}
| 0e14c527617e3290f26423315e084f8ed2f98aa5.cu | #include <stdio.h>
#define N 3
#define M 2
// Row-wise matrix addition: c = a + b for M x N row-major matrices.
// One thread per row; each thread sums all N elements of its row.
__global__ void add(int *a, int *b, int *c)
{
    int tid = threadIdx.x;
    // Bounds guard: main() launches N (=3) threads but there are only
    // M (=2) rows, so without this check thread 2 writes past c[M*N-1].
    if (tid < M)
        for (int i = 0; i < N; i++)
            c[tid * N + i] = a[tid * N + i] + b[tid * N + i];
}
// Reads two M x N integer matrices from stdin, adds them on the GPU,
// and prints the element-wise sums.
int main()
{
    // Host matrices (M rows x N cols), stored row-major.
    int a[M * N], b[M * N], c[M * N];
    // Device copies of a, b and c.
    int *d_a, *d_b, *d_c;
    int size = sizeof(int) * M * N;

    // Allocate device buffers.
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_c, size);

    // Read the two input matrices.
    printf("Enter values for a: ");
    for (int i = 0; i < M; i++)
        for (int j = 0; j < N; j++)
            scanf("%d", &a[i * N + j]);
    printf("Enter values for b: ");
    for (int i = 0; i < M; i++)
        for (int j = 0; j < N; j++)
            scanf("%d", &b[i * N + j]);

    // Copy inputs to the device (arrays decay to pointers; '&a' was redundant).
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // Launch one thread per ROW: M threads, not N. The kernel indexes rows
    // by threadIdx.x, so launching N (=3) threads over M (=2) rows made the
    // extra thread write out of bounds.
    add<<<1, M>>>(d_a, d_b, d_c);

    // Blocking copy of the result back to the host (synchronizes the kernel).
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    // Print the element-wise sums.
    for (int i = 0; i < M; i++)
    {
        for (int j = 0; j < N; j++)
            printf("%d + %d = %d\n", a[i * N + j], b[i * N + j], c[i * N + j]);
    }

    // Cleanup.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
509c49c01a8ff1fed1bf9f95d20e3ae0c97e0df4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Equihash CUDA solver
// Copyright (c) 2016 John Tromp
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#define htole32(x) (x)
#define HAVE_DECL_HTOLE32 1
#include "../cpu_tromp/equi.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <functional>
#include <vector>
#include <iostream>
#include "eqcuda.hpp"
#include "blake2b.cu"
typedef uint16_t u16;
typedef uint64_t u64;
#ifndef RESTBITS
#define RESTBITS 4
#endif
// 2_log of number of buckets
#define BUCKBITS (DIGITBITS-RESTBITS)
#ifndef SAVEMEM
#if RESTBITS == 4
// can't save memory in such small buckets
#define SAVEMEM 1
#elif RESTBITS >= 8
// take advantage of law of large numbers (sum of 2^8 random numbers)
// this reduces (200,9) memory to under 144MB, with negligible discarding
#define SAVEMEM 9/14
#endif
#endif
// number of buckets
static const u32 NBUCKETS = 1 << BUCKBITS;
// 2_log of number of slots per bucket
static const u32 SLOTBITS = RESTBITS + 1 + 1;
static const u32 SLOTRANGE = 1 << SLOTBITS;
// number of slots per bucket
static const u32 NSLOTS = SLOTRANGE * SAVEMEM;
// SLOTBITS mask
static const u32 SLOTMASK = SLOTRANGE - 1;
// number of possible values of xhash (rest of n) bits
static const u32 NRESTS = 1 << RESTBITS;
// RESTBITS mask
static const u32 RESTMASK = NRESTS - 1;
// number of blocks of hashes extracted from single 512 bit blake2b output
static const u32 NBLOCKS = (NHASHES + HASHESPERBLAKE - 1) / HASHESPERBLAKE;
// nothing larger found in 100000 runs
static const u32 MAXSOLS = 10;
// tree node identifying its children as two different slots in
// a bucket on previous layer with the same rest bits (x-tra hash)
// Tree node identifying its two children as slots in a bucket of the previous
// layer. All fields are packed by hand into one u32:
//   with XINTREE:    [bucketid | slotid0 | slotid1 | xhash(RESTBITS)]
//   without XINTREE: [bucketid | slotid0 | slotid1]
struct tree {
u32 bid_s0_s1_x; // manual bitfields
// Leaf constructor: raw index plus extra-hash bits (XINTREE layout).
__device__ tree(const u32 idx, const u32 xh) {
bid_s0_s1_x = idx << RESTBITS | xh;
}
// Leaf constructor: raw index only.
__device__ tree(const u32 idx) {
bid_s0_s1_x = idx;
}
// Internal-node constructors. Note the deliberate structure: the two
// signatures share one closing brace, selected by the XINTREE #ifdef.
#ifdef XINTREE
__device__ tree(const u32 bid, const u32 s0, const u32 s1, const u32 xh) {
bid_s0_s1_x = ((((bid << SLOTBITS) | s0) << SLOTBITS) | s1) << RESTBITS | xh;
#else
__device__ tree(const u32 bid, const u32 s0, const u32 s1) {
bid_s0_s1_x = (((bid << SLOTBITS) | s0) << SLOTBITS) | s1;
#endif
}
// Raw leaf index (strips the xhash bits when XINTREE packs them in).
__device__ u32 getindex() const {
#ifdef XINTREE
return bid_s0_s1_x >> RESTBITS;
#else
return bid_s0_s1_x;
#endif
}
// Bucket id of the previous layer holding both child slots.
__device__ u32 bucketid() const {
#ifdef XINTREE
return bid_s0_s1_x >> (2 * SLOTBITS + RESTBITS);
#else
return bid_s0_s1_x >> (2 * SLOTBITS);
#endif
}
// First child slot. NB: '>> SLOTBITS + RESTBITS' parses as
// '>> (SLOTBITS + RESTBITS)' (+ binds tighter than >>), which matches the
// packing above -- the precedence is intentional here.
__device__ u32 slotid0() const {
#ifdef XINTREE
return (bid_s0_s1_x >> SLOTBITS + RESTBITS) & SLOTMASK;
#else
return (bid_s0_s1_x >> SLOTBITS) & SLOTMASK;
#endif
}
// Second child slot.
__device__ u32 slotid1() const {
#ifdef XINTREE
return (bid_s0_s1_x >> RESTBITS) & SLOTMASK;
#else
return bid_s0_s1_x & SLOTMASK;
#endif
}
// Extra-hash (rest) bits; only meaningful when XINTREE packs them in.
__device__ u32 xhash() const {
return bid_s0_s1_x & RESTMASK;
}
// Cheap probabilistic test that two subtrees use disjoint leaves:
// different bucket, or both slot ids differ.
__device__ bool prob_disjoint(const tree other) const {
tree xort(bid_s0_s1_x ^ other.bid_s0_s1_x);
return xort.bucketid() || (xort.slotid0() && xort.slotid1());
// next two tests catch much fewer cases and are therefore skipped
// && slotid0() != other.slotid1() && slotid1() != other.slotid0()
}
};
union hashunit {
u32 word;
uchar bytes[sizeof(u32)];
};
#define WORDS(bits) ((bits + 31) / 32)
#define HASHWORDS0 WORDS(WN - DIGITBITS + RESTBITS)
#define HASHWORDS1 WORDS(WN - 2*DIGITBITS + RESTBITS)
struct slot0 {
tree attr;
hashunit hash[HASHWORDS0];
};
struct slot1 {
tree attr;
hashunit hash[HASHWORDS1];
};
// a bucket is NSLOTS treenodes
typedef slot0 bucket0[NSLOTS];
typedef slot1 bucket1[NSLOTS];
// the N-bit hash consists of K+1 n-bit "digits"
// each of which corresponds to a layer of NBUCKETS buckets
typedef bucket0 digit0[NBUCKETS];
typedef bucket1 digit1[NBUCKETS];
// size (in bytes) of hash in round 0 <= r < WK
// Host-side copy of hashsize(): size (in bytes) of the partial hash that
// remains after round 0 <= r < WK. With XINTREE the rest bits live in the
// tree word, so they are excluded from the stored hash.
u32 hhashsize(const u32 r) {
#ifdef XINTREE
const u32 hashbits = WN - (r + 1) * DIGITBITS;
#else
const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS;
#endif
return (hashbits + 7) / 8; // round up to whole bytes
}
// size (in bytes) of hash in round 0 <= r < WK
// Device-side size (in bytes) of the partial hash remaining after round
// 0 <= r < WK; must stay in sync with host-side hhashsize() above.
__device__ u32 hashsize(const u32 r) {
#ifdef XINTREE
const u32 hashbits = WN - (r + 1) * DIGITBITS;
#else
const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS;
#endif
return (hashbits + 7) / 8; // round up to whole bytes
}
// Host-side: number of 32-bit hashunits needed to hold 'bytes' bytes.
u32 hhashwords(u32 bytes) {
return (bytes + 3) / 4;
}
// Device-side: number of 32-bit hashunits needed to hold 'bytes' bytes.
__device__ u32 hashwords(u32 bytes) {
return (bytes + 3) / 4;
}
// manages hash and tree data
struct htalloc {
bucket0 *trees0[(WK + 1) / 2];
bucket1 *trees1[WK / 2];
};
typedef u32 bsizes[NBUCKETS];
struct equi {
blake2b_state blake_ctx;
htalloc hta;
bsizes *nslots;
proof *sols;
u32 nsols;
u32 nthreads;
equi(const u32 n_threads) {
nthreads = n_threads;
}
void setheadernonce(const char *header, const u32 len, const char* nonce, const u32 nlen) {
setheader(&blake_ctx, header, len, nonce, nlen);
checkCudaErrors(hipMemset(nslots, 0, NBUCKETS * sizeof(u32)));
nsols = 0;
}
__device__ u32 getnslots0(const u32 bid) {
u32 &nslot = nslots[0][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
__device__ u32 getnslots1(const u32 bid) {
u32 &nslot = nslots[1][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
__device__ bool orderindices(u32 *indices, u32 size) {
if (indices[0] > indices[size]) {
for (u32 i = 0; i < size; i++) {
const u32 tmp = indices[i];
indices[i] = indices[size + i];
indices[size + i] = tmp;
}
}
return false;
}
__device__ bool listindices1(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[0][t.bucketid()];
const u32 size = 1 << 0;
indices[0] = buck[t.slotid0()].attr.getindex();
indices[size] = buck[t.slotid1()].attr.getindex();
orderindices(indices, size);
return false;
}
__device__ bool listindices2(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[0][t.bucketid()];
const u32 size = 1 << 1;
return listindices1(buck[t.slotid0()].attr, indices) ||
listindices1(buck[t.slotid1()].attr, indices + size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices3(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[1][t.bucketid()];
const u32 size = 1 << 2;
return listindices2(buck[t.slotid0()].attr, indices) ||
listindices2(buck[t.slotid1()].attr, indices + size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices4(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[1][t.bucketid()];
const u32 size = 1 << 3;
return listindices3(buck[t.slotid0()].attr, indices) ||
listindices3(buck[t.slotid1()].attr, indices + size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices5(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[2][t.bucketid()];
const u32 size = 1 << 4;
return listindices4(buck[t.slotid0()].attr, indices) ||
listindices4(buck[t.slotid1()].attr, indices + size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices6(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[2][t.bucketid()];
const u32 size = 1 << 5;
return listindices5(buck[t.slotid0()].attr, indices) ||
listindices5(buck[t.slotid1()].attr, indices + size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices7(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[3][t.bucketid()];
const u32 size = 1 << 6;
return listindices6(buck[t.slotid0()].attr, indices) ||
listindices6(buck[t.slotid1()].attr, indices + size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices8(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[3][t.bucketid()];
const u32 size = 1 << 7;
return listindices7(buck[t.slotid0()].attr, indices) ||
listindices7(buck[t.slotid1()].attr, indices + size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices9(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[4][t.bucketid()];
const u32 size = 1 << 8;
return listindices8(buck[t.slotid0()].attr, indices) ||
listindices8(buck[t.slotid1()].attr, indices + size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ void candidate(const tree t) {
proof prf;
#if WK==9
if (listindices9(t, prf)) return;
#elif WK==7
if (listindices7(t, prf)) return;
#elif WK==5
if (listindices5(t, prf)) return;
#else
#error not implemented
#endif
u32 soli = atomicAdd(&nsols, 1);
if (soli < MAXSOLS)
#if WK==9
listindices9(t, sols[soli]);
#elif WK==7
listindices7(t, sols[soli]);
#elif WK==5
listindices5(t, sols[soli]);
#else
#error not implemented
#endif
}
void showbsizes(u32 r) {
#if defined(HIST) || defined(SPARK) || defined(LOGSPARK)
u32 ns[NBUCKETS];
checkCudaErrors(hipMemcpy(ns, nslots[r & 1], NBUCKETS * sizeof(u32), hipMemcpyDeviceToHost));
u32 binsizes[65];
memset(binsizes, 0, 65 * sizeof(u32));
for (u32 bucketid = 0; bucketid < NBUCKETS; bucketid++) {
u32 bsize = min(ns[bucketid], NSLOTS) >> (SLOTBITS - 6);
binsizes[bsize]++;
}
for (u32 i = 0; i < 65; i++) {
#ifdef HIST
printf(" %d:%d", i, binsizes[i]);
#else
#ifdef SPARK
u32 sparks = binsizes[i] / SPARKSCALE;
#else
u32 sparks = 0;
for (u32 bs = binsizes[i]; bs; bs >>= 1) sparks++;
sparks = sparks * 7 / SPARKSCALE;
#endif
printf("\342\226%c", '\201' + sparks);
#endif
}
printf("\n");
#endif
}
struct htlayout {
htalloc hta;
u32 prevhashunits;
u32 nexthashunits;
u32 dunits;
u32 prevbo;
u32 nextbo;
__device__ htlayout(equi *eq, u32 r) : hta(eq->hta), prevhashunits(0), dunits(0) {
u32 nexthashbytes = hashsize(r);
nexthashunits = hashwords(nexthashbytes);
prevbo = 0;
nextbo = nexthashunits * sizeof(hashunit) - nexthashbytes; // 0-3
if (r) {
u32 prevhashbytes = hashsize(r - 1);
prevhashunits = hashwords(prevhashbytes);
prevbo = prevhashunits * sizeof(hashunit) - prevhashbytes; // 0-3
dunits = prevhashunits - nexthashunits;
}
}
__device__ u32 getxhash0(const slot0* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif DIGITBITS % 8 == 4 && RESTBITS == 4
return pslot->hash->bytes[prevbo] >> 4;
#elif DIGITBITS % 8 == 4 && RESTBITS == 6
return (pslot->hash->bytes[prevbo] & 0x3) << 4 | pslot->hash->bytes[prevbo + 1] >> 4;
#elif DIGITBITS % 8 == 4 && RESTBITS == 8
return (pslot->hash->bytes[prevbo] & 0xf) << 4 | pslot->hash->bytes[prevbo + 1] >> 4;
#elif DIGITBITS % 8 == 4 && RESTBITS == 10
return (pslot->hash->bytes[prevbo] & 0x3f) << 4 | pslot->hash->bytes[prevbo + 1] >> 4;
#elif DIGITBITS % 8 == 0 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif RESTBITS == 0
return 0;
#else
#error non implemented
#endif
}
__device__ u32 getxhash1(const slot1* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif DIGITBITS % 4 == 0 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif DIGITBITS % 4 == 0 && RESTBITS == 6
return pslot->hash->bytes[prevbo] & 0x3f;
#elif DIGITBITS % 4 == 0 && RESTBITS == 8
return pslot->hash->bytes[prevbo];
#elif DIGITBITS % 4 == 0 && RESTBITS == 10
return (pslot->hash->bytes[prevbo] & 0x3) << 8 | pslot->hash->bytes[prevbo + 1];
#elif RESTBITS == 0
return 0;
#else
#error non implemented
#endif
}
__device__ bool equal(const hashunit *hash0, const hashunit *hash1) const {
return hash0[prevhashunits - 1].word == hash1[prevhashunits - 1].word;
}
};
struct collisiondata {
#ifdef XBITMAP
#if NSLOTS > 64
#error cant use XBITMAP with more than 64 slots
#endif
u64 xhashmap[NRESTS];
u64 xmap;
#else
#if RESTBITS <= 6
typedef uchar xslot;
#else
typedef u16 xslot;
#endif
static const xslot xnil = ~0;
xslot xhashslots[NRESTS];
xslot nextxhashslot[NSLOTS];
xslot nextslot;
#endif
u32 s0;
__device__ void clear() {
#ifdef XBITMAP
memset(xhashmap, 0, NRESTS * sizeof(u64));
#else
memset(xhashslots, xnil, NRESTS * sizeof(xslot));
memset(nextxhashslot, xnil, NSLOTS * sizeof(xslot));
#endif
}
__device__ void addslot(u32 s1, u32 xh) {
#ifdef XBITMAP
xmap = xhashmap[xh];
xhashmap[xh] |= (u64)1 << s1;
s0 = ~0;
#else
nextslot = xhashslots[xh];
nextxhashslot[s1] = nextslot;
xhashslots[xh] = s1;
#endif
}
__device__ bool nextcollision() const {
#ifdef XBITMAP
return xmap != 0;
#else
return nextslot != xnil;
#endif
}
__device__ u32 slot() {
#ifdef XBITMAP
const u32 ffs = __ffsll(xmap);
s0 += ffs; xmap >>= ffs;
#else
nextslot = nextxhashslot[s0 = nextslot];
#endif
return s0;
}
};
};
// Round 0 ("digit H"): generate all NHASHES blake2b hashes and scatter them
// into the first layer of buckets. Grid-stride over blake blocks: each thread
// handles blocks id, id + nthreads, id + 2*nthreads, ...
__global__ void digitH(equi *eq) {
uchar hash[HASHOUT];
blake2b_state state;
equi::htlayout htl(eq, 0);
const u32 hashbytes = hashsize(0); // always 23 ?
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 block = id; block < NBLOCKS; block += eq->nthreads) {
// Each block hashes the shared header state plus the block counter.
state = eq->blake_ctx;
blake2b_gpu_hash(&state, block, hash, HASHOUT);
for (u32 i = 0; i<HASHESPERBLAKE; i++) {
const uchar *ph = hash + i * WN / 8;
// Derive the bucket id (and, with XINTREE, the rest-bits xhash) from the
// leading hash bytes; the layout depends on the BUCKBITS/RESTBITS split.
#if BUCKBITS == 16 && RESTBITS == 4
const u32 bucketid = ((u32)ph[0] << 8) | ph[1];
#ifdef XINTREE
const u32 xhash = ph[2] >> 4;
#endif
#elif BUCKBITS == 14 && RESTBITS == 6
const u32 bucketid = ((u32)ph[0] << 6) | ph[1] >> 2;
#elif BUCKBITS == 12 && RESTBITS == 8
const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
#elif BUCKBITS == 20 && RESTBITS == 4
const u32 bucketid = ((((u32)ph[0] << 8) | ph[1]) << 4) | ph[2] >> 4;
#ifdef XINTREE
const u32 xhash = ph[2] & 0xf;
#endif
#elif BUCKBITS == 12 && RESTBITS == 4
const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
#ifdef XINTREE
const u32 xhash = ph[1] & 0xf;
#endif
#else
#error not implemented
#endif
// Claim a slot in the bucket; overfull buckets silently drop entries
// (negligible loss by design -- see SAVEMEM comment above).
const u32 slot = atomicAdd(&eq->nslots[0][bucketid], 1);
if (slot >= NSLOTS)
continue;
slot0 &s = eq->hta.trees0[0][bucketid][slot];
// Leaf node: records which of the NHASHES hashes this slot came from.
#ifdef XINTREE
s.attr = tree(block*HASHESPERBLAKE + i, xhash);
#else
s.attr = tree(block*HASHESPERBLAKE + i);
#endif
// Store the remaining hash bytes, left-padded to hashunit alignment.
memcpy(s.hash->bytes + htl.nextbo, ph + WN / 8 - hashbytes, hashbytes);
}
}
}
__global__ void digitO(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(r - 1) / 2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
u32 xhash;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8)
| (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1])) << 4
| (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
xhash &= 0xf;
#elif WN % 24 == 0 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4)
| (xhash = bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4;
xhash &= 0xf;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4)
| (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
xhash &= 0xf;
#elif WN == 48 && BUCKBITS == 4 && RESTBITS == 4
xorbucketid = (u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) >> 4;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) & 0xf) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 2
| (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 6;
#else
#error not implemented
#endif
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[r / 2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
for (u32 i = htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
__global__ void digitE(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[(r - 1) / 2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8)
| (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]);
u32 xhash = (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
#elif WN % 24 == 0 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4)
| (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
#elif WN == 48 && BUCKBITS == 4 && RESTBITS == 4
xorbucketid = (u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) >> 4;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 6)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 2;
#else
#error not implemented
#endif
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[r / 2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
for (u32 i = htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
#ifdef UNROLL
// bucket mask
static const u32 BUCKMASK = NBUCKETS - 1;
__global__ void digit_1(equi *eq) {
equi::htlayout htl(eq, 1);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[0][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[0][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
xs.hash[4].word = pslot0->hash[5].word ^ pslot1->hash[5].word;
}
}
}
}
__global__ void digit2(equi *eq) {
equi::htlayout htl(eq, 2);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[0][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[4].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
__global__ void digit3(equi *eq) {
equi::htlayout htl(eq, 3);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[1][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x1234);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
__global__ void digit4(equi *eq) {
equi::htlayout htl(eq, 4);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[1][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4123);
const u32 xorbucketid = bexor >> 8;
const u32 xhash = bexor >> 4 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
__global__ void digit5(equi *eq) {
equi::htlayout htl(eq, 5);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
__global__ void digit6(equi *eq) {
equi::htlayout htl(eq, 6);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
}
}
}
}
__global__ void digit7(equi *eq) {
equi::htlayout htl(eq, 7);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[3][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4012);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
}
}
}
}
__global__ void digit8(equi *eq) {
equi::htlayout htl(eq, 8);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[3][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x3456);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[4][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
}
}
}
}
#endif
// Final round ("digit K"): look for full collisions in the last layer.
// Pairs whose remaining hash words match and whose index subtrees are
// (probably) disjoint become solution candidates. Grid-stride over buckets.
__global__ void digitK(equi *eq) {
equi::collisiondata cd;
equi::htlayout htl(eq, WK);
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(WK - 1) / 2][bucketid];
u32 bsize = eq->getnslots0(bucketid); // assume WK odd
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
// Iterate all earlier slots s0 < s1 sharing the same rest bits.
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision();) { // assume WK odd
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
// Full hash match + probably-disjoint leaves => candidate solution;
// candidate() re-derives the index list and rejects duplicates.
if (htl.equal(pslot0->hash, pslot1->hash) && pslot0->attr.prob_disjoint(pslot1->attr)) {
#ifdef XINTREE
eq->candidate(tree(bucketid, s0, s1, 0));
#else
eq->candidate(tree(bucketid, s0, s1));
#endif
}
}
}
}
}
// Construct a per-device solver context: allocates the host solution buffer
// (page-aligned by hand) and all device-side heaps and bookkeeping arrays.
eq_cuda_context::eq_cuda_context(int tpb, int blocks, int id)
: threadsperblock(tpb), totalblocks(blocks), device_id(id)
{
eq = new equi(threadsperblock * totalblocks);
// Over-allocate by one page so 'solutions' can be aligned up to 4096.
// NOTE(review): malloc result is not checked, and the pointer is cast via
// 'long long' for the alignment arithmetic -- uintptr_t would be the
// portable choice; confirm before changing since this is working code.
sol_memory = malloc(sizeof(proof) * MAXSOLS + 4096);
solutions = (proof*)(((long long)sol_memory + 4095) & -4096);
checkCudaErrors(hipSetDevice(device_id));
checkCudaErrors(hipDeviceReset());
checkCudaErrors(hipSetDeviceFlags(hipDeviceScheduleBlockingSync));
checkCudaErrors(hipDeviceSetCacheConfig(hipFuncCachePreferL1));
// Two device heaps: even rounds write slot0 layers, odd rounds slot1 layers.
checkCudaErrors(hipMalloc((void**)&heap0, sizeof(digit0)));
checkCudaErrors(hipMalloc((void**)&heap1, sizeof(digit1)));
for (u32 r = 0; r < WK; r++)
if ((r & 1) == 0)
eq->hta.trees0[r / 2] = (bucket0 *)(heap0 + r / 2);
else
eq->hta.trees1[r / 2] = (bucket1 *)(heap1 + r / 2);
// Per-bucket slot counters (double-buffered: [0] and [1]) and solution array.
checkCudaErrors(hipMalloc((void**)&eq->nslots, 2 * NBUCKETS * sizeof(u32)));
checkCudaErrors(hipMalloc((void**)&eq->sols, MAXSOLS * sizeof(proof)));
checkCudaErrors(hipMalloc((void**)&device_eq, sizeof(equi)));
}
// Tear down the context. Individual hipFree calls are commented out because
// hipDeviceReset() releases all device allocations for this device at once.
eq_cuda_context::~eq_cuda_context()
{
/*checkCudaErrors(hipFree(eq->nslots));
checkCudaErrors(hipFree(eq->sols));
checkCudaErrors(hipFree(eq->hta.trees0[0]));
checkCudaErrors(hipFree(eq->hta.trees1[0]));*/
checkCudaErrors(hipSetDevice(device_id));
checkCudaErrors(hipDeviceReset());
free(sol_memory);
delete eq;
}
// One full Equihash solve (HIP build): hash header||nonce on the GPU,
// run the WK collision rounds, copy the candidate proofs back and report
// each one through solutionf.  cancelf is polled between kernel launches
// so a cancel aborts before the next round is enqueued.
void eq_cuda_context::solve(const char *tequihash_header,
unsigned int tequihash_header_len,
const char* nonce,
unsigned int nonce_len,
std::function<bool()> cancelf,
std::function<void(const std::vector<uint32_t>&, size_t, const unsigned char*)> solutionf,
std::function<void(void)> hashdonef)
{
checkCudaErrors(hipSetDevice(device_id));
eq->setheadernonce(tequihash_header, tequihash_header_len, nonce, nonce_len);
// Push the small host-side state (blake context, device pointers,
// zeroed counters) to the device copy the kernels mutate.
checkCudaErrors(hipMemcpy(device_eq, eq, sizeof(equi), hipMemcpyHostToDevice));
digitH << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
#if BUCKBITS == 16 && RESTBITS == 4 && defined XINTREE && defined(UNROLL)
// Fully unrolled round kernels for this parameter set.
digit_1 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit2 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit3 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit4 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit5 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit6 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit7 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit8 << <totalblocks, threadsperblock >> >(device_eq);
#else
// Generic path: alternate odd/even round kernels.
for (u32 r = 1; r < WK; r++) {
r & 1 ? digitO << <totalblocks, threadsperblock >> >(device_eq, r)
: digitE << <totalblocks, threadsperblock >> >(device_eq, r);
}
#endif
if (cancelf()) return;
digitK << <totalblocks, threadsperblock >> >(device_eq);
// The blocking memcpys below also synchronize with the async launches.
checkCudaErrors(hipMemcpy(eq, device_eq, sizeof(equi), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(solutions, eq->sols, MAXSOLS * sizeof(proof), hipMemcpyDeviceToHost));
// nsols may exceed MAXSOLS; only the first MAXSOLS proofs were stored.
for (unsigned s = 0; (s < eq->nsols) && (s < MAXSOLS); s++)
{
std::vector<uint32_t> index_vector(PROOFSIZE);
for (u32 i = 0; i < PROOFSIZE; i++) {
index_vector[i] = solutions[s][i];
}
solutionf(index_vector, DIGITBITS, nullptr);
if (cancelf()) return;
}
hashdonef();
} | 509c49c01a8ff1fed1bf9f95d20e3ae0c97e0df4.cu | // Equihash CUDA solver
// Copyright (c) 2016 John Tromp
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#define htole32(x) (x)
#define HAVE_DECL_HTOLE32 1
#include "../cpu_tromp/equi.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <functional>
#include <vector>
#include <iostream>
#include "eqcuda.hpp"
#include "blake2b.cu"
typedef uint16_t u16;
typedef uint64_t u64;
#ifndef RESTBITS
#define RESTBITS 4
#endif
// 2_log of number of buckets
#define BUCKBITS (DIGITBITS-RESTBITS)
#ifndef SAVEMEM
#if RESTBITS == 4
// can't save memory in such small buckets
#define SAVEMEM 1
#elif RESTBITS >= 8
// take advantage of law of large numbers (sum of 2^8 random numbers)
// this reduces (200,9) memory to under 144MB, with negligible discarding
#define SAVEMEM 9/14
#endif
#endif
// number of buckets
static const u32 NBUCKETS = 1 << BUCKBITS;
// 2_log of number of slots per bucket
static const u32 SLOTBITS = RESTBITS + 1 + 1;
static const u32 SLOTRANGE = 1 << SLOTBITS;
// number of slots per bucket
static const u32 NSLOTS = SLOTRANGE * SAVEMEM;
// SLOTBITS mask
static const u32 SLOTMASK = SLOTRANGE - 1;
// number of possible values of xhash (rest of n) bits
static const u32 NRESTS = 1 << RESTBITS;
// RESTBITS mask
static const u32 RESTMASK = NRESTS - 1;
// number of blocks of hashes extracted from single 512 bit blake2b output
static const u32 NBLOCKS = (NHASHES + HASHESPERBLAKE - 1) / HASHESPERBLAKE;
// nothing larger found in 100000 runs
static const u32 MAXSOLS = 10;
// tree node identifying its children as two different slots in
// a bucket on previous layer with the same rest bits (x-tra hash)
// Tree node identifying its two children as slots s0, s1 in a bucket on
// the previous layer; with XINTREE the RESTBITS of extra hash ("xhash")
// are packed into the low bits as well.  Everything lives in one u32
// packed as [bucketid | slotid0 | slotid1 (| xhash)].
struct tree {
u32 bid_s0_s1_x; // manual bitfields
// Leaf constructor (round 0): idx is the hash index, xh the rest bits.
__device__ tree(const u32 idx, const u32 xh) {
bid_s0_s1_x = idx << RESTBITS | xh;
}
// Raw constructor: reinterpret an already-packed word.
__device__ tree(const u32 idx) {
bid_s0_s1_x = idx;
}
// Internal-node constructors.  Note the two #ifdef variants share the
// single closing brace after the #endif.
#ifdef XINTREE
__device__ tree(const u32 bid, const u32 s0, const u32 s1, const u32 xh) {
bid_s0_s1_x = ((((bid << SLOTBITS) | s0) << SLOTBITS) | s1) << RESTBITS | xh;
#else
__device__ tree(const u32 bid, const u32 s0, const u32 s1) {
bid_s0_s1_x = (((bid << SLOTBITS) | s0) << SLOTBITS) | s1;
#endif
}
// Leaf index (only meaningful for round-0 nodes).
__device__ u32 getindex() const {
#ifdef XINTREE
return bid_s0_s1_x >> RESTBITS;
#else
return bid_s0_s1_x;
#endif
}
__device__ u32 bucketid() const {
#ifdef XINTREE
return bid_s0_s1_x >> (2 * SLOTBITS + RESTBITS);
#else
return bid_s0_s1_x >> (2 * SLOTBITS);
#endif
}
__device__ u32 slotid0() const {
#ifdef XINTREE
// note: + binds tighter than >>, so this shifts by SLOTBITS+RESTBITS
return (bid_s0_s1_x >> SLOTBITS + RESTBITS) & SLOTMASK;
#else
return (bid_s0_s1_x >> SLOTBITS) & SLOTMASK;
#endif
}
__device__ u32 slotid1() const {
#ifdef XINTREE
return (bid_s0_s1_x >> RESTBITS) & SLOTMASK;
#else
return bid_s0_s1_x & SLOTMASK;
#endif
}
__device__ u32 xhash() const {
return bid_s0_s1_x & RESTMASK;
}
// Cheap probabilistic test that two nodes reference disjoint slot sets;
// false positives are filtered later by duplicate-index detection.
__device__ bool prob_disjoint(const tree other) const {
tree xort(bid_s0_s1_x ^ other.bid_s0_s1_x);
return xort.bucketid() || (xort.slotid0() && xort.slotid1());
// next two tests catch much fewer cases and are therefore skipped
// && slotid0() != other.slotid1() && slotid1() != other.slotid0()
}
};
// One 32-bit chunk of hash, addressable as a whole word or per byte.
union hashunit {
u32 word;
uchar bytes[sizeof(u32)];
};
#define WORDS(bits) ((bits + 31) / 32)
#define HASHWORDS0 WORDS(WN - DIGITBITS + RESTBITS)
#define HASHWORDS1 WORDS(WN - 2*DIGITBITS + RESTBITS)
// Bucket slot for even rounds: tree node plus the remaining hash words.
struct slot0 {
tree attr;
hashunit hash[HASHWORDS0];
};
// Bucket slot for odd rounds: tree node plus the (shorter) remaining hash.
struct slot1 {
tree attr;
hashunit hash[HASHWORDS1];
};
// a bucket is NSLOTS treenodes
typedef slot0 bucket0[NSLOTS];
typedef slot1 bucket1[NSLOTS];
// the N-bit hash consists of K+1 n-bit "digits"
// each of which corresponds to a layer of NBUCKETS buckets
typedef bucket0 digit0[NBUCKETS];
typedef bucket1 digit1[NBUCKETS];
// size (in bytes) of hash in round 0 <= r < WK
// Host-side mirror of hashsize(): number of hash bytes a slot still
// carries after round 0 <= r < WK completes (rounded up to whole bytes).
u32 hhashsize(const u32 r) {
#ifdef XINTREE
const u32 remaining = WN - (r + 1) * DIGITBITS;
#else
const u32 remaining = WN - (r + 1) * DIGITBITS + RESTBITS;
#endif
return (remaining >> 3) + ((remaining & 7) ? 1 : 0);
}
// size (in bytes) of hash in round 0 <= r < WK
// Device-side twin of hhashsize(): bytes of hash remaining after round r.
__device__ u32 hashsize(const u32 r) {
#ifdef XINTREE
const u32 hashbits = WN - (r + 1) * DIGITBITS;
#else
const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS;
#endif
return (hashbits + 7) / 8; // round up to whole bytes
}
// Host-side mirror of hashwords(): bytes rounded up to whole 32-bit words.
u32 hhashwords(u32 bytes) {
return (bytes + 3) >> 2;
}
// Device-side: bytes rounded up to whole 32-bit hashunit words.
__device__ u32 hashwords(u32 bytes) {
return (bytes + 3) / 4;
}
// manages hash and tree data
// Per-round bucket-array pointers: trees0 holds the even rounds' buckets,
// trees1 the odd rounds' (round r lives in trees0[r/2] or trees1[r/2]).
struct htalloc {
bucket0 *trees0[(WK + 1) / 2];
bucket1 *trees1[WK / 2];
};
typedef u32 bsizes[NBUCKETS];
// Solver state shared between host and device.  The host builds one
// instance, fills in device pointers (hta, nslots, sols), and memcpys the
// whole struct to the device before each run; kernels then mutate the
// device copy in place.
struct equi {
blake2b_state blake_ctx; // keyed blake2b state for header||nonce
htalloc hta;             // per-round bucket arrays (device memory)
bsizes *nslots;          // device: 2 rows of NBUCKETS slot counters
proof *sols;             // device: up to MAXSOLS candidate proofs
u32 nsols;               // candidates found (may exceed MAXSOLS)
u32 nthreads;            // total GPU threads striding the buckets
equi(const u32 n_threads) {
nthreads = n_threads;
}
// Re-key the blake2b state for a new header/nonce and zero the per-round
// bookkeeping on the device.
void setheadernonce(const char *header, const u32 len, const char* nonce, const u32 nlen) {
setheader(&blake_ctx, header, len, nonce, nlen);
// nslots was allocated as 2 * NBUCKETS counters (one row per round
// parity).  Clear BOTH rows: previously only row 0 was cleared, so on
// the very first solve after allocation the first odd-round kernel
// atomicAdd'ed onto uninitialized counters in row 1.
checkCudaErrors(cudaMemset(nslots, 0, 2 * NBUCKETS * sizeof(u32)));
nsols = 0;
}
// Read-and-reset the slot count of an even-round bucket, clamped to
// NSLOTS (overflowing inserts were dropped by the writing kernel).
__device__ u32 getnslots0(const u32 bid) {
u32 &nslot = nslots[0][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
// Same for an odd-round bucket.
__device__ u32 getnslots1(const u32 bid) {
u32 &nslot = nslots[1][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
// Canonicalize a pair of sibling index lists by swapping the halves so
// the smaller leading index comes first.  Always returns false so it can
// be chained into the || cascades below.
__device__ bool orderindices(u32 *indices, u32 size) {
if (indices[0] > indices[size]) {
for (u32 i = 0; i < size; i++) {
const u32 tmp = indices[i];
indices[i] = indices[size + i];
indices[size + i] = tmp;
}
}
return false;
}
// listindicesN expands a round-N tree node into its 2^N leaf indices in
// canonical order; each returns true as soon as a duplicate leading index
// reveals the candidate to be invalid.
__device__ bool listindices1(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[0][t.bucketid()];
const u32 size = 1 << 0;
indices[0] = buck[t.slotid0()].attr.getindex();
indices[size] = buck[t.slotid1()].attr.getindex();
orderindices(indices, size);
return false;
}
__device__ bool listindices2(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[0][t.bucketid()];
const u32 size = 1 << 1;
return listindices1(buck[t.slotid0()].attr, indices) ||
listindices1(buck[t.slotid1()].attr, indices + size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices3(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[1][t.bucketid()];
const u32 size = 1 << 2;
return listindices2(buck[t.slotid0()].attr, indices) ||
listindices2(buck[t.slotid1()].attr, indices + size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices4(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[1][t.bucketid()];
const u32 size = 1 << 3;
return listindices3(buck[t.slotid0()].attr, indices) ||
listindices3(buck[t.slotid1()].attr, indices + size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices5(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[2][t.bucketid()];
const u32 size = 1 << 4;
return listindices4(buck[t.slotid0()].attr, indices) ||
listindices4(buck[t.slotid1()].attr, indices + size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices6(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[2][t.bucketid()];
const u32 size = 1 << 5;
return listindices5(buck[t.slotid0()].attr, indices) ||
listindices5(buck[t.slotid1()].attr, indices + size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices7(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[3][t.bucketid()];
const u32 size = 1 << 6;
return listindices6(buck[t.slotid0()].attr, indices) ||
listindices6(buck[t.slotid1()].attr, indices + size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices8(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[3][t.bucketid()];
const u32 size = 1 << 7;
return listindices7(buck[t.slotid0()].attr, indices) ||
listindices7(buck[t.slotid1()].attr, indices + size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices9(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[4][t.bucketid()];
const u32 size = 1 << 8;
return listindices8(buck[t.slotid0()].attr, indices) ||
listindices8(buck[t.slotid1()].attr, indices + size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
// Verify a final-round collision: expand both subtrees into PROOFSIZE
// leaf indices (rejecting duplicates), then store the proof if a slot in
// sols is still free.  Note the expansion runs twice on success.
__device__ void candidate(const tree t) {
proof prf;
#if WK==9
if (listindices9(t, prf)) return;
#elif WK==7
if (listindices7(t, prf)) return;
#elif WK==5
if (listindices5(t, prf)) return;
#else
#error not implemented
#endif
u32 soli = atomicAdd(&nsols, 1);
if (soli < MAXSOLS)
#if WK==9
listindices9(t, sols[soli]);
#elif WK==7
listindices7(t, sols[soli]);
#elif WK==5
listindices5(t, sols[soli]);
#else
#error not implemented
#endif
}
// Debug histogram of bucket occupancy after round r; compiled in only
// when HIST, SPARK or LOGSPARK is defined.
void showbsizes(u32 r) {
#if defined(HIST) || defined(SPARK) || defined(LOGSPARK)
u32 ns[NBUCKETS];
checkCudaErrors(cudaMemcpy(ns, nslots[r & 1], NBUCKETS * sizeof(u32), cudaMemcpyDeviceToHost));
u32 binsizes[65];
memset(binsizes, 0, 65 * sizeof(u32));
for (u32 bucketid = 0; bucketid < NBUCKETS; bucketid++) {
u32 bsize = min(ns[bucketid], NSLOTS) >> (SLOTBITS - 6);
binsizes[bsize]++;
}
for (u32 i = 0; i < 65; i++) {
#ifdef HIST
printf(" %d:%d", i, binsizes[i]);
#else
#ifdef SPARK
u32 sparks = binsizes[i] / SPARKSCALE;
#else
u32 sparks = 0;
for (u32 bs = binsizes[i]; bs; bs >>= 1) sparks++;
sparks = sparks * 7 / SPARKSCALE;
#endif
// UTF-8 sparkline characters U+2581..
printf("\342\226%c", '\201' + sparks);
#endif
}
printf("\n");
#endif
}
// Byte/word layout of slot hashes for round r: how many 32-bit units a
// slot carries before/after the round and the byte offsets (prevbo,
// nextbo) at which the meaningful hash bytes begin inside those units.
struct htlayout {
htalloc hta;
u32 prevhashunits;
u32 nexthashunits;
u32 dunits;  // units dropped by this round
u32 prevbo;  // leading pad bytes in previous-round hashes
u32 nextbo;  // leading pad bytes in this round's output hashes
__device__ htlayout(equi *eq, u32 r) : hta(eq->hta), prevhashunits(0), dunits(0) {
u32 nexthashbytes = hashsize(r);
nexthashunits = hashwords(nexthashbytes);
prevbo = 0;
nextbo = nexthashunits * sizeof(hashunit) - nexthashbytes; // 0-3
if (r) {
u32 prevhashbytes = hashsize(r - 1);
prevhashunits = hashwords(prevhashbytes);
prevbo = prevhashunits * sizeof(hashunit) - prevhashbytes; // 0-3
dunits = prevhashunits - nexthashunits;
}
}
// Extract the RESTBITS "extra hash" from an even-round slot; with
// XINTREE it is cached in the tree node instead of the hash bytes.
__device__ u32 getxhash0(const slot0* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif DIGITBITS % 8 == 4 && RESTBITS == 4
return pslot->hash->bytes[prevbo] >> 4;
#elif DIGITBITS % 8 == 4 && RESTBITS == 6
return (pslot->hash->bytes[prevbo] & 0x3) << 4 | pslot->hash->bytes[prevbo + 1] >> 4;
#elif DIGITBITS % 8 == 4 && RESTBITS == 8
return (pslot->hash->bytes[prevbo] & 0xf) << 4 | pslot->hash->bytes[prevbo + 1] >> 4;
#elif DIGITBITS % 8 == 4 && RESTBITS == 10
return (pslot->hash->bytes[prevbo] & 0x3f) << 4 | pslot->hash->bytes[prevbo + 1] >> 4;
#elif DIGITBITS % 8 == 0 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif RESTBITS == 0
return 0;
#else
#error non implemented
#endif
}
// Same for an odd-round slot.
__device__ u32 getxhash1(const slot1* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif DIGITBITS % 4 == 0 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif DIGITBITS % 4 == 0 && RESTBITS == 6
return pslot->hash->bytes[prevbo] & 0x3f;
#elif DIGITBITS % 4 == 0 && RESTBITS == 8
return pslot->hash->bytes[prevbo];
#elif DIGITBITS % 4 == 0 && RESTBITS == 10
return (pslot->hash->bytes[prevbo] & 0x3) << 8 | pslot->hash->bytes[prevbo + 1];
#elif RESTBITS == 0
return 0;
#else
#error non implemented
#endif
}
// Compare only the last hash word: used to reject pairs whose XOR
// cannot cancel the remaining digits (full check happens via bucketing).
__device__ bool equal(const hashunit *hash0, const hashunit *hash1) const {
return hash0[prevhashunits - 1].word == hash1[prevhashunits - 1].word;
}
};
// Per-bucket collision enumerator: records which slots share the same
// rest bits, either as one 64-bit bitmap per rest value (XBITMAP) or as
// singly-linked slot lists threaded through nextxhashslot.
struct collisiondata {
#ifdef XBITMAP
#if NSLOTS > 64
#error cant use XBITMAP with more than 64 slots
#endif
u64 xhashmap[NRESTS];
u64 xmap;
#else
#if RESTBITS <= 6
typedef uchar xslot;
#else
typedef u16 xslot;
#endif
static const xslot xnil = ~0;
xslot xhashslots[NRESTS];    // head of list per rest value
xslot nextxhashslot[NSLOTS]; // next-pointers forming the lists
xslot nextslot;
#endif
u32 s0;
__device__ void clear() {
#ifdef XBITMAP
memset(xhashmap, 0, NRESTS * sizeof(u64));
#else
memset(xhashslots, xnil, NRESTS * sizeof(xslot));
memset(nextxhashslot, xnil, NSLOTS * sizeof(xslot));
#endif
}
// Register slot s1 under rest value xh and prime the iteration over all
// earlier slots with the same rest value.
__device__ void addslot(u32 s1, u32 xh) {
#ifdef XBITMAP
xmap = xhashmap[xh];
xhashmap[xh] |= (u64)1 << s1;
s0 = ~0;
#else
nextslot = xhashslots[xh];
nextxhashslot[s1] = nextslot;
xhashslots[xh] = s1;
#endif
}
__device__ bool nextcollision() const {
#ifdef XBITMAP
return xmap != 0;
#else
return nextslot != xnil;
#endif
}
// Advance the iteration and return the colliding slot's id.
__device__ u32 slot() {
#ifdef XBITMAP
const u32 ffs = __ffsll(xmap);
s0 += ffs; xmap >>= ffs;
#else
nextslot = nextxhashslot[s0 = nextslot];
#endif
return s0;
}
};
};
// Round 0: blake2b-hash every block index (grid-stride over NBLOCKS),
// slice each output into HASHESPERBLAKE hashes, and scatter them into
// buckets keyed by the leading BUCKBITS bits.  Buckets that overflow
// NSLOTS silently drop the excess entries.
__global__ void digitH(equi *eq) {
uchar hash[HASHOUT];
blake2b_state state;
equi::htlayout htl(eq, 0);
const u32 hashbytes = hashsize(0); // always 23 ?
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 block = id; block < NBLOCKS; block += eq->nthreads) {
state = eq->blake_ctx;
blake2b_gpu_hash(&state, block, hash, HASHOUT);
for (u32 i = 0; i<HASHESPERBLAKE; i++) {
const uchar *ph = hash + i * WN / 8;
// Bucket id = top BUCKBITS of the hash; xhash = next RESTBITS
// (cached in the tree node when XINTREE is defined).
#if BUCKBITS == 16 && RESTBITS == 4
const u32 bucketid = ((u32)ph[0] << 8) | ph[1];
#ifdef XINTREE
const u32 xhash = ph[2] >> 4;
#endif
#elif BUCKBITS == 14 && RESTBITS == 6
const u32 bucketid = ((u32)ph[0] << 6) | ph[1] >> 2;
#elif BUCKBITS == 12 && RESTBITS == 8
const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
#elif BUCKBITS == 20 && RESTBITS == 4
const u32 bucketid = ((((u32)ph[0] << 8) | ph[1]) << 4) | ph[2] >> 4;
#ifdef XINTREE
const u32 xhash = ph[2] & 0xf;
#endif
#elif BUCKBITS == 12 && RESTBITS == 4
const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
#ifdef XINTREE
const u32 xhash = ph[1] & 0xf;
#endif
#else
#error not implemented
#endif
const u32 slot = atomicAdd(&eq->nslots[0][bucketid], 1);
if (slot >= NSLOTS)
continue;
slot0 &s = eq->hta.trees0[0][bucketid][slot];
#ifdef XINTREE
s.attr = tree(block*HASHESPERBLAKE + i, xhash);
#else
s.attr = tree(block*HASHESPERBLAKE + i);
#endif
// Keep only the trailing hashbytes of the WN-bit hash.
memcpy(s.hash->bytes + htl.nextbo, ph + WN / 8 - hashbytes, hashbytes);
}
}
}
// Generic odd round r: within each even-round bucket, enumerate slot
// pairs sharing the same rest bits, XOR their hashes, and store the
// surviving (non-trivial) pairs into the odd-round buckets keyed by the
// next BUCKBITS of the XOR.
__global__ void digitO(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(r - 1) / 2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
// Equal trailing words would make the XOR vanish entirely.
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
u32 xhash;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8)
| (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1])) << 4
| (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
xhash &= 0xf;
#elif WN % 24 == 0 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4)
| (xhash = bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4;
xhash &= 0xf;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4)
| (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
xhash &= 0xf;
#elif WN == 48 && BUCKBITS == 4 && RESTBITS == 4
xorbucketid = (u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) >> 4;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) & 0xf) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 2
| (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 6;
#else
#error not implemented
#endif
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[r / 2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
// Drop the dunits leading words cancelled by this round.
for (u32 i = htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
// Generic even round r: mirror of digitO, reading odd-round buckets and
// writing the XORed survivors back into even-round buckets.
__global__ void digitE(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[(r - 1) / 2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8)
| (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]);
u32 xhash = (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
#elif WN % 24 == 0 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4)
| (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
#elif WN == 48 && BUCKBITS == 4 && RESTBITS == 4
xorbucketid = (u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) >> 4;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 6)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 2;
#else
#error not implemented
#endif
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[r / 2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
for (u32 i = htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
#ifdef UNROLL
// bucket mask
static const u32 BUCKMASK = NBUCKETS - 1;
// Unrolled round 1 (WN=200, WK=9, XINTREE): like digitO(eq, 1) but with
// the hash layout hard-coded; __byte_perm(xor0, 0, 0x0123) byte-reverses
// the first XOR word so the next bucket id and 4-bit rest fall out of
// simple shifts.
__global__ void digit_1(equi *eq) {
equi::htlayout htl(eq, 1);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[0][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[0][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
xs.hash[4].word = pslot0->hash[5].word ^ pslot1->hash[5].word;
}
}
}
}
// Unrolled round 2 (WN=200, WK=9, XINTREE): collide odd-round slots and
// write survivors back into even-round buckets; xor0 is kept (not
// dropped) since this round consumes only part of its word.
__global__ void digit2(equi *eq) {
equi::htlayout htl(eq, 2);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[0][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[4].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
// Unrolled round 3 (WN=200, WK=9, XINTREE): the digit straddles two XOR
// words, so __byte_perm(xor0, xor1, 0x1234) splices the relevant bytes.
__global__ void digit3(equi *eq) {
equi::htlayout htl(eq, 3);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[1][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x1234);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
// Unrolled round 4 (WN=200, WK=9, XINTREE).
__global__ void digit4(equi *eq) {
equi::htlayout htl(eq, 4);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[1][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4123);
const u32 xorbucketid = bexor >> 8;
const u32 xhash = bexor >> 4 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
// Unrolled round 5 (WN=200, WK=9, XINTREE).
__global__ void digit5(equi *eq) {
equi::htlayout htl(eq, 5);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
// Unrolled round 6 (WN=200, WK=9, XINTREE).
__global__ void digit6(equi *eq) {
equi::htlayout htl(eq, 6);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
}
}
}
}
// Unrolled round 7 (WN=200, WK=9, XINTREE).
__global__ void digit7(equi *eq) {
equi::htlayout htl(eq, 7);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[3][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4012);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
}
}
}
}
// Unrolled round 8 (WN=200, WK=9, XINTREE): last bucketing round before
// digitK; only one hash word survives per slot.
__global__ void digit8(equi *eq) {
equi::htlayout htl(eq, 8);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[3][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x3456);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[4][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
}
}
}
}
#endif
// Final round WK: look for slot pairs whose last remaining hash word is
// equal (a full collision) and whose index trees are probably disjoint,
// then submit them as solution candidates.
__global__ void digitK(equi *eq) {
equi::collisiondata cd;
equi::htlayout htl(eq, WK);
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(WK - 1) / 2][bucketid];
u32 bsize = eq->getnslots0(bucketid); // assume WK odd
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision();) { // assume WK odd
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash) && pslot0->attr.prob_disjoint(pslot1->attr)) {
#ifdef XINTREE
eq->candidate(tree(bucketid, s0, s1, 0));
#else
eq->candidate(tree(bucketid, s0, s1));
#endif
}
}
}
}
}
// Host-side solver context (CUDA build): resets the chosen device, then
// allocates the two round heaps, the per-bucket slot counters and the
// solution buffer used by the kernels.  nthreads = tpb * blocks.
eq_cuda_context::eq_cuda_context(int tpb, int blocks, int id)
: threadsperblock(tpb), totalblocks(blocks), device_id(id)
{
eq = new equi(threadsperblock * totalblocks);
// Over-allocate by one page so `solutions` can be rounded up to a
// 4096-byte boundary within sol_memory.
sol_memory = malloc(sizeof(proof) * MAXSOLS + 4096);
solutions = (proof*)(((long long)sol_memory + 4095) & -4096);
checkCudaErrors(cudaSetDevice(device_id));
checkCudaErrors(cudaDeviceReset());
checkCudaErrors(cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync));
checkCudaErrors(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1));
checkCudaErrors(cudaMalloc((void**)&heap0, sizeof(digit0)));
checkCudaErrors(cudaMalloc((void**)&heap1, sizeof(digit1)));
// Even rounds bucket into heap0, odd rounds into heap1.
// NOTE(review): the per-round offset heap0 + r/2 depends on heap0's
// declared element type in eqcuda.hpp — confirm layout there.
for (u32 r = 0; r < WK; r++)
if ((r & 1) == 0)
eq->hta.trees0[r / 2] = (bucket0 *)(heap0 + r / 2);
else
eq->hta.trees1[r / 2] = (bucket1 *)(heap1 + r / 2);
// Two rows of NBUCKETS counters: nslots[0] for even rounds, nslots[1] for odd.
checkCudaErrors(cudaMalloc((void**)&eq->nslots, 2 * NBUCKETS * sizeof(u32)));
checkCudaErrors(cudaMalloc((void**)&eq->sols, MAXSOLS * sizeof(proof)));
checkCudaErrors(cudaMalloc((void**)&device_eq, sizeof(equi)));
}
// Tear-down (CUDA build): cudaDeviceReset() releases every device
// allocation in one shot, which is why the individual cudaFree calls are
// left commented out below.
eq_cuda_context::~eq_cuda_context()
{
/*checkCudaErrors(cudaFree(eq->nslots));
checkCudaErrors(cudaFree(eq->sols));
checkCudaErrors(cudaFree(eq->hta.trees0[0]));
checkCudaErrors(cudaFree(eq->hta.trees1[0]));*/
checkCudaErrors(cudaSetDevice(device_id));
checkCudaErrors(cudaDeviceReset());
free(sol_memory);
delete eq;
}
// One full Equihash solve (CUDA build): hash header||nonce on the GPU,
// run the WK collision rounds, copy the candidate proofs back and report
// each one through solutionf.  cancelf is polled between kernel launches
// so a cancel aborts before the next round is enqueued.
void eq_cuda_context::solve(const char *tequihash_header,
unsigned int tequihash_header_len,
const char* nonce,
unsigned int nonce_len,
std::function<bool()> cancelf,
std::function<void(const std::vector<uint32_t>&, size_t, const unsigned char*)> solutionf,
std::function<void(void)> hashdonef)
{
checkCudaErrors(cudaSetDevice(device_id));
eq->setheadernonce(tequihash_header, tequihash_header_len, nonce, nonce_len);
// Push the small host-side state (blake context, device pointers,
// zeroed counters) to the device copy the kernels mutate.
checkCudaErrors(cudaMemcpy(device_eq, eq, sizeof(equi), cudaMemcpyHostToDevice));
digitH << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
#if BUCKBITS == 16 && RESTBITS == 4 && defined XINTREE && defined(UNROLL)
// Fully unrolled round kernels for this parameter set.
digit_1 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit2 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit3 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit4 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit5 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit6 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit7 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit8 << <totalblocks, threadsperblock >> >(device_eq);
#else
// Generic path: alternate odd/even round kernels.
for (u32 r = 1; r < WK; r++) {
r & 1 ? digitO << <totalblocks, threadsperblock >> >(device_eq, r)
: digitE << <totalblocks, threadsperblock >> >(device_eq, r);
}
#endif
if (cancelf()) return;
digitK << <totalblocks, threadsperblock >> >(device_eq);
// The blocking memcpys below also synchronize with the async launches.
checkCudaErrors(cudaMemcpy(eq, device_eq, sizeof(equi), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(solutions, eq->sols, MAXSOLS * sizeof(proof), cudaMemcpyDeviceToHost));
// nsols may exceed MAXSOLS; only the first MAXSOLS proofs were stored.
for (unsigned s = 0; (s < eq->nsols) && (s < MAXSOLS); s++)
{
std::vector<uint32_t> index_vector(PROOFSIZE);
for (u32 i = 0; i < PROOFSIZE; i++) {
index_vector[i] = solutions[s][i];
}
solutionf(index_vector, DIGITBITS, nullptr);
if (cancelf()) return;
}
hashdonef();
}
e269c99ded076b9f7d5881e77965220d354efe0e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
/* printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d, %d) blockDim:(%d, %d, %d) "
"gridDim:(%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z,
gridDim.x,gridDim.y,gridDim.z);*/
} | e269c99ded076b9f7d5881e77965220d354efe0e.cu | #include "includes.h"
// Element-wise vector addition: C[i] = A[i] + B[i] for i in [0, numElements).
// One thread per element; launch with ceil(numElements / blockDim.x) blocks
// so the whole range is covered.
__global__ void vectorAdd(const float *A, const float *B, float *C, int numElements)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail: threads past the end of the arrays do nothing.
    if (idx >= numElements)
    {
        return;
    }
    C[idx] = A[idx] + B[idx];
}
8351e55507e1f36b377c9c30a55b190f1dc9a48e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "normKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *vectors = NULL;
hipMalloc(&vectors, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
double *results = NULL;
hipMalloc(&results, XSIZE*YSIZE);
int vector_size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
normKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, vectors,size,results,vector_size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
normKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, vectors,size,results,vector_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
normKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, vectors,size,results,vector_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 8351e55507e1f36b377c9c30a55b190f1dc9a48e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "normKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Micro-benchmark driver for normKernel: for each requested matrix size it
// sweeps 20 launch configurations, timing 1000 kernel launches per
// configuration and printing "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]".
// argv[1] = number of entries of matrices_ to benchmark (1..7).
// Device buffers are left uninitialized on purpose: only timing matters.
int main(int argc, char **argv) {
    // Robustness: the original dereferenced argv[1] unconditionally.
    if (argc < 2) {
        cerr << "usage: " << argv[0] << " <matrix_count>" << endl;
        return 1;
    }
    cudaSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            // BUG FIX: the original allocated XSIZE*YSIZE *bytes* for arrays of
            // XSIZE*YSIZE doubles, so the kernel accessed far out of bounds.
            double *vectors = NULL;
            cudaMalloc(&vectors, (size_t)XSIZE*YSIZE*sizeof(double));
            int size = XSIZE*YSIZE;
            double *results = NULL;
            cudaMalloc(&results, (size_t)XSIZE*YSIZE*sizeof(double));
            int vector_size = XSIZE*YSIZE;
            // Round the problem dimensions up to multiples of the block shape
            // so the grid covers the whole matrix.
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0)
            {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // no-op that forces CUDA context creation before timing
            normKernel<<<gridBlock,threadBlock>>>(vectors,size,results,vector_size);
            cudaDeviceSynchronize();
            // Warm-up launches so the timed loop measures steady-state cost.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                normKernel<<<gridBlock,threadBlock>>>(vectors,size,results,vector_size);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                normKernel<<<gridBlock,threadBlock>>>(vectors,size,results,vector_size);
            }
            // BUG FIX: launches are asynchronous -- without this sync the loop
            // only timed kernel *enqueue* overhead, not execution.
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUG FIX: free the per-configuration buffers; the original leaked
            // device memory across all matrix/block combinations.
            cudaFree(vectors);
            cudaFree(results);
        }
    }
    return 0;
}
bc11cc47de2c9b2215855ea30b8931bc5ad8f6be.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
// Prints per-thread identification for a 2D grid of 1D blocks.
// Blocks are linearized row-major: grid_idx = blockIdx.y * gridDim.x + blockIdx.x,
// and the global linear thread id is gid = grid_idx * blockDim.x + threadIdx.x.
// warp_id is the warp index within the block (warps are 32 threads wide).
// Teaching/debug kernel only: device printf serializes output and is slow.
__global__ void print_details_of_warps()
{
    // BUG FIX: the original computed the first term with blockIdx.x, which
    // double-counts the x block offset and ignores the y grid dimension;
    // the row-major linearization (matching grid_idx below) needs blockIdx.y.
    int gid = blockIdx.y * gridDim.x * blockDim.x
              + blockIdx.x * blockDim.x + threadIdx.x;
    int warp_id = threadIdx.x / 32;
    int grid_idx = blockIdx.y * gridDim.x + blockIdx.x;
    printf("tid : %d, bid.x : %d, bid.y : %d, gid : %d, warp_id : %d, grid_idx : %d\n",
    threadIdx.x, blockIdx.x, blockIdx.y, gid, warp_id, grid_idx);
}
__global__ void code_without_divergence()
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
float a,b;
a = b = 0;
int warp_id = gid / 32;
if(warp_id % 2 == 0)
{
a = 100.0;
b = 50.0;
}
else{
a = 200;
b = 75;
}
}
__global__ void divergence_code()
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
float a,b;
a = b = 0;
if(gid%2 == 0){
a = 100.0;
b = 50.0;
}else{
a = 200;
b = 75;
}
}
int main(int argc, char** argv){
int size = 1 << 22;
dim3 block_size(128);
dim3 grid_size((size+block_size.x-1)/block_size.x);
hipLaunchKernelGGL(( code_without_divergence) , dim3(grid_size), dim3(block_size), 0, 0, );
hipDeviceSynchronize();
hipLaunchKernelGGL(( divergence_code) , dim3(grid_size), dim3(block_size), 0, 0, );
hipDeviceSynchronize();
hipDeviceReset();
return 0;
}
| bc11cc47de2c9b2215855ea30b8931bc5ad8f6be.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// Prints per-thread identification for a 2D grid of 1D blocks.
// Blocks are linearized row-major: grid_idx = blockIdx.y * gridDim.x + blockIdx.x,
// and the global linear thread id is gid = grid_idx * blockDim.x + threadIdx.x.
// warp_id is the warp index within the block (warps are 32 threads wide).
// Teaching/debug kernel only: device printf serializes output and is slow.
__global__ void print_details_of_warps()
{
    // BUG FIX: the original computed the first term with blockIdx.x, which
    // double-counts the x block offset and ignores the y grid dimension;
    // the row-major linearization (matching grid_idx below) needs blockIdx.y.
    int gid = blockIdx.y * gridDim.x * blockDim.x
              + blockIdx.x * blockDim.x + threadIdx.x;
    int warp_id = threadIdx.x / 32;
    int grid_idx = blockIdx.y * gridDim.x + blockIdx.x;
    printf("tid : %d, bid.x : %d, bid.y : %d, gid : %d, warp_id : %d, grid_idx : %d\n",
    threadIdx.x, blockIdx.x, blockIdx.y, gid, warp_id, grid_idx);
}
// Teaching kernel: the branch condition depends only on warp_id, so (when
// blockDim.x is a multiple of 32) all 32 lanes of a warp agree on the
// condition and take the same path -- no intra-warp divergence.
// a and b are written but never read; the kernel exists purely so its
// profiler counters can be compared against divergence_code below.
// NOTE(review): because the stores are dead, an optimizing compiler may
// remove the branch entirely -- inspect the SASS or compile -O0 to observe it.
__global__ void code_without_divergence()
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
float a,b;
a = b = 0;
// warp_id is uniform across each 32-thread warp for a 1D launch.
int warp_id = gid / 32;
if(warp_id % 2 == 0)
{
a = 100.0;
b = 50.0;
}
else{
a = 200;
b = 75;
}
}
// Teaching kernel: the branch condition alternates with gid parity, so each
// warp has 16 lanes on each path and the two branches execute serially under
// the SIMT divergence mask. Compare profiler counters with
// code_without_divergence above.
// NOTE(review): a and b are dead stores; an optimizing compiler may remove
// the branch -- inspect the SASS or compile -O0 to observe the divergence.
__global__ void divergence_code()
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
float a,b;
a = b = 0;
if(gid%2 == 0){
a = 100.0;
b = 50.0;
}else{
a = 200;
b = 75;
}
}
// Launches the two demo kernels back-to-back over ~4M threads so their
// warp-divergence behaviour can be compared in a profiler.
// argc/argv are unused; kept for the conventional main signature.
int main(int argc, char** argv){
    int size = 1 << 22;
    dim3 block_size(128);
    // Ceil-division so the grid covers every element.
    dim3 grid_size((size+block_size.x-1)/block_size.x);

    // Kernel launches are asynchronous and do not return errors directly:
    // check cudaGetLastError() for launch-configuration failures and the
    // sync's return code for execution failures.
    code_without_divergence <<< grid_size, block_size>>>();
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "code_without_divergence launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "code_without_divergence execution failed: %s\n", cudaGetErrorString(err));
        return 1;
    }

    divergence_code <<< grid_size, block_size>>>();
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "divergence_code launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "divergence_code execution failed: %s\n", cudaGetErrorString(err));
        return 1;
    }

    cudaDeviceReset();
    return 0;
}
|
5921a44d4ab86fc571a85d230d0a1d379977f9ee.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include <quda_internal.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <quda_matrix.h>
#include <float_vector.h>
#include <complex_quda.h>
namespace quda {
#ifdef GPU_GAUGE_TOOLS
template <typename Float, typename Gauge, typename Mom>
struct UpdateGaugeArg {
Gauge out;
Gauge in;
Mom momentum;
Float dt;
int nDim;
UpdateGaugeArg(const Gauge &out, const Gauge &in,
const Mom &momentum, Float dt, int nDim)
: out(out), in(in), momentum(momentum), dt(dt), nDim(nDim) { }
};
template<typename Float, typename Gauge, typename Mom, int N,
bool conj_mom, bool exact>
__device__ __host__ void updateGaugeFieldCompute
(UpdateGaugeArg<Float,Gauge,Mom> &arg, int x, int parity) {
typedef complex<Float> Complex;
Matrix<Complex,3> link, result, mom;
for(int dir=0; dir<arg.nDim; ++dir){
link = arg.in(dir, x, parity);
mom = arg.momentum(dir, x, parity);
Complex trace = getTrace(mom);
mom(0,0) -= trace/static_cast<Float>(3.0);
mom(1,1) -= trace/static_cast<Float>(3.0);
mom(2,2) -= trace/static_cast<Float>(3.0);
if (!exact) {
result = link;
// Nth order expansion of exponential
if (!conj_mom) {
for(int r=N; r>0; r--)
result = (arg.dt/r)*mom*result + link;
} else {
for(int r=N; r>0; r--)
result = (arg.dt/r)*conj(mom)*result + link;
}
} else {
mom = arg.dt * mom;
expsu3<Float>(mom);
if (!conj_mom) {
link = mom * link;
} else {
link = conj(mom) * link;
}
result = link;
}
arg.out(dir, x, parity) = result;
} // dir
}
template<typename Float, typename Gauge, typename Mom, int N,
bool conj_mom, bool exact>
void updateGaugeField(UpdateGaugeArg<Float,Gauge,Mom> arg) {
for (unsigned int parity=0; parity<2; parity++) {
for (int x=0; x<arg.out.volumeCB; x++) {
updateGaugeFieldCompute<Float,Gauge,Mom,N,conj_mom,exact>
(arg, x, parity);
}
}
}
template<typename Float, typename Gauge, typename Mom, int N,
bool conj_mom, bool exact>
__global__ void updateGaugeFieldKernel(UpdateGaugeArg<Float,Gauge,Mom> arg) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= 2*arg.out.volumeCB) return;
int parity = (idx >= arg.out.volumeCB) ? 1 : 0;
idx -= parity*arg.out.volumeCB;
updateGaugeFieldCompute<Float,Gauge,Mom,N,conj_mom,exact>(arg, idx, parity);
}
template <typename Float, typename Gauge, typename Mom, int N,
bool conj_mom, bool exact>
class UpdateGaugeField : public Tunable {
private:
UpdateGaugeArg<Float,Gauge,Mom> arg;
const GaugeField &meta; // meta data
const QudaFieldLocation location; // location of the lattice fields
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; }
unsigned int minThreads() const { return 2*arg.in.volumeCB; }
bool tuneGridDim() const { return false; }
public:
UpdateGaugeField(const UpdateGaugeArg<Float,Gauge,Mom> &arg,
const GaugeField &meta, QudaFieldLocation location)
: arg(arg), meta(meta), location(location) {
writeAuxString("threads=%d,prec=%lu,stride=%d",
2*arg.in.volumeCB, sizeof(Float), arg.in.stride);
}
virtual ~UpdateGaugeField() { }
void apply(const hipStream_t &stream){
if (location == QUDA_CUDA_FIELD_LOCATION) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
hipLaunchKernelGGL(( updateGaugeFieldKernel<Float,Gauge,Mom,N,conj_mom,exact>)
, dim3(tp.grid),dim3(tp.block),tp.shared_bytes, 0, arg);
} else { // run the CPU code
updateGaugeField<Float,Gauge,Mom,N,conj_mom,exact>(arg);
}
} // apply
long long flops() const {
const int Nc = 3;
return arg.nDim*2*arg.in.volumeCB*N*(Nc*Nc*2 + // scalar-matrix multiply
(8*Nc*Nc*Nc - 2*Nc*Nc) + // matrix-matrix multiply
Nc*Nc*2); // matrix-matrix addition
}
long long bytes() const { return arg.nDim*2*arg.in.volumeCB*
(arg.in.Bytes() + arg.out.Bytes() + arg.momentum.Bytes()); }
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
};
template <typename Float, typename Gauge, typename Mom>
void updateGaugeField(Gauge &out, const Gauge &in, const Mom &mom,
double dt, const GaugeField &meta, bool conj_mom, bool exact,
QudaFieldLocation location) {
// degree of exponential expansion
const int N = 8;
if (conj_mom) {
if (exact) {
UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4);
UpdateGaugeField<Float,Gauge,Mom,N,true,true> updateGauge(arg, meta, location);
updateGauge.apply(0);
} else {
UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4);
UpdateGaugeField<Float,Gauge,Mom,N,true,false> updateGauge(arg, meta, location);
updateGauge.apply(0);
}
} else {
if (exact) {
UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4);
UpdateGaugeField<Float,Gauge,Mom,N,false,true> updateGauge(arg, meta, location);
updateGauge.apply(0);
} else {
UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4);
UpdateGaugeField<Float,Gauge,Mom,N,false,false> updateGauge(arg, meta, location);
updateGauge.apply(0);
}
}
if (location == QUDA_CUDA_FIELD_LOCATION) checkCudaError();
}
template <typename Float, typename Gauge>
void updateGaugeField(Gauge out, const Gauge &in, const GaugeField &mom,
double dt, bool conj_mom, bool exact,
QudaFieldLocation location) {
if (mom.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if (mom.Reconstruct() == QUDA_RECONSTRUCT_10) {
// FIX ME - 11 is a misnomer to avoid confusion in template instantiation
updateGaugeField<Float>(out, in, gauge::FloatNOrder<Float,18,2,11>(mom), dt, mom, conj_mom, exact, location);
} else {
errorQuda("Reconstruction type not supported");
}
} else if (mom.Order() == QUDA_MILC_GAUGE_ORDER) {
updateGaugeField<Float>(out, in, gauge::MILCOrder<Float,10>(mom), dt, mom, conj_mom, exact, location);
} else {
errorQuda("Gauge Field order %d not supported", mom.Order());
}
}
template <typename Float>
void updateGaugeField(GaugeField &out, const GaugeField &in, const GaugeField &mom,
double dt, bool conj_mom, bool exact,
QudaFieldLocation location) {
const int Nc = 3;
if (out.Ncolor() != Nc)
errorQuda("Ncolor=%d not supported at this time", out.Ncolor());
if (out.Order() != in.Order() || out.Reconstruct() != in.Reconstruct()) {
errorQuda("Input and output gauge field ordering and reconstruction must match");
}
if (out.isNative()) {
if (out.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G;
updateGaugeField<Float>(G(out),G(in), mom, dt, conj_mom, exact, location);
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_12) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G;
updateGaugeField<Float>(G(out), G(in), mom, dt, conj_mom, exact, location);
} else {
errorQuda("Reconstruction type not supported");
}
} else if (out.Order() == QUDA_MILC_GAUGE_ORDER) {
updateGaugeField<Float>(gauge::MILCOrder<Float, Nc*Nc*2>(out),
gauge::MILCOrder<Float, Nc*Nc*2>(in),
mom, dt, conj_mom, exact, location);
} else {
errorQuda("Gauge Field order %d not supported", out.Order());
}
}
#endif
void updateGaugeField(GaugeField &out, double dt, const GaugeField& in,
const GaugeField& mom, bool conj_mom, bool exact)
{
#ifdef GPU_GAUGE_TOOLS
if (out.Precision() != in.Precision() || out.Precision() != mom.Precision())
errorQuda("Gauge and momentum fields must have matching precision");
if (out.Location() != in.Location() || out.Location() != mom.Location())
errorQuda("Gauge and momentum fields must have matching location");
if (out.Precision() == QUDA_DOUBLE_PRECISION) {
updateGaugeField<double>(out, in, mom, dt, conj_mom, exact, out.Location());
} else if (out.Precision() == QUDA_SINGLE_PRECISION) {
updateGaugeField<float>(out, in, mom, dt, conj_mom, exact, out.Location());
} else {
errorQuda("Precision %d not supported", out.Precision());
}
#else
errorQuda("Gauge tools are not build");
#endif
}
} // namespace quda
| 5921a44d4ab86fc571a85d230d0a1d379977f9ee.cu | #include <cstdio>
#include <cstdlib>
#include <cuda.h>
#include <quda_internal.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <quda_matrix.h>
#include <float_vector.h>
#include <complex_quda.h>
namespace quda {
#ifdef GPU_GAUGE_TOOLS
template <typename Float, typename Gauge, typename Mom>
struct UpdateGaugeArg {
Gauge out;
Gauge in;
Mom momentum;
Float dt;
int nDim;
UpdateGaugeArg(const Gauge &out, const Gauge &in,
const Mom &momentum, Float dt, int nDim)
: out(out), in(in), momentum(momentum), dt(dt), nDim(nDim) { }
};
// Updates the gauge link at checkerboard site (x, parity) in every lattice
// direction: out = exp(dt * mom) * in, after first projecting the momentum
// to be traceless. When 'exact' is set the SU(3) exponential is evaluated
// directly via expsu3; otherwise exp(dt*mom) is approximated by an N-th
// order series applied to the link in Horner form. 'conj_mom' substitutes
// conj(mom) for mom throughout. Compiled for both host and device.
template<typename Float, typename Gauge, typename Mom, int N,
bool conj_mom, bool exact>
__device__ __host__ void updateGaugeFieldCompute
(UpdateGaugeArg<Float,Gauge,Mom> &arg, int x, int parity) {
typedef complex<Float> Complex;
Matrix<Complex,3> link, result, mom;
for(int dir=0; dir<arg.nDim; ++dir){
link = arg.in(dir, x, parity);
mom = arg.momentum(dir, x, parity);
// Make the momentum traceless: subtract trace/3 from each diagonal entry.
Complex trace = getTrace(mom);
mom(0,0) -= trace/static_cast<Float>(3.0);
mom(1,1) -= trace/static_cast<Float>(3.0);
mom(2,2) -= trace/static_cast<Float>(3.0);
if (!exact) {
result = link;
// Nth order expansion of exponential
// (Horner evaluation: result = (1 + dt*mom/1 * (1 + dt*mom/2 * (...))) * link)
if (!conj_mom) {
for(int r=N; r>0; r--)
result = (arg.dt/r)*mom*result + link;
} else {
for(int r=N; r>0; r--)
result = (arg.dt/r)*conj(mom)*result + link;
}
} else {
// Exact path: exponentiate dt*mom in place, then left-multiply the link.
mom = arg.dt * mom;
expsu3<Float>(mom);
if (!conj_mom) {
link = mom * link;
} else {
link = conj(mom) * link;
}
result = link;
}
arg.out(dir, x, parity) = result;
} // dir
}
template<typename Float, typename Gauge, typename Mom, int N,
bool conj_mom, bool exact>
void updateGaugeField(UpdateGaugeArg<Float,Gauge,Mom> arg) {
for (unsigned int parity=0; parity<2; parity++) {
for (int x=0; x<arg.out.volumeCB; x++) {
updateGaugeFieldCompute<Float,Gauge,Mom,N,conj_mom,exact>
(arg, x, parity);
}
}
}
// GPU entry point: one thread per (parity, site) pair over both parities of
// the checkerboarded lattice (2 * volumeCB threads total). Threads past the
// end are the grid tail and exit immediately; the parity bit is peeled off
// the linear index before delegating to the per-site update.
template<typename Float, typename Gauge, typename Mom, int N,
bool conj_mom, bool exact>
__global__ void updateGaugeFieldKernel(UpdateGaugeArg<Float,Gauge,Mom> arg) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= 2*arg.out.volumeCB) return;
int parity = (idx >= arg.out.volumeCB) ? 1 : 0;
idx -= parity*arg.out.volumeCB;
updateGaugeFieldCompute<Float,Gauge,Mom,N,conj_mom,exact>(arg, idx, parity);
}
template <typename Float, typename Gauge, typename Mom, int N,
bool conj_mom, bool exact>
class UpdateGaugeField : public Tunable {
private:
UpdateGaugeArg<Float,Gauge,Mom> arg;
const GaugeField &meta; // meta data
const QudaFieldLocation location; // location of the lattice fields
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; }
unsigned int minThreads() const { return 2*arg.in.volumeCB; }
bool tuneGridDim() const { return false; }
public:
UpdateGaugeField(const UpdateGaugeArg<Float,Gauge,Mom> &arg,
const GaugeField &meta, QudaFieldLocation location)
: arg(arg), meta(meta), location(location) {
writeAuxString("threads=%d,prec=%lu,stride=%d",
2*arg.in.volumeCB, sizeof(Float), arg.in.stride);
}
virtual ~UpdateGaugeField() { }
void apply(const cudaStream_t &stream){
if (location == QUDA_CUDA_FIELD_LOCATION) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
updateGaugeFieldKernel<Float,Gauge,Mom,N,conj_mom,exact>
<<<tp.grid,tp.block,tp.shared_bytes>>>(arg);
} else { // run the CPU code
updateGaugeField<Float,Gauge,Mom,N,conj_mom,exact>(arg);
}
} // apply
long long flops() const {
const int Nc = 3;
return arg.nDim*2*arg.in.volumeCB*N*(Nc*Nc*2 + // scalar-matrix multiply
(8*Nc*Nc*Nc - 2*Nc*Nc) + // matrix-matrix multiply
Nc*Nc*2); // matrix-matrix addition
}
long long bytes() const { return arg.nDim*2*arg.in.volumeCB*
(arg.in.Bytes() + arg.out.Bytes() + arg.momentum.Bytes()); }
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
};
template <typename Float, typename Gauge, typename Mom>
void updateGaugeField(Gauge &out, const Gauge &in, const Mom &mom,
double dt, const GaugeField &meta, bool conj_mom, bool exact,
QudaFieldLocation location) {
// degree of exponential expansion
const int N = 8;
if (conj_mom) {
if (exact) {
UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4);
UpdateGaugeField<Float,Gauge,Mom,N,true,true> updateGauge(arg, meta, location);
updateGauge.apply(0);
} else {
UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4);
UpdateGaugeField<Float,Gauge,Mom,N,true,false> updateGauge(arg, meta, location);
updateGauge.apply(0);
}
} else {
if (exact) {
UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4);
UpdateGaugeField<Float,Gauge,Mom,N,false,true> updateGauge(arg, meta, location);
updateGauge.apply(0);
} else {
UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4);
UpdateGaugeField<Float,Gauge,Mom,N,false,false> updateGauge(arg, meta, location);
updateGauge.apply(0);
}
}
if (location == QUDA_CUDA_FIELD_LOCATION) checkCudaError();
}
template <typename Float, typename Gauge>
void updateGaugeField(Gauge out, const Gauge &in, const GaugeField &mom,
double dt, bool conj_mom, bool exact,
QudaFieldLocation location) {
if (mom.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if (mom.Reconstruct() == QUDA_RECONSTRUCT_10) {
// FIX ME - 11 is a misnomer to avoid confusion in template instantiation
updateGaugeField<Float>(out, in, gauge::FloatNOrder<Float,18,2,11>(mom), dt, mom, conj_mom, exact, location);
} else {
errorQuda("Reconstruction type not supported");
}
} else if (mom.Order() == QUDA_MILC_GAUGE_ORDER) {
updateGaugeField<Float>(out, in, gauge::MILCOrder<Float,10>(mom), dt, mom, conj_mom, exact, location);
} else {
errorQuda("Gauge Field order %d not supported", mom.Order());
}
}
template <typename Float>
void updateGaugeField(GaugeField &out, const GaugeField &in, const GaugeField &mom,
double dt, bool conj_mom, bool exact,
QudaFieldLocation location) {
const int Nc = 3;
if (out.Ncolor() != Nc)
errorQuda("Ncolor=%d not supported at this time", out.Ncolor());
if (out.Order() != in.Order() || out.Reconstruct() != in.Reconstruct()) {
errorQuda("Input and output gauge field ordering and reconstruction must match");
}
if (out.isNative()) {
if (out.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G;
updateGaugeField<Float>(G(out),G(in), mom, dt, conj_mom, exact, location);
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_12) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G;
updateGaugeField<Float>(G(out), G(in), mom, dt, conj_mom, exact, location);
} else {
errorQuda("Reconstruction type not supported");
}
} else if (out.Order() == QUDA_MILC_GAUGE_ORDER) {
updateGaugeField<Float>(gauge::MILCOrder<Float, Nc*Nc*2>(out),
gauge::MILCOrder<Float, Nc*Nc*2>(in),
mom, dt, conj_mom, exact, location);
} else {
errorQuda("Gauge Field order %d not supported", out.Order());
}
}
#endif
void updateGaugeField(GaugeField &out, double dt, const GaugeField& in,
const GaugeField& mom, bool conj_mom, bool exact)
{
#ifdef GPU_GAUGE_TOOLS
if (out.Precision() != in.Precision() || out.Precision() != mom.Precision())
errorQuda("Gauge and momentum fields must have matching precision");
if (out.Location() != in.Location() || out.Location() != mom.Location())
errorQuda("Gauge and momentum fields must have matching location");
if (out.Precision() == QUDA_DOUBLE_PRECISION) {
updateGaugeField<double>(out, in, mom, dt, conj_mom, exact, out.Location());
} else if (out.Precision() == QUDA_SINGLE_PRECISION) {
updateGaugeField<float>(out, in, mom, dt, conj_mom, exact, out.Location());
} else {
errorQuda("Precision %d not supported", out.Precision());
}
#else
errorQuda("Gauge tools are not build");
#endif
}
} // namespace quda
|
f8ba9a964919ace6191dac1c9ce696ac3d5cfc0c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM
#include "dragon/core/workspace.h"
#include "dragon/utils/device/common_cub.h"
#include "dragon/utils/device/common_thrust.h"
#include "dragon/utils/math/blas.h"
#include "dragon/utils/math/functional.h"
#include "dragon/utils/math/reduce.h"
#include "dragon/utils/math/utils.h"
namespace dragon {
namespace math {
namespace {
template <typename T, typename AccT, class Reducer>
__global__ void _RowwiseReduce(
const int rows,
const int cols,
const Reducer reducer,
const AccT init,
const AccT scale,
const T* x,
T* y) {
__shared__ typename BlockReduce<AccT>::TempStorage storage;
CUDA_2D_KERNEL_LOOP1(i, cols) {
AccT val = init;
CUDA_2D_KERNEL_LOOP2(j, rows) {
val = reducer(val, convert::To<AccT>(x[j * cols + i]));
}
val = BlockReduce<AccT>(storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
y[i] = convert::To<T>(val * scale);
}
}
}
template <typename T, typename AccT, class Reducer>
__global__ void _ColwiseReduce(
const int rows,
const int cols,
const Reducer reducer,
const AccT init,
const AccT scale,
const T* x,
T* y) {
__shared__ typename BlockReduce<AccT>::TempStorage storage;
CUDA_2D_KERNEL_LOOP1(i, rows) {
AccT val = init;
CUDA_2D_KERNEL_LOOP2(j, cols) {
val = reducer(val, convert::To<AccT>(x[i * cols + j]));
}
val = BlockReduce<AccT>(storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
y[i] = convert::To<T>(val * scale);
}
}
}
template <typename T, typename AccT, class Reducer, int D>
__global__ void _GenericReduce(
const int rows,
const int cols,
const int num_dims,
const SimpleArray<int, D> x_dims,
const SimpleArray<int, D> x_strides,
const Reducer reducer,
const AccT init,
const AccT scale,
const T* x,
T* y) {
__shared__ typename BlockReduce<AccT>::TempStorage storage;
CUDA_2D_KERNEL_LOOP1(i, rows) {
AccT val = init;
CUDA_2D_KERNEL_LOOP2(j, cols) {
int xi = 0, c = i * cols + j;
for (int d = num_dims - 1; d >= 0; --d) {
int r;
FIXED_DIVISOR_DIV_MOD(x_dims.data[d], c, &c, &r);
xi += r * x_strides.data[d];
}
val = reducer(val, convert::To<AccT>(x[xi]));
}
val = BlockReduce<AccT>(storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
y[i] = convert::To<T>(val * scale);
}
}
}
#define DEFINE_REDUCE_DISPATCHER(name) \
template <typename T, typename AccT, typename Reducer> \
void _Reduce##name( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const Reducer reducer, \
const AccT init, \
const AccT scale, \
const T* x, \
T* y, \
CUDAContext* ctx) { \
int rows, cols; \
vec32_t out_dims(dims, dims + num_dims); \
for (int i = 0; i < num_axes; ++i) { \
out_dims[axes[i]] = 1; \
} \
if (math::utils::IsRowwiseReduce( \
num_dims, dims, out_dims.data(), &rows, &cols)) { \
hipLaunchKernelGGL(( _RowwiseReduce), \
CUDA_2D_BLOCKS(cols), \
CUDA_THREADS, \
0, \
ctx->cuda_stream(), rows, cols, reducer, init, scale, x, y); \
return; \
} \
if (math::utils::IsColwiseReduce( \
num_dims, dims, out_dims.data(), &rows, &cols)) { \
hipLaunchKernelGGL(( _ColwiseReduce), \
CUDA_2D_BLOCKS(rows), \
CUDA_THREADS, \
0, \
ctx->cuda_stream(), rows, cols, reducer, init, scale, x, y); \
return; \
} \
CUDA_TENSOR_DIMS_CHECK(num_dims); \
SimpleArray<int, CUDA_TENSOR_MAX_DIMS> transpose_axes; \
SimpleArray<int, CUDA_TENSOR_MAX_DIMS> transpose_strides; \
SimpleArray<int, CUDA_TENSOR_MAX_DIMS> transpose_dims; \
math::utils::TransposeAxesForReduce( \
num_dims, num_axes, axes, transpose_axes.data); \
math::utils::ComputeTransposeStrides( \
num_dims, dims, transpose_axes.data, transpose_strides.data); \
rows = cols = 1; \
const int pivot = num_dims - num_axes; \
for (int i = 0; i < pivot; ++i) { \
rows *= dims[transpose_axes.data[i]]; \
} \
for (int i = pivot; i < num_dims; ++i) { \
cols *= dims[transpose_axes.data[i]]; \
} \
for (int i = 0; i < num_dims; ++i) { \
transpose_dims.data[i] = dims[transpose_axes.data[i]]; \
} \
hipLaunchKernelGGL(( _GenericReduce), \
CUDA_2D_BLOCKS(rows), \
CUDA_THREADS, \
0, \
ctx->cuda_stream(), \
rows, \
cols, \
num_dims, \
transpose_dims, \
transpose_strides, \
reducer, \
init, \
scale, \
x, \
y); \
}
DEFINE_REDUCE_DISPATCHER(Max);
DEFINE_REDUCE_DISPATCHER(Min);
DEFINE_REDUCE_DISPATCHER(Sum);
#undef DEFINE_REDUCE_DISPATCHER
} // namespace
/* ------------------- Launcher Separator ------------------- */
// Disable FP16 DeviceReduce.
// We found that FP16 accumulator drops too many small values in
// empirical experiments.
template <>
DRAGON_API void ReduceSum<float16, CUDAContext>(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const float scale,
const float16* x,
float16* y,
CUDAContext* ctx) {
// NB: Performance may drop in some cases.
_ReduceSum(
num_dims,
dims,
num_axes,
axes,
math::PlusFunctor<float>(),
0.f,
scale,
x,
y,
ctx);
}
#define DEFINE_KERNEL_LAUNCHER(name, T, AccT, Reducer, kInit) \
template <> \
DRAGON_API void Reduce##name<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const float scale, \
const T* x, \
T* y, \
CUDAContext* ctx) { \
const int count = \
std::accumulate(dims, dims + num_dims, 1, std::multiplies<int>()); \
if (num_dims == num_axes && count > 10000) { \
size_t ws_nbytes = 0; \
hipcub::DeviceReduce::Reduce( \
nullptr, \
ws_nbytes, \
x, \
y, \
count, \
Reducer<T>(), \
convert::To<T>(kInit), \
ctx->cuda_stream()); \
hipcub::DeviceReduce::Reduce( \
ctx->workspace()->data<CUDAContext>({ws_nbytes}, "data:1")[0], \
ws_nbytes, \
x, \
y, \
count, \
Reducer<T>(), \
convert::To<T>(kInit), \
ctx->cuda_stream()); \
math::Scale(1, scale, y, y, ctx); \
return; \
} \
_Reduce##name( \
num_dims, \
dims, \
num_axes, \
axes, \
Reducer<AccT>(), \
convert::To<AccT>(kInit), \
convert::To<AccT>(scale), \
x, \
y, \
ctx); \
}
DEFINE_KERNEL_LAUNCHER(
Max,
int8_t,
int8_t,
math::MaxFunctor,
std::numeric_limits<int8_t>::lowest());
DEFINE_KERNEL_LAUNCHER(
Max,
uint8_t,
uint8_t,
math::MaxFunctor,
std::numeric_limits<uint8_t>::lowest());
DEFINE_KERNEL_LAUNCHER(
Max,
int,
int,
math::MaxFunctor,
std::numeric_limits<int>::lowest());
DEFINE_KERNEL_LAUNCHER(
Max,
int64_t,
int64_t,
math::MaxFunctor,
std::numeric_limits<int64_t>::lowest());
DEFINE_KERNEL_LAUNCHER(
Max,
float16,
float,
math::MaxFunctor,
cub::Traits<half>::Lowest());
DEFINE_KERNEL_LAUNCHER(
Max,
float,
float,
math::MaxFunctor,
std::numeric_limits<float>::lowest());
DEFINE_KERNEL_LAUNCHER(
Max,
double,
double,
math::MaxFunctor,
std::numeric_limits<double>::lowest());
DEFINE_KERNEL_LAUNCHER(
Min,
int8_t,
int8_t,
math::MinFunctor,
std::numeric_limits<int8_t>::max());
DEFINE_KERNEL_LAUNCHER(
Min,
uint8_t,
uint8_t,
math::MinFunctor,
std::numeric_limits<uint8_t>::max());
DEFINE_KERNEL_LAUNCHER(
Min,
int,
int,
math::MinFunctor,
std::numeric_limits<int>::max());
DEFINE_KERNEL_LAUNCHER(
Min,
int64_t,
int64_t,
math::MinFunctor,
std::numeric_limits<int64_t>::max());
DEFINE_KERNEL_LAUNCHER(
Min,
float16,
float,
math::MinFunctor,
cub::Traits<half>::Max());
DEFINE_KERNEL_LAUNCHER(
Min,
float,
float,
math::MinFunctor,
std::numeric_limits<float>::max());
DEFINE_KERNEL_LAUNCHER(
Min,
double,
double,
math::MinFunctor,
std::numeric_limits<double>::max());
DEFINE_KERNEL_LAUNCHER(Sum, int8_t, int8_t, math::PlusFunctor, int8_t(0));
DEFINE_KERNEL_LAUNCHER(Sum, uint8_t, uint8_t, math::PlusFunctor, uint8_t(0));
DEFINE_KERNEL_LAUNCHER(Sum, int, int, math::PlusFunctor, int(0));
DEFINE_KERNEL_LAUNCHER(Sum, int64_t, int64_t, math::PlusFunctor, int64_t(0));
DEFINE_KERNEL_LAUNCHER(Sum, float, float, math::PlusFunctor, 0.f);
DEFINE_KERNEL_LAUNCHER(Sum, double, double, math::PlusFunctor, 0.);
#undef DEFINE_KERNEL_LAUNCHER
// Sum<T>(n, alpha, x, y, ctx): writes alpha * sum(x[0..n)) into y by
// dispatching to the generic ReduceSum over one flattened axis.
#define DEFINE_SUM_FUNC(T)                                                  \
  template <>                                                               \
  DRAGON_API void Sum<T, CUDAContext>(                                      \
      const int n, const float alpha, const T* x, T* y, CUDAContext* ctx) { \
    vec32_t dims = {n}, axes = {0};                                         \
    math::ReduceSum(1, dims.data(), 1, axes.data(), alpha, x, y, ctx);      \
  }
DEFINE_SUM_FUNC(int8_t);
DEFINE_SUM_FUNC(uint8_t);
DEFINE_SUM_FUNC(int);
DEFINE_SUM_FUNC(int64_t);
DEFINE_SUM_FUNC(float16);
DEFINE_SUM_FUNC(float);
DEFINE_SUM_FUNC(double);
#undef DEFINE_SUM_FUNC
// Sum<T>(n, alpha, x, ctx) -> T: scalar reduction via thrust::reduce on the
// context's stream; alpha is applied before the narrowing cast back to T.
// float16 has no scalar Sum overload in this list.
#define DEFINE_SUM_FUNC(T)                                                 \
  template <>                                                              \
  DRAGON_API T Sum<T, CUDAContext>(                                        \
      const int n, const float alpha, const T* x, CUDAContext* ctx) {      \
    auto policy = thrust::hip::par.on(ctx->cuda_stream());                 \
    auto val = thrust::reduce(policy, x, x + n) * alpha;                   \
    return static_cast<T>(val);                                            \
  }
DEFINE_SUM_FUNC(int8_t);
DEFINE_SUM_FUNC(uint8_t);
DEFINE_SUM_FUNC(int);
DEFINE_SUM_FUNC(int64_t);
DEFINE_SUM_FUNC(float);
DEFINE_SUM_FUNC(double);
#undef DEFINE_SUM_FUNC
} // namespace math
} // namespace dragon
#endif // USE_ROCM
| f8ba9a964919ace6191dac1c9ce696ac3d5cfc0c.cu | #ifdef USE_CUDA
#include "dragon/core/workspace.h"
#include "dragon/utils/device/common_cub.h"
#include "dragon/utils/device/common_thrust.h"
#include "dragon/utils/math/blas.h"
#include "dragon/utils/math/functional.h"
#include "dragon/utils/math/reduce.h"
#include "dragon/utils/math/utils.h"
namespace dragon {
namespace math {
namespace {
template <typename T, typename AccT, class Reducer>
__global__ void _RowwiseReduce(
const int rows,
const int cols,
const Reducer reducer,
const AccT init,
const AccT scale,
const T* x,
T* y) {
__shared__ typename BlockReduce<AccT>::TempStorage storage;
CUDA_2D_KERNEL_LOOP1(i, cols) {
AccT val = init;
CUDA_2D_KERNEL_LOOP2(j, rows) {
val = reducer(val, convert::To<AccT>(x[j * cols + i]));
}
val = BlockReduce<AccT>(storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
y[i] = convert::To<T>(val * scale);
}
}
}
// Reduces a row-major (rows x cols) matrix over its columns, producing one
// output per row: y[i] = scale * reduce_j(x[i * cols + j]).
// Mirrors _RowwiseReduce with the loop roles swapped.
template <typename T, typename AccT, class Reducer>
__global__ void _ColwiseReduce(
    const int rows,
    const int cols,
    const Reducer reducer,
    const AccT init,   // identity element of the reduction
    const AccT scale,  // applied once to the reduced value
    const T* x,
    T* y) {
  // Scratch space for the block-wide reduction.
  __shared__ typename BlockReduce<AccT>::TempStorage storage;
  CUDA_2D_KERNEL_LOOP1(i, rows) {
    AccT val = init;
    CUDA_2D_KERNEL_LOOP2(j, cols) {
      val = reducer(val, convert::To<AccT>(x[i * cols + j]));
    }
    val = BlockReduce<AccT>(storage).Reduce(val, reducer);
    // After BlockReduce, only thread 0 holds the complete result.
    if (threadIdx.x == 0) {
      y[i] = convert::To<T>(val * scale);
    }
  }
}
// Generic fallback reduction. The dispatcher logically transposes the input
// so that the reduced axes are innermost; each output element i reduces
// over `cols` consecutive logical positions, and the physical offset of
// each position is recovered from x_dims / x_strides by repeated div-mod.
template <typename T, typename AccT, class Reducer, int D>
__global__ void _GenericReduce(
    const int rows,
    const int cols,
    const int num_dims,
    const SimpleArray<int, D> x_dims,     // dims of the transposed view
    const SimpleArray<int, D> x_strides,  // strides into the original layout
    const Reducer reducer,
    const AccT init,
    const AccT scale,
    const T* x,
    T* y) {
  __shared__ typename BlockReduce<AccT>::TempStorage storage;
  CUDA_2D_KERNEL_LOOP1(i, rows) {
    AccT val = init;
    CUDA_2D_KERNEL_LOOP2(j, cols) {
      int xi = 0, c = i * cols + j;
      // Decode the linear index c into the physical offset xi,
      // innermost dimension first.
      for (int d = num_dims - 1; d >= 0; --d) {
        int r;
        FIXED_DIVISOR_DIV_MOD(x_dims.data[d], c, &c, &r);
        xi += r * x_strides.data[d];
      }
      val = reducer(val, convert::To<AccT>(x[xi]));
    }
    val = BlockReduce<AccT>(storage).Reduce(val, reducer);
    if (threadIdx.x == 0) {
      y[i] = convert::To<T>(val * scale);
    }
  }
}
// Dispatches a reduction over `axes` of a `num_dims`-dimensional tensor:
//   1. rowwise fast path when IsRowwiseReduce detects that layout,
//   2. colwise fast path when IsColwiseReduce detects that layout,
//   3. otherwise the generic kernel over a transposed view in which the
//      reduced axes are moved innermost (rows = kept extent, cols =
//      reduced extent).
#define DEFINE_REDUCE_DISPATCHER(name)                                       \
  template <typename T, typename AccT, typename Reducer>                     \
  void _Reduce##name(                                                        \
      const int num_dims,                                                    \
      const int* dims,                                                       \
      const int num_axes,                                                    \
      const int* axes,                                                       \
      const Reducer reducer,                                                 \
      const AccT init,                                                       \
      const AccT scale,                                                      \
      const T* x,                                                            \
      T* y,                                                                  \
      CUDAContext* ctx) {                                                    \
    int rows, cols;                                                          \
    vec32_t out_dims(dims, dims + num_dims);                                 \
    for (int i = 0; i < num_axes; ++i) {                                     \
      out_dims[axes[i]] = 1;                                                 \
    }                                                                        \
    if (math::utils::IsRowwiseReduce(                                        \
            num_dims, dims, out_dims.data(), &rows, &cols)) {                \
      _RowwiseReduce<<<                                                      \
          CUDA_2D_BLOCKS(cols),                                              \
          CUDA_THREADS,                                                      \
          0,                                                                 \
          ctx->cuda_stream()>>>(rows, cols, reducer, init, scale, x, y);     \
      return;                                                                \
    }                                                                        \
    if (math::utils::IsColwiseReduce(                                        \
            num_dims, dims, out_dims.data(), &rows, &cols)) {                \
      _ColwiseReduce<<<                                                      \
          CUDA_2D_BLOCKS(rows),                                              \
          CUDA_THREADS,                                                      \
          0,                                                                 \
          ctx->cuda_stream()>>>(rows, cols, reducer, init, scale, x, y);     \
      return;                                                                \
    }                                                                        \
    CUDA_TENSOR_DIMS_CHECK(num_dims);                                        \
    SimpleArray<int, CUDA_TENSOR_MAX_DIMS> transpose_axes;                   \
    SimpleArray<int, CUDA_TENSOR_MAX_DIMS> transpose_strides;                \
    SimpleArray<int, CUDA_TENSOR_MAX_DIMS> transpose_dims;                   \
    math::utils::TransposeAxesForReduce(                                     \
        num_dims, num_axes, axes, transpose_axes.data);                      \
    math::utils::ComputeTransposeStrides(                                    \
        num_dims, dims, transpose_axes.data, transpose_strides.data);        \
    rows = cols = 1;                                                         \
    const int pivot = num_dims - num_axes;                                   \
    for (int i = 0; i < pivot; ++i) {                                        \
      rows *= dims[transpose_axes.data[i]];                                  \
    }                                                                        \
    for (int i = pivot; i < num_dims; ++i) {                                 \
      cols *= dims[transpose_axes.data[i]];                                  \
    }                                                                        \
    for (int i = 0; i < num_dims; ++i) {                                     \
      transpose_dims.data[i] = dims[transpose_axes.data[i]];                 \
    }                                                                        \
    _GenericReduce<<<                                                        \
        CUDA_2D_BLOCKS(rows),                                                \
        CUDA_THREADS,                                                        \
        0,                                                                   \
        ctx->cuda_stream()>>>(                                               \
        rows,                                                                \
        cols,                                                                \
        num_dims,                                                            \
        transpose_dims,                                                      \
        transpose_strides,                                                   \
        reducer,                                                             \
        init,                                                                \
        scale,                                                               \
        x,                                                                   \
        y);                                                                  \
  }
DEFINE_REDUCE_DISPATCHER(Max);
DEFINE_REDUCE_DISPATCHER(Min);
DEFINE_REDUCE_DISPATCHER(Sum);
#undef DEFINE_REDUCE_DISPATCHER
} // namespace
/* ------------------- Launcher Separator ------------------- */
// Disable FP16 DeviceReduce.
// We found that FP16 accumulator drops too many small values in
// empirical experiments.
// ReduceSum for float16: always routes through the hand-written dispatcher
// with a float accumulator instead of the cub DeviceReduce fast path, to
// avoid the precision loss of an FP16 accumulator (see the comment above).
template <>
DRAGON_API void ReduceSum<float16, CUDAContext>(
    const int num_dims,
    const int* dims,
    const int num_axes,
    const int* axes,
    const float scale,
    const float16* x,
    float16* y,
    CUDAContext* ctx) {
  // NB: Performance may drop in some cases.
  _ReduceSum(
      num_dims,
      dims,
      num_axes,
      axes,
      math::PlusFunctor<float>(),
      0.f,
      scale,
      x,
      y,
      ctx);
}
// Defines Reduce<name><T>. For a full reduction (every dim reduced) over a
// large input (count > 10000) it uses cub::DeviceReduce with the standard
// two-phase call (size query, then the reduction into workspace storage),
// followed by an in-place Scale for the `scale` factor. Otherwise it falls
// back to the _Reduce<name> dispatcher with an AccT accumulator.
#define DEFINE_KERNEL_LAUNCHER(name, T, AccT, Reducer, kInit)             \
  template <>                                                             \
  DRAGON_API void Reduce##name<T, CUDAContext>(                           \
      const int num_dims,                                                 \
      const int* dims,                                                    \
      const int num_axes,                                                 \
      const int* axes,                                                    \
      const float scale,                                                  \
      const T* x,                                                         \
      T* y,                                                               \
      CUDAContext* ctx) {                                                 \
    const int count =                                                     \
        std::accumulate(dims, dims + num_dims, 1, std::multiplies<int>()); \
    if (num_dims == num_axes && count > 10000) {                          \
      size_t ws_nbytes = 0;                                               \
      cub::DeviceReduce::Reduce(                                          \
          nullptr,                                                        \
          ws_nbytes,                                                      \
          x,                                                              \
          y,                                                              \
          count,                                                          \
          Reducer<T>(),                                                   \
          convert::To<T>(kInit),                                          \
          ctx->cuda_stream());                                            \
      cub::DeviceReduce::Reduce(                                          \
          ctx->workspace()->data<CUDAContext>({ws_nbytes}, "data:1")[0],  \
          ws_nbytes,                                                      \
          x,                                                              \
          y,                                                              \
          count,                                                          \
          Reducer<T>(),                                                   \
          convert::To<T>(kInit),                                          \
          ctx->cuda_stream());                                            \
      math::Scale(1, scale, y, y, ctx);                                   \
      return;                                                             \
    }                                                                     \
    _Reduce##name(                                                        \
        num_dims,                                                         \
        dims,                                                             \
        num_axes,                                                         \
        axes,                                                             \
        Reducer<AccT>(),                                                  \
        convert::To<AccT>(kInit),                                         \
        convert::To<AccT>(scale),                                         \
        x,                                                                \
        y,                                                                \
        ctx);                                                             \
  }
// One launcher per (reduction, type) pair. Max/Min use the type's extreme
// value as the reduction identity; Sum uses zero. float16 accumulates in
// float and takes its identity from cub's half traits.
DEFINE_KERNEL_LAUNCHER(Max, int8_t, int8_t, math::MaxFunctor, std::numeric_limits<int8_t>::lowest());
DEFINE_KERNEL_LAUNCHER(Max, uint8_t, uint8_t, math::MaxFunctor, std::numeric_limits<uint8_t>::lowest());
DEFINE_KERNEL_LAUNCHER(Max, int, int, math::MaxFunctor, std::numeric_limits<int>::lowest());
DEFINE_KERNEL_LAUNCHER(Max, int64_t, int64_t, math::MaxFunctor, std::numeric_limits<int64_t>::lowest());
DEFINE_KERNEL_LAUNCHER(Max, float16, float, math::MaxFunctor, cub::Traits<half>::Lowest());
DEFINE_KERNEL_LAUNCHER(Max, float, float, math::MaxFunctor, std::numeric_limits<float>::lowest());
DEFINE_KERNEL_LAUNCHER(Max, double, double, math::MaxFunctor, std::numeric_limits<double>::lowest());
DEFINE_KERNEL_LAUNCHER(Min, int8_t, int8_t, math::MinFunctor, std::numeric_limits<int8_t>::max());
DEFINE_KERNEL_LAUNCHER(Min, uint8_t, uint8_t, math::MinFunctor, std::numeric_limits<uint8_t>::max());
DEFINE_KERNEL_LAUNCHER(Min, int, int, math::MinFunctor, std::numeric_limits<int>::max());
DEFINE_KERNEL_LAUNCHER(Min, int64_t, int64_t, math::MinFunctor, std::numeric_limits<int64_t>::max());
DEFINE_KERNEL_LAUNCHER(Min, float16, float, math::MinFunctor, cub::Traits<half>::Max());
DEFINE_KERNEL_LAUNCHER(Min, float, float, math::MinFunctor, std::numeric_limits<float>::max());
DEFINE_KERNEL_LAUNCHER(Min, double, double, math::MinFunctor, std::numeric_limits<double>::max());
DEFINE_KERNEL_LAUNCHER(Sum, int8_t, int8_t, math::PlusFunctor, int8_t(0));
DEFINE_KERNEL_LAUNCHER(Sum, uint8_t, uint8_t, math::PlusFunctor, uint8_t(0));
DEFINE_KERNEL_LAUNCHER(Sum, int, int, math::PlusFunctor, int(0));
DEFINE_KERNEL_LAUNCHER(Sum, int64_t, int64_t, math::PlusFunctor, int64_t(0));
DEFINE_KERNEL_LAUNCHER(Sum, float, float, math::PlusFunctor, 0.f);
DEFINE_KERNEL_LAUNCHER(Sum, double, double, math::PlusFunctor, 0.);
#undef DEFINE_KERNEL_LAUNCHER
// Sum<T>(n, alpha, x, y, ctx): writes alpha * sum(x[0..n)) into y by
// dispatching to the generic ReduceSum over one flattened axis.
#define DEFINE_SUM_FUNC(T)                                                  \
  template <>                                                               \
  DRAGON_API void Sum<T, CUDAContext>(                                      \
      const int n, const float alpha, const T* x, T* y, CUDAContext* ctx) { \
    vec32_t dims = {n}, axes = {0};                                         \
    math::ReduceSum(1, dims.data(), 1, axes.data(), alpha, x, y, ctx);      \
  }
DEFINE_SUM_FUNC(int8_t);
DEFINE_SUM_FUNC(uint8_t);
DEFINE_SUM_FUNC(int);
DEFINE_SUM_FUNC(int64_t);
DEFINE_SUM_FUNC(float16);
DEFINE_SUM_FUNC(float);
DEFINE_SUM_FUNC(double);
#undef DEFINE_SUM_FUNC
// Sum<T>(n, alpha, x, ctx) -> T: scalar reduction via thrust::reduce on the
// context's stream; alpha is applied before the narrowing cast back to T.
// float16 has no scalar Sum overload in this list.
#define DEFINE_SUM_FUNC(T)                                                 \
  template <>                                                              \
  DRAGON_API T Sum<T, CUDAContext>(                                        \
      const int n, const float alpha, const T* x, CUDAContext* ctx) {      \
    auto policy = thrust::cuda::par.on(ctx->cuda_stream());                \
    auto val = thrust::reduce(policy, x, x + n) * alpha;                   \
    return static_cast<T>(val);                                            \
  }
DEFINE_SUM_FUNC(int8_t);
DEFINE_SUM_FUNC(uint8_t);
DEFINE_SUM_FUNC(int);
DEFINE_SUM_FUNC(int64_t);
DEFINE_SUM_FUNC(float);
DEFINE_SUM_FUNC(double);
#undef DEFINE_SUM_FUNC
} // namespace math
} // namespace dragon
#endif // USE_CUDA
|
5024c8039c8564596591aa3404d49ba2367b6c24.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "matrix.h"
using namespace std;
/*
LIMITE 6144 double ou 49152 bytes 32 THREAD MAX
64 block max
*/
/* Computes one DIM x DIM partial product per z tile: each 8x8 block stages
 * one tile of A and one tile of B into dynamic shared memory (local_size
 * doubles each, passed at launch), multiplies them, and writes the partial
 * result into slice blockIdx.z of C. Assumes blockDim.x == blockDim.y. */
__global__ void multiply(double * A, double * B, double * C, int local_size)
{
    extern __shared__ double shared[];
    double * intern_A = &shared[0];
    double * intern_B = &shared[local_size];

    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    // Each thread stages one element of the A tile and one of the B tile.
    intern_A[threadIdx.y*blockDim.x + threadIdx.x] = A[row*DIM + threadIdx.x+blockDim.x*blockIdx.z];
    intern_B[threadIdx.y*blockDim.x + threadIdx.x] = B[(threadIdx.y+blockDim.y*blockIdx.z)*DIM + col];

    // BUG FIX: the original read the shared tiles without a barrier, racing
    // against the loads performed by the other threads of the block.
    __syncthreads();

    double result = 0.0;
    for(int i=0; i<blockDim.x; i++)
    {
        result += intern_A[threadIdx.y*blockDim.x + i]*intern_B[i*blockDim.x + threadIdx.x];
    }

    C[blockIdx.z*DIM*DIM + row*DIM + col] = result;
}
// Accumulates the n partial-product slices stacked along z in C into the
// first DIM x DIM slice. Each thread serially sums one (row, col) entry.
__global__ void sum(double * C, int n)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    for(int i=1;i<n;i++) C[row*DIM + col] += C[DIM*DIM*i+row*DIM + col];
}
/* Multiplies two dim x dim row-major matrices on the GPU: c = a * b.
 * Requires dim to be a multiple of 8 (the tile size) and DIM == dim.
 * Per-tile partial products are produced by multiply() and accumulated by
 * sum() into the first slice of d_c. */
void matrix_multiplication(double * a, double * b, double * c, int dim)
{
    int blocksNb = dim / 8;   // tiles per matrix dimension
    int threadsNb = 8;        // 8x8 threads per block
    int sharedMemory = threadsNb*threadsNb*sizeof(double)*2;  // A tile + B tile

    double * d_a; hipMalloc((void **) &d_a, sizeof(double)*dim*dim);
    double * d_b; hipMalloc((void **) &d_b, sizeof(double)*dim*dim);
    // d_c holds one partial product per z tile.
    double * d_c; hipMalloc((void **) &d_c, sizeof(double)*dim*dim*blocksNb);

    hipMemcpy(d_a, a, sizeof(double)*dim*dim, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, sizeof(double)*dim*dim, hipMemcpyHostToDevice);

    dim3 multiply_threadsPerBlock(threadsNb, threadsNb);
    dim3 multiply_blocksPerGrid(blocksNb, blocksNb, blocksNb);
    dim3 sum_threadsPerBlock(threadsNb, threadsNb);
    dim3 sum_blocksPerGrid(blocksNb, blocksNb);

    hipLaunchKernelGGL(( multiply), dim3(multiply_blocksPerGrid), dim3(multiply_threadsPerBlock), sharedMemory, 0, d_a, d_b, d_c, threadsNb*threadsNb);
    hipLaunchKernelGGL(( sum), dim3(sum_blocksPerGrid), dim3(sum_threadsPerBlock), 0, 0, d_c, blocksNb);

    // IMPROVEMENT: copy the accumulated result straight into the caller's
    // buffer; the original staged it through a redundant host scratch array
    // and then copied element by element.
    hipMemcpy(c, d_c, sizeof(double)*dim*dim, hipMemcpyDeviceToHost);

    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
}
| 5024c8039c8564596591aa3404d49ba2367b6c24.cu | #include "matrix.h"
using namespace std;
/*
LIMITE 6144 double ou 49152 bytes 32 THREAD MAX
64 block max
*/
/* Computes one DIM x DIM partial product per z tile: each 8x8 block stages
 * one tile of A and one tile of B into dynamic shared memory (local_size
 * doubles each, passed at launch), multiplies them, and writes the partial
 * result into slice blockIdx.z of C. Assumes blockDim.x == blockDim.y. */
__global__ void multiply(double * A, double * B, double * C, int local_size)
{
    extern __shared__ double shared[];
    double * intern_A = &shared[0];
    double * intern_B = &shared[local_size];

    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    // Each thread stages one element of the A tile and one of the B tile.
    intern_A[threadIdx.y*blockDim.x + threadIdx.x] = A[row*DIM + threadIdx.x+blockDim.x*blockIdx.z];
    intern_B[threadIdx.y*blockDim.x + threadIdx.x] = B[(threadIdx.y+blockDim.y*blockIdx.z)*DIM + col];

    // BUG FIX: the original read the shared tiles without a barrier, racing
    // against the loads performed by the other threads of the block.
    __syncthreads();

    double result = 0.0;
    for(int i=0; i<blockDim.x; i++)
    {
        result += intern_A[threadIdx.y*blockDim.x + i]*intern_B[i*blockDim.x + threadIdx.x];
    }

    C[blockIdx.z*DIM*DIM + row*DIM + col] = result;
}
// Accumulates the n partial-product slices stacked along z in C into the
// first DIM x DIM slice. Each thread serially sums one (row, col) entry.
__global__ void sum(double * C, int n)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    for(int i=1;i<n;i++) C[row*DIM + col] += C[DIM*DIM*i+row*DIM + col];
}
/* Multiplies two dim x dim row-major matrices on the GPU: c = a * b.
 * Requires dim to be a multiple of 8 (the tile size) and DIM == dim.
 * Per-tile partial products are produced by multiply() and accumulated by
 * sum() into the first slice of d_c. */
void matrix_multiplication(double * a, double * b, double * c, int dim)
{
    int blocksNb = dim / 8;   // tiles per matrix dimension
    int threadsNb = 8;        // 8x8 threads per block
    int sharedMemory = threadsNb*threadsNb*sizeof(double)*2;  // A tile + B tile

    double * d_a; cudaMalloc((void **) &d_a, sizeof(double)*dim*dim);
    double * d_b; cudaMalloc((void **) &d_b, sizeof(double)*dim*dim);
    // d_c holds one partial product per z tile.
    double * d_c; cudaMalloc((void **) &d_c, sizeof(double)*dim*dim*blocksNb);

    cudaMemcpy(d_a, a, sizeof(double)*dim*dim, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(double)*dim*dim, cudaMemcpyHostToDevice);

    dim3 multiply_threadsPerBlock(threadsNb, threadsNb);
    dim3 multiply_blocksPerGrid(blocksNb, blocksNb, blocksNb);
    dim3 sum_threadsPerBlock(threadsNb, threadsNb);
    dim3 sum_blocksPerGrid(blocksNb, blocksNb);

    multiply<<<multiply_blocksPerGrid, multiply_threadsPerBlock, sharedMemory>>>(d_a, d_b, d_c, threadsNb*threadsNb);
    sum<<<sum_blocksPerGrid, sum_threadsPerBlock>>>(d_c, blocksNb);

    // IMPROVEMENT: copy the accumulated result straight into the caller's
    // buffer; the original staged it through a redundant host scratch array
    // and then copied element by element.
    cudaMemcpy(c, d_c, sizeof(double)*dim*dim, cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
|
f4f74f08ee8b1635d0aadfc5b3532b19ac83761b.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2022-2023 by XGBoost Contributors
*/
#include <thrust/sort.h>
#include <cstdint> // std::int32_t
#include <hipcub/hipcub.hpp> // NOLINT
#include "../common/cuda_context.cuh" // CUDAContext
#include "../common/device_helpers.cuh"
#include "../common/stats.cuh"
#include "adaptive.h"
#include "xgboost/context.h"
namespace xgboost {
namespace obj {
namespace detail {
// Groups the training rows of one tree by the leaf they landed in.
// Outputs:
//   *p_ridx - row indices, sorted so that rows of the same leaf are adjacent,
//   *p_nptr - CSR-style offsets into *p_ridx, one segment per leaf,
//   *p_nidx - the leaf node id owning each segment.
// Rows with a negative position are treated as ignored and skipped
// (everything before beg_pos). If all rows are ignored, only the list of
// leaves is emitted and the offsets stay empty.
void EncodeTreeLeafDevice(Context const* ctx, common::Span<bst_node_t const> position,
                          dh::device_vector<size_t>* p_ridx, HostDeviceVector<size_t>* p_nptr,
                          HostDeviceVector<bst_node_t>* p_nidx, RegTree const& tree) {
  // copy position to buffer
  dh::safe_cuda(hipSetDevice(ctx->gpu_id));
  auto cuctx = ctx->CUDACtx();
  size_t n_samples = position.size();
  dh::device_vector<bst_node_t> sorted_position(position.size());
  dh::safe_cuda(hipMemcpyAsync(sorted_position.data().get(), position.data(),
                               position.size_bytes(), hipMemcpyDeviceToDevice, cuctx->Stream()));
  p_ridx->resize(position.size());
  dh::Iota(dh::ToSpan(*p_ridx));
  // sort row index according to node index
  thrust::stable_sort_by_key(cuctx->TP(), sorted_position.begin(),
                             sorted_position.begin() + n_samples, p_ridx->begin());
  // First index whose position is non-negative; negative means "ignored".
  size_t beg_pos =
      thrust::find_if(cuctx->CTP(), sorted_position.cbegin(), sorted_position.cend(),
                      [] XGBOOST_DEVICE(bst_node_t nidx) { return nidx >= 0; }) -
      sorted_position.cbegin();
  if (beg_pos == sorted_position.size()) {
    // Every row is ignored: emit only the leaf ids, no segments.
    auto& leaf = p_nidx->HostVector();
    tree.WalkTree([&](bst_node_t nidx) {
      if (tree[nidx].IsLeaf()) {
        leaf.push_back(nidx);
      }
      return true;
    });
    return;
  }
  size_t n_leaf = tree.GetNumLeaves();
  size_t max_n_unique = n_leaf;
  dh::caching_device_vector<size_t> counts_out(max_n_unique + 1, 0);
  auto d_counts_out = dh::ToSpan(counts_out).subspan(0, max_n_unique);
  auto d_num_runs_out = dh::ToSpan(counts_out).subspan(max_n_unique, 1);
  dh::caching_device_vector<bst_node_t> unique_out(max_n_unique, 0);
  auto d_unique_out = dh::ToSpan(unique_out);
  size_t nbytes{0};
  auto begin_it = sorted_position.begin() + beg_pos;
  // Run-length encode the sorted positions to get per-leaf sample counts;
  // standard two-phase cub call (temp-storage size query, then encode).
  dh::safe_cuda(hipcub::DeviceRunLengthEncode::Encode(
      nullptr, nbytes, begin_it, unique_out.data().get(), counts_out.data().get(),
      d_num_runs_out.data(), n_samples - beg_pos, ctx->CUDACtx()->Stream()));
  dh::TemporaryArray<char> temp(nbytes);
  dh::safe_cuda(hipcub::DeviceRunLengthEncode::Encode(
      temp.data().get(), nbytes, begin_it, unique_out.data().get(), counts_out.data().get(),
      d_num_runs_out.data(), n_samples - beg_pos, ctx->CUDACtx()->Stream()));
  // Copy the run count (and first unique id) back on a side stream that
  // waits on the compute stream, so the D2H copy overlaps the scan below.
  dh::PinnedMemory pinned_pool;
  auto pinned = pinned_pool.GetSpan<char>(sizeof(size_t) + sizeof(bst_node_t));
  dh::HIPStreamMasqueradingAsCUDA copy_stream;
  size_t* h_num_runs = reinterpret_cast<size_t*>(pinned.subspan(0, sizeof(size_t)).data());
  dh::CUDAEvent e;
  e.Record(cuctx->Stream());
  copy_stream.View().Wait(e);
  // flag for whether there's ignored position
  bst_node_t* h_first_unique =
      reinterpret_cast<bst_node_t*>(pinned.subspan(sizeof(size_t), sizeof(bst_node_t)).data());
  dh::safe_cuda(hipMemcpyAsync(h_num_runs, d_num_runs_out.data(), sizeof(size_t),
                               hipMemcpyDeviceToHost, copy_stream.View()));
  dh::safe_cuda(hipMemcpyAsync(h_first_unique, d_unique_out.data(), sizeof(bst_node_t),
                               hipMemcpyDeviceToHost, copy_stream.View()));
  /**
   * copy node index (leaf index)
   */
  auto& nidx = *p_nidx;
  auto& nptr = *p_nptr;
  nidx.SetDevice(ctx->gpu_id);
  nidx.Resize(n_leaf);
  auto d_node_idx = nidx.DeviceSpan();
  nptr.SetDevice(ctx->gpu_id);
  nptr.Resize(n_leaf + 1, 0);
  auto d_node_ptr = nptr.DeviceSpan();
  // Scatter the per-run counts into the offset array; entry 0 absorbs the
  // ignored prefix so the inclusive scan yields absolute offsets.
  dh::LaunchN(n_leaf, [=] XGBOOST_DEVICE(size_t i) {
    if (i >= d_num_runs_out[0]) {
      // d_num_runs_out <= max_n_unique
      // this omits all the leaf that are empty. A leaf can be empty when there's
      // missing data, which can be caused by sparse input and distributed training.
      return;
    }
    d_node_idx[i] = d_unique_out[i];
    d_node_ptr[i + 1] = d_counts_out[i];
    if (i == 0) {
      d_node_ptr[0] = beg_pos;
    }
  });
  thrust::inclusive_scan(cuctx->CTP(), dh::tbegin(d_node_ptr), dh::tend(d_node_ptr),
                         dh::tbegin(d_node_ptr));
  copy_stream.View().Sync();
  CHECK_GT(*h_num_runs, 0);
  CHECK_LE(*h_num_runs, n_leaf);
  if (*h_num_runs < n_leaf) {
    // shrink to omit the sampled nodes.
    nptr.Resize(*h_num_runs + 1);
    nidx.Resize(*h_num_runs);
    std::vector<bst_node_t> leaves;
    tree.WalkTree([&](bst_node_t nidx) {
      if (tree[nidx].IsLeaf()) {
        leaves.push_back(nidx);
      }
      return true;
    });
    CHECK_EQ(leaves.size(), n_leaf);
    // Fill all the leaves that don't have any sample. This is hacky and inefficient. An
    // alternative is to leave the objective to handle missing leaf, which is more messy
    // as we need to take other distributed workers into account.
    auto& h_nidx = nidx.HostVector();
    auto& h_nptr = nptr.HostVector();
    FillMissingLeaf(leaves, &h_nidx, &h_nptr);
    nidx.DevicePointer();
    nptr.DevicePointer();
  }
  CHECK_EQ(nidx.Size(), n_leaf);
  CHECK_EQ(nptr.Size(), n_leaf + 1);
}
// Recomputes each leaf's value as the alpha-quantile of the residuals
// (label - prediction) of the rows that landed in that leaf, optionally
// weighted by the instance weights, then writes them into *p_tree.
void UpdateTreeLeafDevice(Context const* ctx, common::Span<bst_node_t const> position,
                          std::int32_t group_idx, MetaInfo const& info,
                          HostDeviceVector<float> const& predt, float alpha, RegTree* p_tree) {
  dh::safe_cuda(hipSetDevice(ctx->gpu_id));
  dh::device_vector<size_t> ridx;
  HostDeviceVector<size_t> nptr;
  HostDeviceVector<bst_node_t> nidx;
  EncodeTreeLeafDevice(ctx, position, &ridx, &nptr, &nidx, *p_tree);
  if (nptr.Empty()) {
    std::vector<float> quantiles;
    // NOTE(review): there is no early return here, so when nptr is empty
    // execution falls through and UpdateLeafValues is invoked a second time
    // below over zero segments — confirm whether a `return;` was intended.
    UpdateLeafValues(&quantiles, nidx.ConstHostVector(), p_tree);
  }
  HostDeviceVector<float> quantiles;
  predt.SetDevice(ctx->gpu_id);
  // Predictions viewed as (num_row, num_group); select this group's column.
  auto d_predt = linalg::MakeTensorView(predt.ConstDeviceSpan(),
                                        {info.num_row_, predt.Size() / info.num_row_}, ctx->gpu_id);
  CHECK_LT(group_idx, d_predt.Shape(1));
  auto t_predt = d_predt.Slice(linalg::All(), group_idx);
  auto d_labels = info.labels.View(ctx->gpu_id).Slice(linalg::All(), IdxY(info, group_idx));
  auto d_row_index = dh::ToSpan(ridx);
  auto seg_beg = nptr.DevicePointer();
  auto seg_end = seg_beg + nptr.Size();
  // Residual of the i-th row in leaf order: label - prediction.
  auto val_beg = dh::MakeTransformIterator<float>(thrust::make_counting_iterator(0ul),
                                                  [=] XGBOOST_DEVICE(size_t i) {
                                                    float p = t_predt(d_row_index[i]);
                                                    auto y = d_labels(d_row_index[i]);
                                                    return y - p;
                                                  });
  CHECK_EQ(d_labels.Shape(0), position.size());
  auto val_end = val_beg + d_labels.Shape(0);
  CHECK_EQ(nidx.Size() + 1, nptr.Size());
  if (info.weights_.Empty()) {
    common::SegmentedQuantile(ctx, alpha, seg_beg, seg_end, val_beg, val_end, &quantiles);
  } else {
    info.weights_.SetDevice(ctx->gpu_id);
    auto d_weights = info.weights_.ConstDeviceSpan();
    CHECK_EQ(d_weights.size(), d_row_index.size());
    // Weights permuted into the same leaf order as the residuals.
    auto w_it = thrust::make_permutation_iterator(dh::tcbegin(d_weights), dh::tcbegin(d_row_index));
    common::SegmentedWeightedQuantile(ctx, alpha, seg_beg, seg_end, val_beg, val_end, w_it,
                                      w_it + d_weights.size(), &quantiles);
  }
  UpdateLeafValues(&quantiles.HostVector(), nidx.ConstHostVector(), p_tree);
}
} // namespace detail
} // namespace obj
} // namespace xgboost
| f4f74f08ee8b1635d0aadfc5b3532b19ac83761b.cu | /**
* Copyright 2022-2023 by XGBoost Contributors
*/
#include <thrust/sort.h>
#include <cstdint> // std::int32_t
#include <cub/cub.cuh> // NOLINT
#include "../common/cuda_context.cuh" // CUDAContext
#include "../common/device_helpers.cuh"
#include "../common/stats.cuh"
#include "adaptive.h"
#include "xgboost/context.h"
namespace xgboost {
namespace obj {
namespace detail {
// Groups the training rows of one tree by the leaf they landed in.
// Outputs:
//   *p_ridx - row indices, sorted so that rows of the same leaf are adjacent,
//   *p_nptr - CSR-style offsets into *p_ridx, one segment per leaf,
//   *p_nidx - the leaf node id owning each segment.
// Rows with a negative position are treated as ignored and skipped
// (everything before beg_pos). If all rows are ignored, only the list of
// leaves is emitted and the offsets stay empty.
void EncodeTreeLeafDevice(Context const* ctx, common::Span<bst_node_t const> position,
                          dh::device_vector<size_t>* p_ridx, HostDeviceVector<size_t>* p_nptr,
                          HostDeviceVector<bst_node_t>* p_nidx, RegTree const& tree) {
  // copy position to buffer
  dh::safe_cuda(cudaSetDevice(ctx->gpu_id));
  auto cuctx = ctx->CUDACtx();
  size_t n_samples = position.size();
  dh::device_vector<bst_node_t> sorted_position(position.size());
  dh::safe_cuda(cudaMemcpyAsync(sorted_position.data().get(), position.data(),
                                position.size_bytes(), cudaMemcpyDeviceToDevice, cuctx->Stream()));
  p_ridx->resize(position.size());
  dh::Iota(dh::ToSpan(*p_ridx));
  // sort row index according to node index
  thrust::stable_sort_by_key(cuctx->TP(), sorted_position.begin(),
                             sorted_position.begin() + n_samples, p_ridx->begin());
  // First index whose position is non-negative; negative means "ignored".
  size_t beg_pos =
      thrust::find_if(cuctx->CTP(), sorted_position.cbegin(), sorted_position.cend(),
                      [] XGBOOST_DEVICE(bst_node_t nidx) { return nidx >= 0; }) -
      sorted_position.cbegin();
  if (beg_pos == sorted_position.size()) {
    // Every row is ignored: emit only the leaf ids, no segments.
    auto& leaf = p_nidx->HostVector();
    tree.WalkTree([&](bst_node_t nidx) {
      if (tree[nidx].IsLeaf()) {
        leaf.push_back(nidx);
      }
      return true;
    });
    return;
  }
  size_t n_leaf = tree.GetNumLeaves();
  size_t max_n_unique = n_leaf;
  dh::caching_device_vector<size_t> counts_out(max_n_unique + 1, 0);
  auto d_counts_out = dh::ToSpan(counts_out).subspan(0, max_n_unique);
  auto d_num_runs_out = dh::ToSpan(counts_out).subspan(max_n_unique, 1);
  dh::caching_device_vector<bst_node_t> unique_out(max_n_unique, 0);
  auto d_unique_out = dh::ToSpan(unique_out);
  size_t nbytes{0};
  auto begin_it = sorted_position.begin() + beg_pos;
  // Run-length encode the sorted positions to get per-leaf sample counts;
  // standard two-phase cub call (temp-storage size query, then encode).
  dh::safe_cuda(cub::DeviceRunLengthEncode::Encode(
      nullptr, nbytes, begin_it, unique_out.data().get(), counts_out.data().get(),
      d_num_runs_out.data(), n_samples - beg_pos, ctx->CUDACtx()->Stream()));
  dh::TemporaryArray<char> temp(nbytes);
  dh::safe_cuda(cub::DeviceRunLengthEncode::Encode(
      temp.data().get(), nbytes, begin_it, unique_out.data().get(), counts_out.data().get(),
      d_num_runs_out.data(), n_samples - beg_pos, ctx->CUDACtx()->Stream()));
  // Copy the run count (and first unique id) back on a side stream that
  // waits on the compute stream, so the D2H copy overlaps the scan below.
  dh::PinnedMemory pinned_pool;
  auto pinned = pinned_pool.GetSpan<char>(sizeof(size_t) + sizeof(bst_node_t));
  dh::CUDAStream copy_stream;
  size_t* h_num_runs = reinterpret_cast<size_t*>(pinned.subspan(0, sizeof(size_t)).data());
  dh::CUDAEvent e;
  e.Record(cuctx->Stream());
  copy_stream.View().Wait(e);
  // flag for whether there's ignored position
  bst_node_t* h_first_unique =
      reinterpret_cast<bst_node_t*>(pinned.subspan(sizeof(size_t), sizeof(bst_node_t)).data());
  dh::safe_cuda(cudaMemcpyAsync(h_num_runs, d_num_runs_out.data(), sizeof(size_t),
                                cudaMemcpyDeviceToHost, copy_stream.View()));
  dh::safe_cuda(cudaMemcpyAsync(h_first_unique, d_unique_out.data(), sizeof(bst_node_t),
                                cudaMemcpyDeviceToHost, copy_stream.View()));
  /**
   * copy node index (leaf index)
   */
  auto& nidx = *p_nidx;
  auto& nptr = *p_nptr;
  nidx.SetDevice(ctx->gpu_id);
  nidx.Resize(n_leaf);
  auto d_node_idx = nidx.DeviceSpan();
  nptr.SetDevice(ctx->gpu_id);
  nptr.Resize(n_leaf + 1, 0);
  auto d_node_ptr = nptr.DeviceSpan();
  // Scatter the per-run counts into the offset array; entry 0 absorbs the
  // ignored prefix so the inclusive scan yields absolute offsets.
  dh::LaunchN(n_leaf, [=] XGBOOST_DEVICE(size_t i) {
    if (i >= d_num_runs_out[0]) {
      // d_num_runs_out <= max_n_unique
      // this omits all the leaf that are empty. A leaf can be empty when there's
      // missing data, which can be caused by sparse input and distributed training.
      return;
    }
    d_node_idx[i] = d_unique_out[i];
    d_node_ptr[i + 1] = d_counts_out[i];
    if (i == 0) {
      d_node_ptr[0] = beg_pos;
    }
  });
  thrust::inclusive_scan(cuctx->CTP(), dh::tbegin(d_node_ptr), dh::tend(d_node_ptr),
                         dh::tbegin(d_node_ptr));
  copy_stream.View().Sync();
  CHECK_GT(*h_num_runs, 0);
  CHECK_LE(*h_num_runs, n_leaf);
  if (*h_num_runs < n_leaf) {
    // shrink to omit the sampled nodes.
    nptr.Resize(*h_num_runs + 1);
    nidx.Resize(*h_num_runs);
    std::vector<bst_node_t> leaves;
    tree.WalkTree([&](bst_node_t nidx) {
      if (tree[nidx].IsLeaf()) {
        leaves.push_back(nidx);
      }
      return true;
    });
    CHECK_EQ(leaves.size(), n_leaf);
    // Fill all the leaves that don't have any sample. This is hacky and inefficient. An
    // alternative is to leave the objective to handle missing leaf, which is more messy
    // as we need to take other distributed workers into account.
    auto& h_nidx = nidx.HostVector();
    auto& h_nptr = nptr.HostVector();
    FillMissingLeaf(leaves, &h_nidx, &h_nptr);
    nidx.DevicePointer();
    nptr.DevicePointer();
  }
  CHECK_EQ(nidx.Size(), n_leaf);
  CHECK_EQ(nptr.Size(), n_leaf + 1);
}
// Recomputes each leaf's value as the alpha-quantile of the residuals
// (label - prediction) of the rows that landed in that leaf, optionally
// weighted by the instance weights, then writes them into *p_tree.
void UpdateTreeLeafDevice(Context const* ctx, common::Span<bst_node_t const> position,
                          std::int32_t group_idx, MetaInfo const& info,
                          HostDeviceVector<float> const& predt, float alpha, RegTree* p_tree) {
  dh::safe_cuda(cudaSetDevice(ctx->gpu_id));
  dh::device_vector<size_t> ridx;
  HostDeviceVector<size_t> nptr;
  HostDeviceVector<bst_node_t> nidx;
  EncodeTreeLeafDevice(ctx, position, &ridx, &nptr, &nidx, *p_tree);
  if (nptr.Empty()) {
    std::vector<float> quantiles;
    // NOTE(review): there is no early return here, so when nptr is empty
    // execution falls through and UpdateLeafValues is invoked a second time
    // below over zero segments — confirm whether a `return;` was intended.
    UpdateLeafValues(&quantiles, nidx.ConstHostVector(), p_tree);
  }
  HostDeviceVector<float> quantiles;
  predt.SetDevice(ctx->gpu_id);
  // Predictions viewed as (num_row, num_group); select this group's column.
  auto d_predt = linalg::MakeTensorView(predt.ConstDeviceSpan(),
                                        {info.num_row_, predt.Size() / info.num_row_}, ctx->gpu_id);
  CHECK_LT(group_idx, d_predt.Shape(1));
  auto t_predt = d_predt.Slice(linalg::All(), group_idx);
  auto d_labels = info.labels.View(ctx->gpu_id).Slice(linalg::All(), IdxY(info, group_idx));
  auto d_row_index = dh::ToSpan(ridx);
  auto seg_beg = nptr.DevicePointer();
  auto seg_end = seg_beg + nptr.Size();
  // Residual of the i-th row in leaf order: label - prediction.
  auto val_beg = dh::MakeTransformIterator<float>(thrust::make_counting_iterator(0ul),
                                                  [=] XGBOOST_DEVICE(size_t i) {
                                                    float p = t_predt(d_row_index[i]);
                                                    auto y = d_labels(d_row_index[i]);
                                                    return y - p;
                                                  });
  CHECK_EQ(d_labels.Shape(0), position.size());
  auto val_end = val_beg + d_labels.Shape(0);
  CHECK_EQ(nidx.Size() + 1, nptr.Size());
  if (info.weights_.Empty()) {
    common::SegmentedQuantile(ctx, alpha, seg_beg, seg_end, val_beg, val_end, &quantiles);
  } else {
    info.weights_.SetDevice(ctx->gpu_id);
    auto d_weights = info.weights_.ConstDeviceSpan();
    CHECK_EQ(d_weights.size(), d_row_index.size());
    // Weights permuted into the same leaf order as the residuals.
    auto w_it = thrust::make_permutation_iterator(dh::tcbegin(d_weights), dh::tcbegin(d_row_index));
    common::SegmentedWeightedQuantile(ctx, alpha, seg_beg, seg_end, val_beg, val_end, w_it,
                                      w_it + d_weights.size(), &quantiles);
  }
  UpdateLeafValues(&quantiles.HostVector(), nidx.ConstHostVector(), p_tree);
}
} // namespace obj
} // namespace xgboost
|
60a6188aa01c10dcae10cc053ae9e4ce35700fe3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
// Host input vectors.
float *h_a;
float *h_b;
// Host output vector.
float *h_c;
// Device input vectors.
float *d_a;
float *d_b;
// Device output vector.
float *d_c;
// Size of arrays.
int n = 0;
/* CUDA kernel. Each thread takes care of one element of c. */
/* CUDA kernel: element-wise c[i] = a[i] + b[i] for i in [0, n).
 * GENERALIZATION: compute a global thread index instead of threadIdx.x
 * alone, so the kernel is correct for any grid configuration. The original
 * silently processed only block 0, i.e. it required a single-block launch.
 * Behavior under the file's <<<1, n>>> launch is unchanged (blockIdx.x == 0). */
__global__ void vecAdd(float *a, float *b, float *c, int n) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
        c[id] = a[id] + b[id];
}
/* Fill the host input vectors: h_a[i] = sin^2(i) and h_b[i] = cos^2(i),
 * so every h_a[i] + h_b[i] equals 1.0f by the Pythagorean identity. */
void init_array() {
    fprintf(stdout, "Inicializando os arrays.\n");
    for (int i = 0; i < n; ++i) {
        float s = sinf(i);
        float c = cosf(i);
        h_a[i] = s * s;
        h_b[i] = c * c;
    }
}
/* Dump every element of the host result vector h_c to stdout. */
void print_array() {
    printf("Imprimindo o Resultado.\n");
    for (int i = 0; i < n; ++i)
        fprintf(stdout, "h_c[%07d]: %f\n", i, h_c[i]);
}
/* Sanity check: sums the result array and prints sum and sum/n.
 * Since each element should be 1.0f, the second value should print 1. */
void check_result(){
    fprintf(stdout, "Verificando o Resultado.\n");
    float sum = 0;
    for (int i = 0; i < n; ++i)
        sum += h_c[i];
    fprintf(stdout, "Resultado Final: (%f, %f)\n", sum, (float)(sum / (float)n));
}
/* Main code: allocates host/device vectors, runs c = a + b on the GPU and
 * verifies the result (each element should be exactly 1.0f).
 * NOTE: the kernel is launched with a single block of n threads, so n is
 * limited by the device's max threads per block (typically 1024). */
int main(int argc, char *argv[]) {
    if(argc < 2){
        printf("Uso: %s <n>\n", argv[0]);
        exit(0);
    }
    // Size of vectors
    n = atoi(argv[1]);
    printf("Nmero de Elementos: %d\n", n);
    // Size, in bytes, of each vector
    size_t bytes = n * sizeof(float);
    /* BUG FIX: 3 * bytes is a size_t; printing it with %d is undefined
     * behaviour (and truncates on LP64). Use %zu instead. */
    printf("Memria que ser alocada para os 3 arrays: %zu\n", 3 * bytes);
    printf("Allocate memory for each vector on host\n");
    // Allocate memory for each vector on host
    h_a = (float *)malloc(bytes);
    h_b = (float *)malloc(bytes);
    h_c = (float *)malloc(bytes);
    printf("Allocate memory for each vector on GPU\n");
    // Allocate memory for each vector on GPU
    hipMalloc(&d_a, bytes);
    hipMalloc(&d_b, bytes);
    hipMalloc(&d_c, bytes);
    printf("Initialize vectors on host\n");
    init_array();
    printf("Copy host vectors to device\n");
    // Copy host vectors to device
    hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
    printf("Execute the kernel\n");
    hipLaunchKernelGGL(( vecAdd) , dim3(1),dim3(n), 0, 0, d_a, d_b, d_c, n);
    printf("Copy array back to host\n");
    // Copy array back to host (hipMemcpy synchronizes with the default stream)
    hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost);
    print_array();
    check_result();
    // Release device memory
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    // Release host memory
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
| 60a6188aa01c10dcae10cc053ae9e4ce35700fe3.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
// Host input vectors.
float *h_a;
float *h_b;
// Host output vector.
float *h_c;
// Device input vectors.
float *d_a;
float *d_b;
// Device output vector.
float *d_c;
// Size of arrays.
int n = 0;
/* CUDA kernel. Each thread takes care of one element of c. */
/* CUDA kernel: element-wise c[i] = a[i] + b[i] for i in [0, n).
 * GENERALIZATION: compute a global thread index instead of threadIdx.x
 * alone, so the kernel is correct for any grid configuration. The original
 * silently processed only block 0, i.e. it required a single-block launch.
 * Behavior under the file's <<<1, n>>> launch is unchanged (blockIdx.x == 0). */
__global__ void vecAdd(float *a, float *b, float *c, int n) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
        c[id] = a[id] + b[id];
}
/* Fill the host input vectors: h_a[i] = sin^2(i) and h_b[i] = cos^2(i),
 * so every h_a[i] + h_b[i] equals 1.0f by the Pythagorean identity. */
void init_array() {
    fprintf(stdout, "Inicializando os arrays.\n");
    for (int i = 0; i < n; ++i) {
        float s = sinf(i);
        float c = cosf(i);
        h_a[i] = s * s;
        h_b[i] = c * c;
    }
}
/* Dump every element of the host result vector h_c to stdout. */
void print_array() {
    printf("Imprimindo o Resultado.\n");
    for (int i = 0; i < n; ++i)
        fprintf(stdout, "h_c[%07d]: %f\n", i, h_c[i]);
}
/* Sanity check: sums the result array and prints sum and sum/n.
 * Since each element should be 1.0f, the second value should print 1. */
void check_result(){
    fprintf(stdout, "Verificando o Resultado.\n");
    float sum = 0;
    for (int i = 0; i < n; ++i)
        sum += h_c[i];
    fprintf(stdout, "Resultado Final: (%f, %f)\n", sum, (float)(sum / (float)n));
}
/* Main code: allocates host/device vectors, runs c = a + b on the GPU and
 * verifies the result (each element should be exactly 1.0f).
 * NOTE: the kernel is launched with a single block of n threads, so n is
 * limited by the device's max threads per block (typically 1024). */
int main(int argc, char *argv[]) {
    if(argc < 2){
        printf("Uso: %s <n>\n", argv[0]);
        exit(0);
    }
    // Size of vectors
    n = atoi(argv[1]);
    printf("Número de Elementos: %d\n", n);
    // Size, in bytes, of each vector
    size_t bytes = n * sizeof(float);
    /* BUG FIX: 3 * bytes is a size_t; printing it with %d is undefined
     * behaviour (and truncates on LP64). Use %zu instead. */
    printf("Memória que será alocada para os 3 arrays: %zu\n", 3 * bytes);
    printf("Allocate memory for each vector on host\n");
    // Allocate memory for each vector on host
    h_a = (float *)malloc(bytes);
    h_b = (float *)malloc(bytes);
    h_c = (float *)malloc(bytes);
    printf("Allocate memory for each vector on GPU\n");
    // Allocate memory for each vector on GPU
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);
    printf("Initialize vectors on host\n");
    init_array();
    printf("Copy host vectors to device\n");
    // Copy host vectors to device
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
    printf("Execute the kernel\n");
    vecAdd <<<1,n>>> (d_a, d_b, d_c, n);
    printf("Copy array back to host\n");
    // Copy array back to host (cudaMemcpy synchronizes with the default stream)
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
    print_array();
    check_result();
    // Release device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // Release host memory
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
|
0a0862b3508657d4996a432deb8235cbe56871fe.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <openacc.h>
#define IPMACC_MAX1(A) (A)
#define IPMACC_MAX2(A,B) (A>B?A:B)
#define IPMACC_MAX3(A,B,C) (A>B?(A>C?A:(B>C?B:C)):(B>C?C:B))
#ifdef __cplusplus
#include "openacc_container.h"
#endif
#include <hip/hip_runtime.h>
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include "../../common/polybenchUtilFuncts.h"
#define ERROR_THRESHOLD 0.05
#define GPU_DEVICE 1
#define N 8192
typedef float DATA_TYPE;
// Populate the two input vectors with deterministic test data:
// A ramps up from 0 in steps of 0.5; B ramps down from (N-1)/3 to 0.
void init(DATA_TYPE * A, DATA_TYPE * B)
{
for (int k = 0; k < N; k++) {
A [k] = k / 2.0;
B [k] = ((N - 1) - k) / 3.0;
}
}
__global__ void __generated_kernel_region_0(DATA_TYPE * A,DATA_TYPE * B,DATA_TYPE * C);
// IPMACC-generated OpenACC offload wrapper: stages A, B, C onto the device,
// launches __generated_kernel_region_0 (one thread per element, 256 threads
// per block), synchronizes, and copies the buffers back to the host.
void GPU__vec_mult(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C)
{
int i;
// NOTE(review): the generated transfer sizes use (8191+0) elements while the
// arrays hold N == 8192 — looks one element short; confirm against IPMACC.
ipmacc_prompt((char*)"IPMACC: memory allocation C\n");
acc_present_or_create((void*)C,(8191+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory allocation B\n");
acc_present_or_create((void*)B,(8191+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory allocation A\n");
acc_present_or_create((void*)A,(8191+0)*sizeof(DATA_TYPE ));
// Copy host data into the device copies created above (no-op if present).
ipmacc_prompt((char*)"IPMACC: memory copyin C\n");
acc_pcopyin((void*)C,(8191+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyin B\n");
acc_pcopyin((void*)B,(8191+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyin A\n");
acc_pcopyin((void*)A,(8191+0)*sizeof(DATA_TYPE ));
{
/* kernel call statement [0, -1]*/
{
// Grid size is ceil(N / 256); the generated expression encodes that ceil-div.
if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 0 > gridDim: %d\tblockDim: %d\n",(((abs((int)((N))-(0+0)))/(1)))/256+(((((abs((int)((N))-(0+0)))/(1)))%(256))==0?0:1),256);hipLaunchKernelGGL((
__generated_kernel_region_0), dim3((((abs((int)((N))-(0+0)))/(1)))/256+(((((abs((int)((N))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0,
(DATA_TYPE *)acc_deviceptr((void*)A),
(DATA_TYPE *)acc_deviceptr((void*)B),
(DATA_TYPE *)acc_deviceptr((void*)C));
}
/* kernel call statement*/
if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n");
{
// Block until the kernel finishes; asynchronous execution errors surface here.
hipError_t err=hipDeviceSynchronize();
if(err!=hipSuccess){
printf("Kernel Launch Error! error code (%d)\n",err);
assert(0&&"Launch Failure!\n");}
}
}
// Copy results back to the host while keeping the device copies alive.
ipmacc_prompt((char*)"IPMACC: memory copyout C\n");
acc_copyout_and_keep((void*)C,(8191+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyout B\n");
acc_copyout_and_keep((void*)B,(8191+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyout A\n");
acc_copyout_and_keep((void*)A,(8191+0)*sizeof(DATA_TYPE ));
}
// Reference (host) implementation: elementwise C[i] = A[i] * B[i].
void CPU__vec_mult(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C)
{
for (int k = 0; k < N; k++)
C [k] = A [k] * B [k];
}
// Count elements whose CPU/GPU percent difference exceeds ERROR_THRESHOLD
// and report how many mismatched.
void compareResults(DATA_TYPE* B, DATA_TYPE* B_GPU)
{
int mismatches = 0;
for (int k = 0; k < N; k++) {
if (percentDiff(B [k], B_GPU [k]) > ERROR_THRESHOLD)
mismatches++;
}
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", ERROR_THRESHOLD, mismatches);
}
/* Allocate the four vectors, run the reference (CPU) and the offloaded (GPU)
 * elementwise multiply, time both, and compare the results.
 * Bug fix: the two runtime labels were swapped — the first interval times
 * CPU__vec_mult but printed "GPU Runtime", and vice versa. */
int main(int argc, char *argv[])
{
double t_start, t_end, t_start_OMP, t_end_OMP;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* C;
DATA_TYPE* C_GPU;
A = (DATA_TYPE*)malloc(N * sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc(N * sizeof(DATA_TYPE));
C = (DATA_TYPE*)malloc(N * sizeof(DATA_TYPE));
C_GPU = (DATA_TYPE*)malloc(N * sizeof(DATA_TYPE));
fprintf(stdout, ">> Two vector multiplication <<\n");
init(A, B);
t_start_OMP = rtclock();
CPU__vec_mult(A, B, C_GPU);
t_end_OMP = rtclock();
// This interval timed the CPU path; label corrected accordingly.
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end_OMP - t_start_OMP);
t_start = rtclock();
GPU__vec_mult(A, B, C);
t_end = rtclock();
// This interval timed the GPU path; label corrected accordingly.
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(C, C_GPU);
free(A);
free(B);
free(C);
free(C_GPU);
return 0;
}
// Auto-generated (IPMACC) kernel for the parallel loop in GPU__vec_mult:
// one thread per element, elementwise C[i] = A[i] * B[i].
__global__ void __generated_kernel_region_0(DATA_TYPE * A,DATA_TYPE * B,DATA_TYPE * C){
int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x;
// The y/z ids are emitted by the generator but unused by this 1-D kernel.
int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y;
int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z;
int i;
{
{
{
// Map the global thread id onto the loop index; guard the ragged tail.
i=0+(__kernel_getuid_x);
if( i < N)
{
C [i] = A [i] * B [i];
}
}
}
}
//append writeback of scalar variables
}
| 0a0862b3508657d4996a432deb8235cbe56871fe.cu | #include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <openacc.h>
#define IPMACC_MAX1(A) (A)
#define IPMACC_MAX2(A,B) (A>B?A:B)
#define IPMACC_MAX3(A,B,C) (A>B?(A>C?A:(B>C?B:C)):(B>C?C:B))
#ifdef __cplusplus
#include "openacc_container.h"
#endif
#include <cuda.h>
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include "../../common/polybenchUtilFuncts.h"
#define ERROR_THRESHOLD 0.05
#define GPU_DEVICE 1
#define N 8192
typedef float DATA_TYPE;
void init(DATA_TYPE * A, DATA_TYPE * B)
{
int i;
for (i = 0; i < N; i++) {
A [i] = i / 2.0;
B [i] = ((N - 1) - i) / 3.0;
}
return;
}
__global__ void __generated_kernel_region_0(DATA_TYPE * A,DATA_TYPE * B,DATA_TYPE * C);
void GPU__vec_mult(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C)
{
int i;
ipmacc_prompt((char*)"IPMACC: memory allocation C\n");
acc_present_or_create((void*)C,(8191+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory allocation B\n");
acc_present_or_create((void*)B,(8191+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory allocation A\n");
acc_present_or_create((void*)A,(8191+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyin C\n");
acc_pcopyin((void*)C,(8191+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyin B\n");
acc_pcopyin((void*)B,(8191+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyin A\n");
acc_pcopyin((void*)A,(8191+0)*sizeof(DATA_TYPE ));
{
/* kernel call statement [0, -1]*/
{
if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 0 > gridDim: %d\tblockDim: %d\n",(((abs((int)((N))-(0+0)))/(1)))/256+(((((abs((int)((N))-(0+0)))/(1)))%(256))==0?0:1),256);
__generated_kernel_region_0<<<(((abs((int)((N))-(0+0)))/(1)))/256+(((((abs((int)((N))-(0+0)))/(1)))%(256))==0?0:1),256>>>(
(DATA_TYPE *)acc_deviceptr((void*)A),
(DATA_TYPE *)acc_deviceptr((void*)B),
(DATA_TYPE *)acc_deviceptr((void*)C));
}
/* kernel call statement*/
if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n");
{
cudaError err=cudaDeviceSynchronize();
if(err!=cudaSuccess){
printf("Kernel Launch Error! error code (%d)\n",err);
assert(0&&"Launch Failure!\n");}
}
}
ipmacc_prompt((char*)"IPMACC: memory copyout C\n");
acc_copyout_and_keep((void*)C,(8191+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyout B\n");
acc_copyout_and_keep((void*)B,(8191+0)*sizeof(DATA_TYPE ));
ipmacc_prompt((char*)"IPMACC: memory copyout A\n");
acc_copyout_and_keep((void*)A,(8191+0)*sizeof(DATA_TYPE ));
}
void CPU__vec_mult(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C)
{
int i;
for (i = 0; i < N; i++) {
C [i] = A [i] * B [i];
}
}
void compareResults(DATA_TYPE* B, DATA_TYPE* B_GPU)
{
int i, fail;
fail = 0;
for (i = 0; i < N; i++) {
if (percentDiff(B [i], B_GPU [i]) > ERROR_THRESHOLD) {
fail++;
}
}
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", ERROR_THRESHOLD, fail);
}
/* Allocate the four vectors, run the reference (CPU) and the offloaded (GPU)
 * elementwise multiply, time both, and compare the results.
 * Bug fix: the two runtime labels were swapped — the first interval times
 * CPU__vec_mult but printed "GPU Runtime", and vice versa. */
int main(int argc, char *argv[])
{
double t_start, t_end, t_start_OMP, t_end_OMP;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* C;
DATA_TYPE* C_GPU;
A = (DATA_TYPE*)malloc(N * sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc(N * sizeof(DATA_TYPE));
C = (DATA_TYPE*)malloc(N * sizeof(DATA_TYPE));
C_GPU = (DATA_TYPE*)malloc(N * sizeof(DATA_TYPE));
fprintf(stdout, ">> Two vector multiplication <<\n");
init(A, B);
t_start_OMP = rtclock();
CPU__vec_mult(A, B, C_GPU);
t_end_OMP = rtclock();
// This interval timed the CPU path; label corrected accordingly.
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end_OMP - t_start_OMP);
t_start = rtclock();
GPU__vec_mult(A, B, C);
t_end = rtclock();
// This interval timed the GPU path; label corrected accordingly.
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(C, C_GPU);
free(A);
free(B);
free(C);
free(C_GPU);
return 0;
}
__global__ void __generated_kernel_region_0(DATA_TYPE * A,DATA_TYPE * B,DATA_TYPE * C){
int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x;
int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y;
int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z;
int i;
{
{
{
i=0+(__kernel_getuid_x);
if( i < N)
{
C [i] = A [i] * B [i];
}
}
}
}
//append writeback of scalar variables
}
|
bbc3508afca7adacadaba3de30316affa835b0a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <limits>
#include <ATen/ATen.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/core/Array.h>
#include <ATen/hip/cub.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/SortUtils.cuh>
#include <ATen/native/hip/SortingCommon.cuh>
namespace at { namespace native {
// Decide whether the in-place bitonic path (sortKeyValueInplace) applies:
// it only supports sort slices of at most 1024 elements for 8-byte scalar
// types (kLong/kDouble) and at most 2048 elements for everything else.
bool should_use_small_sort(const Tensor &self, int64_t dim) {
  // Removed an unused local (`int64_t ndim = self.dim();`) from the original.
  int64_t nsort = self.sizes()[dim];
  int64_t threshold;
  if (self.scalar_type() == kLong || self.scalar_type() == kDouble) {
    threshold = 1024;
  } else {
    threshold = 2048;
  }
  return nsort <= threshold;
}
std::vector<int64_t> infer_dense_strides_dim_last(const Tensor & self, int64_t dim);
// Write the identity permutation 0..size-1 along `dim` into every slice of
// `t`, by broadcasting an arange view across the remaining dimensions.
void fillSliceWithIndex(Tensor& t,int dim) {
  if (!t.numel()) {
    return;
  }
  auto view_sizes = DimVector(t.dim(), 1);
  view_sizes[dim] = t.sizes()[dim];
  auto seq = at::arange(t.sizes()[dim], t.options());
  t.copy_(seq.view(view_sizes));
}
// In alignment with default sort on a c++ map, this function
// will permute key and value tensors identically, and
// in such a way that the 'key' tensor is ordered numerically
void sortKeyValueInplace(const Tensor& key,
const Tensor& value,
int dim, bool dir) {
TORCH_CHECK(key.sizes() == value.sizes(),
"Key tensor must have same size as value tensor");
int dims = value.dim();
TORCH_CHECK(dims <= MAX_DIMS, "value tensor has too many dimensions");
// if key and value tensors have the same size, we do not need to check both
ptrdiff_t inElements = key.numel();
if (inElements == 0) {
return;
}
int64_t keySliceSize = key.size(dim);
ptrdiff_t keySlices = inElements / keySliceSize;
// The amount of shared memory and block size is based on
// 2^ceil(lg(n)); we choose that sorting implementation for a given
// size.
int64_t ceilPowerOf2 = nextHighestPowerOf2(keySliceSize);
// FIXME: We'd have to find some other trick with Thrust to perform a
// vectorized (key, value) sort by slice segment
TORCH_INTERNAL_ASSERT(ceilPowerOf2 <= 2048, "sortKeyValueInplace only works for sizes <= 2048 at present");
// The grid is based on the number of independent slices that we
// have to sort; one block per slice
dim3 grid;
TORCH_INTERNAL_ASSERT(getGridFromTiles(keySlices, grid), "Too many slices to sort");
#define HANDLE_CASE(TYPE, A, SIZE) \
do { \
int blockSize = SIZE / 2; \
if (blockSize < 1) { \
blockSize = 1; \
} \
\
dim3 block(blockSize); \
\
if (dir) { \
hipLaunchKernelGGL(( bitonicSortKVInPlace<scalar_t, int64_t, A, -1, \
GTComp<scalar_t, true>, TYPE, SIZE>) \
, dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
keyInfo, \
keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
GTComp<scalar_t, true>()); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
} else { \
hipLaunchKernelGGL(( bitonicSortKVInPlace<scalar_t, int64_t, A, -1, \
LTComp<scalar_t, true>, TYPE, SIZE>) \
, dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
keyInfo, \
keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
LTComp<scalar_t, true>()); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
} \
} while (0)
#define HANDLE_SORT_CASE(TYPE, A) \
{ \
switch (ceilPowerOf2) { \
case 2048: \
HANDLE_CASE(TYPE, A, 2048); \
break; \
case 1024: \
case 512: \
case 256: \
HANDLE_CASE(TYPE, A, 1024); \
break; \
case 128: \
case 64: \
HANDLE_CASE(TYPE, A, 128); \
break; \
case 32: \
case 16: \
case 8: \
case 4: \
case 2: \
HANDLE_CASE(TYPE, A, 32); \
break; \
case 1: \
/* Nothing to do, data already sorted */ \
break; \
default: \
TORCH_INTERNAL_ASSERT(false); \
} \
}
// The constructed key/value tensor info is used to select the slice
// we are sorting on a per-block basis
// The constructed key/value tensor info is used to select the slice
// we are sorting on a per-block basis
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, key.scalar_type(), "sortKeyValueInplace", [&] {
if (at::cuda::detail::canUse32BitIndexMath(key)) {
at::cuda::detail::TensorInfo<scalar_t, unsigned int> keyInfo =
at::cuda::detail::getTensorInfo<scalar_t, unsigned int>(key);
at::cuda::detail::TensorInfo<int64_t, unsigned int> valueInfo =
at::cuda::detail::getTensorInfo<int64_t, unsigned int>(value);
keyInfo.reduceDim(dim);
int collapseKeyDim = keyInfo.collapseDims(dim);
valueInfo.reduceDim(dim);
int collapseValueDim = valueInfo.collapseDims(dim);
if (keyInfo.isContiguous()) {
HANDLE_SORT_CASE(unsigned int, -2);
} else {
switch (keyInfo.dims) {
case 2:
HANDLE_SORT_CASE(unsigned int, 2);
break;
default:
HANDLE_SORT_CASE(unsigned int, -1);
break;
}
}
} else {
at::cuda::detail::TensorInfo<scalar_t, uint64_t> keyInfo =
at::cuda::detail::getTensorInfo<scalar_t, uint64_t>(key);
at::cuda::detail::TensorInfo<int64_t, uint64_t> valueInfo =
at::cuda::detail::getTensorInfo<int64_t, uint64_t>(value);
keyInfo.reduceDim(dim);
int collapseKeyDim = keyInfo.collapseDims(dim);
valueInfo.reduceDim(dim);
int collapseValueDim = valueInfo.collapseDims(dim);
// int64_t case is rare, just instantiate the generic version
HANDLE_SORT_CASE(uint64_t, -1);
}
});
#undef HANDLE_CASE
#undef HANDLE_SORT_CASE
#undef HANDLE_A_CASE
}
namespace {
// Lightweight random-access functor handed to cub's segmented sort:
// element i maps to stride * (begin + i), i.e. the start offset of each
// fixed-size segment when begin == 0 and the end offset when begin == 1.
struct offset_t {
int stride;
int begin;
__device__ int operator[](int i) {
return stride * (begin + i);
}
};
}
// We perform a segmented sort in cub with inputs that have
// more than 1024/2048 elements along the selected dimension.
// Otherwise, we do an inplace bitonic sort (see sortKeyValueInplace).
std::tuple<Tensor &,Tensor &> sort_out_stable_cuda(const Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending, Tensor & values, Tensor & indices) {
// this algorithm is always stable
TORCH_INTERNAL_ASSERT(stable.has_value(), "sort_out(): c10::optional<bool> for stable has to have value.");
TensorArg self_arg{self, "self", 1}, values_arg{values, "values", 2}, indices_arg{indices, "indices", 3};
checkAllSameGPU(__func__, {self_arg, values_arg, indices_arg});
bool is_non_overlapping_and_dense = self.is_non_overlapping_and_dense();
int64_t numel = self.numel();
int64_t ndim = self.dim();
dim = maybe_wrap_dim(dim, ndim);
int64_t nsort = self.sizes()[dim];
TORCH_CHECK(nsort <= std::numeric_limits<int>::max(),
"The dimension being sorted can not have more than INT_MAX elements.");
const auto self_dtype = self.dtype();
// FIXME: remove this check once cub sort supports bool
TORCH_CHECK(self_dtype != ScalarType::Bool,
"Sort currently does not support bool dtype on CUDA.");
TORCH_CHECK(self_dtype != ScalarType::ComplexFloat && self_dtype != ScalarType::ComplexDouble,
"Sort currently does not support complex dtypes on CUDA.");
if (ndim == 0) {
if (!values.defined()) {
values = self.clone();
} else {
values.resize_as_(self);
values.copy_(self);
}
if (!indices.defined()) {
indices = at::zeros({}, self.options().dtype(kLong));
} else {
indices.resize_as_(self);
indices.zero_();
}
return std::forward_as_tuple(values, indices);
}
// use inplace algorithm for smaller input sizes without stable=True
if (should_use_small_sort(self, dim) && !stable.value()) {
// from thc: sorted->values, indices->indices, input->self
if (!values.defined()) {
values = at::empty_like(self);
}
if (!indices.defined()) {
indices = at::empty_like(self, self.options().dtype(kLong));
}
// Make sure sufficient output space is allocated
auto self_size = self.sizes();
at::native::resize_output(values, self_size);
at::native::resize_output(indices, self_size);
fillSliceWithIndex(indices, dim);
// We sort k/v pairs in-place; copy unsorted input to output
values.copy_(self);
// Sort using our in-place k/v kernel that supports arbitrary
// layout
sortKeyValueInplace(values, indices, dim, descending);
return std::forward_as_tuple(values, indices);
}
Tensor self_;
bool newself = false;
if (is_non_overlapping_and_dense && self.stride(dim) == 1) {
self_ = self;
} else {
auto new_strides_unsort = infer_dense_strides_dim_last(self, dim);
self_ = at::empty_strided(self.sizes(), new_strides_unsort, self.options());
self_.copy_(self);
newself = true;
}
Tensor values_tmp, indices_tmp;
void *values_ptr_;
int64_t *indices_ptr;
if (!values.defined()) {
if (is_non_overlapping_and_dense) {
values = at::empty_strided(self.sizes(), self.strides(), self.options());
} else {
auto strides = at::infer_dense_strides(self.sizes(), self.strides());
values = at::empty_strided(self.sizes(), strides, self.options());
}
} else {
TORCH_CHECK(self_.scalar_type() == values.scalar_type(),
"Unexpected dtype for values, expect ", self_.scalar_type(), ", got ", values.scalar_type());
values.resize_as_(self);
}
if (values.strides() == self_.strides() && (newself || get_overlap_status(self, values) == MemOverlapStatus::NO)) {
values_ptr_ = values.data_ptr();
} else {
values_tmp = at::empty_strided(self_.sizes(), self_.strides(), self_.options());
values_ptr_ = values_tmp.data_ptr();
}
if (!indices.defined()) {
if (is_non_overlapping_and_dense) {
indices = at::empty_strided(self.sizes(), self.strides(), self.options().dtype(kLong));
} else {
auto strides = at::infer_dense_strides(self.sizes(), self.strides());
indices = at::empty_strided(self.sizes(), strides, self.options().dtype(kLong));
}
} else {
TORCH_CHECK(kLong == indices.scalar_type(),
"Unexpected dtype for values, expect torch.long, got ", indices.scalar_type());
indices.resize_as_(self);
}
if (indices.strides() != self_.strides()) {
indices_tmp = at::empty_strided(self_.sizes(), self_.strides(), self_.options().dtype(kLong));
indices_ptr = indices_tmp.data_ptr<int64_t>();
} else {
indices_ptr = indices.data_ptr<int64_t>();
}
if (numel == 0) {
return std::forward_as_tuple(values, indices);
}
int64_t numel_or_intmax = ::min(numel, static_cast<int64_t>(std::numeric_limits<int>::max()));
int64_t nbatch = (numel_or_intmax / nsort) * nsort;
#ifdef __HIP_PLATFORM_HCC__
constexpr bool is_rocm = true;
#else
constexpr bool is_rocm = false;
#endif
AT_DISPATCH_ALL_TYPES_AND3(kBool, kHalf, kBFloat16, self_.scalar_type(), "sort", [&]{
c10::guts::if_constexpr<!(is_rocm && std::is_same<scalar_t, c10::BFloat16>::value)>([&](auto _){
const scalar_t *self_ptr = self_.data_ptr<scalar_t>();
auto values_ptr = reinterpret_cast<scalar_t *>(values_ptr_);
int64_t remaining = _(numel);
while (remaining > 0) {
int64_t n = ::min(remaining, nbatch);
int64_t nsegments = n / nsort;
auto reverse_indices = at::arange(nsort, indices.options()).view({1, nsort}).expand({nsegments, nsort}).contiguous();
at::cuda::cub::segmented_sort_pairs(self_ptr, values_ptr,
reverse_indices.data_ptr<int64_t>(), indices_ptr, n, nsegments,
offset_t{(int)nsort, 0}, offset_t{(int)nsort, 1}, descending);
remaining -= n;
self_ptr += n;
values_ptr += n;
indices_ptr += n;
}
}, [&](auto _){ TORCH_CHECK(_(false), "BFloat16 is not supported on ROCm"); });
});
if (values_tmp.defined()) {
values.copy_(values_tmp);
}
if (indices_tmp.defined()) {
indices.copy_(indices_tmp);
}
return std::forward_as_tuple(values, indices);
}
// Non-stable out-variant: forwards to the stable entry point with stable=false.
std::tuple<Tensor &,Tensor &> sort_out_cuda(const Tensor & self, int64_t dim, bool descending, Tensor & values, Tensor & indices) {
return sort_out_stable_cuda(self, /*stable=*/false, dim, descending, values, indices);
}
// Out-of-place sort: passes undefined (values, indices) tensors so the
// stable entry point allocates the outputs itself.
std::tuple<Tensor,Tensor> sort_stable_cuda(const Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) {
Tensor values, indices;
return sort_out_stable_cuda(self, stable, dim, descending, values, indices);
}
// Non-stable out-of-place sort: returns freshly allocated (values, indices).
std::tuple<Tensor,Tensor> sort_cuda(const Tensor & self, int64_t dim, bool descending) {
  // Removed a dead local (`int64_t threshold;`) left behind on the signature line.
  return sort_stable_cuda(self, /*stable=*/false, dim, descending);
}
}} // namespace at::native
| bbc3508afca7adacadaba3de30316affa835b0a4.cu | #include <limits>
#include <ATen/ATen.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/core/Array.h>
#include <ATen/cuda/cub.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/SortUtils.cuh>
#include <ATen/native/cuda/SortingCommon.cuh>
namespace at { namespace native {
// Decide whether the in-place bitonic path (sortKeyValueInplace) applies:
// it only supports sort slices of at most 1024 elements for 8-byte scalar
// types (kLong/kDouble) and at most 2048 elements for everything else.
bool should_use_small_sort(const Tensor &self, int64_t dim) {
  // Removed an unused local (`int64_t ndim = self.dim();`) from the original.
  int64_t nsort = self.sizes()[dim];
  int64_t threshold;
  if (self.scalar_type() == kLong || self.scalar_type() == kDouble) {
    threshold = 1024;
  } else {
    threshold = 2048;
  }
  return nsort <= threshold;
}
std::vector<int64_t> infer_dense_strides_dim_last(const Tensor & self, int64_t dim);
void fillSliceWithIndex(Tensor& t,int dim) {
if (t.numel()) {
auto sizes = DimVector(t.dim(), 1);
sizes[dim] = t.sizes()[dim];
auto range = at::arange(t.sizes()[dim], t.options());
auto rangeview = range.view(sizes);
t.copy_(rangeview);
}
}
// In alignment with default sort on a c++ map, this function
// will permute key and value tensors identically, and
// in such a way that the 'key' tensor is ordered numerically
void sortKeyValueInplace(const Tensor& key,
const Tensor& value,
int dim, bool dir) {
TORCH_CHECK(key.sizes() == value.sizes(),
"Key tensor must have same size as value tensor");
int dims = value.dim();
TORCH_CHECK(dims <= MAX_DIMS, "value tensor has too many dimensions");
// if key and value tensors have the same size, we do not need to check both
ptrdiff_t inElements = key.numel();
if (inElements == 0) {
return;
}
int64_t keySliceSize = key.size(dim);
ptrdiff_t keySlices = inElements / keySliceSize;
// The amount of shared memory and block size is based on
// 2^ceil(lg(n)); we choose that sorting implementation for a given
// size.
int64_t ceilPowerOf2 = nextHighestPowerOf2(keySliceSize);
// FIXME: We'd have to find some other trick with Thrust to perform a
// vectorized (key, value) sort by slice segment
TORCH_INTERNAL_ASSERT(ceilPowerOf2 <= 2048, "sortKeyValueInplace only works for sizes <= 2048 at present");
// The grid is based on the number of independent slices that we
// have to sort; one block per slice
dim3 grid;
TORCH_INTERNAL_ASSERT(getGridFromTiles(keySlices, grid), "Too many slices to sort");
#define HANDLE_CASE(TYPE, A, SIZE) \
do { \
int blockSize = SIZE / 2; \
if (blockSize < 1) { \
blockSize = 1; \
} \
\
dim3 block(blockSize); \
\
if (dir) { \
bitonicSortKVInPlace<scalar_t, int64_t, A, -1, \
GTComp<scalar_t, true>, TYPE, SIZE> \
<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( \
keyInfo, \
keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
GTComp<scalar_t, true>()); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} else { \
bitonicSortKVInPlace<scalar_t, int64_t, A, -1, \
LTComp<scalar_t, true>, TYPE, SIZE> \
<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( \
keyInfo, \
keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
LTComp<scalar_t, true>()); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
} while (0)
#define HANDLE_SORT_CASE(TYPE, A) \
{ \
switch (ceilPowerOf2) { \
case 2048: \
HANDLE_CASE(TYPE, A, 2048); \
break; \
case 1024: \
case 512: \
case 256: \
HANDLE_CASE(TYPE, A, 1024); \
break; \
case 128: \
case 64: \
HANDLE_CASE(TYPE, A, 128); \
break; \
case 32: \
case 16: \
case 8: \
case 4: \
case 2: \
HANDLE_CASE(TYPE, A, 32); \
break; \
case 1: \
/* Nothing to do, data already sorted */ \
break; \
default: \
TORCH_INTERNAL_ASSERT(false); \
} \
}
// The constructed key/value tensor info is used to select the slice
// we are sorting on a per-block basis
// The constructed key/value tensor info is used to select the slice
// we are sorting on a per-block basis
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, key.scalar_type(), "sortKeyValueInplace", [&] {
if (at::cuda::detail::canUse32BitIndexMath(key)) {
at::cuda::detail::TensorInfo<scalar_t, unsigned int> keyInfo =
at::cuda::detail::getTensorInfo<scalar_t, unsigned int>(key);
at::cuda::detail::TensorInfo<int64_t, unsigned int> valueInfo =
at::cuda::detail::getTensorInfo<int64_t, unsigned int>(value);
keyInfo.reduceDim(dim);
int collapseKeyDim = keyInfo.collapseDims(dim);
valueInfo.reduceDim(dim);
int collapseValueDim = valueInfo.collapseDims(dim);
if (keyInfo.isContiguous()) {
HANDLE_SORT_CASE(unsigned int, -2);
} else {
switch (keyInfo.dims) {
case 2:
HANDLE_SORT_CASE(unsigned int, 2);
break;
default:
HANDLE_SORT_CASE(unsigned int, -1);
break;
}
}
} else {
at::cuda::detail::TensorInfo<scalar_t, uint64_t> keyInfo =
at::cuda::detail::getTensorInfo<scalar_t, uint64_t>(key);
at::cuda::detail::TensorInfo<int64_t, uint64_t> valueInfo =
at::cuda::detail::getTensorInfo<int64_t, uint64_t>(value);
keyInfo.reduceDim(dim);
int collapseKeyDim = keyInfo.collapseDims(dim);
valueInfo.reduceDim(dim);
int collapseValueDim = valueInfo.collapseDims(dim);
// int64_t case is rare, just instantiate the generic version
HANDLE_SORT_CASE(uint64_t, -1);
}
});
#undef HANDLE_CASE
#undef HANDLE_SORT_CASE
#undef HANDLE_A_CASE
}
namespace {
struct offset_t {
int stride;
int begin;
__device__ int operator[](int i) {
return stride * (begin + i);
}
};
}
// We perform a segmented sort in cub with inputs that have
// more than 1024/2048 elements along the selected dimension.
// Otherwise, we do an inplace bitonic sort (see sortKeyValueInplace).
std::tuple<Tensor &,Tensor &> sort_out_stable_cuda(const Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending, Tensor & values, Tensor & indices) {
// this algorithm is always stable
TORCH_INTERNAL_ASSERT(stable.has_value(), "sort_out(): c10::optional<bool> for stable has to have value.");
TensorArg self_arg{self, "self", 1}, values_arg{values, "values", 2}, indices_arg{indices, "indices", 3};
checkAllSameGPU(__func__, {self_arg, values_arg, indices_arg});
bool is_non_overlapping_and_dense = self.is_non_overlapping_and_dense();
int64_t numel = self.numel();
int64_t ndim = self.dim();
dim = maybe_wrap_dim(dim, ndim);
int64_t nsort = self.sizes()[dim];
TORCH_CHECK(nsort <= std::numeric_limits<int>::max(),
"The dimension being sorted can not have more than INT_MAX elements.");
const auto self_dtype = self.dtype();
// FIXME: remove this check once cub sort supports bool
TORCH_CHECK(self_dtype != ScalarType::Bool,
"Sort currently does not support bool dtype on CUDA.");
TORCH_CHECK(self_dtype != ScalarType::ComplexFloat && self_dtype != ScalarType::ComplexDouble,
"Sort currently does not support complex dtypes on CUDA.");
if (ndim == 0) {
if (!values.defined()) {
values = self.clone();
} else {
values.resize_as_(self);
values.copy_(self);
}
if (!indices.defined()) {
indices = at::zeros({}, self.options().dtype(kLong));
} else {
indices.resize_as_(self);
indices.zero_();
}
return std::forward_as_tuple(values, indices);
}
// use inplace algorithm for smaller input sizes without stable=True
if (should_use_small_sort(self, dim) && !stable.value()) {
// from thc: sorted->values, indices->indices, input->self
if (!values.defined()) {
values = at::empty_like(self);
}
if (!indices.defined()) {
indices = at::empty_like(self, self.options().dtype(kLong));
}
// Make sure sufficient output space is allocated
auto self_size = self.sizes();
at::native::resize_output(values, self_size);
at::native::resize_output(indices, self_size);
fillSliceWithIndex(indices, dim);
// We sort k/v pairs in-place; copy unsorted input to output
values.copy_(self);
// Sort using our in-place k/v kernel that supports arbitrary
// layout
sortKeyValueInplace(values, indices, dim, descending);
return std::forward_as_tuple(values, indices);
}
Tensor self_;
bool newself = false;
if (is_non_overlapping_and_dense && self.stride(dim) == 1) {
self_ = self;
} else {
auto new_strides_unsort = infer_dense_strides_dim_last(self, dim);
self_ = at::empty_strided(self.sizes(), new_strides_unsort, self.options());
self_.copy_(self);
newself = true;
}
Tensor values_tmp, indices_tmp;
void *values_ptr_;
int64_t *indices_ptr;
if (!values.defined()) {
if (is_non_overlapping_and_dense) {
values = at::empty_strided(self.sizes(), self.strides(), self.options());
} else {
auto strides = at::infer_dense_strides(self.sizes(), self.strides());
values = at::empty_strided(self.sizes(), strides, self.options());
}
} else {
TORCH_CHECK(self_.scalar_type() == values.scalar_type(),
"Unexpected dtype for values, expect ", self_.scalar_type(), ", got ", values.scalar_type());
values.resize_as_(self);
}
if (values.strides() == self_.strides() && (newself || get_overlap_status(self, values) == MemOverlapStatus::NO)) {
values_ptr_ = values.data_ptr();
} else {
values_tmp = at::empty_strided(self_.sizes(), self_.strides(), self_.options());
values_ptr_ = values_tmp.data_ptr();
}
if (!indices.defined()) {
if (is_non_overlapping_and_dense) {
indices = at::empty_strided(self.sizes(), self.strides(), self.options().dtype(kLong));
} else {
auto strides = at::infer_dense_strides(self.sizes(), self.strides());
indices = at::empty_strided(self.sizes(), strides, self.options().dtype(kLong));
}
} else {
TORCH_CHECK(kLong == indices.scalar_type(),
"Unexpected dtype for values, expect torch.long, got ", indices.scalar_type());
indices.resize_as_(self);
}
if (indices.strides() != self_.strides()) {
indices_tmp = at::empty_strided(self_.sizes(), self_.strides(), self_.options().dtype(kLong));
indices_ptr = indices_tmp.data_ptr<int64_t>();
} else {
indices_ptr = indices.data_ptr<int64_t>();
}
if (numel == 0) {
return std::forward_as_tuple(values, indices);
}
int64_t numel_or_intmax = std::min(numel, static_cast<int64_t>(std::numeric_limits<int>::max()));
int64_t nbatch = (numel_or_intmax / nsort) * nsort;
#ifdef __HIP_PLATFORM_HCC__
constexpr bool is_rocm = true;
#else
constexpr bool is_rocm = false;
#endif
AT_DISPATCH_ALL_TYPES_AND3(kBool, kHalf, kBFloat16, self_.scalar_type(), "sort", [&]{
c10::guts::if_constexpr<!(is_rocm && std::is_same<scalar_t, c10::BFloat16>::value)>([&](auto _){
const scalar_t *self_ptr = self_.data_ptr<scalar_t>();
auto values_ptr = reinterpret_cast<scalar_t *>(values_ptr_);
int64_t remaining = _(numel);
while (remaining > 0) {
int64_t n = std::min(remaining, nbatch);
int64_t nsegments = n / nsort;
auto reverse_indices = at::arange(nsort, indices.options()).view({1, nsort}).expand({nsegments, nsort}).contiguous();
at::cuda::cub::segmented_sort_pairs(self_ptr, values_ptr,
reverse_indices.data_ptr<int64_t>(), indices_ptr, n, nsegments,
offset_t{(int)nsort, 0}, offset_t{(int)nsort, 1}, descending);
remaining -= n;
self_ptr += n;
values_ptr += n;
indices_ptr += n;
}
}, [&](auto _){ TORCH_CHECK(_(false), "BFloat16 is not supported on ROCm"); });
});
if (values_tmp.defined()) {
values.copy_(values_tmp);
}
if (indices_tmp.defined()) {
indices.copy_(indices_tmp);
}
return std::forward_as_tuple(values, indices);
}
std::tuple<Tensor &,Tensor &> sort_out_cuda(const Tensor & self, int64_t dim, bool descending, Tensor & values, Tensor & indices) {
return sort_out_stable_cuda(self, /*stable=*/false, dim, descending, values, indices);
}
std::tuple<Tensor,Tensor> sort_stable_cuda(const Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) {
Tensor values, indices;
return sort_out_stable_cuda(self, stable, dim, descending, values, indices);
}
// Non-stable out-of-place sort: returns freshly allocated (values, indices).
std::tuple<Tensor,Tensor> sort_cuda(const Tensor & self, int64_t dim, bool descending) {
  // Removed a dead local (`int64_t threshold;`) left behind on the signature line.
  return sort_stable_cuda(self, /*stable=*/false, dim, descending);
}
}} // namespace at::native
|
db98fac1884c0793c8e2c44efd33f5c11084b0aa.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cassert>
#include <type_traits>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "DataFormats/HcalRecHit/interface/HBHERecHit.h"
#include "DataFormats/HcalRecHit/interface/HFRecHit.h"
#include "DataFormats/HcalRecHit/interface/HORecHit.h"
#include "DataFormats/HcalRecHit/interface/HFQIE10Info.h"
#include "DataFormats/HcalRecHit/interface/HBHEChannelInfo.h"
// Device-side smoke test for rec-hit type T: construct a reference hit on
// the device and propagate its energy/time into the host-provided object.
template<typename T>
__global__ void kernel_test_hcal_rechits(T *other) {
    T reference(HcalDetId(0), 10.0f, 10.0f);
    other->setEnergy(reference.energy());
    other->setTime(reference.time());
}
// Device-side smoke test: merely default-constructs an HFQIE10Info to prove
// the type is usable inside a kernel.
__global__ void kernel_test_hcal_hfqie10info() {
    HFQIE10Info info;
}
// Device-side smoke test for HBHEChannelInfo: fill a channel-info object on
// the device, then copy every field into the host-provided one via the
// public accessors.
__global__ void kernel_test_hcal_hbhechinfo(HBHEChannelInfo *other) {
    HBHEChannelInfo info{true, true};
    // Positional arguments mirror the accessors used below:
    // id, recoShape, nSamples, soi, capid, darkCurrent, fcByPE, lambda,
    // hasLinkError, hasCapidError, isDropped.
    info.setChannelInfo(
        HcalDetId{0},
        10, 10, 10, 1,
        2.0, 2.0, 2.0,
        false, false, false);
    other->setChannelInfo(
        info.id(),
        info.recoShape(), info.nSamples(), info.soi(), info.capid(),
        info.darkCurrent(), info.fcByPE(), info.lambda(),
        info.hasLinkError(), info.hasCapidError(), info.isDropped()
    );
}
// Host-side test: launch the HFQIE10Info smoke kernel and verify that both
// the launch and the kernel execution complete without error.
void test_hcal_hfqie10info() {
    auto check_error = [](auto code) {
        if (code != hipSuccess) {
            std::cout << hipGetErrorString(code) << std::endl;
            assert(false);
        }
    };
    hipLaunchKernelGGL(( kernel_test_hcal_hfqie10info), dim3(1),dim3(1), 0, 0, );
    // hipGetLastError only reports launch-configuration failures...
    check_error(hipGetLastError());
    // ...kernel-execution errors surface at synchronization; the sibling
    // tests in this file synchronize too, so do the same here.
    check_error(hipDeviceSynchronize());
}
// Host-side round trip for rec-hit type T: copy a default-constructed hit
// to the device, let the kernel rebuild the reference hit into it, copy it
// back and compare energy/time with a host-built reference.
template<typename T>
void test_hcal_rechits() {
    auto check_error = [](auto code) {
        if (code != hipSuccess) {
            std::cout << hipGetErrorString(code) << std::endl;
            assert(false);
        }
    };
    T h_rh, h_rh_test{HcalDetId(0), 10.0f, 10.0f};
    T *d_rh;
    // Check every runtime call: a silent allocation/copy failure would make
    // the assertions below meaningless.
    check_error(hipMalloc((void**)&d_rh, sizeof(T)));
    check_error(hipMemcpy(d_rh, &h_rh, sizeof(T), hipMemcpyHostToDevice));
    hipLaunchKernelGGL(( kernel_test_hcal_rechits<T>), dim3(1),dim3(1), 0, 0, d_rh);
    check_error(hipDeviceSynchronize());
    check_error(hipGetLastError());
    check_error(hipMemcpy(&h_rh, d_rh, sizeof(T), hipMemcpyDeviceToHost));
    check_error(hipFree(d_rh));  // was previously leaked
    std::cout << h_rh << std::endl;
    std::cout << h_rh_test << std::endl;
    assert(h_rh.energy() == h_rh_test.energy());
    assert(h_rh.time() == h_rh_test.time());
    std::cout << "all good in " << __FUNCTION__ << std::endl;
}
// Host-side round trip for HBHEChannelInfo: configure a reference object,
// run the device kernel that rebuilds the same channel info into device
// memory, copy it back and compare field by field.
void test_hcal_hbhechinfo() {
    auto check_error = [](auto code) {
        if (code != hipSuccess) {
            std::cout << hipGetErrorString(code) << std::endl;
            assert(false);
        }
    };
    HBHEChannelInfo h_info, h_info_test{true, true};
    h_info_test.setChannelInfo(
        HcalDetId{0},
        10, 10, 10, 1,
        2.0, 2.0, 2.0,
        false, false, false);
    HBHEChannelInfo *d_info;
    // Check every runtime call: a silent allocation/copy failure would make
    // the assertions below meaningless.
    check_error(hipMalloc((void**)&d_info, sizeof(HBHEChannelInfo)));
    check_error(hipMemcpy(d_info, &h_info, sizeof(HBHEChannelInfo), hipMemcpyHostToDevice));
    hipLaunchKernelGGL(( kernel_test_hcal_hbhechinfo), dim3(1),dim3(1), 0, 0, d_info);
    check_error(hipDeviceSynchronize());
    check_error(hipGetLastError());
    check_error(hipMemcpy(&h_info, d_info, sizeof(HBHEChannelInfo), hipMemcpyDeviceToHost));
    check_error(hipFree(d_info));  // was previously leaked
    assert(h_info.id() == h_info_test.id());
    assert(h_info.recoShape() == h_info_test.recoShape());
    assert(h_info.nSamples() == h_info_test.nSamples());
    assert(h_info.soi() == h_info_test.soi());
    assert(h_info.capid() == h_info_test.capid());
    assert(h_info.darkCurrent() == h_info_test.darkCurrent());
    assert(h_info.fcByPE() == h_info_test.fcByPE());
    assert(h_info.lambda() == h_info_test.lambda());
    assert(h_info.hasLinkError() == h_info_test.hasLinkError());
    assert(h_info.hasCapidError() == h_info_test.hasCapidError());
    std::cout << "all good in " << __FUNCTION__ << std::endl;
}
// Entry point: run the rec-hit round-trip tests only when a device is
// actually present.
int main(int argc, char ** argv) {
    int nDevices = 0;
    // On failure the device count may be left untouched by the runtime, so
    // initialize it and force it back to zero when the call errors out.
    if (hipGetDeviceCount(&nDevices) != hipSuccess) {
        nDevices = 0;
    }
    std::cout << "nDevices = " << nDevices << std::endl;
    if (nDevices > 0) {
        test_hcal_rechits<HBHERecHit>();
        test_hcal_rechits<HFRecHit>();
        test_hcal_rechits<HORecHit>();
        test_hcal_hbhechinfo();
        std::cout << "all good" << std::endl;
    }
    return 0;
}
| db98fac1884c0793c8e2c44efd33f5c11084b0aa.cu | #include <iostream>
#include <cassert>
#include <type_traits>
#include <cuda.h>
#include <cuda_runtime.h>
#include "DataFormats/HcalRecHit/interface/HBHERecHit.h"
#include "DataFormats/HcalRecHit/interface/HFRecHit.h"
#include "DataFormats/HcalRecHit/interface/HORecHit.h"
#include "DataFormats/HcalRecHit/interface/HFQIE10Info.h"
#include "DataFormats/HcalRecHit/interface/HBHEChannelInfo.h"
// Device-side smoke test for rec-hit type T: construct a reference hit on
// the device and propagate its energy/time into the host-provided object.
template<typename T>
__global__ void kernel_test_hcal_rechits(T *other) {
    T reference(HcalDetId(0), 10.0f, 10.0f);
    other->setEnergy(reference.energy());
    other->setTime(reference.time());
}
// Device-side smoke test: merely default-constructs an HFQIE10Info to prove
// the type is usable inside a kernel.
__global__ void kernel_test_hcal_hfqie10info() {
    HFQIE10Info info;
}
// Device-side smoke test for HBHEChannelInfo: fill a channel-info object on
// the device, then copy every field into the host-provided one via the
// public accessors.
__global__ void kernel_test_hcal_hbhechinfo(HBHEChannelInfo *other) {
    HBHEChannelInfo info{true, true};
    // Positional arguments mirror the accessors used below:
    // id, recoShape, nSamples, soi, capid, darkCurrent, fcByPE, lambda,
    // hasLinkError, hasCapidError, isDropped.
    info.setChannelInfo(
        HcalDetId{0},
        10, 10, 10, 1,
        2.0, 2.0, 2.0,
        false, false, false);
    other->setChannelInfo(
        info.id(),
        info.recoShape(), info.nSamples(), info.soi(), info.capid(),
        info.darkCurrent(), info.fcByPE(), info.lambda(),
        info.hasLinkError(), info.hasCapidError(), info.isDropped()
    );
}
// Host-side test: launch the HFQIE10Info smoke kernel and verify that both
// the launch and the kernel execution complete without error.
void test_hcal_hfqie10info() {
    auto check_error = [](auto code) {
        if (code != cudaSuccess) {
            std::cout << cudaGetErrorString(code) << std::endl;
            assert(false);
        }
    };
    kernel_test_hcal_hfqie10info<<<1,1>>>();
    // cudaGetLastError only reports launch-configuration failures...
    check_error(cudaGetLastError());
    // ...kernel-execution errors surface at synchronization; the sibling
    // tests in this file synchronize too, so do the same here.
    check_error(cudaDeviceSynchronize());
}
// Host-side round trip for rec-hit type T: copy a default-constructed hit
// to the device, let the kernel rebuild the reference hit into it, copy it
// back and compare energy/time with a host-built reference.
template<typename T>
void test_hcal_rechits() {
    auto check_error = [](auto code) {
        if (code != cudaSuccess) {
            std::cout << cudaGetErrorString(code) << std::endl;
            assert(false);
        }
    };
    T h_rh, h_rh_test{HcalDetId(0), 10.0f, 10.0f};
    T *d_rh;
    // Check every runtime call: a silent allocation/copy failure would make
    // the assertions below meaningless.
    check_error(cudaMalloc((void**)&d_rh, sizeof(T)));
    check_error(cudaMemcpy(d_rh, &h_rh, sizeof(T), cudaMemcpyHostToDevice));
    kernel_test_hcal_rechits<T><<<1,1>>>(d_rh);
    check_error(cudaDeviceSynchronize());
    check_error(cudaGetLastError());
    check_error(cudaMemcpy(&h_rh, d_rh, sizeof(T), cudaMemcpyDeviceToHost));
    check_error(cudaFree(d_rh));  // was previously leaked
    std::cout << h_rh << std::endl;
    std::cout << h_rh_test << std::endl;
    assert(h_rh.energy() == h_rh_test.energy());
    assert(h_rh.time() == h_rh_test.time());
    std::cout << "all good in " << __FUNCTION__ << std::endl;
}
// Host-side round trip for HBHEChannelInfo: configure a reference object,
// run the device kernel that rebuilds the same channel info into device
// memory, copy it back and compare field by field.
void test_hcal_hbhechinfo() {
    auto check_error = [](auto code) {
        if (code != cudaSuccess) {
            std::cout << cudaGetErrorString(code) << std::endl;
            assert(false);
        }
    };
    HBHEChannelInfo h_info, h_info_test{true, true};
    h_info_test.setChannelInfo(
        HcalDetId{0},
        10, 10, 10, 1,
        2.0, 2.0, 2.0,
        false, false, false);
    HBHEChannelInfo *d_info;
    // Check every runtime call: a silent allocation/copy failure would make
    // the assertions below meaningless.
    check_error(cudaMalloc((void**)&d_info, sizeof(HBHEChannelInfo)));
    check_error(cudaMemcpy(d_info, &h_info, sizeof(HBHEChannelInfo), cudaMemcpyHostToDevice));
    kernel_test_hcal_hbhechinfo<<<1,1>>>(d_info);
    check_error(cudaDeviceSynchronize());
    check_error(cudaGetLastError());
    check_error(cudaMemcpy(&h_info, d_info, sizeof(HBHEChannelInfo), cudaMemcpyDeviceToHost));
    check_error(cudaFree(d_info));  // was previously leaked
    assert(h_info.id() == h_info_test.id());
    assert(h_info.recoShape() == h_info_test.recoShape());
    assert(h_info.nSamples() == h_info_test.nSamples());
    assert(h_info.soi() == h_info_test.soi());
    assert(h_info.capid() == h_info_test.capid());
    assert(h_info.darkCurrent() == h_info_test.darkCurrent());
    assert(h_info.fcByPE() == h_info_test.fcByPE());
    assert(h_info.lambda() == h_info_test.lambda());
    assert(h_info.hasLinkError() == h_info_test.hasLinkError());
    assert(h_info.hasCapidError() == h_info_test.hasCapidError());
    std::cout << "all good in " << __FUNCTION__ << std::endl;
}
// Entry point: run the rec-hit round-trip tests only when a device is
// actually present.
int main(int argc, char ** argv) {
    int nDevices = 0;
    // On failure the device count may be left untouched by the runtime, so
    // initialize it and force it back to zero when the call errors out.
    if (cudaGetDeviceCount(&nDevices) != cudaSuccess) {
        nDevices = 0;
    }
    std::cout << "nDevices = " << nDevices << std::endl;
    if (nDevices > 0) {
        test_hcal_rechits<HBHERecHit>();
        test_hcal_rechits<HFRecHit>();
        test_hcal_rechits<HORecHit>();
        test_hcal_hbhechinfo();
        std::cout << "all good" << std::endl;
    }
    return 0;
}
|
591a1fcdf644a2dedd1965369ef8e879d53807ba.hip | // !!! This is a file automatically generated by hipify!!!
// Generated by Hybridizer version 1.0.0.0
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#if defined(__HIPCC__)
#ifndef hyb_device
#define hyb_inline __forceinline__
#define hyb_constant __constant__
#if defined(HYBRIDIZER_NO_HOST)
#define hyb_host
#define hyb_device __device__
#else
#define hyb_host __host__
#define hyb_device __device__
#endif
#endif
#else
#ifndef hyb_device
#define hyb_inline inline
#define hyb_device
#define hyb_constant
#endif
#endif
#pragma once
#if defined _WIN32 || defined _WIN64 || defined __CYGWIN__
#define BUILDING_DLL
#ifdef BUILDING_DLL
#ifdef __GNUC__
#define DLL_PUBLIC __attribute__ ((dllexport))
#else
#define DLL_PUBLIC __declspec(dllexport) // Note: actually gcc seems to also supports this syntax.
#endif
#else
#ifdef __GNUC__
#define DLL_PUBLIC __attribute__ ((dllimport))
#else
#define DLL_PUBLIC __declspec(dllimport) // Note: actually gcc seems to also supports this syntax.
#endif
#endif
#define DLL_LOCAL
#else
#if __GNUC__ >= 4
#define DLL_PUBLIC __attribute__ ((visibility ("default")))
#define DLL_LOCAL __attribute__ ((visibility ("hidden")))
#else
#define DLL_PUBLIC
#define DLL_LOCAL
#endif
#endif
// hybridizer core types
#include <cstdint>
namespace hybridizer { struct hybridobject ; }
namespace hybridizer { struct runtime ; }
#pragma region defined enums and types
#ifndef __ENUM_DECL_Hybridizer_Runtime_CUDAImports_ResidentArrayStatus__
#define __ENUM_DECL_Hybridizer_Runtime_CUDAImports_ResidentArrayStatus__
namespace Hybridizer { namespace Runtime { namespace CUDAImports {
enum struct ResidentArrayStatus
{
NoAction = 0,
DeviceNeedsRefresh = 1,
HostNeedsRefresh = 2,
} ;
} } } // Leaving namespace
#endif // __ENUM_DECL_Hybridizer_Runtime_CUDAImports_ResidentArrayStatus__
// Intrinsic type hipError_t used
#define __TYPE_DECL_cudaError_t__
#if defined(__cplusplus) || defined(__HIPCC__)
#ifndef __ENUM_DECL_Hybridizer_Runtime_CUDAImports_ResidentArrayStatus__
#define __ENUM_DECL_Hybridizer_Runtime_CUDAImports_ResidentArrayStatus__
namespace Hybridizer { namespace Runtime { namespace CUDAImports {
enum struct ResidentArrayStatus
{
NoAction = 0,
DeviceNeedsRefresh = 1,
HostNeedsRefresh = 2,
} ;
} } } // Leaving namespace
#endif // __ENUM_DECL_Hybridizer_Runtime_CUDAImports_ResidentArrayStatus__
namespace Hybridizer { namespace Runtime { namespace CUDAImports {
struct IResidentArray ;
} } } // Leaving namespace
namespace Hybridizer { namespace Runtime { namespace CUDAImports {
struct IResidentData ;
} } } // Leaving namespace
namespace System {
struct IDisposable ;
} // Leaving namespace
// Intrinsic type hipError_t used
#define __TYPE_DECL_cudaError_t__
namespace Hybridizer { namespace Runtime { namespace CUDAImports {
struct IntResidentArray ;
} } } // Leaving namespace
namespace Hybrid {
struct Program ;
} // Leaving namespace
namespace Hybrid {
struct Program___c__DisplayClass1_0 ;
} // Leaving namespace
namespace System { namespace Threading { namespace Tasks {
struct Parallel ;
} } } // Leaving namespace
// Intrinsic type Nullable`1 used
#define __TYPE_DECL_hybridizer_nullable__int64_t____
namespace System { namespace Threading { namespace Tasks {
struct ParallelLoopResult ;
} } } // Leaving namespace
// Intrinsic type Action`1 used
#define __TYPE_DECL_hybridizer_action__int____
namespace Hybrid {
struct Program___c__DisplayClass2_0 ;
} // Leaving namespace
namespace Hybrid {
struct Program___c__DisplayClass3_0 ;
} // Leaving namespace
namespace Hybrid {
struct Program___c__DisplayClass7_0 ;
} // Leaving namespace
#endif // TOTO
#pragma endregion
extern "C" void* __hybridizer_init_basic_runtime();
// ----- HYBRIDIZER_CUDA_CUH -----
#pragma once // hybridizer.cuda.cuh
#if defined(__CUDACC_RTC__)
#define HYBRIDIZER_NO_HOST
#endif
#if !defined(HYBRIDIZER_NO_HOST)
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <stdio.h>
#else
typedef signed char int8_t;
typedef short int16_t;
typedef int int32_t;
typedef long long int64_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
typedef unsigned long long uint64_t;
#endif
#if !defined(DLL_PUBLIC)
#if defined _WIN32 || defined _WIN64 || defined __CYGWIN__
#define BUILDING_DLL
#ifdef BUILDING_DLL
#ifdef __GNUC__
#define DLL_PUBLIC __attribute__ ((dllexport))
#else
#define DLL_PUBLIC __declspec(dllexport) // Note: actually gcc seems to also supports this syntax.
#endif
#else
#ifdef __GNUC__
#define DLL_PUBLIC __attribute__ ((dllimport))
#else
#define DLL_PUBLIC __declspec(dllimport) // Note: actually gcc seems to also supports this syntax.
#endif
#endif
#define DLL_LOCAL
#else
#if __GNUC__ >= 4
#define DLL_PUBLIC __attribute__ ((visibility ("default")))
#define DLL_LOCAL __attribute__ ((visibility ("hidden")))
#else
#define DLL_PUBLIC
#define DLL_LOCAL
#endif
#endif
#endif
#if UINTPTR_MAX == 0xffffffffffffffff
/* 64-bit */
#else
// WTF ??? #define NO_EXCEPTION
#endif
#define HYBRIDIZER_GETTYPE_ID hybridizer::gettypeid
template<typename T> struct __hybridizer_argument_type;
template<typename T, typename U> struct __hybridizer_argument_type<T(U)> { typedef U type; };
#define HYBRIDIZER_QUERYINTERFACE(...) hybridizer::queryinterface<__hybridizer_argument_type<void(__VA_ARGS__)>::type>
#define HYBRIDIZER_SIGNAL_PURE_VIRTUAL(a, b) hybridizer::signal_pure_virtual(a, b)
#define HYBRIDIZER_FUNC3_INVOKE(functor,left,middle,right) (*functor) . invoke (left, middle, right)
#define HYBRIDIZER_FUNC2_INVOKE(functor,left,right) (*functor) . invoke (left, right)
#define HYBRIDIZER_FUNC1_INVOKE(functor,left) (*functor) . invoke (left)
#if defined(__HIPCC__)
#ifndef hyb_device
#define hyb_inline __forceinline__
#define hyb_constant __constant__
#if defined(HYBRIDIZER_NO_HOST)
#define hyb_host
#define hyb_device __device__
#else
#define hyb_host __host__
#define hyb_device __device__
#endif
#endif
#else
#ifndef hyb_device
#define hyb_inline inline
#define hyb_device
#define hyb_constant
#endif
#endif
#if defined(_MSC_VER)
#define STRUCT_ALIGNED_(x) __declspec(align(x))
#else
#if defined(__GNUC__)
#define STRUCT_ALIGNED_(x) __attribute__ ((aligned(x)))
#endif
#endif
#define __hybridizer_threadIdxX threadIdx.x
#define __hybridizer_threadIdxY threadIdx.y
#define __hybridizer_threadIdxZ threadIdx.z
#define __hybridizer_blockDimX blockDim.x
#define __hybridizer_blockDimY blockDim.y
#define __hybridizer_blockDimZ blockDim.z
#define __hybridizer_blockIdxX blockIdx.x
#define __hybridizer_blockIdxY blockIdx.y
#define __hybridizer_blockIdxZ blockIdx.z
#define __hybridizer_gridDimX gridDim.x
#define __hybridizer_gridDimY gridDim.y
#define __hybridizer_gridDimZ gridDim.z
#define __hybridizer_threadIdxXX64 threadIdx.x
#define __hybridizer_threadIdxYX64 threadIdx.y
#define __hybridizer_threadIdxZX64 threadIdx.z
#define __hybridizer_blockDimXX64 blockDim.x
#define __hybridizer_blockDimYX64 blockDim.y
#define __hybridizer_blockDimZX64 blockDim.z
#define __hybridizer_blockIdxXX64 blockIdx.x
#define __hybridizer_blockIdxYX64 blockIdx.y
#define __hybridizer_blockIdxZX64 blockIdx.z
#define __hybridizer_gridDimXX64 gridDim.x
#define __hybridizer_gridDimYX64 gridDim.y
#define __hybridizer_gridDimZX64 gridDim.z
extern __shared__ char __hybridizer_cuda_local_shared [] ;
#if defined(HYBRIDIZER_NULL_CHECKS_THROW_TRAP) || defined (HYBRIDIZER_NULL_CHECKS_BREAK) || defined (HYBRIDIZER_NULL_CHECKS_PRINT)
#define HYBRIDIZER_NULL_CHECKS
#endif
#ifdef HYBRIDIZER_NULL_CHECKS
// Null-pointer guard injected around dereferences by the code generator.
// Always returns `input`; on null it reacts according to the configured
// policy: debugger breakpoint, device trap, or a diagnostic printf.
template<typename T>
hyb_device hyb_inline static T* __hybridizer_null_check(T* input, const char* file, int line) {
    if(nullptr == input) {
#ifdef HYBRIDIZER_NULL_CHECKS_BREAK
        asm("brkpt;");
#elif defined (HYBRIDIZER_NULL_CHECKS_THROW_TRAP)
        asm("trap;");
#elif defined(HYBRIDIZER_NULL_CHECKS_PRINT)
        printf("null pointer at %s:%d\n", file, line);
#else
#endif
    }
    return input;
}
#define HYBRIDIZER_NULL_CHECK(param) __hybridizer_null_check(param, __FILE__, __LINE__)
#else
// Checks disabled: the macro is a transparent pass-through.
#define HYBRIDIZER_NULL_CHECK(param) (param)
#endif
namespace hybridizer {
    // Warp-shuffle wrappers used by generated code.
    // NOTE(review): these use the legacy mask-less __shfl* intrinsics, which
    // CUDA removed for compute capability 7.0+ in favor of the *_sync forms;
    // they remain available under HIP/ROCm — confirm the intended targets.
    template <typename T> __device__ T shuffle (T t, int srcLane) { return __shfl (t, srcLane) ; }
    template <typename T> __device__ T shuffleup (T t, unsigned int shift) { return __shfl_up (t, shift) ; }
    template <typename T> __device__ T shuffledown (T t, unsigned int shift) { return __shfl_down (t, shift) ; }
    template <typename T> __device__ T shufflexor (T t, unsigned int shift) { return __shfl_xor (t, shift) ; }
}
namespace hybridizer {
template <typename T, int rank>
struct hybarray;
}
namespace hybridizer {
    // Object header shared by hybridized reference types: a vtable pointer
    // on one flavor, a type id on the other, padded to 8 bytes.
    struct hybridobject { union { void* _vtable ; int _typeid ; char _vtable_padding[8] ; } ; } ;
    // DateTime surrogate: raw tick count only.
    struct datetime { long long _date; } ;
    // Type id of a hybridobject (0 for null).
    __forceinline__ hyb_device static int gettypeid (void* ptr) {
        if (ptr == 0) return 0 ;
        return ((hybridobject*)ptr)->_typeid ;
    }
    // Unchecked downcasts used by generated code (plain C-style casts, no
    // runtime type verification).
    template<typename T>
    __forceinline__ hyb_device static T queryinterface (hybridobject* ptr) {
        return ((T)ptr) ;
    }
    template<typename T>
    __forceinline__ hyb_device static T queryinterface (void* ptr) {
        return ((T)ptr) ;
    }
    // `constrained` yields the value regardless of how the generated code
    // holds it: pointer, const pointer, reference or const reference.
    template<typename T>
    hyb_inline hyb_device static T constrained (T* ptr) { return *ptr; }
    template<typename T>
    hyb_inline hyb_device static T constrained (T const * ptr) { return *ptr; }
    template<typename T>
    hyb_inline hyb_device static T constrained (T& ptr) { return ptr; }
    template<typename T>
    hyb_inline hyb_device static T constrained (T const & ptr) { return ptr; }
    // Bump allocator over dynamic shared memory. The running offset lives in
    // the first int of __hybridizer_cuda_local_shared (see initruntime);
    // thread (0,0) advances it on behalf of the whole block.
    template<typename T>
    struct sharedmemoryallocator
    {
        // TODO : HYB-794 => shared max allocation => error !!
        // Allocate a hybarray in shared memory (can be automatically converted to raw pointer if needed)
        __forceinline__ __device__ hybridizer::hybarray<T, 1> allocate(int count)
        {
            hybridizer::hybarray<T, 1> res ;
            // every thread reads the same current offset...
            res.ptr = (T*) (&(__hybridizer_cuda_local_shared[(*((int*)__hybridizer_cuda_local_shared))])) ;
            res.length[0] = count;
            res.lowerBound[0] = 0;
            __syncthreads();
            // ...then a single thread bumps it for the next allocation
            if (threadIdx.x == 0 && threadIdx.y == 0) {
                int sizeElt = sizeof(T);
                int size = count * sizeElt;
                (*((int*)__hybridizer_cuda_local_shared)) += size ;
            }
            return res ;
        }
        // Allocate a raw pointer in shared memory
        __forceinline__ __device__ T* allocate_raw(int count)
        {
            T* res = (T*) (&(__hybridizer_cuda_local_shared[(*((int*)__hybridizer_cuda_local_shared))])) ;
            __syncthreads();
            if (threadIdx.x == 0 && threadIdx.y == 0) {
                (*((int*)__hybridizer_cuda_local_shared)) += count * sizeof (T) ;
            }
            return res ;
        }
        int initialOffset ;       // shared offset captured at construction
        bool resetAtDestruction;  // roll the offset back in the destructor?
        __forceinline__ __device__ sharedmemoryallocator(bool reset = true)
        {
            initialOffset = (*((int*)__hybridizer_cuda_local_shared)) ;
            resetAtDestruction = reset;
        }
        // Scope-based rollback: restores the shared offset so allocations
        // made through this allocator are released when it leaves scope.
        __forceinline__ __device__ ~sharedmemoryallocator()
        {
            __syncthreads();
            if (threadIdx.x == 0 && threadIdx.y == 0 && resetAtDestruction) {
                if ((*((int*)__hybridizer_cuda_local_shared)) > initialOffset)
                    *((int*)__hybridizer_cuda_local_shared) = initialOffset;
            }
        }
    } ;
    // Typed element accessors used by generated code (`unused` receives a
    // runtime argument the generator always passes).
    hyb_inline hyb_device double getarraydouble(void* unused, void* ar, int idx) { return ((double*)ar)[idx] ; }
    hyb_inline hyb_device void setarraydouble(void* unused, void* ar, int idx, double value) { ((double*)ar)[idx] = value ; }
    hyb_inline hyb_device float getarrayfloat(void* unused, void* ar, int idx) { return ((float*)ar)[idx] ; }
    hyb_inline hyb_device void setarrayfloat(void* unused, void* ar, int idx, float value) { ((float*)ar)[idx] = value ; }
    // NOTE(review): the int accessors use `float` return/value types (large
    // ints lose precision) — looks copy/pasted from the float pair; confirm
    // against the Hybridizer code generator before changing.
    hyb_inline hyb_device float getarrayint(void* unused, void* ar, int idx) { return ((int*)ar)[idx] ; }
    hyb_inline hyb_device void setarrayint(void* unused, void* ar, int idx, float value) { ((int*)ar)[idx] = value ; }
    template<typename T> hyb_inline hyb_device T getarray(void* unused, void* ar, int idx) { return ((T*)ar)[idx] ; }
    template<typename T> hyb_inline hyb_device void setarray(void* unused, void* ar, int idx, T value) { ((T*)ar)[idx] = value ; }
}
// Reserve `count` bytes of dynamic shared memory via the block-wide bump
// allocator WITHOUT rolling the offset back when the helper's scope ends
// (resetAtDestruction disabled), so the allocation outlives this call.
__forceinline__ __device__ char* SharedMemoryPointer(int count)
{
    hybridizer::sharedmemoryallocator<char> allocator;
    allocator.resetAtDestruction = false;
    return allocator.allocate_raw(count);
}
// Raw base pointer of the dynamic shared-memory segment (includes the
// 16-byte runtime header written by hybridizer::initruntime).
__forceinline__ __device__ char* GetSharedMemoryArray() {
    return (char*) __hybridizer_cuda_local_shared;
}
/*
namespace hybridizer
{
void launch_kernel(int griddimx, int griddimy, int blockdimx, int blockdimy, int blockdimz, int shared,
void (*kernel) ())
{
dim3 grid (griddimx, griddimy, 1);
dim3 block(blockdimx, blockdimy, blockdimz);
kernel<<<grid, block, shared>>>();
}
template <typename T>
void launch_kernel(int griddimx, int griddimy, int blockdimx, int blockdimy, int blockdimz, int shared,
void (*kernel) (T arg), T a)
{
dim3 grid (griddimx, griddimy, 1);
dim3 block(blockdimx, blockdimy, blockdimz);
kernel<<<grid, block, shared>>>(a);
}
template <typename T1, typename T2>
void launch_kernel(int griddimx, int griddimy, int blockdimx, int blockdimy, int blockdimz, int shared,
void (*kernel) (T1 arg1, T2 arg2), T1 a, T2 b) {
dim3 grid (griddimx, griddimy, 1);
dim3 block(blockdimx, blockdimy, blockdimz);
kernel<<<grid, block, shared>>>(a, b);
}
template <typename T1, typename T2, typename T3>
void launch_kernel(int griddimx, int griddimy, int blockdimx, int blockdimy, int blockdimz, int shared,
void (*kernel) (T1 arg1, T2 arg2, T3 arg3), T1 a, T2 b, T3 c) {
dim3 grid (griddimx, griddimy, 1);
dim3 block(blockdimx, blockdimy, blockdimz);
kernel<<<grid, block, shared>>>(a, b, c);
}
template <typename T1, typename T2, typename T3, typename T4>
void launch_kernel(int griddimx, int griddimy, int blockdimx, int blockdimy, int blockdimz, int shared,
void (*kernel) (T1 arg1, T2 arg2, T3 arg3, T4 arg4), T1 a, T2 b, T3 c, T4 d) {
dim3 grid (griddimx, griddimy, 1);
dim3 block(blockdimx, blockdimy, blockdimz);
kernel<<<grid, block, shared>>>(a, b, c, d);
}
template <typename T1, typename T2, typename T3, typename T4, typename T5>
void launch_kernel(int griddimx, int griddimy, int blockdimx, int blockdimy, int blockdimz, int shared,
void (*kernel) (T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5), T1 a, T2 b, T3 c, T4 d, T5 e) {
dim3 grid (griddimx, griddimy, 1);
dim3 block(blockdimx, blockdimy, blockdimz);
kernel<<<grid, block, shared>>>(a, b, c, d, e);
}
// TODO: more versions or variadic templates
}
*/
/*
template <> __device__ int hybridizer::constrained<int,int*>(int* i) { return *i ; }
template <> __device__ int hybridizer::constrained<int,int>(int& i) { return i ; }
*/
#if !defined(__CUDA_ARCH__)
#include <limits>
#endif
#ifndef HYBRIDIZER_NO_HOST
#include <stdio.h>
#include <stdarg.h>
#define hybridizer__hyprintfva(f, format, ...) printf (format, __VA_ARGS__)
#define hybridizer__hyprintflineva(f, format, ...) hybridizer__hyprintfva (f, format "\n", __VA_ARGS__)
namespace hybridizer
{
    // Output-handle placeholder; the hybridizer runtime passes 0 to mean
    // "standard output".
    inline __device__ static FILE* get_Out() {
        return 0 ;
    }
    // Print `message` verbatim. Use "%s" so a '%' inside the message is not
    // interpreted as a format specifier (the original passed the message as
    // the format string — a classic format-string bug).
    inline __device__ static void hyprintf(FILE* f, const char* message) {
        printf("%s", message);
    }
    inline __device__ static void hyprintfline(FILE* f, const char* message) {
        printf("%s\n", message);
    }
};
#include <math_constants.h>
// Special floating-point values: device compilation uses the CUDART
// constants from <math_constants.h>, host compilation falls back to
// std::numeric_limits (hence the earlier <limits> include).
namespace hybridizer
{
#if defined(__CUDA_ARCH__)
    template <typename T> __device__ hyb_host T nan () ;
    template <typename T> __device__ hyb_host T pos_infinity () ;
    template <typename T> __device__ hyb_host T neg_infinity () ;
    template <> __forceinline__ __device__ float nan<> () { return CUDART_NAN_F ; }
    template <> __forceinline__ __device__ float pos_infinity<> () { return CUDART_INF_F ; }
    template <> __forceinline__ __device__ float neg_infinity<> () { return - CUDART_INF_F ; }
    template <> __forceinline__ __device__ double nan<> () { return CUDART_NAN ; }
    template <> __forceinline__ __device__ double pos_infinity<> () { return CUDART_INF ; }
    template <> __forceinline__ __device__ double neg_infinity<> () { return - CUDART_INF ; }
#endif
#if !defined(__CUDA_ARCH__)
    template <typename T> __device__ T nan () { return std::numeric_limits<T>::quiet_NaN(); }
    template <typename T> __device__ T pos_infinity () { return std::numeric_limits<T>::infinity(); }
    template <typename T> __device__ T neg_infinity () { return - std::numeric_limits<T>::infinity(); }
#endif
};
#else
// HYBRIDIZER_NO_HOST
#define hybridizer__hyprintfva(f, format, ...) printf (format, __VA_ARGS__)
#define hybridizer__hyprintflineva(f, format, ...) hybridizer__hyprintfva (f, format "\n", __VA_ARGS__)
typedef void FILE ;
namespace hybridizer
{
    // Output-handle placeholder; the hybridizer runtime passes 0 to mean
    // "standard output" (FILE is typedef'd to void in the no-host build).
    inline __device__ static FILE* get_Out() {
        return 0 ;
    }
    // Print `message` verbatim. Use "%s" so a '%' inside the message is not
    // interpreted as a format specifier (the original passed the message as
    // the format string — a classic format-string bug).
    inline __device__ static void hyprintf(FILE* f, const char* message) {
        printf("%s", message);
    }
    inline __device__ static void hyprintfline(FILE* f, const char* message) {
        printf("%s\n", message);
    }
};
// for NVRTC
namespace hybridizer
{
#if defined(__CUDA_ARCH__)
template <typename T> __device__ hyb_host T nan () ;
template <typename T> __device__ hyb_host T pos_infinity () ;
template <typename T> __device__ hyb_host T neg_infinity () ;
template <> __forceinline__ __device__ float nan<> () { return __int_as_float(0x7fffffff); }
template <> __forceinline__ __device__ float pos_infinity<> () { return __int_as_float(0x7f800000) ; }
template <> __forceinline__ __device__ float neg_infinity<> () { return - __int_as_float(0x7f800000) ; }
template <> __forceinline__ __device__ double nan<> () { return __longlong_as_double(0xfff8000000000000ULL) ; }
template <> __forceinline__ __device__ double pos_infinity<> () { return __longlong_as_double(0x7ff0000000000000ULL) ; }
template <> __forceinline__ __device__ double neg_infinity<> () { return - __longlong_as_double(0x7ff0000000000000ULL) ; }
#endif
}
#endif
namespace hybridizer
{
    // Diagnostic invoked when generated dispatch hits a pure-virtual slot:
    // reports the method name and type id via printf (no exception yet).
    __device__ static void signal_pure_virtual (const char* method, int id)
    {
        // TODO : throw exception...
        ::printf ("Pure virtual call for method <%s> on type id <%d> (CUDA flavor)\n", method, id) ;
    }
};
namespace hybridizer
{
    struct runtime;
    // Initialize the per-block shared-memory header (written by thread
    // (0,0), then published to the block via the barrier):
    //   bytes [0,4)  : shared-memory bump-allocator offset, starts at 16
    //   bytes [4,8)  : flag word, zeroed
    //   bytes [8,16) : pointer to the runtime instance (exception support)
    __forceinline__ __device__ void initruntime(hybridizer::runtime* rt)
    {
        if (threadIdx.x == 0 && threadIdx.y == 0)
        {
            // init shared memory offset (allocator starts past the header)
            (*((int*)__hybridizer_cuda_local_shared)) = 16 ;
            (*(((int*)__hybridizer_cuda_local_shared) + 1)) = 0 ;
#ifndef NO_EXCEPTION
            (*((runtime**)(__hybridizer_cuda_local_shared + 8))) = rt ;
#endif
        }
        __syncthreads();
    }
    // Fetch the runtime pointer stashed at shared-memory byte offset 8
    // (null when exceptions are compiled out).
    __forceinline__ __device__ hybridizer::runtime* getruntime()
    {
#ifndef NO_EXCEPTION
        return (*((runtime**)(__hybridizer_cuda_local_shared + 8))) ;
#else
        return 0;
#endif
    }
}
#ifndef NO_EXCEPTION
#pragma once
//#define HYBRIDIZER_EXCEPTIONS_HANDLE_THREAD
#include <hip/hip_runtime_api.h>
#define CUDA_CHECK(x) {if (x != hipSuccess) {printf("Cuda error %d\n", x); exit(-1);} }
// EXCEPTION MODES
#ifdef HYBRIDIZER_EXCEPTIONS_THROWTRAP
#define HYBRIDIZER_EXCEPTIONS_MODE_DEFINED
#endif
#ifdef HYBRIDIZER_EXCEPTIONS_HANDLE_THREAD
#define HYBRIDIZER_EXCEPTIONS_MODE_DEFINED
#endif
#ifdef HYBRIDIZER_EXCEPTIONS_HANDLE_BLOCK
#define HYBRIDIZER_EXCEPTIONS_MODE_DEFINED
#endif
#ifdef HYBRIDIZER_EXCEPTIONS_NONE
#define NO_EXCEPTION
#endif
#ifdef NO_EXCEPTION
#define HYBRIDIZER_EXCEPTIONS_MODE_DEFINED
#endif
#ifndef HYBRIDIZER_EXCEPTIONS_MODE_DEFINED
#define NO_EXCEPTION
#endif
#pragma region Runtime : Exceptions management
#ifndef __FILE_ID__
#define __FILE_ID__ 42
#endif
#pragma region Bit scan forward intrinsics
// bit_scan_forward(x): 1-based index of the least-significant set bit of x,
// or 0 when x == 0 (ffs semantics, matching __builtin_ffs).
#if defined(_MSC_VER) && !defined(__clang__)
#include <intrin.h>
#pragma intrinsic(_BitScanForward)
static hyb_inline uint32_t bit_scan_forward(uint32_t x)
{
    unsigned long res;
    int status = _BitScanForward(&res, x);
    return status ? res + 1 : 0;
}
extern void _hyb_raise_exception(int errorCode);
#elif defined (__INTEL_COMPILER)
static hyb_inline uint32_t bit_scan_forward(uint32_t x)
{
    // BSF leaves its destination undefined when the source is zero and its
    // result is 0-based; guard the zero case and add 1 so this branch agrees
    // with the ffs-style results of the other compiler branches.
    if (x == 0) return 0;
    int r;
    __asm__("bsf %1,%0" : "=r"(r) : "X"(x));
    return (uint32_t)r + 1;
}
#elif defined(__clang__)
static hyb_inline uint32_t bit_scan_forward(uint32_t x)
{
    return (uint32_t)__builtin_ffs (x) ;
}
#elif defined(__GNUC__)
static hyb_inline uint32_t bit_scan_forward(uint32_t x)
{
    return (uint32_t)__builtin_ffs (x) ;
}
#else
#error "Unsupported compiler - no ffs intrinsic"
#endif
#pragma endregion
#ifndef NO_EXCEPTION
#include <thread>
#include <map>
#include <hip/driver_types.h>
#include <mutex>
#endif
namespace hybridizer
{
// One captured stack frame: (file id, line number) as emitted by the
// hybridizer code generator.
struct exceptionframe
{
    int filenum;
    int linenum;
};
// A stack entry is either a propagation frame or the raw exception object
// pointer (entry 0 of a stack); the union overlays both representations.
struct exceptionentry
{
    union { void* exceptiondata; exceptionframe frame; };
    hyb_host hyb_device hyb_inline exceptionentry(int file, int line)
    {
        frame.filenum = file;
        frame.linenum = line;
    }
    hyb_host hyb_device hyb_inline exceptionentry(void* data)
    {
        exceptiondata = data;
    }
};
// Minimal header of a generated exception object: vtable pointer or type id
// overlaid in a single union (mirrors hybridobject's header).
struct exceptioninstance
{
    union { void* __vtable; int __typeid; };
};
// Per-thread exception record: entry 0 holds the thrown object pointer,
// entries 1..count-1 hold the propagation stack frames.
struct exceptionstack
{
    exceptionentry entries[1023];
    int count;      // number of valid entries
    int location;   // global throw ordinal (see runtime::throwexception)
    int code;       // error code supplied at the throw site
    // Reset the stack with the thrown object as its first entry.
    hyb_device hyb_inline void set(exceptionentry entry, int loc, int code)
    {
        count = 1;
        entries[0] = entry;
        location = loc;
        this->code = code;
    }
    // Append a (file, line) frame. Once full, slot 1022 is overwritten
    // repeatedly, so the earliest (deepest) frames are the ones preserved.
    hyb_device hyb_inline void populate(int file, int line)
    {
        entries[count].frame.filenum = file;
        entries[count].frame.linenum = line;
        ++count;
        if (count == 1023) count = 1022; // skip the topmost frames...
    }
};
// Root of the generated exception hierarchy: a vtable/type-id union (same
// header layout as hybridobject) followed by a padded message pointer.
struct baseexception
{
    hyb_device hyb_inline static int hybridizer_typeid() {return 0;}
    union { void* __vtable; int __typeid; };
    union { char* _message; char __message_padding[8]; };
};
// Built-in exception kinds; no payload beyond baseexception.
struct gpuexception : baseexception
{
};
struct nullpointerexception : baseexception {};
struct indexoutofboundsexception : baseexception {};
// What catchexception() hands back: the typed exception plus a view of the
// captured frames. `stack` aliases the thread's exceptionstack storage.
template <typename T>
struct exceptionhandle
{
    T exception;
    exceptionentry* stack; // this is only valid until the next throw...
    int stacksize;
    hyb_device hyb_inline exceptionhandle() {}
    hyb_device hyb_inline exceptionhandle(T ex) { exception = ex; }
};
class runtime
{
public:
typedef void (* exception_callback_type)(const int code);
#ifndef HYBRIDIZER_NO_HOST
hyb_inline hyb_host static runtime* host_getruntime(hipStream_t stream);
hyb_inline hyb_host static runtime* host_getruntime();
hyb_inline hyb_host void host_release();
hyb_inline hyb_host static void hostrethrow(runtime* hrt);
#endif
hyb_device hyb_inline static void init(runtime* rt);
// Reserve storage for an exception object of type `extype` inside the
// runtime's exception-instance buffer. Bump allocation: atomically advance
// the byte counter by sizeof(extype). Under THROWTRAP mode (or with
// exceptions disabled) nothing is allocated and null is returned.
template<typename extype>
hyb_device hyb_inline static void* allocateinstance()
{
#ifndef NO_EXCEPTION
#ifdef HYBRIDIZER_EXCEPTIONS_THROWTRAP
    return nullptr;
#else
    runtime* local_runtime = (runtime*)getruntime();
    int index = atomicAdd(&(local_runtime->exceptioninstancescount), sizeof(extype));
    return (void*)(local_runtime->exceptioninstances + index);
#endif
#else
    return NULL;
#endif
}
int exceptionstatus[64]; // only valid in shared memory
exceptionstack* exceptionstacks; // One stack per thread in grid (can be big)
unsigned exceptioncount;
unsigned exceptioninstancescount;
unsigned gridsize; // Currently allocated exceptionstacks
int* exceptionentries;
char* exceptioninstances;
exception_callback_type _exception_callback;
private:
hipStream_t _stream;
runtime(hipStream_t stream) : _stream(stream) {}
#ifndef NO_EXCEPTION
static std::map<std::pair<std::thread::id, hipStream_t>, runtime*> g_runtime_dict;
static std::mutex g_dict_mutex;
#endif
public:
// Linear index of this thread within its block (x fastest-varying).
hyb_device hyb_inline static int threadid()
{
    return threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
}
// Linear index of this block within the grid (x fastest-varying).
hyb_device hyb_inline static int blockid()
{
    return blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
}
// True when the calling thread has a pending exception, i.e. its bit is
// set in the per-warp word of `exceptionstatus`.
// NOTE(review): contains a __syncthreads(); all threads of the block must
// reach this call together — confirm generated call sites never diverge.
hyb_device hyb_inline static bool inexception()
{
#ifndef NO_EXCEPTION
    //if (reinterpret_cast<int*>(__hybridizer_cuda_local_shared)[1] == 0)
    // return false;
#ifdef HYBRIDIZER_EXCEPTIONS_THROWTRAP
    return false;
#else
    runtime* local_runtime = (runtime*)getruntime();
    int tid = threadid();
    __syncthreads();
    // one bit per thread, 32 threads per status word
    int warpStatus = local_runtime->exceptionstatus[tid >> 5];
    int mask = 1 << (tid & 31);
    return (warpStatus & mask) != 0;
#endif
#else
    return false;
#endif
}
// Record a thrown exception for the calling thread: set its status bit,
// take a global throw ordinal, and start a fresh per-thread stack with
// `data` as entry 0 and (file, line) as the first frame.
hyb_device hyb_inline static void throwexception(void* data, int file, int line, int code)
{
#ifndef NO_EXCEPTION
#ifdef HYBRIDIZER_EXCEPTIONS_THROWTRAP
    asm("trap;");  // trap mode: abort the kernel immediately
#else
    //reinterpret_cast<int*>(__hybridizer_cuda_local_shared)[1] = -1;
    runtime* local_runtime = (runtime*)getruntime();
    int tid = threadid();
    int bid = blockid();
    // one status bit per thread, 32 threads per word
    atomicOr(&(local_runtime->exceptionstatus[tid >> 5]), 1 << (tid & 31));
    // one exceptionstack per thread across the whole grid
    int index = tid + (blockDim.x * blockDim.y * blockDim.z * bid);
    unsigned int location = atomicInc(&local_runtime->exceptioncount, 0x7FFFFFFF);
    local_runtime->exceptionstacks[index].set(exceptionentry(data), location, code);
    local_runtime->exceptionstacks[index].populate(file, line);
    //local_runtime->exceptionentries [location] = index ;
#endif
#endif
}
// Append a (file, line) frame to the calling thread's exception stack,
// but only while that thread actually has a pending exception.
hyb_device hyb_inline static void populatestackframe(int file, int line)
{
#ifndef NO_EXCEPTION
#ifdef HYBRIDIZER_EXCEPTIONS_THROWTRAP
    // DO NOTHING
#else
    if (inexception())
    {
        runtime* local_runtime = (runtime*)getruntime();
        int tid = threadid();
        int bid = blockid();
        // one exceptionstack per thread across the whole grid
        int index = tid + (blockDim.x * blockDim.y * blockDim.z * bid);
        local_runtime->exceptionstacks[index].populate(file, line);
    }
#endif
#endif
}
// Returns true if instancetypeid is a subtype of basetypeid. Currently only
// exact type-id equality is implemented; real hierarchy walking is a TODO.
hyb_device hyb_inline static bool implements(int basetypeid, int instancetypeid)
{
    // TODO : really work
    return basetypeid == instancetypeid;
}
// Catches a pending exception for the current thread. Returns a handle
// wrapping the recorded exception object (cast to T) and its call stack,
// and clears the thread's exception state. When no exception is pending,
// returns an empty handle.
// NOTE(review): type matching (implements) is not performed yet; see TODOs.
template <typename T>
hyb_device hyb_inline static exceptionhandle<T> catchexception()
{
	exceptionhandle<T> result;
#ifndef NO_EXCEPTION
#ifdef HYBRIDIZER_EXCEPTIONS_THROWTRAP
	// Trap mode aborts on throw, so there is never anything to catch.
	return result;
#else
	if (!inexception()) return exceptionhandle<T>(0);
	runtime* local_runtime = (runtime*)getruntime();
	int tid = threadid();
	int bid = blockid();
	// Per-thread slot in the grid-wide exception stack array.
	int index = tid + (blockDim.x * blockDim.y * blockDim.z * bid);
	baseexception* be = (baseexception*)local_runtime->exceptionstacks[index].entries[0].exceptiondata;
	// if (be == 0) return exceptionhandle<T>(0) ; // => ideally, actually return an inner runtime exception
	// TODO : if (runtime::implements (T::extypeid(), be->__typeid))
	{
		// exception is caught - get the call stack....
		result.exception = (T)be;
		result.stacksize = local_runtime->exceptionstacks[index].count - 1;
		result.stack = local_runtime->exceptionstacks[index].entries + 1;
		// ... and reset: empty the stack and clear this thread's status bit.
		// (Removed dead locals: 'location', 'mask' and 'tmp' were computed
		// here but never used.)
		local_runtime->exceptionstacks[index].count = 0;
		atomicAnd(&(local_runtime->exceptionstatus[tid >> 5]), ~(1 << (tid & 31)));
	}
	// see TODO : else {
	// exception is not in proper type hierarchy
	// return exceptionhandle<T>(0) ;
	//}
#endif
#endif
	return result;
}
#ifndef HYBRIDIZER_NO_HOST
// Frees a runtime instance allocated in managed memory.
// NOTE(review): this releases only the runtime object itself — the
// exceptionstacks buffer allocated by hostinit() is not freed here;
// host_release() performs the full teardown. Confirm intended usage.
static hyb_inline void hostfree(runtime* hrt)
{
#ifndef NO_EXCEPTION
#ifdef HYBRIDIZER_EXCEPTIONS_THROWTRAP
// DO NOTHING
#else
::hipFree(hrt);
#endif
#endif
}
// Host-side runtime initialization: clears the per-warp exception status
// words and allocates one managed buffer holding the per-thread exception
// stacks (gridsize slots) immediately followed by exbuffersize bytes used
// for exception instance storage.
static hyb_inline void hostinit(runtime* hrt, int gridsize, int exbuffersize)
{
#ifndef NO_EXCEPTION
#ifdef HYBRIDIZER_EXCEPTIONS_THROWTRAP
// DO NOTHING
#else
// TODO : check and report this error !!!
// 64 status words, one bit per thread (see throwexception) => the status
// array tracks up to 64 * 32 = 2048 threads.
for (int k = 0; k < 64; ++k)
{
hrt->exceptionstatus[k] = 0;
}
hrt->exceptioncount = 0;
hrt->exceptioninstancescount = 0;
CUDA_CHECK(::hipMallocManaged(&(hrt->exceptionstacks), gridsize * sizeof(exceptionstack) + exbuffersize));
// Instance area starts right after the stack slots in the same buffer.
hrt->exceptioninstances = (((char*)hrt->exceptionstacks) + gridsize * sizeof(exceptionstack));
hrt->gridsize = gridsize;
#endif
#endif
}
};
// Host-side scan of the device exception state: for each 32-bit status
// word with at least one pending bit, reconstructs the id of the first
// faulting thread, reports its error code through the registered
// callback, then releases the runtime.
void runtime::hostrethrow(runtime* hrt)
{
#ifndef NO_EXCEPTION
#ifdef HYBRIDIZER_EXCEPTIONS_THROWTRAP
	// DO NOTHING
#else
	for (int k = 0; k < 64; ++k)
	{
		if (hrt->exceptionstatus[k] != 0)
		{
			// BUGFIX: the device records one bit per thread in 32-bit words
			// (exceptionstatus[tid >> 5] |= 1 << (tid & 31)), so the first
			// faulting thread in word k is k * 32 + bit — not k * 64, which
			// mapped to the wrong exception stack slot.
			int idx = k * 32 + bit_scan_forward(hrt->exceptionstatus[k]);
			// we have an exception => throw
			printf("Throwing %d\n", hrt->exceptionstacks[idx].code);
			//throw hrt->exceptionstacks[idx].code;
			hrt->_exception_callback(hrt->exceptionstacks[idx].code);
		}
	}
	hrt->host_release();
#endif
#endif
}
#endif
// Device-side per-block initialization: the first thread of each warp
// clears its warp's exception status word; the barrier publishes the
// cleared state to the whole block before any work begins.
// NOTE(review): the rt parameter is unused — the runtime is obtained via
// getruntime() instead.
hyb_device void runtime::init(runtime* rt)
{
#ifndef NO_EXCEPTION
#ifdef HYBRIDIZER_EXCEPTIONS_THROWTRAP
// DO NOTHING !
#else
runtime* local_runtime = getruntime();
int tid = threadid();
if ((tid & 31) == 0) local_runtime->exceptionstatus[tid >> 5] = 0;
__syncthreads();
#endif
#endif
}
#ifndef HYBRIDIZER_NO_HOST
// Returns (creating on first use) the runtime associated with the calling
// host thread and the given stream. Instances are cached in a global map
// keyed by (thread id, stream) and guarded by g_dict_mutex; allocation is
// in managed memory so both host and device can access the object.
hyb_host runtime* runtime::host_getruntime(hipStream_t stream)
{
#ifdef NO_EXCEPTION
return nullptr;
#else
auto key = std::make_pair(std::this_thread::get_id(), stream);
std::lock_guard<std::mutex> guard(g_dict_mutex);
{
auto it = g_runtime_dict.find(key);
runtime* res;
if (it == g_runtime_dict.end())
{
// NOTE(review): only _stream is set here; other fields appear to be
// initialized later by hostinit — confirm callers always pair the two.
CUDA_CHECK(::hipMallocManaged((void**)&res, sizeof(runtime)));
res->_stream = stream;
g_runtime_dict[key] = res;
} else
res = it->second;
return res;
}
#endif
}
// Convenience overload: runtime bound to the default (null) stream.
hyb_host runtime* runtime::host_getruntime()
{
return host_getruntime((hipStream_t) 0);
}
// Removes this runtime from the global cache and frees both the
// exception-stack buffer and the runtime object itself. The object must
// not be used after this call (it frees 'this').
hyb_host void runtime::host_release()
{
#ifndef NO_EXCEPTION
auto key = std::make_pair(std::this_thread::get_id(), _stream);
std::lock_guard<std::mutex> guard(g_dict_mutex);
{
auto it = g_runtime_dict.find(key);
if (it != g_runtime_dict.end())
g_runtime_dict.erase(key);
::hipFree(this->exceptionstacks);
::hipFree(this);
}
#endif
}
#endif
}
#pragma endregion
#ifndef NO_EXCEPTION
// Records a null-pointer exception (code -1) for the current thread at
// the given source location; execution continues (hybridizer model).
hyb_device hyb_inline void thrownullpointerexception(int fileid, int line)
{
::hybridizer::runtime::throwexception(::hybridizer::runtime::allocateinstance<::hybridizer::nullpointerexception>(), fileid, line, -1);
}
// Records an index-out-of-bounds exception (code -2) for the current
// thread at the given source location; execution continues.
hyb_device hyb_inline void throwindexoutofboundsexception(int fileid, int line)
{
::hybridizer::runtime::throwexception(::hybridizer::runtime::allocateinstance<::hybridizer::indexoutofboundsexception>(), fileid, line, -2);
}
namespace hybridizer
{
// Checked rank-1 array read: records a null-pointer and/or bounds
// exception before delegating to operator[]. Under the hybridizer model
// the kernel keeps running after a throw, so the access still happens.
// NOTE(review): the upper check compares i against length, not
// lowerBound + length — confirm lowerBound is always 0 for rank-1 arrays.
template <typename T, typename I>
hyb_device hyb_inline const T& checknpeandbounds(const hybarray<T,1>& a, I i, int fileid, int line)
{
if (a.ptr == nullptr) thrownullpointerexception(fileid, line);
if (i < a.lowerBound[0]) throwindexoutofboundsexception(fileid, line);
if (i >= a.length[0]) throwindexoutofboundsexception(fileid, line);
return a [i] ;
}
// Mutable overload of the checked read above.
template <typename T, typename I>
hyb_device hyb_inline T& checknpeandbounds(hybarray<T, 1>& a, I i, int fileid, int line)
{
if (a.ptr == nullptr) thrownullpointerexception(fileid, line);
if (i < a.lowerBound[0]) throwindexoutofboundsexception(fileid, line);
if (i >= a.length[0]) throwindexoutofboundsexception(fileid, line);
return a[i];
}
// Records a null-pointer exception when p is null; returns p unchanged.
template <typename T>
hyb_device hyb_inline T* checknpe(T* p, int fileid, int line)
{
if (p == nullptr) thrownullpointerexception(fileid, line);
return p ;
}
// hybarray overload: checks the wrapped data pointer.
template <typename T, int rank>
hyb_device hyb_inline hybarray<T, rank> checknpe(hybarray<T, rank> p, int fileid, int line)
{
if (p.ptr == nullptr) thrownullpointerexception(fileid, line);
return p ;
}
}
#define __hybridizer__checknpeandbounds(a,i) hybridizer::checknpeandbounds(a,i,__FILE_ID__, __LINE__)
#define __hybridizer__checknpe(a) hybridizer::checknpe(a,__FILE_ID__, __LINE__)
#else
#define __hybridizer__checknpeandbounds(a,i) a[i]
#define __hybridizer__checknpe(a) a
#endif
#endif
namespace hybridizer {
// Native structure for array with length, can be used as an array anywhere
// Native mirror of a managed array: data pointer plus per-dimension
// length and lower bound. The 8-byte padded pointer union must match the
// managed-side marshaled layout — do not reorder fields.
template <typename T, int rank>
struct hybarray {
	union {T *ptr ; char __hybridizer_padding_ptr[8] ; } ;
	uint32_t length[rank] ;
	uint32_t lowerBound[rank];
	// Records an index-out-of-bounds exception for index i of dimension k.
	// Returns false when invalid; the kernel keeps running (hybridizer
	// exception model), so callers may still perform the access.
	hyb_inline hyb_device bool check_index(int k, int i) const
	{
#ifndef NO_EXCEPTION
		if (i < 0 || i >= length[k])
		{
			::hybridizer::runtime::throwexception(::hybridizer::runtime::allocateinstance<::hybridizer::baseexception>(), __FILE_ID__, __LINE__, -2) ;
			return false;
		}
#endif
		return true;
	}
	// Element access. BUGFIX: the previous form ("if (check_index(...))
	// return ptr[i];") fell off the end of a non-void function when the
	// check failed — undefined behavior. The access is now performed
	// unconditionally (identical to the valid path, and consistent with
	// checknpeandbounds); the check still records the exception.
	hyb_inline hyb_device const T& operator[] (int i) const { check_index(0, i); return ptr [i]; }
	hyb_inline hyb_device T& operator[] (int i) { check_index(0, i); return ptr [i]; }
	hyb_inline hyb_device operator T* () const { return ptr ; }
	hyb_inline hyb_device int getRank() const { return rank ; }
	hyb_inline hyb_device int getLength(int d) const { return length[d] ; }
	hyb_inline hyb_device int getLowerbound(int d) const { return lowerBound[d] ; }
	// Row-major flattening of a rank-dimensional index (lower bounds
	// subtracted); each coordinate is bounds-checked as it is consumed.
	hyb_inline hyb_device int getIndex(const int indexes[]) const
	{
		int index = 0;
		for (int k = 0 ; k < rank ; ++k)
		{
			int sub = indexes[k] - lowerBound[k];
			if (k > 0) index *= length [k];
			index += sub ;
			check_index(k, sub);
		}
		return index;
	}
	hyb_inline hyb_device const T& get(const int indexes[]) const
	{
		return ptr [getIndex(indexes)] ;
	}
	hyb_inline hyb_device T* getAdr(const int indexes[]) const
	{
		return ptr + getIndex(indexes) ;
	}
	hyb_inline hyb_device void set(const int indexes[], T value)
	{
		ptr[getIndex(indexes)] = value;
	}
};
// Flat accessor helpers emitted by generated code: length / lower-bound
// queries plus element get / set / address for rank 1..3 arrays, with
// by-value and by-pointer overloads.
// BUGFIX: the void hybset overloads previously used "return <expr>;" with
// a non-void expression ("return ar[i] = value;"), which is ill-formed
// C++ (accepted only as a compiler extension). The 'return' keywords are
// dropped; behavior is unchanged.
template <typename T, int rank> hyb_inline hyb_device int hyblength(hybarray<T, rank> ar, int dim) { return ar.length[dim] ; }
template <typename T, int rank> hyb_inline hyb_device int hyblength(hybarray<T, rank>* ar, int dim) { return ar->length[dim] ; }
template <typename T, int rank> hyb_inline hyb_device int hyblowerbound(hybarray<T, rank> ar, int dim) { return ar.lowerBound[dim] ; }
template <typename T, int rank> hyb_inline hyb_device int hyblowerbound(hybarray<T, rank>* ar, int dim) { return ar->lowerBound[dim] ; }
template <typename T, int rank> hyb_inline hyb_device const T& hybget(hybarray<T, rank> ar, int i) { return ar[i] ; }
template <typename T, int rank> hyb_inline hyb_device const T& hybget(hybarray<T, rank> ar, int i, int j) { int idx[2] = {i, j}; return ar.get(idx) ; }
template <typename T, int rank> hyb_inline hyb_device const T& hybget(hybarray<T, rank> ar, int i, int j, int k) { int idx[3] = {i, j, k}; return ar.get(idx) ; }
template <typename T, int rank> hyb_inline hyb_device void hybset(hybarray<T, rank> ar, int i, T value) { ar[i] = value ; }
template <typename T, int rank> hyb_inline hyb_device void hybset(hybarray<T, rank> ar, int i, int j, T value) { int idx[2] = {i, j}; ar.set(idx, value) ; }
template <typename T, int rank> hyb_inline hyb_device void hybset(hybarray<T, rank> ar, int i, int j, int k, T value) { int idx[3] = {i, j, k}; ar.set(idx, value) ; }
template <typename T, int rank> hyb_inline hyb_device const T& hybget(hybarray<T, rank>* ar, int i) { return ar->operator [](i); }
template <typename T, int rank> hyb_inline hyb_device const T& hybget(hybarray<T, rank>* ar, int i, int j) { int idx[2] = { i, j }; return ar->get(idx); }
template <typename T, int rank> hyb_inline hyb_device const T& hybget(hybarray<T, rank>* ar, int i, int j, int k) { int idx[3] = { i, j, k }; return ar->get(idx); }
template <typename T, int rank> hyb_inline hyb_device void hybset(hybarray<T, rank>* ar, int i, T value) { ar->operator [](i) = value; }
template <typename T, int rank> hyb_inline hyb_device void hybset(hybarray<T, rank>* ar, int i, int j, T value) { int idx[2] = { i, j }; ar->set(idx, value); }
template <typename T, int rank> hyb_inline hyb_device void hybset(hybarray<T, rank>* ar, int i, int j, int k, T value) { int idx[3] = { i, j, k }; ar->set(idx, value); }
template <typename T, int rank> hyb_inline hyb_device T* hybaddress(hybarray<T, rank> ar, int i) { return ar + i ; }
template <typename T, int rank> hyb_inline hyb_device T* hybaddress(hybarray<T, rank> ar, int i, int j) { int idx[2] = {i, j}; return ar.getAdr(idx) ; }
template <typename T, int rank> hyb_inline hyb_device T* hybaddress(hybarray<T, rank> ar, int i, int j, int k) { int idx[3] = {i, j, k}; return ar.getAdr(idx) ; }
};
/* surface management */
#pragma region Surface Management
/*#if (__CUDA_ARCH__ < 300)
#error __CUDA_ARCH__ < 300
#else */
#if 1
// Size-generic 2D surface read/write helpers: dispatch on sizeof(T) to a
// typed surf2Dread/surf2Dwrite. 'x' is an element index and is converted
// to the byte offset the surface API expects (x * size). Guarded so the
// bodies are no-ops below __CUDA_ARCH__ 300 (surface objects need SM30+).
namespace hybridizer
{
#if (__CUDA_ARCH__ >= 300)
template <int size> inline __device__ void sizedSurf2Dread (hipSurfaceObject_t surface, int x, int y, void* output) ;
// 16-byte elements, read as one int4 and scattered to the output buffer.
template<> inline __device__ void sizedSurf2Dread<16>(hipSurfaceObject_t surface, int x, int y, void* output)
{
int4 tmp = ::surf2Dread<int4>(surface, x * 16, y) ;
((int*)output)[0] = tmp.x ;
((int*)output)[1] = tmp.y ;
((int*)output)[2] = tmp.z ;
((int*)output)[3] = tmp.w ;
}
template<> inline __device__ void sizedSurf2Dread<8>(hipSurfaceObject_t surface, int x, int y, void* output)
{
int2 tmp = ::surf2Dread<int2>(surface, x * 8, y) ;
((int*)output)[0] = tmp.x ;
((int*)output)[1] = tmp.y ;
}
template<> inline __device__ void sizedSurf2Dread<4>(hipSurfaceObject_t surface, int x, int y, void* output)
{
int tmp = ::surf2Dread<int>(surface, x * 4, y) ;
((int*)output)[0] = tmp ;
}
template<> inline __device__ void sizedSurf2Dread<2>(hipSurfaceObject_t surface, int x, int y, void* output)
{
short tmp = ::surf2Dread<short>(surface, x * 2, y) ;
((short*)output)[0] = tmp ;
}
template<> inline __device__ void sizedSurf2Dread<1>(hipSurfaceObject_t surface, int x, int y, void* output)
{
char tmp = ::surf2Dread<char>(surface, x, y) ;
((char*)output)[0] = tmp ;
}
#endif
// Generic entry point; the unused first parameter matches the generated
// call signature. Reads sizeof(T) bytes at element (x, y) into *output.
template <typename T>
static inline __device__ void surf2Dread(void*, hipSurfaceObject_t surface, int x, int y, T* output)
{
#if (__CUDA_ARCH__ >= 300)
sizedSurf2Dread <sizeof(T)>(surface, x, y, output);
#endif
}
#if (__CUDA_ARCH__ >= 300)
template <int size> inline __device__ void sizedSurf2Dwrite (hipSurfaceObject_t surface, int x, int y, void* output) ;
// 16-byte elements, gathered from the input buffer into one int4 store.
template<> inline __device__ void sizedSurf2Dwrite<16>(hipSurfaceObject_t surface, int x, int y, void* input)
{
int4 tmp ;
tmp.x = ((int*)input)[0] ;
tmp.y = ((int*)input)[1] ;
tmp.z = ((int*)input)[2] ;
tmp.w = ((int*)input)[3] ;
::surf2Dwrite(tmp, surface, x * 16, y) ;
}
template<> inline __device__ void sizedSurf2Dwrite<8>(hipSurfaceObject_t surface, int x, int y, void* input)
{
int2 tmp ;
tmp.x = ((int*)input)[0] ;
tmp.y = ((int*)input)[1] ;
::surf2Dwrite(tmp, surface, x * 8, y) ;
}
template<> inline __device__ void sizedSurf2Dwrite<4>(hipSurfaceObject_t surface, int x, int y, void* input)
{
int tmp ;
tmp = ((int*)input)[0] ;
::surf2Dwrite(tmp, surface, x * 4, y) ;
}
template<> inline __device__ void sizedSurf2Dwrite<2>(hipSurfaceObject_t surface, int x, int y, void* input)
{
short tmp ;
tmp = ((short*)input)[0] ;
::surf2Dwrite(tmp, surface, x * 2, y) ;
}
template<> inline __device__ void sizedSurf2Dwrite<1>(hipSurfaceObject_t surface, int x, int y, void* input)
{
char tmp ;
tmp = ((char*)input)[0] ;
::surf2Dwrite(tmp, surface, x, y) ;
}
#endif
// Generic entry point: writes sizeof(T) bytes from *input to element (x, y).
template <typename T>
static inline __device__ void surf2Dwrite(void*, hipSurfaceObject_t surface, int x, int y, T* input)
{
#if (__CUDA_ARCH__ >= 300)
sizedSurf2Dwrite<sizeof(T)>(surface, x, y, input) ;
#endif
}
};
#endif
#pragma endregion
#ifndef ATOMICADD_PDD
#define ATOMICADD_PDD
#if __CUDACC_VER_MAJOR__ < 8
// Software double-precision atomicAdd built on a 64-bit compare-and-swap
// loop (only compiled for toolkits older than CUDA 8, which lack the
// builtin). Returns the value stored at *address before the addition.
// The loop compares raw 64-bit patterns rather than doubles so a stored
// NaN (NaN != NaN) cannot make it spin forever.
__device__ static inline double atomicAdd(double* address, double val)
{
	unsigned long long int* target = (unsigned long long int*)address;
	unsigned long long int expected = *target;
	for (;;) {
		unsigned long long int desired =
			__double_as_longlong(val + __longlong_as_double(expected));
		unsigned long long int observed = atomicCAS(target, expected, desired);
		if (observed == expected)
			return __longlong_as_double(observed); // pre-add value, like the builtin
		expected = observed; // another thread won the race; retry with its value
	}
}
#endif
#endif //ATOMICADD_PDD
#ifndef ATOMICMAX_PDD
#define ATOMICMAX_PDD
// Atomic max for doubles via a 64-bit compare-and-swap loop: stores val
// at *address when it exceeds the current value, and returns the value
// observed before any update. A NaN val compares false and performs no
// store, matching the original negated-greater-than test.
__device__ static inline double atomicMax(double* address, double val)
{
	unsigned long long int* target = (unsigned long long int*)address;
	unsigned long long int cur = *target;
	while (val > __longlong_as_double(cur)) {
		unsigned long long int prev = atomicCAS(target, cur, __double_as_longlong(val));
		if (prev == cur)
			break; // swap succeeded; cur already holds the pre-update value
		cur = prev; // lost the race; re-evaluate against the new value
	}
	return __longlong_as_double(cur);
}
#endif //ATOMICMAX_PDD
// 8-lane integer vector: lanes stored as x, y, z, w, x2, y2, z2, w2.
struct int8 {
	int x, y, z, w, x2,y2,z2,w2 ;
	// Lanes are left uninitialized by the default constructor.
	hyb_device hyb_inline int8() {}
	hyb_device hyb_inline int8(int xx, int yy, int zz, int ww, int xx2, int yy2, int zz2, int ww2)
		: x(xx), y(yy), z(zz), w(ww), x2(xx2), y2(yy2), z2(zz2), w2(ww2) {}
};
// Lane-wise sum of two int8 vectors.
hyb_inline hyb_device static int8 operator+(const int8& l, const int8&r) {
	return int8(l.x + r.x, l.y + r.y, l.z + r.z, l.w + r.w,
	            l.x2 + r.x2, l.y2 + r.y2, l.z2 + r.z2, l.w2 + r.w2);
}
// 8-lane boolean vector packed as one bit per lane (bit i = lane i).
struct bool8 {
unsigned char mask;
};
// 8-lane float vector: lanes stored as x, y, z, w, x2, y2, z2, w2.
// Comparison operators on float8 produce the companion mask type bool8.
struct float8 {
	typedef bool8 masktype ;
	float x, y, z, w, x2, y2, z2, w2;
	// Lanes are left uninitialized by the default constructor.
	hyb_device hyb_inline float8() {}
	hyb_device hyb_inline float8(const float8& p)
		: x(p.x), y(p.y), z(p.z), w(p.w),
		  x2(p.x2), y2(p.y2), z2(p.z2), w2(p.w2) {}
	hyb_device hyb_inline float8(float ix, float iy, float iz, float iw, float ix2, float iy2, float iz2, float iw2)
		: x(ix), y(iy), z(iz), w(iw),
		  x2(ix2), y2(iy2), z2(iz2), w2(iw2) {}
};
// Lane-wise product of two float8 vectors.
hyb_device hyb_inline static float8 operator*(const float8& l, const float8&r) {
	return float8(l.x * r.x, l.y * r.y, l.z * r.z, l.w * r.w,
	              l.x2 * r.x2, l.y2 * r.y2, l.z2 * r.z2, l.w2 * r.w2);
}
// Scales every lane of l by the scalar r.
hyb_device hyb_inline static float8 operator*(const float8& l, const float&r) {
	float8 res;
	float* pres = (float*)&res;
	const float* pl = (const float*)&l;
	for (int i = 0; i < 8; ++i) {
		pres[i] = pl[i] * r;
	}
	return res;
}
// Lane-wise quotient of two float8 vectors.
hyb_device hyb_inline static float8 operator/(const float8& l, const float8& r) {
	return float8(l.x / r.x, l.y / r.y, l.z / r.z, l.w / r.w,
	              l.x2 / r.x2, l.y2 / r.y2, l.z2 / r.z2, l.w2 / r.w2);
}
// Divides every lane of l by the scalar r.
hyb_device hyb_inline static float8 operator/(const float8& l, const float& r) {
	float8 res;
	float* pres = (float*)&res;
	const float* pl = (const float*)&l;
	for (int i = 0; i < 8; ++i) {
		pres[i] = pl[i] / r;
	}
	return res;
}
// Lane-wise difference of two float8 vectors.
hyb_device hyb_inline static float8 operator-(const float8& l, const float8& r) {
	return float8(l.x - r.x, l.y - r.y, l.z - r.z, l.w - r.w,
	              l.x2 - r.x2, l.y2 - r.y2, l.z2 - r.z2, l.w2 - r.w2);
}
// Lane-wise less-than: bit i of the result is set when lane i of l < lane i of r.
hyb_device hyb_inline static bool8 operator<(const float8& l, const float8& r) {
	bool8 res;
	res.mask = (unsigned char)(
		((l.x  < r.x ) << 0) | ((l.y  < r.y ) << 1) |
		((l.z  < r.z ) << 2) | ((l.w  < r.w ) << 3) |
		((l.x2 < r.x2) << 4) | ((l.y2 < r.y2) << 5) |
		((l.z2 < r.z2) << 6) | ((l.w2 < r.w2) << 7));
	return res;
}
// Lane-wise greater-than of two float8 vectors.
hyb_device hyb_inline static bool8 operator>(const float8& l, const float8& r) {
	bool8 res;
	res.mask = (unsigned char)(
		((l.x  > r.x ) << 0) | ((l.y  > r.y ) << 1) |
		((l.z  > r.z ) << 2) | ((l.w  > r.w ) << 3) |
		((l.x2 > r.x2) << 4) | ((l.y2 > r.y2) << 5) |
		((l.z2 > r.z2) << 6) | ((l.w2 > r.w2) << 7));
	return res;
}
// Lane-wise greater-than against a scalar threshold.
hyb_device hyb_inline static bool8 operator>(const float8& l, const float& r) {
	bool8 res;
	res.mask = (unsigned char)(
		((l.x  > r) << 0) | ((l.y  > r) << 1) |
		((l.z  > r) << 2) | ((l.w  > r) << 3) |
		((l.x2 > r) << 4) | ((l.y2 > r) << 5) |
		((l.z2 > r) << 6) | ((l.w2 > r) << 7));
	return res;
}
// Lane-wise sum of two float8 vectors.
hyb_device hyb_inline static float8 operator+(const float8& l, const float8&r) {
	return float8(l.x + r.x, l.y + r.y, l.z + r.z, l.w + r.w,
	              l.x2 + r.x2, l.y2 + r.y2, l.z2 + r.z2, l.w2 + r.w2);
}
// Adds the scalar r to every lane of l.
hyb_device hyb_inline static float8 operator+(const float8& l, const float&r) {
	float8 res;
	float* pres = (float*)&res;
	const float* pl = (const float*)&l;
	for (int i = 0; i < 8; ++i) {
		pres[i] = pl[i] + r;
	}
	return res;
}
namespace hybridizer {
	// Lane-wise select: result lane i comes from l when mask bit i is set,
	// from r otherwise.
	template<typename Vec, typename Mask>
	hyb_device hyb_inline Vec select(const Mask& m, const Vec& l, const Vec& r);
	template<>
	hyb_device hyb_inline float8 select<float8, bool8>(const bool8& m, const float8& l, const float8& r) {
		float8 res;
		float* pres = (float*)&res;
		float* pl = (float*)&l;
		float* pr = (float*)&r;
		int mask = 1;
		for(int i = 0; i < 8; ++i) {
			if(m.mask & mask) {
				pres[i] = pl[i];
			}
			else {
				pres[i] = pr[i];
			}
			mask <<= 1;
		}
		return res;
	}
	// Returns a copy of the vector with lane 'index' replaced by 'elem'.
	// BUGFIX: the specializations previously wrote the vector's own lane
	// back into the copy (pr[index] = pv[index]), ignoring 'elem' entirely
	// and returning the input unchanged.
	template<typename Vec, typename Scal>
	hyb_device hyb_inline Vec insertElement(const Vec& a, Scal elem, int index);
	template<>
	hyb_device hyb_inline int8 insertElement(const int8& vec, int elem, int index) {
		int8 result(vec);
		int* pr = (int*)&result;
		pr[index] = elem;
		return result;
	}
	template<>
	hyb_device hyb_inline float8 insertElement(const float8& vec, float elem, int index) {
		float8 result(vec);
		float* pr = (float*)&result;
		pr[index] = elem;
		return result;
	}
	template<>
	hyb_device hyb_inline float4 insertElement(const float4& vec, float elem, int index) {
		float4 result(vec);
		float* pr = (float*)&result;
		pr[index] = elem;
		return result;
	}
	// Returns lane 'index' of the vector.
	template<typename Vec, typename Scal>
	hyb_device hyb_inline Scal extractElement(const Vec& vec, int index);
	template<>
	hyb_device hyb_inline int extractElement(const int8& vec, int index) {
		int* pv = (int*) &vec;
		return pv[index];
	}
	template<>
	hyb_device hyb_inline float extractElement(const float8& vec, int index) {
		float* pv = (float*) &vec;
		return pv[index];
	}
	template<>
	hyb_device hyb_inline float extractElement(const float4& vec, int index) {
		float* pv = (float*) &vec;
		return pv[index];
	}
	// LLVM shufflevector semantics over the concatenation (l, r): mask lane
	// values 0..7 pick from l, 8..15 pick from r, negative lanes are undef
	// and leave the output lane unspecified.
	// BUGFIX: indices >= 8 previously read pr[index], i.e. past the end of
	// the 8-lane second operand; they must be rebased with index - 8.
	template<typename VecOutput, typename VecInput, typename Mask>
	hyb_device hyb_inline VecOutput shuffleVector(const VecInput& l, const VecInput& r, const Mask& m);
	template<>
	hyb_device hyb_inline int8 shuffleVector(const int8& l, const int8& r, const int8& m) {
		int8 res;
		int* pl = (int*)&l;
		int* pr = (int*)&r;
		int* pm = (int*)&m;
		int* pres = (int*)&res;
		#pragma unroll
		for(int i = 0; i < 8; ++i) {
			int index = pm[i];
			if(index >= 0 && index < 8)
				pres[i] = pl[index];
			else if(index >= 8 && index < 16)
				pres[i] = pr[index - 8];
		}
		return res;
	}
	template<>
	hyb_device hyb_inline float8 shuffleVector(const float8& l, const float8& r, const int8& m) {
		float8 res;
		float* pl = (float*)&l;
		float* pr = (float*)&r;
		int* pm = (int*)&m;
		float* pres = (float*)&res;
		#pragma unroll
		for(int i = 0; i < 8; ++i) {
			int index = pm[i];
			if(index >= 0 && index < 8)
				pres[i] = pl[index];
			else if(index >= 8 && index < 16)
				pres[i] = pr[index - 8];
		}
		return res;
	}
}
// Plain-load fallbacks for read-only (__ldg-style) loads; an
// arch-dispatched variant exists only as commented-out code in this file.
static inline __device__ float4 hybrid_ldg(float4* x) { return *x ; }
static inline __device__ double hybrid_ldg(double* x) { return *x ; }
// Adds the scalar b to every component of a.
__device__ static inline float4 operator+(float4 a, float b)
{
	float4 out ;
	out.x = a.x + b ; out.y = a.y + b ;
	out.z = a.z + b ; out.w = a.w + b ;
	return out ;
}
// Scales every component of a by the scalar b.
__device__ static inline float4 operator*(float4 a, float b)
{
	float4 out ;
	out.x = a.x * b ; out.y = a.y * b ;
	out.z = a.z * b ; out.w = a.w * b ;
	return out ;
}
// Adds the scalar a to every component of b.
__device__ static inline float4 operator+(float a, float4 b)
{
	float4 out ;
	out.x = a + b.x ; out.y = a + b.y ;
	out.z = a + b.z ; out.w = a + b.w ;
	return out ;
}
// Scales every component of b by the scalar a.
__device__ static inline float4 operator*(float a, float4 b)
{
	float4 out ;
	out.x = a * b.x ; out.y = a * b.y ;
	out.z = a * b.z ; out.w = a * b.w ;
	return out ;
}
// Component-wise sum of two float4 vectors.
__device__ static inline float4 operator+(float4 a, float4 b)
{
	float4 out ;
	out.x = a.x + b.x ; out.y = a.y + b.y ;
	out.z = a.z + b.z ; out.w = a.w + b.w ;
	return out ;
}
// Component-wise product of two float4 vectors.
__device__ static inline float4 operator*(float4 a, float4 b)
{
	float4 out ;
	out.x = a.x * b.x ; out.y = a.y * b.y ;
	out.z = a.z * b.z ; out.w = a.w * b.w ;
	return out ;
}
/*
#if __CUDA_ARCH__ < 35
__device__ float4 hybrid_ldg(float4* x) { return *x ; }
#else
extern "C" __device__ float4 _ldg(float4*);
#define hybrid_ldg __ldg
#endif
*/
namespace hybridizer {
// Managed-delegate mirror: target instance plus bound function pointer,
// each padded to 8 bytes to match the marshaled layout on the managed side.
template <typename F> struct hybdelegate {
union {hybridobject* instance; char padding_instance[8]; };
union {F functor; char padding_functor[8]; };
// compare to null, and assign to null
hyb_device hyb_inline hybdelegate<F>& operator= (F func)
{
functor = func ;
return *this ;
}
hyb_device hyb_inline bool operator== (F func)
{
return func == functor ;
}
};
// Date/time value stored as raw ticks. get_Ticks masks off the top two
// bits — presumably mirroring .NET System.DateTime, where those bits
// carry the DateTimeKind; confirm against the managed-side marshaling.
// Note: operator== compares the full raw value, including the top bits.
struct hybdatetime {
uint64_t ticks;
hyb_device hyb_inline int64_t get_Ticks() { return (int64_t) (ticks & 0x3fffffffffffffff); }
hyb_device hyb_inline bool operator==(hybdatetime t) { return t.ticks == ticks; }
};
}
#pragma region pointer arithmetics
namespace hybridizer {
// Typed view over a byte-offset element of a raw buffer: wraps the
// address ptr + index (index is a BYTE offset, not an element count) and
// offers load/store through it.
template <typename T>
struct nativearrayindexer
{
T* _ptr ;
// Builds an indexer pointing at byte offset 'index' within 'ptr'.
hyb_device hyb_inline static nativearrayindexer<T> build (void* ptr, size_t index)
{
nativearrayindexer<T> res ;
res._ptr = (T*) (((char*)ptr) + index) ;
return res ;
}
hyb_device hyb_inline static void store (const nativearrayindexer<T>* ptr, const T& value)
{
*(ptr->_ptr) = value ;
}
hyb_device hyb_inline static T* getpointer (const nativearrayindexer<T>& ptr)
{
return ptr._ptr;
}
// Loads the element and converts it to U via a C-style cast.
template <typename U>
hyb_device hyb_inline static U load (const nativearrayindexer<T>* ptr)
{
return (U)(*(ptr->_ptr));
}
};
// in the special case of T = void* (which happens when code is generated from llvm), we just want to be able to
// - build
// - get the pointer and
// - cast to another type.
// load and store would have no meaning, so implementation is not provided
template <>
struct nativearrayindexer< void > {
void* _ptr;
hyb_device hyb_inline static nativearrayindexer<void> build (void* ptr, size_t index)
{
nativearrayindexer<void> res ;
res._ptr = (void*) (((char*)ptr) + index) ;
return res ;
}
// NOTE(review): the parameter type is nativearrayindexer<void*>, not
// nativearrayindexer<void> — likely a generator quirk; confirm callers.
hyb_device hyb_inline static void* getpointer (const nativearrayindexer<void*>& ptr)
{
return ptr._ptr;
}
// Reinterprets this indexer as an indexer over another element type.
template <typename U>
hyb_device hyb_inline operator nativearrayindexer<U>()
{
return *(nativearrayindexer<U>*)(void*)this;
}
};
}
#pragma endregion
#pragma region fixed buffers
namespace hybridizer
{
// Mirror of a C# fixed-size buffer: 'count' elements of T. The union
// exposes both the first element (FixedElementField, the name the C#
// compiler emits) and the whole backing array.
template<typename T, int count>
struct fixedbuffer
{
union
{
T FixedElementField ;
T __data [count] ;
} ;
} ;
}
#pragma endregion
#pragma region llvm memset/memcpy
namespace hybridizer {
// memset/memcpy helpers specialized on pointer alignment (in bytes),
// used to lower llvm.memset / llvm.memcpy intrinsics in generated code.
// The 32-byte-aligned variants move data 16 bytes (int4) at a time.
template<int align> // alignment in bytes
hyb_device hyb_inline void memseti32(char* ptr, char val, int size) ;
// 32-byte-aligned fill via int4 stores.
// NOTE(review): only size / 16 full chunks are written — any trailing
// size % 16 bytes are left untouched; presumably the generator only
// emits this for sizes that are multiples of 16. Confirm.
template<>
hyb_device hyb_inline void memseti32<32>(char* ptr, char val, int size)
{
if (val == 0)
{
int4 ival; ival.x = ival.y = ival.z = ival.w = 0 ;
int4* aptr = (int4*)ptr ;
#pragma unroll
for (int i = 0 ; i < size / 16 ; ++i)
{
aptr[i] = ival ;
}
} else {
// Replicate the byte into all four bytes of an int, then into an int4.
int iival = val | (((int)val) << 8) | (((int)val) << 16)| (((int)val) << 24);
int4 ival; ival.x = ival.y = ival.z = ival.w = iival ;
int4* aptr = (int4*)ptr ;
#pragma unroll
for (int i = 0 ; i < size / 16 ; ++i)
{
aptr[i] = ival ;
}
}
}
// Generic fallback: plain memset for any other alignment.
template<int align> // alignment in bytes
hyb_device hyb_inline void memseti32(char* ptr, char val, int size)
{
memset(ptr, val, size);
}
template<int align>
hyb_device hyb_inline void memcpyi32(char* ptr, char* src, int size) ;
// 32-byte-aligned copy via int4 moves (same trailing-bytes caveat as above).
template<>
hyb_device hyb_inline void memcpyi32<32>(char* dest, char* src, int size)
{
int4* adest = (int4*)dest ;
int4* asrc = (int4*)src ;
#pragma unroll
for (int i = 0 ; i < size / 16 ; ++i)
{
adest[i] = asrc[i] ;
}
}
// 4-byte-aligned copy via int moves.
template<>
hyb_device hyb_inline void memcpyi32<4>(char* dest, char* src, int size)
{
int* adest = (int*)dest ;
int* asrc = (int*)src ;
#pragma unroll
for (int i = 0 ; i < size / 4 ; ++i)
{
adest[i] = asrc[i] ;
}
}
// Generic fallback: plain memcpy for any other alignment.
template<int align>
hyb_device hyb_inline void memcpyi32(char* ptr, char* src, int size)
{
memcpy(ptr, src, size);
}
// 64-bit-size variants of the copy helpers.
template<int align>
hyb_device hyb_inline void memcpyi64(char* ptr, char* src, int64_t size) ;
template<>
hyb_device hyb_inline void memcpyi64<32>(char* dest, char* src, int64_t size)
{
int4* adest = (int4*)dest ;
int4* asrc = (int4*)src ;
#pragma unroll
for (int64_t i = 0 ; i < size / 16L ; ++i)
{
adest[i] = asrc[i] ;
}
}
template<>
hyb_device hyb_inline void memcpyi64<4>(char* dest, char* src, int64_t size)
{
int* adest = (int*)dest ;
int* asrc = (int*)src ;
#pragma unroll
for (int64_t i = 0 ; i < size / 4L ; ++i)
{
adest[i] = asrc[i] ;
}
}
template<int align>
hyb_device hyb_inline void memcpyi64(char* ptr, char* src, int64_t size)
{
memcpy(ptr, src, size);
}
}
#define __hybridizer_memseti32(ptr,val,count,align,isvolatile) hybridizer::memseti32<align>(ptr,val,count)
#define __hybridizer_memcpyi32(dest,src,len,align,isvolatile) hybridizer::memcpyi32<align>(dest,src,len)
#define __hybridizer_memcpyi64(dest,src,len,align,isvolatile) hybridizer::memcpyi64<align>(dest,src,len)
#pragma endregion
#pragma region vector load/store
namespace hybridizer {
// Vector load/store of a float8, specialized on pointer alignment: the
// 32-byte-aligned variants split the value into two float4 transactions;
// the generic fallbacks use a plain struct copy.
template<int alignment>
hyb_inline hyb_device float8 loadfloat8(const float8* ptr);
// 32-byte-aligned load as two float4 halves (low = x..w, high = x2..w2).
template<>
hyb_inline hyb_device float8 loadfloat8<32>(const float8* ptr) {
float8 res;
float4 low = ((float4*)(ptr))[0];
float4 high = ((float4*)(ptr))[1];
res.x = low.x;
res.y = low.y;
res.z = low.z;
res.w = low.w;
res.x2 = high.x;
res.y2 = high.y;
res.z2 = high.z;
res.w2 = high.w;
return res;
}
// Generic fallback: plain copy.
template<int alignment>
hyb_inline hyb_device float8 loadfloat8(const float8* ptr) {
return *ptr;
}
template<int alignment>
hyb_inline hyb_device void storefloat8(float8* ptr, const float8& val);
// 32-byte-aligned store as two float4 halves.
template<>
hyb_inline hyb_device void storefloat8<32>(float8* ptr, const float8& val) {
float4* iptr = (float4*) ptr;
float4* ival = (float4*) &val;
iptr[0] = ival[0];
iptr[1] = ival[1];
}
// Generic fallback: plain copy.
template<int alignment>
hyb_inline hyb_device void storefloat8(float8* ptr, const float8& val) {
*ptr = val;
}
#define __hybridizer_load_float8(ptr, alignment) hybridizer::loadfloat8<alignment>(ptr)
#define __hybridizer_store_float8(ptr, val, alignment) hybridizer::storefloat8<alignment>(ptr, val)
}
#pragma endregion
#pragma region actions
// #ifndef HYBRIDIZER_NO_HOST
namespace hybridizer {
// TODO?: something better than a function pointer but what??
#ifdef __cpp_variadic_templates
template<typename ...T>
struct action {
typedef void(*funcaction)(void* self, T... i);
union { void* _self; char padding1[8]; };
union { funcaction _funcptr; char padding2[8]; };
union { void* _funcptrvect; char padding3[8]; };
hyb_device hyb_inline action() {}
hyb_device hyb_inline action(void* self, void* func) : _self(self), _funcptr((funcaction)func) {}
hyb_device hyb_inline void invoke(void* self, T... i) {
_funcptr(self, i...);
}
hyb_device hyb_inline void invoke(T... i) {
_funcptr(_self, i...);
}
};
/// action2, action3 and so on are useless in cuda (since we have no vectorization issue). We just need action
template<typename ...T>
using action2 = action<T...>;
template<typename ...T>
using action3 = action<T...>;
template<typename ...T>
using action4 = action<T...>;
#else // visual < 2015
template<typename T>
struct action {
typedef void(*funcaction)(void* self, T i);
union { void* _self; char padding1[8]; };
union { funcaction _funcptr; char padding2[8]; };
union { void* _funcptrvect; char padding3[8]; };
hyb_device hyb_inline action() {}
hyb_device hyb_inline action(void* self, void* func) : _self(self), _funcptr((funcaction)func) {}
hyb_device hyb_inline void invoke(void* self, T i) {
_funcptr(self, i);
}
hyb_device hyb_inline void invoke(T i) {
_funcptr(_self, i);
}
};
template<typename T1, typename T2>
struct action2 {
typedef void(*funcaction)(void* self, T1 i1, T2 i2);
union { void* _self; char padding1[8]; };
union { funcaction _funcptr; char padding2[8]; };
union { void* _funcptrvect; char padding3[8]; };
hyb_device hyb_inline action2() {}
hyb_device hyb_inline action2(void* self, void* func) : _self(self), _funcptr((funcaction)func) {}
hyb_device hyb_inline void invoke(void* self, T1 i1, T2 i2) {
_funcptr(self, i1, i2);
}
hyb_device hyb_inline void invoke(T1 i1, T2 i2) {
_funcptr(_self, i1, i2);
}
};
template<typename T1, typename T2, typename T3>
struct action3 {
typedef void(*funcaction)(void* self, T1 i1, T2 i2, T3 i3);
union { void* _self; char padding1[8]; };
union { funcaction _funcptr; char padding2[8]; };
union { void* _funcptrvect; char padding3[8]; };
hyb_device hyb_inline action3() {}
hyb_device hyb_inline action3(void* self, void* func) : _self(self), _funcptr((funcaction)func) {}
hyb_device hyb_inline void invoke(void* self, T1 i1, T2 i2, T3 i3) {
_funcptr(self, i1, i2, i3);
}
hyb_device hyb_inline void invoke(T1 i1, T2 i2, T3 i3) {
_funcptr(_self, i1, i2, i3);
}
};
template<typename T1, typename T2, typename T3, typename T4>
struct action4 {
typedef void(*funcaction)(void* self, T1 i1, T2 i2, T3 i3, T4 i4);
union { void* _self; char padding1[8]; };
union { funcaction _funcptr; char padding2[8]; };
union { void* _funcptrvect; char padding3[8]; };
hyb_device hyb_inline action4() {}
hyb_device hyb_inline action4(void* self, void* func) : _self(self), _funcptr((funcaction)func) {}
hyb_device hyb_inline void invoke(void* self, T1 i1, T2 i2, T3 i3, T4 i4) {
_funcptr(self, i1, i2, i3, i4);
}
hyb_device hyb_inline void invoke(T1 i1, T2 i2, T3 i3, T4 i4) {
_funcptr(_self, i1, i2, i3, i4);
}
};
#endif
// static actions
#ifndef HYBRIDIZER_NO_HOST
template<typename T, hyb_device void (*func)(T)>
#else
template<typename T, void (*func)(T)>
#endif
struct action_static
{
hyb_device hyb_inline operator action<T> () const { return action<T>(NULL, invoke_ptr) ; } // nullptr not supported by nvcc <dummy>
hyb_device hyb_inline void invoke (T t) { return func (t) ; }
hyb_device hyb_inline void invoke (void* self, T t) { return func (t) ; }
hyb_device hyb_inline static void invoke_ptr (void* self, T t) { return func (t) ; }
};
template <typename T>
hyb_device hyb_inline void parallelfor(void* self, int start, int stop, action<T>* action);
template <>
hyb_device hyb_inline void parallelfor<int>(void* self, int start, int stop, action<int>* action)
{
for(int i = __hybridizer_threadIdxX + __hybridizer_blockIdxX * __hybridizer_blockDimX + start; i < stop; i += __hybridizer_blockDimX * __hybridizer_gridDimX) {
action->invoke(self, i);
}
}
template <typename T1, typename T2>
hyb_device hyb_inline void parallelfor2D(void* self, int startX, int stopX, int startY, int stopY, action2<T1, T2>* action);
template<>
hyb_device hyb_inline void parallelfor2D(void* self, int startX, int stopX, int startY, int stopY, action2<int, int>* action) {
for (int i = __hybridizer_threadIdxY + __hybridizer_blockIdxY * __hybridizer_blockDimY + startY; i < stopY; i += __hybridizer_blockDimY * __hybridizer_gridDimY) {
for (int j = __hybridizer_threadIdxX + __hybridizer_blockIdxX * __hybridizer_blockDimX + startX; j < stopX; j += __hybridizer_blockDimX * __hybridizer_gridDimX) {
action->invoke(self, i, j);
}
}
}
// By-value overload of parallelfor: same grid-stride iteration over [start, stop),
// but the action carries its own captured 'self' (action.invoke(i) forwards _self).
template <typename T>
hyb_device hyb_inline void parallelfor(int start, int stop, action<T> action);
template <>
hyb_device hyb_inline void parallelfor<int>(int start, int stop, action<int> action)
{
for(int i = __hybridizer_threadIdxX + __hybridizer_blockIdxX * __hybridizer_blockDimX + start; i < stop; i += __hybridizer_blockDimX * __hybridizer_gridDimX) {
action.invoke(i);
}
}
// By-value overload of parallelfor2D: grid-stride iteration in both axes over
// [startY, stopY) x [startX, stopX); the action carries its own captured 'self'.
template <typename T1, typename T2>
hyb_device hyb_inline void parallelfor2D(int startX, int stopX, int startY, int stopY, action2<T1, T2> action);
template <>
hyb_device hyb_inline void parallelfor2D<int>(int startX, int stopX, int startY, int stopY, action2<int, int> action)
{
for(int i = __hybridizer_threadIdxY + __hybridizer_blockIdxY * __hybridizer_blockDimY + startY; i < stopY; i += __hybridizer_blockDimY * __hybridizer_gridDimY) {
for(int j = __hybridizer_threadIdxX + __hybridizer_blockIdxX * __hybridizer_blockDimX + startX; j < stopX; j += __hybridizer_blockDimX * __hybridizer_gridDimX) {
action.invoke(i, j);
}
}
}
}
// #endif
#pragma endregion
#pragma region Func<...>
#ifndef HYBRIDIZER_NO_HOST
namespace hybridizer
{
// Dynamic 4-argument delegate with return value: a target object ('_self') plus a
// C-style function pointer whose first parameter receives that target. Each field is
// padded to 8 bytes via anonymous unions — presumably so the layout matches the
// marshalled .NET delegate representation on both 32- and 64-bit builds; TODO confirm.
// '_funcptrvect' is never read here — assumed to be a vectorized-entry slot used by
// other generated code; verify before removing.
template<typename T, typename U, typename V, typename W, typename retV>
struct func4
{
typedef retV (*funcaction)(void* self, T i, U j, V k, W l);
union { void* _self; char padding1[8]; };
union { funcaction _funcptr ; char padding2[8]; };
union { void* _funcptrvect ; char padding3[8]; };
hyb_device hyb_inline func4 () {}
hyb_device hyb_inline func4 (void* self, void* func) : _self(self), _funcptr ((funcaction)func) { }
// Invoke against an explicit target.
hyb_device hyb_inline retV invoke(void* self, T i, U j, V k, W l) const {
return _funcptr(self, i, j, k, l);
}
// Invoke against the captured target.
hyb_device hyb_inline retV invoke(T i, U j, V k, W l) const {
return _funcptr(_self, i, j, k, l);
}
} ;
// Statically-bound 4-argument functor: wraps a compile-time function pointer so it
// can be converted to a dynamic func4<T,U,V,W,retV> or invoked directly ('self' is
// accepted for signature compatibility and ignored).
// Fix: the function-pointer template parameter was declared as retV(*)(T,U,V) —
// three arguments, copy-pasted from func3_static — while every invoke() below calls
// it with four arguments (t,u,v,w), so no instantiation could ever compile. The
// correct type is retV(*)(T,U,V,W), matching func3_static/func2_static/func1_static.
template<typename T, typename U, typename V, typename W, typename retV, hyb_device retV (*funcptr)(T,U,V,W)>
struct func4_static
{
// http://www.cplusplus.com/forum/beginner/11866/
hyb_device hyb_inline operator func4<T,U,V,W,retV> () const { return func4<T,U,V,W,retV>(NULL, invoke_ptr) ; }
hyb_device hyb_inline retV invoke (T t, U u, V v, W w) { return funcptr (t,u,v,w) ; }
hyb_device hyb_inline retV invoke (void* self, T t, U u, V v, W w) { return funcptr (t,u,v,w) ; }
hyb_device hyb_inline static retV invoke_ptr (void* self, T t, U u, V v, W w) { return funcptr (t,u,v,w) ; }
} ;
// Dynamic 3-argument delegate with return value; same layout scheme as func4
// (8-byte-padded target pointer, function pointer, and unused vector-entry slot).
template<typename T, typename U, typename V, typename retV>
struct func3
{
typedef retV (*funcaction)(void* self, T i, U j, V k);
union { void* _self; char padding1[8]; };
union { funcaction _funcptr ; char padding2[8]; };
union { void* _funcptrvect ; char padding3[8]; };
hyb_device hyb_inline func3 () {}
hyb_device hyb_inline func3 (void* self, void* func) : _self(self), _funcptr ((funcaction)func) { }
// Invoke against an explicit target.
hyb_device hyb_inline retV invoke(void* self, T i, U j, V k) {
return _funcptr(self, i, j, k);
}
// Invoke against the captured target.
hyb_device hyb_inline retV invoke(T i, U j, V k) {
return _funcptr(_self, i, j, k);
}
} ;
// Statically-bound 3-argument functor; convertible to a dynamic func3 whose target
// is null ('self' parameters are ignored — the wrapped pointer is free-standing).
template<typename T, typename U, typename V, typename retV, hyb_device retV (*funcptr)(T,U,V)>
struct func3_static
{
// http://www.cplusplus.com/forum/beginner/11866/
hyb_device hyb_inline operator func3<T,U,V,retV> () const { return func3<T,U,V,retV>(NULL, invoke_ptr) ; }
hyb_device hyb_inline retV invoke (T t, U u, V v) { return funcptr (t,u,v) ; }
hyb_device hyb_inline retV invoke (void* self, T t, U u, V v) { return funcptr (t,u,v) ; }
hyb_device hyb_inline static retV invoke_ptr (void* self, T t, U u, V v) { return funcptr (t,u,v) ; }
} ;
// Dynamic 2-argument delegate with return value; same layout scheme as func4/func3.
template<typename T, typename U, typename retV>
struct func2
{
typedef retV (*funcaction)(void* self, T i, U j);
union { void* _self; char padding1[8]; };
union { funcaction _funcptr ; char padding2[8]; };
union { void* _funcptrvect ; char padding3[8]; };
hyb_device hyb_inline func2 () {}
hyb_device hyb_inline func2 (void* self, void* func) : _self(self), _funcptr ((funcaction)func) { }
// Invoke against an explicit target.
hyb_device hyb_inline retV invoke(void* self, T i, U j) {
return _funcptr(self, i, j);
}
// Invoke against the captured target.
hyb_device hyb_inline retV invoke(T i, U j) {
return _funcptr(_self, i, j);
}
} ;
// Statically-bound 2-argument functor; convertible to a dynamic func2 with null target.
template<typename T, typename U, typename retV, hyb_device retV (*funcptr)(T,U)>
struct func2_static
{
// http://www.cplusplus.com/forum/beginner/11866/
hyb_device hyb_inline operator func2<T,U,retV> () const { return func2<T,U,retV>(NULL, invoke_ptr) ; }
hyb_device hyb_inline retV invoke (T t, U u) { return funcptr (t,u) ; }
hyb_device hyb_inline retV invoke (void* self, T t, U u) { return funcptr (t,u) ; }
hyb_device hyb_inline static retV invoke_ptr (void* self, T t, U u) { return funcptr (t,u) ; }
} ;
// Statically-bound 2-argument functor whose target function also receives a typed
// capture object S*. NOTE(review): the conversion to func2<T,U,retV> passes
// invoke_ptr (taking S*) through a void* func slot, reinterpreting the first
// parameter — presumably valid for the generator's calling conventions; verify.
template<typename S, typename T, typename U, typename retV, hyb_device retV(*funcptr)(S*, T, U)>
struct func2_capture_static
{
hyb_device hyb_inline operator func2<T, U, retV>() const { return func2<T, U, retV>(NULL, invoke_ptr); }
// No-capture form: the callee receives a null capture object.
hyb_device hyb_inline retV invoke(T t, U u) { return funcptr(NULL, t, u); }
hyb_device hyb_inline retV invoke(S* self, T t, U u) { return funcptr(self, t, u); }
hyb_device hyb_inline static retV invoke_ptr(S* self, T t, U u) { return funcptr(self, t, u); }
};
// Dynamic 1-argument delegate with return value; same layout scheme as func4..func2.
// NOTE(review): null_ptr() returns a func1 whose members are left uninitialized —
// presumably callers only overwrite or identity-compare it (via operator void*,
// which exposes _self); confirm before invoking such an instance.
template<typename T, typename retV>
struct func1
{
typedef retV (*funcaction)(void* self, T i);
union { void* _self; char padding1[8]; };
union { funcaction _funcptr ; char padding2[8]; };
union { void* _funcptrvect ; char padding3[8]; };
static hyb_device hyb_inline func1<T, retV> null_ptr() {
func1 res;
return res;
}
// Exposes the target pointer, e.g. for null/identity tests.
hyb_device hyb_inline operator void*() { return _self; }
hyb_device hyb_inline func1 () {}
hyb_device hyb_inline func1 (void* self, void* func) : _self(self), _funcptr ((funcaction)func) { }
// Invoke against an explicit target.
hyb_device hyb_inline retV invoke(void* self, T i) {
return _funcptr(self, i);
}
// Invoke against the captured target.
hyb_device hyb_inline retV invoke(T i) {
return _funcptr(_self, i);
}
};
// Statically-bound 1-argument functor; convertible to a dynamic func1 with null target.
template<typename T, typename retV, hyb_device retV (*funcptr)(T)>
struct func1_static
{
// http://www.cplusplus.com/forum/beginner/11866/
hyb_device hyb_inline operator func1<T,retV> () const { return func1<T,retV>(NULL, invoke_ptr) ; }
hyb_device hyb_inline retV invoke (T t) { return funcptr (t) ; }
hyb_device hyb_inline retV invoke (void* self, T t) { return funcptr (t) ; }
hyb_device hyb_inline static retV invoke_ptr (void* self, T t) { return funcptr (t) ; }
} ;
}
#endif
#pragma endregion
namespace hybridizer {
// Device-side mirror of System.Nullable<T>: the value followed by a HasValue flag.
// The union pads the flag — presumably to round the struct up to the marshalled
// .NET layout. NOTE(review): for sizeof(T) < 4 the expression (sizeof(T) - 4)
// wraps around (size_t is unsigned) — TODO confirm the padding formula is intended
// for small T.
template <typename T> struct nullable {
T data;
union {
bool hasValue;
char __padding[(sizeof(T) - 4) % 4 + 4];
};
hyb_device hyb_inline bool get_HasValue() { return hasValue; }
hyb_device hyb_inline T get_Value() { return data; }
};
// Minimal device-side string: a borrowed, non-owning pointer to NUL-terminated text.
struct string {
const char* data;
#if !defined(_WIN32)
hyb_inline hyb_device string() {}
hyb_inline hyb_device string(const char* p) : data(p) {}
#endif
#if !defined(HYBRIDIZER_NO_HOST)
// NOTE(review): compares the data POINTERS, not the characters — two distinct
// buffers holding equal text compare unequal. Presumably valid because the
// generator interns string literals; verify before relying on content equality.
friend hyb_inline hyb_device int operator==(const hybridizer::string& s1, const hybridizer::string& s2) {
return s1.data == s2.data;
}
#endif
hyb_inline hyb_device operator const char*() {return data;}
};
// Prints the string's text followed by a newline via a safe "%s" format.
// The stream parameter is accepted for Console-style signatures and ignored;
// the host build types it as FILE*, the device-only build as void*.
#if !defined(HYBRIDIZER_NO_HOST)
hyb_device hyb_inline static void hyprintfline(FILE* f, const string& message) {
printf("%s\n", message.data);
}
#else
hyb_device hyb_inline static void hyprintfline(void* f, const string& message) {
printf("%s\n", message.data);
}
#endif
}
namespace hybridizer
{
// Thin wrapper around an integral index value, providing C#-style op_Implicit
// conversion plus the comparison operators the generated code emits. Converts
// implicitly to int regardless of T.
template <typename T>
struct alignedindex
{
T inner ;
static hyb_device hyb_inline alignedindex <T> op_Implicit (T t) { alignedindex <T> res ; res.inner = t ; return res ; }
hyb_device hyb_inline T get_Inner () { return inner ; }
// hyb_device hyb_inline alignedindex<T> operator +(T rht) const { alignedindex<T> res ; res.inner = inner + rht ; return res ; }
hyb_device hyb_inline alignedindex<T> () {}
hyb_device hyb_inline alignedindex<T> (T i) { inner = i ; }
hyb_device hyb_inline operator int () const { return inner ; }
hyb_device hyb_inline bool operator<(T i) const { return inner < i ; }
hyb_device hyb_inline bool operator>(T i) const { return inner > i ; }
hyb_device hyb_inline bool operator<=(T i) const { return inner <= i ; }
hyb_device hyb_inline bool operator>=(T i) const { return inner >= i ; }
hyb_device hyb_inline bool operator==(T i) const { return inner == i ; }
hyb_device hyb_inline bool operator!=(T i) const { return inner != i ; }
} ;
// Non-owning view over a raw element buffer (pointer padded to 8 bytes).
// get_Item/set_Item are unchecked array accessors returning/taking T by value.
template <typename T>
struct alignedstorage
{
union
{
T* inner ;
char __padding[8];
};
hyb_inline hyb_device T get_Item (const int & index) const
{
return inner[index];
}
hyb_inline hyb_device void set_Item (int index, T value) {
inner[index] = value;
}
} ;
// Non-owning view over a stack-allocated element buffer; identical accessors to
// alignedstorage except set_Item takes the value by const reference.
template <typename T>
struct stackarray
{
union
{
T* inner;
char __padding[8];
};
hyb_inline hyb_device T get_Item (const int & index) const
{
return inner[index];
}
hyb_inline hyb_device void set_Item (int index, const T& value) {
inner[index] = value;
}
} ;
}
namespace hybridizer
{
// Exchanges the contents of *a and *b through a temporary copy.
template<typename T>
hyb_inline hyb_device void swap (T* a, T* b)
{
T tmp = *a ;
*a = *b ;
*b = tmp ;
}
}
namespace hybridizer {
// Reinterprets the object representation of 'f' as a value of type To.
// NOTE(review): this type-puns through a pointer cast, which violates C++ strict
// aliasing rules; a memcpy-based implementation is the portable form. Left as-is
// in this generated code — verify the build disables strict-aliasing optimizations
// or that nvcc/hipcc tolerate this pattern at the call sites.
template<typename From, typename To>
hyb_inline hyb_device To bitcast(const From& f)
{
return *((To*)&f);
}
}
#ifdef HYB_CUDA_HALF
#include <hybridizer.cuda.half.cuh>
#endif
namespace hybridizer {
// True only for thread (0,0,0) of each block — used to let exactly one thread
// per block execute a serial section.
hyb_inline hyb_device bool enforce_serial()
{
return threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0;
}
}
// ----- /HYBRIDIZER_CUDA_CUH -----
#include <cstdio>
// generating GetTypeID function
#include <cstring> // for strcmp
// Maps a fully-qualified .NET type name to the integer type id assigned by the
// Hybridizer code generator (ids are consecutive from 1000000, in registration
// order). Unknown names map to 0. Same mapping as the original strcmp chain,
// expressed as a table lookup.
extern "C" DLL_PUBLIC int HybridizerGetTypeID( const char* fullTypeName)
{
static const char* const knownTypeNames[] = {
"Hybrid.Program",
"Hybridizer.Runtime.CUDAImports.IntResidentArray",
"Hybridizer.Runtime.CUDAImports.ResidentArrayStatus",
"System.Action<System.Int32>",
"System.Nullable<System.Int64>",
"System.Object",
"System.Threading.Tasks.Parallel",
"System.Threading.Tasks.ParallelLoopResult"
};
for (int k = 0; k < 8; ++k) {
if (strcmp (fullTypeName, knownTypeNames[k]) == 0) return 1000000 + k;
}
return 0 ;
}
// Inverse of HybridizerGetTypeID: maps a generator-assigned type id (1000000..1000007)
// back to the fully-qualified .NET type name, or "" for any id outside that range.
extern "C" DLL_PUBLIC const char* HybridizerGetTypeFromID( const int typeId)
{
static const char* const knownTypeNames[] = {
"Hybrid.Program",
"Hybridizer.Runtime.CUDAImports.IntResidentArray",
"Hybridizer.Runtime.CUDAImports.ResidentArrayStatus",
"System.Action<System.Int32>",
"System.Nullable<System.Int64>",
"System.Object",
"System.Threading.Tasks.Parallel",
"System.Threading.Tasks.ParallelLoopResult"
};
if (typeId >= 1000000 && typeId <= 1000007) return knownTypeNames[typeId - 1000000];
return "" ;
}
// Returns the shallow (non-recursive) byte size of each generated type's native
// representation. Each entry is compiled in only when the matching __TYPE_DECL_*
// macro was emitted for this module; anything unknown or not declared returns 0.
extern "C" DLL_PUBLIC int HybridizerGetShallowSize (const char* fullTypeName)
{
#ifdef __TYPE_DECL__Hybrid_Program___
if (strcmp (fullTypeName, "Hybrid.Program") == 0) return 8 ;
#endif
#ifdef __TYPE_DECL__Hybridizer_Runtime_CUDAImports_IntResidentArray___
if (strcmp (fullTypeName, "Hybridizer.Runtime.CUDAImports.IntResidentArray") == 0) return 32 ;
#endif
#ifdef __TYPE_DECL_int__
if (strcmp (fullTypeName, "Hybridizer.Runtime.CUDAImports.ResidentArrayStatus") == 0) return 4 ;
#endif
#ifdef __TYPE_DECL_hybridizer_action__T____
if (strcmp (fullTypeName, "System.Action<System.Int32>") == 0) return 16 ;
#endif
#ifdef __TYPE_DECL_hybridizer_nullable__T____
if (strcmp (fullTypeName, "System.Nullable<System.Int64>") == 0) return 16 ;
#endif
#ifdef __TYPE_DECL_hybridizer_hybridobject___
if (strcmp (fullTypeName, "System.Object") == 0) return 8 ;
#endif
#ifdef __TYPE_DECL__System_Threading_Tasks_ParallelLoopResult__
if (strcmp (fullTypeName, "System.Threading.Tasks.ParallelLoopResult") == 0) return 24 ;
#endif
return 0 ;
}
// Get various Hybridizer properties at runtime
// Code-generation settings reported to the managed runtime (see
// __HybridizerGetProperties below). Fixed-width fields for a stable ABI;
// _dummy pads the struct to 16 bytes.
struct __hybridizer_properties {
int32_t UseHybridArrays;
int32_t Flavor;
int32_t CompatibilityMode;
int32_t _dummy;
};
// Reports the settings this module was generated with: hybrid arrays disabled,
// flavor 1, compatibility mode disabled.
// Fix: '_dummy' was left uninitialized, so indeterminate stack bytes were copied
// to the caller across the ABI boundary; zero it explicitly.
extern "C" DLL_PUBLIC __hybridizer_properties __HybridizerGetProperties () {
__hybridizer_properties res;
res.UseHybridArrays = 0;
res.Flavor = 1;
res.CompatibilityMode = 0;
res._dummy = 0;
return res ;
}
#include <hip/hip_runtime.h>
// Handle pair for the embedded GPU module: the raw image pointer and the loaded
// hipModule_t. 'module_data' presumably doubles as the "already loaded" guard
// tested by the wrappers below — TODO confirm it is assigned after loading.
struct HybridModule
{
void* module_data ;
hipModule_t module ;
} ;
// Embedded cubin/code-object image, linked into this binary by the generator.
extern char __hybridizer_cubin_module_data [] ;
// Process-wide lazily-loaded module shared by all kernel wrappers in this file.
static HybridModule __hybridizer__gs_module = { 0 };
#pragma region Wrappers definitions
// Host-side extern "C" wrapper for kernel "Hybridx46Programx46Create_pre_basis":
// lazily loads the embedded module (once per process), resolves the kernel, launches
// it through the HIP module (driver-style) API and synchronizes. Returns 0 on
// success or the first non-zero hipError_t (as int) encountered.
// NOTE(review): the lazy module load is not thread-safe — concurrent first calls race.
extern "C" DLL_PUBLIC int Hybridx46Programx46Create_pre_basis_ExternCWrapper_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared, Hybridizer::Runtime::CUDAImports::IntResidentArray* const equation, Hybridizer::Runtime::CUDAImports::IntResidentArray* const pre_basis_main, int N)
{
hipError_t cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = hipModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
if (cures != hipSuccess) return (int)cures ;
// Fix: record that the module is resident. Previously module_data was never
// assigned, so the guard above stayed false and the module was reloaded
// (and the old handle leaked) on every single call.
__hybridizer__gs_module.module_data = __hybridizer_cubin_module_data ;
}
hipFunction_t __hybridizer__cufunc ;
cures = hipModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Hybridx46Programx46Create_pre_basis") ;
if (cures != hipSuccess) return (int)cures ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
// Kernel parameter block: address of each argument, null-terminated.
void* __hybridizer_launch_config[5] =
{
(void*)&__hybridizer_runtime,
(void*)&equation,
(void*)&pre_basis_main,
(void*)&N,
(void*)0
} ;
// Reserve extra bytes (presumably the shared-memory bump-allocator header — see
// the allocator storing its offset at the base of dynamic shared memory) and
// clamp to the 48KB static shared-memory limit.
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = hipModuleLaunchKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, 0, __hybridizer_launch_config, 0) ;
if (cures != hipSuccess) return (int)cures ;
// Catch launch-configuration errors, then synchronize to surface execution errors.
int cudaLaunchRes = (int)::hipPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
int __synchronizeRes = (int)::hipDeviceSynchronize () ;
return __synchronizeRes ;
}
// Host-side extern "C" wrapper for kernel "Hybridx46Programx46Substitute": lazily
// loads the embedded module (once per process), resolves the kernel, launches it via
// the HIP module API and synchronizes. Returns 0 on success or the first non-zero
// hipError_t (as int).
// NOTE(review): the lazy module load is not thread-safe — concurrent first calls race.
extern "C" DLL_PUBLIC int Hybridx46Programx46Substitute_ExternCWrapper_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared, int* const equation, Hybridizer::Runtime::CUDAImports::IntResidentArray* const pre_basis, Hybridizer::Runtime::CUDAImports::IntResidentArray* const result, int equationLength, int pre_basisLengthAxis0)
{
hipError_t cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = hipModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
if (cures != hipSuccess) return (int)cures ;
// Fix: record that the module is resident. Previously module_data was never
// assigned, so the module was reloaded (and leaked) on every call.
__hybridizer__gs_module.module_data = __hybridizer_cubin_module_data ;
}
hipFunction_t __hybridizer__cufunc ;
cures = hipModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Hybridx46Programx46Substitute") ;
if (cures != hipSuccess) return (int)cures ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
// Kernel parameter block: address of each argument, null-terminated.
void* __hybridizer_launch_config[7] =
{
(void*)&__hybridizer_runtime,
(void*)&equation,
(void*)&pre_basis,
(void*)&result,
(void*)&equationLength,
(void*)&pre_basisLengthAxis0,
(void*)0
} ;
// Reserve the allocator header bytes and clamp to the 48KB shared-memory limit.
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = hipModuleLaunchKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, 0, __hybridizer_launch_config, 0) ;
if (cures != hipSuccess) return (int)cures ;
// Catch launch-configuration errors, then synchronize to surface execution errors.
int cudaLaunchRes = (int)::hipPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
int __synchronizeRes = (int)::hipDeviceSynchronize () ;
return __synchronizeRes ;
}
// Host-side extern "C" wrapper for kernel "Hybridx46Programx46Multiply_pre_basis":
// lazily loads the embedded module (once per process), resolves the kernel, launches
// it via the HIP module API and synchronizes. Returns 0 on success or the first
// non-zero hipError_t (as int).
// NOTE(review): the lazy module load is not thread-safe — concurrent first calls race.
extern "C" DLL_PUBLIC int Hybridx46Programx46Multiply_pre_basis_ExternCWrapper_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared, Hybridizer::Runtime::CUDAImports::IntResidentArray* const big_pre_basis, Hybridizer::Runtime::CUDAImports::IntResidentArray* const small_pre_basis, Hybridizer::Runtime::CUDAImports::IntResidentArray* const result, int big_pre_basisLengthAxis1, int small_pre_basisLengthAxis0, int small_pre_basisLengthAxis1)
{
hipError_t cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = hipModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
if (cures != hipSuccess) return (int)cures ;
// Fix: record that the module is resident. Previously module_data was never
// assigned, so the module was reloaded (and leaked) on every call.
__hybridizer__gs_module.module_data = __hybridizer_cubin_module_data ;
}
hipFunction_t __hybridizer__cufunc ;
cures = hipModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Hybridx46Programx46Multiply_pre_basis") ;
if (cures != hipSuccess) return (int)cures ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
// Kernel parameter block: address of each argument, null-terminated.
void* __hybridizer_launch_config[8] =
{
(void*)&__hybridizer_runtime,
(void*)&big_pre_basis,
(void*)&small_pre_basis,
(void*)&result,
(void*)&big_pre_basisLengthAxis1,
(void*)&small_pre_basisLengthAxis0,
(void*)&small_pre_basisLengthAxis1,
(void*)0
} ;
// Reserve the allocator header bytes and clamp to the 48KB shared-memory limit.
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = hipModuleLaunchKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, 0, __hybridizer_launch_config, 0) ;
if (cures != hipSuccess) return (int)cures ;
// Catch launch-configuration errors, then synchronize to surface execution errors.
int cudaLaunchRes = (int)::hipPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
int __synchronizeRes = (int)::hipDeviceSynchronize () ;
return __synchronizeRes ;
}
// Host-side extern "C" wrapper for kernel "Hybridx46Programx46Simplify": lazily
// loads the embedded module (once per process), resolves the kernel, launches it via
// the HIP module API and synchronizes. Returns 0 on success or the first non-zero
// hipError_t (as int).
// NOTE(review): the lazy module load is not thread-safe — concurrent first calls race.
extern "C" DLL_PUBLIC int Hybridx46Programx46Simplify_ExternCWrapper_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared, Hybridizer::Runtime::CUDAImports::IntResidentArray* const ar, Hybridizer::Runtime::CUDAImports::IntResidentArray* const gcds, int arLengthAxis0, int arLengthAxis1)
{
hipError_t cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = hipModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
if (cures != hipSuccess) return (int)cures ;
// Fix: record that the module is resident. Previously module_data was never
// assigned, so the module was reloaded (and leaked) on every call.
__hybridizer__gs_module.module_data = __hybridizer_cubin_module_data ;
}
hipFunction_t __hybridizer__cufunc ;
cures = hipModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Hybridx46Programx46Simplify") ;
if (cures != hipSuccess) return (int)cures ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
// Kernel parameter block: address of each argument, null-terminated.
void* __hybridizer_launch_config[6] =
{
(void*)&__hybridizer_runtime,
(void*)&ar,
(void*)&gcds,
(void*)&arLengthAxis0,
(void*)&arLengthAxis1,
(void*)0
} ;
// Reserve the allocator header bytes and clamp to the 48KB shared-memory limit.
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = hipModuleLaunchKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, 0, __hybridizer_launch_config, 0) ;
if (cures != hipSuccess) return (int)cures ;
// Catch launch-configuration errors, then synchronize to surface execution errors.
int cudaLaunchRes = (int)::hipPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
int __synchronizeRes = (int)::hipDeviceSynchronize () ;
return __synchronizeRes ;
}
#pragma endregion
| 591a1fcdf644a2dedd1965369ef8e879d53807ba.cu | // Generated by Hybridizer version 1.0.0.0
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#if defined(__CUDACC__)
#ifndef hyb_device
#define hyb_inline __forceinline__
#define hyb_constant __constant__
#if defined(HYBRIDIZER_NO_HOST)
#define hyb_host
#define hyb_device __device__
#else
#define hyb_host __host__
#define hyb_device __device__
#endif
#endif
#else
#ifndef hyb_device
#define hyb_inline inline
#define hyb_device
#define hyb_constant
#endif
#endif
#pragma once
#if defined _WIN32 || defined _WIN64 || defined __CYGWIN__
#define BUILDING_DLL
#ifdef BUILDING_DLL
#ifdef __GNUC__
#define DLL_PUBLIC __attribute__ ((dllexport))
#else
#define DLL_PUBLIC __declspec(dllexport) // Note: actually gcc seems to also supports this syntax.
#endif
#else
#ifdef __GNUC__
#define DLL_PUBLIC __attribute__ ((dllimport))
#else
#define DLL_PUBLIC __declspec(dllimport) // Note: actually gcc seems to also supports this syntax.
#endif
#endif
#define DLL_LOCAL
#else
#if __GNUC__ >= 4
#define DLL_PUBLIC __attribute__ ((visibility ("default")))
#define DLL_LOCAL __attribute__ ((visibility ("hidden")))
#else
#define DLL_PUBLIC
#define DLL_LOCAL
#endif
#endif
// hybridizer core types
#include <cstdint>
namespace hybridizer { struct hybridobject ; }
namespace hybridizer { struct runtime ; }
#pragma region defined enums and types
#ifndef __ENUM_DECL_Hybridizer_Runtime_CUDAImports_ResidentArrayStatus__
#define __ENUM_DECL_Hybridizer_Runtime_CUDAImports_ResidentArrayStatus__
namespace Hybridizer { namespace Runtime { namespace CUDAImports {
enum struct ResidentArrayStatus
{
NoAction = 0,
DeviceNeedsRefresh = 1,
HostNeedsRefresh = 2,
} ;
} } } // Leaving namespace
#endif // __ENUM_DECL_Hybridizer_Runtime_CUDAImports_ResidentArrayStatus__
// Intrinsic type cudaError_t used
#define __TYPE_DECL_cudaError_t__
#if defined(__cplusplus) || defined(__CUDACC__)
#ifndef __ENUM_DECL_Hybridizer_Runtime_CUDAImports_ResidentArrayStatus__
#define __ENUM_DECL_Hybridizer_Runtime_CUDAImports_ResidentArrayStatus__
namespace Hybridizer { namespace Runtime { namespace CUDAImports {
enum struct ResidentArrayStatus
{
NoAction = 0,
DeviceNeedsRefresh = 1,
HostNeedsRefresh = 2,
} ;
} } } // Leaving namespace
#endif // __ENUM_DECL_Hybridizer_Runtime_CUDAImports_ResidentArrayStatus__
namespace Hybridizer { namespace Runtime { namespace CUDAImports {
struct IResidentArray ;
} } } // Leaving namespace
namespace Hybridizer { namespace Runtime { namespace CUDAImports {
struct IResidentData ;
} } } // Leaving namespace
namespace System {
struct IDisposable ;
} // Leaving namespace
// Intrinsic type cudaError_t used
#define __TYPE_DECL_cudaError_t__
namespace Hybridizer { namespace Runtime { namespace CUDAImports {
struct IntResidentArray ;
} } } // Leaving namespace
namespace Hybrid {
struct Program ;
} // Leaving namespace
namespace Hybrid {
struct Program___c__DisplayClass1_0 ;
} // Leaving namespace
namespace System { namespace Threading { namespace Tasks {
struct Parallel ;
} } } // Leaving namespace
// Intrinsic type Nullable`1 used
#define __TYPE_DECL_hybridizer_nullable__int64_t____
namespace System { namespace Threading { namespace Tasks {
struct ParallelLoopResult ;
} } } // Leaving namespace
// Intrinsic type Action`1 used
#define __TYPE_DECL_hybridizer_action__int____
namespace Hybrid {
struct Program___c__DisplayClass2_0 ;
} // Leaving namespace
namespace Hybrid {
struct Program___c__DisplayClass3_0 ;
} // Leaving namespace
namespace Hybrid {
struct Program___c__DisplayClass7_0 ;
} // Leaving namespace
#endif // TOTO
#pragma endregion
extern "C" void* __hybridizer_init_basic_runtime();
// ----- HYBRIDIZER_CUDA_CUH -----
#pragma once // hybridizer.cuda.cuh
#if defined(__CUDACC_RTC__)
#define HYBRIDIZER_NO_HOST
#endif
#if !defined(HYBRIDIZER_NO_HOST)
#include <cuda.h>
#include <stdint.h>
#include <stdio.h>
#else
typedef signed char int8_t;
typedef short int16_t;
typedef int int32_t;
typedef long long int64_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
typedef unsigned long long uint64_t;
#endif
#if !defined(DLL_PUBLIC)
#if defined _WIN32 || defined _WIN64 || defined __CYGWIN__
#define BUILDING_DLL
#ifdef BUILDING_DLL
#ifdef __GNUC__
#define DLL_PUBLIC __attribute__ ((dllexport))
#else
#define DLL_PUBLIC __declspec(dllexport) // Note: actually gcc seems to also supports this syntax.
#endif
#else
#ifdef __GNUC__
#define DLL_PUBLIC __attribute__ ((dllimport))
#else
#define DLL_PUBLIC __declspec(dllimport) // Note: actually gcc seems to also supports this syntax.
#endif
#endif
#define DLL_LOCAL
#else
#if __GNUC__ >= 4
#define DLL_PUBLIC __attribute__ ((visibility ("default")))
#define DLL_LOCAL __attribute__ ((visibility ("hidden")))
#else
#define DLL_PUBLIC
#define DLL_LOCAL
#endif
#endif
#endif
#if UINTPTR_MAX == 0xffffffffffffffff
/* 64-bit */
#else
// WTF ??? #define NO_EXCEPTION
#endif
#define HYBRIDIZER_GETTYPE_ID hybridizer::gettypeid
template<typename T> struct __hybridizer_argument_type;
template<typename T, typename U> struct __hybridizer_argument_type<T(U)> { typedef U type; };
#define HYBRIDIZER_QUERYINTERFACE(...) hybridizer::queryinterface<__hybridizer_argument_type<void(__VA_ARGS__)>::type>
#define HYBRIDIZER_SIGNAL_PURE_VIRTUAL(a, b) hybridizer::signal_pure_virtual(a, b)
#define HYBRIDIZER_FUNC3_INVOKE(functor,left,middle,right) (*functor) . invoke (left, middle, right)
#define HYBRIDIZER_FUNC2_INVOKE(functor,left,right) (*functor) . invoke (left, right)
#define HYBRIDIZER_FUNC1_INVOKE(functor,left) (*functor) . invoke (left)
#if defined(__CUDACC__)
#ifndef hyb_device
#define hyb_inline __forceinline__
#define hyb_constant __constant__
#if defined(HYBRIDIZER_NO_HOST)
#define hyb_host
#define hyb_device __device__
#else
#define hyb_host __host__
#define hyb_device __device__
#endif
#endif
#else
#ifndef hyb_device
#define hyb_inline inline
#define hyb_device
#define hyb_constant
#endif
#endif
#if defined(_MSC_VER)
#define STRUCT_ALIGNED_(x) __declspec(align(x))
#else
#if defined(__GNUC__)
#define STRUCT_ALIGNED_(x) __attribute__ ((aligned(x)))
#endif
#endif
#define __hybridizer_threadIdxX threadIdx.x
#define __hybridizer_threadIdxY threadIdx.y
#define __hybridizer_threadIdxZ threadIdx.z
#define __hybridizer_blockDimX blockDim.x
#define __hybridizer_blockDimY blockDim.y
#define __hybridizer_blockDimZ blockDim.z
#define __hybridizer_blockIdxX blockIdx.x
#define __hybridizer_blockIdxY blockIdx.y
#define __hybridizer_blockIdxZ blockIdx.z
#define __hybridizer_gridDimX gridDim.x
#define __hybridizer_gridDimY gridDim.y
#define __hybridizer_gridDimZ gridDim.z
#define __hybridizer_threadIdxXX64 threadIdx.x
#define __hybridizer_threadIdxYX64 threadIdx.y
#define __hybridizer_threadIdxZX64 threadIdx.z
#define __hybridizer_blockDimXX64 blockDim.x
#define __hybridizer_blockDimYX64 blockDim.y
#define __hybridizer_blockDimZX64 blockDim.z
#define __hybridizer_blockIdxXX64 blockIdx.x
#define __hybridizer_blockIdxYX64 blockIdx.y
#define __hybridizer_blockIdxZX64 blockIdx.z
#define __hybridizer_gridDimXX64 gridDim.x
#define __hybridizer_gridDimYX64 gridDim.y
#define __hybridizer_gridDimZX64 gridDim.z
extern __shared__ char __hybridizer_cuda_local_shared [] ;
#if defined(HYBRIDIZER_NULL_CHECKS_THROW_TRAP) || defined (HYBRIDIZER_NULL_CHECKS_BREAK) || defined (HYBRIDIZER_NULL_CHECKS_PRINT)
#define HYBRIDIZER_NULL_CHECKS
#endif
#ifdef HYBRIDIZER_NULL_CHECKS
// Pass-through null-pointer check: returns 'input' unchanged, and when it is null
// reacts according to the HYBRIDIZER_NULL_CHECKS_* macro selected at compile time
// (debugger breakpoint, hardware trap, diagnostic printf, or silently nothing).
template<typename T>
hyb_device hyb_inline static T* __hybridizer_null_check(T* input, const char* file, int line) {
if(nullptr == input) {
#ifdef HYBRIDIZER_NULL_CHECKS_BREAK
asm("brkpt;");
#elif defined (HYBRIDIZER_NULL_CHECKS_THROW_TRAP)
asm("trap;");
#elif defined(HYBRIDIZER_NULL_CHECKS_PRINT)
printf("null pointer at %s:%d\n", file, line);
#else
#endif
}
return input;
}
#define HYBRIDIZER_NULL_CHECK(param) __hybridizer_null_check(param, __FILE__, __LINE__)
#else
#define HYBRIDIZER_NULL_CHECK(param) (param)
#endif
namespace hybridizer {
// Warp shuffle wrappers. Fix: the mask-less __shfl/__shfl_up/__shfl_down/__shfl_xor
// intrinsics are deprecated since CUDA 9 and removed for sm_70+ (independent thread
// scheduling); use the *_sync forms with an explicit participant mask.
// NOTE(review): the full-warp mask 0xffffffff assumes callers invoke these with all
// 32 lanes converged — verify call sites before using under divergent control flow.
template <typename T> __device__ T shuffle (T t, int srcLane) { return __shfl_sync (0xffffffff, t, srcLane) ; }
template <typename T> __device__ T shuffleup (T t, unsigned int shift) { return __shfl_up_sync (0xffffffff, t, shift) ; }
template <typename T> __device__ T shuffledown (T t, unsigned int shift) { return __shfl_down_sync (0xffffffff, t, shift) ; }
template <typename T> __device__ T shufflexor (T t, unsigned int shift) { return __shfl_xor_sync (0xffffffff, t, shift) ; }
}
namespace hybridizer {
template <typename T, int rank>
struct hybarray;
}
namespace hybridizer {
// Base object header: a single 8-byte slot that overlays either a vtable pointer or
// an integer type id — presumably device objects store a type id rather than a host
// vtable pointer (host vtables are meaningless on the device); TODO confirm.
struct hybridobject { union { void* _vtable ; int _typeid ; char _vtable_padding[8] ; } ; } ;
// Mirror of System.DateTime: raw 64-bit tick value.
struct datetime { long long _date; } ;
// Reads the type id from an object header; null maps to id 0.
__forceinline__ hyb_device static int gettypeid (void* ptr) {
if (ptr == 0) return 0 ;
return ((hybridobject*)ptr)->_typeid ;
}
// Unchecked interface/downcast: a plain pointer reinterpretation, no runtime check.
template<typename T>
__forceinline__ hyb_device static T queryinterface (hybridobject* ptr) {
return ((T)ptr) ;
}
template<typename T>
__forceinline__ hyb_device static T queryinterface (void* ptr) {
return ((T)ptr) ;
}
// 'constrained' call helpers: dereference pointer forms / copy reference forms by value.
template<typename T>
hyb_inline hyb_device static T constrained (T* ptr) { return *ptr; }
template<typename T>
hyb_inline hyb_device static T constrained (T const * ptr) { return *ptr; }
template<typename T>
hyb_inline hyb_device static T constrained (T& ptr) { return ptr; }
template<typename T>
hyb_inline hyb_device static T constrained (T const & ptr) { return ptr; }
// Block-scoped bump allocator over dynamic shared memory. The running byte offset
// lives in the first int of __hybridizer_cuda_local_shared; all threads read the
// current offset, synchronize, then a single thread advances it. The destructor can
// roll the offset back to its construction-time value (stack-like scoping).
// NOTE(review): the "single thread" predicate tests only threadIdx.x/y — blocks
// launched with blockDim.z > 1 would have multiple threads bumping the offset.
// NOTE(review): there is no barrier AFTER the bump, so a thread entering the next
// allocate() may read the offset before thread (0,0) has written it — verify call
// sites separate consecutive allocations with a __syncthreads().
template<typename T>
struct sharedmemoryallocator
{
// TODO : HYB-794 => shared max allocation => error !!
// Allocate a hybarray in shared memory (can be automatically converted to raw pointer if needed)
__forceinline__ __device__ hybridizer::hybarray<T, 1> allocate(int count)
{
hybridizer::hybarray<T, 1> res ;
// All threads compute the same pointer from the current offset...
res.ptr = (T*) (&(__hybridizer_cuda_local_shared[(*((int*)__hybridizer_cuda_local_shared))])) ;
res.length[0] = count;
res.lowerBound[0] = 0;
__syncthreads();
// ...then one thread advances the offset past the new allocation.
if (threadIdx.x == 0 && threadIdx.y == 0) {
int sizeElt = sizeof(T);
int size = count * sizeElt;
(*((int*)__hybridizer_cuda_local_shared)) += size ;
}
return res ;
}
// Allocate a raw pointer in shared memory
__forceinline__ __device__ T* allocate_raw(int count)
{
T* res = (T*) (&(__hybridizer_cuda_local_shared[(*((int*)__hybridizer_cuda_local_shared))])) ;
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0) {
(*((int*)__hybridizer_cuda_local_shared)) += count * sizeof (T) ;
}
return res ;
}
// Offset captured at construction, restored by the destructor when
// resetAtDestruction is set (scoped/stack-like allocation).
int initialOffset ;
bool resetAtDestruction;
__forceinline__ __device__ sharedmemoryallocator(bool reset = true)
{
initialOffset = (*((int*)__hybridizer_cuda_local_shared)) ;
resetAtDestruction = reset;
}
__forceinline__ __device__ ~sharedmemoryallocator()
{
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0 && resetAtDestruction) {
if ((*((int*)__hybridizer_cuda_local_shared)) > initialOffset)
*((int*)__hybridizer_cuda_local_shared) = initialOffset;
}
}
} ;
// Typed unchecked element accessors over an untyped array pointer; 'unused' exists
// for generated-call signature compatibility.
hyb_inline hyb_device double getarraydouble(void* unused, void* ar, int idx) { return ((double*)ar)[idx] ; }
hyb_inline hyb_device void setarraydouble(void* unused, void* ar, int idx, double value) { ((double*)ar)[idx] = value ; }
hyb_inline hyb_device float getarrayfloat(void* unused, void* ar, int idx) { return ((float*)ar)[idx] ; }
hyb_inline hyb_device void setarrayfloat(void* unused, void* ar, int idx, float value) { ((float*)ar)[idx] = value ; }
// NOTE(review): the int accessors are declared with FLOAT return/value types (looks
// copy-pasted from the float pair) — large ints silently lose precision through the
// implicit conversion. Changing the types would alter the generated call interface,
// so this is flagged rather than fixed; prefer the getarray<T>/setarray<T> templates.
hyb_inline hyb_device float getarrayint(void* unused, void* ar, int idx) { return ((int*)ar)[idx] ; }
hyb_inline hyb_device void setarrayint(void* unused, void* ar, int idx, float value) { ((int*)ar)[idx] = value ; }
template<typename T> hyb_inline hyb_device T getarray(void* unused, void* ar, int idx) { return ((T*)ar)[idx] ; }
template<typename T> hyb_inline hyb_device void setarray(void* unused, void* ar, int idx, T value) { ((T*)ar)[idx] = value ; }
}
// Carves 'count' bytes out of the block's dynamic shared memory via the bump
// allocator; resetAtDestruction is disabled so the allocation outlives this call.
__forceinline__ __device__ char* SharedMemoryPointer(int count)
{
hybridizer::sharedmemoryallocator<char> allocator;
allocator.resetAtDestruction = false;
return allocator.allocate_raw(count);
}
// Returns the base of the block's dynamic shared memory region. Note the first
// bytes hold the bump allocator's running offset (see sharedmemoryallocator).
__forceinline__ __device__ char* GetSharedMemoryArray() {
return (char*) __hybridizer_cuda_local_shared;
}
/*
namespace hybridizer
{
void launch_kernel(int griddimx, int griddimy, int blockdimx, int blockdimy, int blockdimz, int shared,
void (*kernel) ())
{
dim3 grid (griddimx, griddimy, 1);
dim3 block(blockdimx, blockdimy, blockdimz);
kernel<<<grid, block, shared>>>();
}
template <typename T>
void launch_kernel(int griddimx, int griddimy, int blockdimx, int blockdimy, int blockdimz, int shared,
void (*kernel) (T arg), T a)
{
dim3 grid (griddimx, griddimy, 1);
dim3 block(blockdimx, blockdimy, blockdimz);
kernel<<<grid, block, shared>>>(a);
}
template <typename T1, typename T2>
void launch_kernel(int griddimx, int griddimy, int blockdimx, int blockdimy, int blockdimz, int shared,
void (*kernel) (T1 arg1, T2 arg2), T1 a, T2 b) {
dim3 grid (griddimx, griddimy, 1);
dim3 block(blockdimx, blockdimy, blockdimz);
kernel<<<grid, block, shared>>>(a, b);
}
template <typename T1, typename T2, typename T3>
void launch_kernel(int griddimx, int griddimy, int blockdimx, int blockdimy, int blockdimz, int shared,
void (*kernel) (T1 arg1, T2 arg2, T3 arg3), T1 a, T2 b, T3 c) {
dim3 grid (griddimx, griddimy, 1);
dim3 block(blockdimx, blockdimy, blockdimz);
kernel<<<grid, block, shared>>>(a, b, c);
}
template <typename T1, typename T2, typename T3, typename T4>
void launch_kernel(int griddimx, int griddimy, int blockdimx, int blockdimy, int blockdimz, int shared,
void (*kernel) (T1 arg1, T2 arg2, T3 arg3, T4 arg4), T1 a, T2 b, T3 c, T4 d) {
dim3 grid (griddimx, griddimy, 1);
dim3 block(blockdimx, blockdimy, blockdimz);
kernel<<<grid, block, shared>>>(a, b, c, d);
}
template <typename T1, typename T2, typename T3, typename T4, typename T5>
void launch_kernel(int griddimx, int griddimy, int blockdimx, int blockdimy, int blockdimz, int shared,
void (*kernel) (T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5), T1 a, T2 b, T3 c, T4 d, T5 e) {
dim3 grid (griddimx, griddimy, 1);
dim3 block(blockdimx, blockdimy, blockdimz);
kernel<<<grid, block, shared>>>(a, b, c, d, e);
}
// TODO: more versions or variadic templates
}
*/
/*
template <> __device__ int hybridizer::constrained<int,int*>(int* i) { return *i ; }
template <> __device__ int hybridizer::constrained<int,int>(int& i) { return i ; }
*/
#if !defined(__CUDA_ARCH__)
#include <limits>
#endif
#ifndef HYBRIDIZER_NO_HOST
#include <stdio.h>
#include <stdarg.h>
#define hybridizer__hyprintfva(f, format, ...) printf (format, __VA_ARGS__)
#define hybridizer__hyprintflineva(f, format, ...) hybridizer__hyprintfva (f, format "\n", __VA_ARGS__)
namespace hybridizer
{
    // Stand-in for stdout in device code (no real FILE* exists there).
    inline __device__ static FILE* get_Out() {
        return 0 ;
    }
    // Prints a message verbatim. BUG FIX: the message must not be used as the
    // printf format string — a '%' in user text would read garbage arguments.
    inline __device__ static void hyprintf(FILE* f, const char* message) {
        printf("%s", message);
    }
    // Prints a message followed by a newline.
    inline __device__ static void hyprintfline(FILE* f, const char* message) {
        printf("%s\n", message);
    }
};
#include <math_constants.h>
namespace hybridizer
{
// Special floating-point values, one implementation per compilation side.
#if defined(__CUDA_ARCH__)
    // Device side: use the CUDA math constants from <math_constants.h>.
    template <typename T> __device__ hyb_host T nan () ;
    template <typename T> __device__ hyb_host T pos_infinity () ;
    template <typename T> __device__ hyb_host T neg_infinity () ;
    template <> __forceinline__ __device__ float nan<> () { return CUDART_NAN_F ; }
    template <> __forceinline__ __device__ float pos_infinity<> () { return CUDART_INF_F ; }
    template <> __forceinline__ __device__ float neg_infinity<> () { return - CUDART_INF_F ; }
    template <> __forceinline__ __device__ double nan<> () { return CUDART_NAN ; }
    template <> __forceinline__ __device__ double pos_infinity<> () { return CUDART_INF ; }
    template <> __forceinline__ __device__ double neg_infinity<> () { return - CUDART_INF ; }
#endif
#if !defined(__CUDA_ARCH__)
    // Host side: defer to std::numeric_limits (requires <limits>, included above).
    template <typename T> __device__ T nan () { return std::numeric_limits<T>::quiet_NaN(); }
    template <typename T> __device__ T pos_infinity () { return std::numeric_limits<T>::infinity(); }
    template <typename T> __device__ T neg_infinity () { return - std::numeric_limits<T>::infinity(); }
#endif
};
#else
// HYBRIDIZER_NO_HOST
#define hybridizer__hyprintfva(f, format, ...) printf (format, __VA_ARGS__)
#define hybridizer__hyprintflineva(f, format, ...) hybridizer__hyprintfva (f, format "\n", __VA_ARGS__)
typedef void FILE ;
namespace hybridizer
{
    // Stand-in for stdout (FILE is typedef'd to void in the NO_HOST build).
    inline __device__ static FILE* get_Out() {
        return 0 ;
    }
    // Prints a message verbatim. BUG FIX: the message must not be used as the
    // printf format string — a '%' in user text would read garbage arguments.
    inline __device__ static void hyprintf(FILE* f, const char* message) {
        printf("%s", message);
    }
    // Prints a message followed by a newline.
    inline __device__ static void hyprintfline(FILE* f, const char* message) {
        printf("%s\n", message);
    }
};
// for NVRTC
// NVRTC build: no <math_constants.h>, so build the special values from their
// IEEE-754 bit patterns via the CUDA bit-cast intrinsics.
namespace hybridizer
{
#if defined(__CUDA_ARCH__)
    template <typename T> __device__ hyb_host T nan () ;
    template <typename T> __device__ hyb_host T pos_infinity () ;
    template <typename T> __device__ hyb_host T neg_infinity () ;
    // 0x7fffffff: float quiet NaN; 0x7f800000: float +infinity.
    template <> __forceinline__ __device__ float nan<> () { return __int_as_float(0x7fffffff); }
    template <> __forceinline__ __device__ float pos_infinity<> () { return __int_as_float(0x7f800000) ; }
    template <> __forceinline__ __device__ float neg_infinity<> () { return - __int_as_float(0x7f800000) ; }
    // 0xfff8000000000000: double quiet NaN (negative sign bit variant); 0x7ff0000000000000: double +infinity.
    template <> __forceinline__ __device__ double nan<> () { return __longlong_as_double(0xfff8000000000000ULL) ; }
    template <> __forceinline__ __device__ double pos_infinity<> () { return __longlong_as_double(0x7ff0000000000000ULL) ; }
    template <> __forceinline__ __device__ double neg_infinity<> () { return - __longlong_as_double(0x7ff0000000000000ULL) ; }
#endif
}
#endif
namespace hybridizer
{
    // Reports a pure-virtual dispatch on the device. Device code cannot throw,
    // so this only logs; the TODO below notes the intended exception path.
    __device__ static void signal_pure_virtual (const char* method, int id)
    {
        // TODO : throw exception...
        ::printf ("Pure virtual call for method <%s> on type id <%d> (CUDA flavor)\n", method, id) ;
    }
};
namespace hybridizer
{
    struct runtime;
    // Initializes the per-block header of the dynamic shared-memory arena.
    // Layout (bytes): [0..3] allocation offset (starts at 16, i.e. past this
    // header), [4..7] flag word (zeroed), [8..15] the runtime* for this launch.
    // Only thread (0,0,*) writes; the barrier publishes the header to the block.
    __forceinline__ __device__ void initruntime(hybridizer::runtime* rt)
    {
        if (threadIdx.x == 0 && threadIdx.y == 0)
        {
            // init shared memory offset to zero
            (*((int*)__hybridizer_cuda_local_shared)) = 16 ;
            (*(((int*)__hybridizer_cuda_local_shared) + 1)) = 0 ;
#ifndef NO_EXCEPTION
            (*((runtime**)(__hybridizer_cuda_local_shared + 8))) = rt ;
#endif
        }
        __syncthreads();
    }
    // Reads the runtime* stashed at arena offset 8 by initruntime.
    // NOTE(review): only valid after initruntime has run for this block.
    __forceinline__ __device__ hybridizer::runtime* getruntime()
    {
#ifndef NO_EXCEPTION
        return (*((runtime**)(__hybridizer_cuda_local_shared + 8))) ;
#else
        return 0;
#endif
    }
}
#ifndef NO_EXCEPTION
#pragma once
//#define HYBRIDIZER_EXCEPTIONS_HANDLE_THREAD
#include <cuda_runtime_api.h>
#define CUDA_CHECK(x) {if (x != cudaSuccess) {printf("Cuda error %d\n", x); exit(-1);} }
// EXCEPTION MODES
#ifdef HYBRIDIZER_EXCEPTIONS_THROWTRAP
#define HYBRIDIZER_EXCEPTIONS_MODE_DEFINED
#endif
#ifdef HYBRIDIZER_EXCEPTIONS_HANDLE_THREAD
#define HYBRIDIZER_EXCEPTIONS_MODE_DEFINED
#endif
#ifdef HYBRIDIZER_EXCEPTIONS_HANDLE_BLOCK
#define HYBRIDIZER_EXCEPTIONS_MODE_DEFINED
#endif
#ifdef HYBRIDIZER_EXCEPTIONS_NONE
#define NO_EXCEPTION
#endif
#ifdef NO_EXCEPTION
#define HYBRIDIZER_EXCEPTIONS_MODE_DEFINED
#endif
#ifndef HYBRIDIZER_EXCEPTIONS_MODE_DEFINED
#define NO_EXCEPTION
#endif
#pragma region Runtime : Exceptions management
#ifndef __FILE_ID__
#define __FILE_ID__ 42
#endif
#pragma region Bit scan forward intrinsics
// bit_scan_forward: find-first-set with ffs semantics — returns the 1-based
// index of the least-significant set bit, or 0 when x == 0. All compiler
// variants below must agree on this contract (hostrethrow relies on it).
#if defined(_MSC_VER) && !defined(__clang__)
#include <intrin.h>
#pragma intrinsic(_BitScanForward)
static hyb_inline uint32_t bit_scan_forward(uint32_t x)
{
    unsigned long res;
    int status = _BitScanForward(&res, x);
    return status ? res + 1 : 0;
}
extern void _hyb_raise_exception(int errorCode);
#elif defined (__INTEL_COMPILER)
static hyb_inline uint32_t bit_scan_forward(uint32_t x)
{
    // BUG FIX: BSF yields a 0-based index and is undefined for x == 0;
    // normalize to the 1-based ffs convention used by the other variants.
    if (x == 0) return 0;
    int r;
    __asm__("bsf %1,%0" : "=r"(r) : "X"(x));
    return (uint32_t)(r + 1);
}
#elif defined(__clang__)
static hyb_inline uint32_t bit_scan_forward(uint32_t x)
{
    return (uint32_t)__builtin_ffs (x) ;
}
#elif defined(__GNUC__)
static hyb_inline uint32_t bit_scan_forward(uint32_t x)
{
    return (uint32_t)__builtin_ffs (x) ;
}
#else
#error "Unsupported compiler - no ffs intrinsic"
#endif
#pragma endregion
#ifndef NO_EXCEPTION
#include <thread>
#include <map>
#include <driver_types.h>
#include <mutex>
#endif
namespace hybridizer
{
	// One recorded call-stack frame: (file id, line number).
	struct exceptionframe
	{
		int filenum;
		int linenum;
	};
	// A stack entry is either a frame (for unwinding) or, in slot 0 only,
	// the pointer to the thrown exception instance.
	struct exceptionentry
	{
		union { void* exceptiondata; exceptionframe frame; };
		hyb_host hyb_device hyb_inline exceptionentry(int file, int line)
		{
			frame.filenum = file;
			frame.linenum = line;
		}
		hyb_host hyb_device hyb_inline exceptionentry(void* data)
		{
			exceptiondata = data;
		}
	};
	// Header common to all exception objects: vtable pointer or type id.
	struct exceptioninstance
	{
		union { void* __vtable; int __typeid; };
	};
	// Per-thread exception record: entry 0 holds the exception data, entries
	// 1..count-1 hold the stack frames populated while unwinding.
	struct exceptionstack
	{
		exceptionentry entries[1023];
		int count;
		int location;
		int code;
		// Start a fresh record for a newly thrown exception.
		hyb_device hyb_inline void set(exceptionentry entry, int loc, int code)
		{
			count = 1;
			entries[0] = entry;
			location = loc;
			this->code = code;
		}
		// Append a stack frame; once full, keep overwriting the last slot so
		// the bottom-most frames are preserved.
		hyb_device hyb_inline void populate(int file, int line)
		{
			entries[count].frame.filenum = file;
			entries[count].frame.linenum = line;
			++count;
			if (count == 1023) count = 1022; // skip the topmost frames...
		}
	};
	// Root of the device-side exception hierarchy. Padded unions keep the
	// layout stable across host/device compilation.
	struct baseexception
	{
		hyb_device hyb_inline static int hybridizer_typeid() {return 0;}
		union { void* __vtable; int __typeid; };
		union { char* _message; char __message_padding[8]; };
	};
	struct gpuexception : baseexception
	{
	};
	struct nullpointerexception : baseexception {};
	struct indexoutofboundsexception : baseexception {};
	// What catchexception hands back: the exception plus a view of its stack.
	template <typename T>
	struct exceptionhandle
	{
		T exception;
		exceptionentry* stack; // this is only valid until the next throw...
		int stacksize;
		hyb_device hyb_inline exceptionhandle() {}
		hyb_device hyb_inline exceptionhandle(T ex) { exception = ex; }
	};
class runtime
{
public:
typedef void (* exception_callback_type)(const int code);
#ifndef HYBRIDIZER_NO_HOST
hyb_inline hyb_host static runtime* host_getruntime(cudaStream_t stream);
hyb_inline hyb_host static runtime* host_getruntime();
hyb_inline hyb_host void host_release();
hyb_inline hyb_host static void hostrethrow(runtime* hrt);
#endif
hyb_device hyb_inline static void init(runtime* rt);
template<typename extype>
hyb_device hyb_inline static void* allocateinstance()
{
#ifndef NO_EXCEPTION
#ifdef HYBRIDIZER_EXCEPTIONS_THROWTRAP
return nullptr;
#else
runtime* local_runtime = (runtime*)getruntime();
int index = atomicAdd(&(local_runtime->exceptioninstancescount), sizeof(extype));
return (void*)(local_runtime->exceptioninstances + index);
#endif
#else
return NULL;
#endif
}
int exceptionstatus[64]; // only valid in shared memory
exceptionstack* exceptionstacks; // One stack per thread in grid (can be big)
unsigned exceptioncount;
unsigned exceptioninstancescount;
unsigned gridsize; // Currently allocated exceptionstacks
int* exceptionentries;
char* exceptioninstances;
exception_callback_type _exception_callback;
private:
cudaStream_t _stream;
runtime(cudaStream_t stream) : _stream(stream) {}
#ifndef NO_EXCEPTION
static std::map<std::pair<std::thread::id, cudaStream_t>, runtime*> g_runtime_dict;
static std::mutex g_dict_mutex;
#endif
public:
hyb_device hyb_inline static int threadid()
{
return threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
}
hyb_device hyb_inline static int blockid()
{
return blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
}
hyb_device hyb_inline static bool inexception()
{
#ifndef NO_EXCEPTION
//if (reinterpret_cast<int*>(__hybridizer_cuda_local_shared)[1] == 0)
// return false;
#ifdef HYBRIDIZER_EXCEPTIONS_THROWTRAP
return false;
#else
runtime* local_runtime = (runtime*)getruntime();
int tid = threadid();
__syncthreads();
int warpStatus = local_runtime->exceptionstatus[tid >> 5];
int mask = 1 << (tid & 31);
return (warpStatus & mask) != 0;
#endif
#else
return false;
#endif
}
hyb_device hyb_inline static void throwexception(void* data, int file, int line, int code)
{
#ifndef NO_EXCEPTION
#ifdef HYBRIDIZER_EXCEPTIONS_THROWTRAP
asm("trap;");
#else
//reinterpret_cast<int*>(__hybridizer_cuda_local_shared)[1] = -1;
runtime* local_runtime = (runtime*)getruntime();
int tid = threadid();
int bid = blockid();
atomicOr(&(local_runtime->exceptionstatus[tid >> 5]), 1 << (tid & 31));
int index = tid + (blockDim.x * blockDim.y * blockDim.z * bid);
unsigned int location = atomicInc(&local_runtime->exceptioncount, 0x7FFFFFFF);
local_runtime->exceptionstacks[index].set(exceptionentry(data), location, code);
local_runtime->exceptionstacks[index].populate(file, line);
//local_runtime->exceptionentries [location] = index ;
#endif
#endif
}
hyb_device hyb_inline static void populatestackframe(int file, int line)
{
#ifndef NO_EXCEPTION
#ifdef HYBRIDIZER_EXCEPTIONS_THROWTRAP
// DO NOTHING
#else
if (inexception())
{
runtime* local_runtime = (runtime*)getruntime();
int tid = threadid();
int bid = blockid();
int index = tid + (blockDim.x * blockDim.y * blockDim.z * bid);
local_runtime->exceptionstacks[index].populate(file, line);
}
#endif
#endif
}
// returns true if the instancetypeid is a subtype of basetypeid
hyb_device hyb_inline static bool implements(int basetypeid, int instancetypeid)
{
// TODO : really work
return basetypeid == instancetypeid;
}
template <typename T>
hyb_device hyb_inline static exceptionhandle<T> catchexception()
{
exceptionhandle<T> result;
#ifndef NO_EXCEPTION
#ifdef HYBRIDIZER_EXCEPTIONS_THROWTRAP
return result;
#else
if (!inexception()) return exceptionhandle<T>(0);
runtime* local_runtime = (runtime*)getruntime();
int tid = threadid();
int bid = blockid();
int index = tid + (blockDim.x * blockDim.y * blockDim.z * bid);
baseexception* be = (baseexception*)local_runtime->exceptionstacks[index].entries[0].exceptiondata;
// if (be == 0) return exceptionhandle<T>(0) ; // => ideally, actually return an inner runtime exception
// TODO : if (runtime::implements (T::extypeid(), be->__typeid))
{
// exception is caught - get the call stack....
result.exception = (T)be;
result.stacksize = local_runtime->exceptionstacks[index].count - 1;
result.stack = local_runtime->exceptionstacks[index].entries + 1;
// ... and reset !
int location = local_runtime->exceptionstacks[index].location;
//local_runtime->exceptionentries [location] = -1 ; // reset
local_runtime->exceptionstacks[index].count = 0;
int mask = ~(1 << (tid & 31));
auto tmp = local_runtime->exceptionstatus[tid >> 5];
atomicAnd(&(local_runtime->exceptionstatus[tid >> 5]), ~(1 << (tid & 31)));
}
// see TODO : else {
// exception is not in proper type hierarchy
// return exceptionhandle<T>(0) ;
//}
#endif
#endif
return result;
}
#ifndef HYBRIDIZER_NO_HOST
static hyb_inline void hostfree(runtime* hrt)
{
#ifndef NO_EXCEPTION
#ifdef HYBRIDIZER_EXCEPTIONS_THROWTRAP
// DO NOTHING
#else
::cudaFree(hrt);
#endif
#endif
}
static hyb_inline void hostinit(runtime* hrt, int gridsize, int exbuffersize)
{
#ifndef NO_EXCEPTION
#ifdef HYBRIDIZER_EXCEPTIONS_THROWTRAP
// DO NOTHING
#else
// TODO : check and report this error !!!
for (int k = 0; k < 64; ++k)
{
hrt->exceptionstatus[k] = 0;
}
hrt->exceptioncount = 0;
hrt->exceptioninstancescount = 0;
CUDA_CHECK(::cudaMallocManaged(&(hrt->exceptionstacks), gridsize * sizeof(exceptionstack) + exbuffersize));
hrt->exceptioninstances = (((char*)hrt->exceptionstacks) + gridsize * sizeof(exceptionstack));
hrt->gridsize = gridsize;
#endif
#endif
}
};
void runtime::hostrethrow(runtime* hrt)
{
#ifndef NO_EXCEPTION
#ifdef HYBRIDIZER_EXCEPTIONS_THROWTRAP
// DO NOTHING
#else
for (int k = 0; k < 64; ++k)
{
if (hrt->exceptionstatus[k] != 0)
{
int idx = k * 64 + bit_scan_forward(hrt->exceptionstatus[k]);
// we have an exception => throw
printf("Throwing %d\n", hrt->exceptionstacks[idx].code);
//throw hrt->exceptionstacks[idx].code;
hrt->_exception_callback(hrt->exceptionstacks[idx].code);
}
}
hrt->host_release();
#endif
#endif
}
#endif
	// Device-side init: lane 0 of each warp clears its block's status word.
	// NOTE(review): the 'rt' parameter is unused — the runtime pointer is
	// re-read from shared memory, so initruntime must have run first; confirm
	// callers always pair the two.
	hyb_device void runtime::init(runtime* rt)
	{
#ifndef NO_EXCEPTION
#ifdef HYBRIDIZER_EXCEPTIONS_THROWTRAP
		// DO NOTHING !
#else
		runtime* local_runtime = getruntime();
		int tid = threadid();
		if ((tid & 31) == 0) local_runtime->exceptionstatus[tid >> 5] = 0;
		__syncthreads();
#endif
#endif
	}
#ifndef HYBRIDIZER_NO_HOST
	// Returns (creating on first use) the runtime for the calling host thread
	// and the given stream. Instances live in managed memory and are cached in
	// a mutex-protected global map keyed by (thread id, stream).
	// NOTE(review): only _stream is set here — the remaining fields are
	// expected to be filled by hostinit before any kernel uses the runtime.
	hyb_host runtime* runtime::host_getruntime(cudaStream_t stream)
	{
#ifdef NO_EXCEPTION
		return nullptr;
#else
		auto key = std::make_pair(std::this_thread::get_id(), stream);
		std::lock_guard<std::mutex> guard(g_dict_mutex);
		{
			auto it = g_runtime_dict.find(key);
			runtime* res;
			if (it == g_runtime_dict.end())
			{
				CUDA_CHECK(::cudaMallocManaged((void**)&res, sizeof(runtime)));
				res->_stream = stream;
				g_runtime_dict[key] = res;
			} else
				res = it->second;
			return res;
		}
#endif
	}
hyb_host runtime* runtime::host_getruntime()
{
return host_getruntime((cudaStream_t) 0);
}
hyb_host void runtime::host_release()
{
#ifndef NO_EXCEPTION
auto key = std::make_pair(std::this_thread::get_id(), _stream);
std::lock_guard<std::mutex> guard(g_dict_mutex);
{
auto it = g_runtime_dict.find(key);
if (it != g_runtime_dict.end())
g_runtime_dict.erase(key);
::cudaFree(this->exceptionstacks);
::cudaFree(this);
}
#endif
}
#endif
}
#pragma endregion
#ifndef NO_EXCEPTION
// Records a null-pointer exception (code -1) for the calling thread.
hyb_device hyb_inline void thrownullpointerexception(int fileid, int line)
{
	::hybridizer::runtime::throwexception(::hybridizer::runtime::allocateinstance<::hybridizer::nullpointerexception>(), fileid, line, -1);
}
// Records an index-out-of-bounds exception (code -2) for the calling thread.
hyb_device hyb_inline void throwindexoutofboundsexception(int fileid, int line)
{
	::hybridizer::runtime::throwexception(::hybridizer::runtime::allocateinstance<::hybridizer::indexoutofboundsexception>(), fileid, line, -2);
}
namespace hybridizer
{
	// Checked 1-D array access: records NPE / OOB exceptions, then performs
	// the access regardless (the exception machinery does not unwind).
	// NOTE(review): the upper bound is checked against length[0] alone —
	// for arrays with a non-zero lowerBound this looks off by lowerBound;
	// confirm lowerBound is always 0 for generated code.
	template <typename T, typename I>
	hyb_device hyb_inline const T& checknpeandbounds(const hybarray<T,1>& a, I i, int fileid, int line)
	{
		if (a.ptr == nullptr) thrownullpointerexception(fileid, line);
		if (i < a.lowerBound[0]) throwindexoutofboundsexception(fileid, line);
		if (i >= a.length[0]) throwindexoutofboundsexception(fileid, line);
		return a [i] ;
	}
	// Mutable-reference variant of the checked access above.
	template <typename T, typename I>
	hyb_device hyb_inline T& checknpeandbounds(hybarray<T, 1>& a, I i, int fileid, int line)
	{
		if (a.ptr == nullptr) thrownullpointerexception(fileid, line);
		if (i < a.lowerBound[0]) throwindexoutofboundsexception(fileid, line);
		if (i >= a.length[0]) throwindexoutofboundsexception(fileid, line);
		return a[i];
	}
	// Null check for raw pointers; returns the pointer unchanged.
	template <typename T>
	hyb_device hyb_inline T* checknpe(T* p, int fileid, int line)
	{
		if (p == nullptr) thrownullpointerexception(fileid, line);
		return p ;
	}
	// Null check for hybarray values; returns the array unchanged.
	template <typename T, int rank>
	hyb_device hyb_inline hybarray<T, rank> checknpe(hybarray<T, rank> p, int fileid, int line)
	{
		if (p.ptr == nullptr) thrownullpointerexception(fileid, line);
		return p ;
	}
}
#define __hybridizer__checknpeandbounds(a,i) hybridizer::checknpeandbounds(a,i,__FILE_ID__, __LINE__)
#define __hybridizer__checknpe(a) hybridizer::checknpe(a,__FILE_ID__, __LINE__)
#else
#define __hybridizer__checknpeandbounds(a,i) a[i]
#define __hybridizer__checknpe(a) a
#endif
#endif
namespace hybridizer {
// Native structure for array with length, can be used as an array anywhere
	// Native structure for array with length, can be used as an array anywhere
	// Mirrors a managed array: data pointer plus per-dimension length and
	// lower bound. Unions pad the pointer to 8 bytes for a stable layout.
	template <typename T, int rank>
	struct hybarray {
		union {T *ptr ; char __hybridizer_padding_ptr[8] ; } ;
		uint32_t length[rank] ;
		uint32_t lowerBound[rank];
		// Bounds check for dimension k; records an OOB exception and returns
		// false when i is outside [0, length[k]).
		hyb_inline hyb_device bool check_index(int k, int i) const
		{
#ifndef NO_EXCEPTION
			if (i < 0 || i >= length[k])
			{
				::hybridizer::runtime::throwexception(::hybridizer::runtime::allocateinstance<::hybridizer::baseexception>(), __FILE_ID__, __LINE__, -2) ;
				return false;
			}
#endif
			return true;
		}
		// NOTE(review): when check_index fails, these operators fall off the
		// end without returning — undefined behavior; the returned reference
		// is garbage while the recorded exception propagates.
		hyb_inline hyb_device const T& operator[] (int i) const { if (check_index(0, i)) return ptr [i];}
		hyb_inline hyb_device T& operator[] (int i) { if (check_index(0, i)) return ptr [i];}
		hyb_inline hyb_device operator T* () const { return ptr ; }
		hyb_inline hyb_device int getRank() const { return rank ; }
		hyb_inline hyb_device int getLength(int d) const { return length[d] ; }
		hyb_inline hyb_device int getLowerbound(int d) const { return lowerBound[d] ; }
		// Row-major linearization of a rank-dimensional index, applying the
		// per-dimension lower bounds; each coordinate is bounds-checked.
		hyb_inline hyb_device int getIndex(const int indexes[]) const
		{
			int index = 0;
			for (int k = 0 ; k < rank ; ++k)
			{
				int sub = indexes[k] - lowerBound[k];
				if (k > 0) index *= length [k];
				index += sub ;
				check_index(k, sub);
			}
			return index;
		}
		hyb_inline hyb_device const T& get(const int indexes[]) const
		{
			return ptr [getIndex(indexes)] ;
		}
		hyb_inline hyb_device T* getAdr(const int indexes[]) const
		{
			return ptr + getIndex(indexes) ;
		}
		hyb_inline hyb_device void set(const int indexes[], T value)
		{
			ptr[getIndex(indexes)] = value;
		}
	};
template <typename T, int rank> hyb_inline hyb_device int hyblength(hybarray<T, rank> ar, int dim) { return ar.length[dim] ; }
template <typename T, int rank> hyb_inline hyb_device int hyblength(hybarray<T, rank>* ar, int dim) { return ar->length[dim] ; }
template <typename T, int rank> hyb_inline hyb_device int hyblowerbound(hybarray<T, rank> ar, int dim) { return ar.lowerBound[dim] ; }
template <typename T, int rank> hyb_inline hyb_device int hyblowerbound(hybarray<T, rank>* ar, int dim) { return ar->lowerBound[dim] ; }
template <typename T, int rank> hyb_inline hyb_device const T& hybget(hybarray<T, rank> ar, int i) { return ar[i] ; }
template <typename T, int rank> hyb_inline hyb_device const T& hybget(hybarray<T, rank> ar, int i, int j) { int idx[2] = {i, j}; return ar.get(idx) ; }
template <typename T, int rank> hyb_inline hyb_device const T& hybget(hybarray<T, rank> ar, int i, int j, int k) { int idx[3] = {i, j, k}; return ar.get(idx) ; }
template <typename T, int rank> hyb_inline hyb_device void hybset(hybarray<T, rank> ar, int i, T value) { return ar[i] = value ; }
template <typename T, int rank> hyb_inline hyb_device void hybset(hybarray<T, rank> ar, int i, int j, T value) { int idx[2] = {i, j}; return ar.set(idx, value) ; }
template <typename T, int rank> hyb_inline hyb_device void hybset(hybarray<T, rank> ar, int i, int j, int k, T value) { int idx[3] = {i, j, k}; return ar.set(idx, value) ; }
template <typename T, int rank> hyb_inline hyb_device const T& hybget(hybarray<T, rank>* ar, int i) { return ar->operator [](i); }
template <typename T, int rank> hyb_inline hyb_device const T& hybget(hybarray<T, rank>* ar, int i, int j) { int idx[2] = { i, j }; return ar->get(idx); }
template <typename T, int rank> hyb_inline hyb_device const T& hybget(hybarray<T, rank>* ar, int i, int j, int k) { int idx[3] = { i, j, k }; return ar->get(idx); }
template <typename T, int rank> hyb_inline hyb_device void hybset(hybarray<T, rank>* ar, int i, T value) { return ar->operator [](i) = value; }
template <typename T, int rank> hyb_inline hyb_device void hybset(hybarray<T, rank>* ar, int i, int j, T value) { int idx[2] = { i, j }; return ar->set(idx, value); }
template <typename T, int rank> hyb_inline hyb_device void hybset(hybarray<T, rank>* ar, int i, int j, int k, T value) { int idx[3] = { i, j, k }; return ar->set(idx, value); }
template <typename T, int rank> hyb_inline hyb_device T* hybaddress(hybarray<T, rank> ar, int i) { return ar + i ; }
template <typename T, int rank> hyb_inline hyb_device T* hybaddress(hybarray<T, rank> ar, int i, int j) { int idx[2] = {i, j}; return ar.getAdr(idx) ; }
template <typename T, int rank> hyb_inline hyb_device T* hybaddress(hybarray<T, rank> ar, int i, int j, int k) { int idx[3] = {i, j, k}; return ar.getAdr(idx) ; }
};
/* surface management */
#pragma region Surface Management
/*#if (__CUDA_ARCH__ < 300)
#error __CUDA_ARCH__ < 300
#else */
#if 1
// Size-dispatched wrappers over surf2Dread/surf2Dwrite. The x coordinate is
// scaled by the element size because the surface intrinsics take a byte
// offset. On pre-SM30 compilations the generic wrappers compile to no-ops.
namespace hybridizer
{
#if (__CUDA_ARCH__ >= 300)
	template <int size> inline __device__ void sizedSurf2Dread (cudaSurfaceObject_t surface, int x, int y, void* output) ;
	// 16-byte read via int4, scattered into the output buffer word by word.
	template<> inline __device__ void sizedSurf2Dread<16>(cudaSurfaceObject_t surface, int x, int y, void* output)
	{
		int4 tmp = ::surf2Dread<int4>(surface, x * 16, y) ;
		((int*)output)[0] = tmp.x ;
		((int*)output)[1] = tmp.y ;
		((int*)output)[2] = tmp.z ;
		((int*)output)[3] = tmp.w ;
	}
	template<> inline __device__ void sizedSurf2Dread<8>(cudaSurfaceObject_t surface, int x, int y, void* output)
	{
		int2 tmp = ::surf2Dread<int2>(surface, x * 8, y) ;
		((int*)output)[0] = tmp.x ;
		((int*)output)[1] = tmp.y ;
	}
	template<> inline __device__ void sizedSurf2Dread<4>(cudaSurfaceObject_t surface, int x, int y, void* output)
	{
		int tmp = ::surf2Dread<int>(surface, x * 4, y) ;
		((int*)output)[0] = tmp ;
	}
	template<> inline __device__ void sizedSurf2Dread<2>(cudaSurfaceObject_t surface, int x, int y, void* output)
	{
		short tmp = ::surf2Dread<short>(surface, x * 2, y) ;
		((short*)output)[0] = tmp ;
	}
	template<> inline __device__ void sizedSurf2Dread<1>(cudaSurfaceObject_t surface, int x, int y, void* output)
	{
		char tmp = ::surf2Dread<char>(surface, x, y) ;
		((char*)output)[0] = tmp ;
	}
#endif
	// Generic entry point: dispatches on sizeof(T). First parameter is an
	// unused placeholder (kept for the generated-code calling convention).
	template <typename T>
	static inline __device__ void surf2Dread(void*, cudaSurfaceObject_t surface, int x, int y, T* output)
	{
#if (__CUDA_ARCH__ >= 300)
		sizedSurf2Dread <sizeof(T)>(surface, x, y, output);
#endif
	}
#if (__CUDA_ARCH__ >= 300)
	template <int size> inline __device__ void sizedSurf2Dwrite (cudaSurfaceObject_t surface, int x, int y, void* output) ;
	// 16-byte write: gather four words from the input buffer into an int4.
	template<> inline __device__ void sizedSurf2Dwrite<16>(cudaSurfaceObject_t surface, int x, int y, void* input)
	{
		int4 tmp ;
		tmp.x = ((int*)input)[0] ;
		tmp.y = ((int*)input)[1] ;
		tmp.z = ((int*)input)[2] ;
		tmp.w = ((int*)input)[3] ;
		::surf2Dwrite(tmp, surface, x * 16, y) ;
	}
	template<> inline __device__ void sizedSurf2Dwrite<8>(cudaSurfaceObject_t surface, int x, int y, void* input)
	{
		int2 tmp ;
		tmp.x = ((int*)input)[0] ;
		tmp.y = ((int*)input)[1] ;
		::surf2Dwrite(tmp, surface, x * 8, y) ;
	}
	template<> inline __device__ void sizedSurf2Dwrite<4>(cudaSurfaceObject_t surface, int x, int y, void* input)
	{
		int tmp ;
		tmp = ((int*)input)[0] ;
		::surf2Dwrite(tmp, surface, x * 4, y) ;
	}
	template<> inline __device__ void sizedSurf2Dwrite<2>(cudaSurfaceObject_t surface, int x, int y, void* input)
	{
		short tmp ;
		tmp = ((short*)input)[0] ;
		::surf2Dwrite(tmp, surface, x * 2, y) ;
	}
	template<> inline __device__ void sizedSurf2Dwrite<1>(cudaSurfaceObject_t surface, int x, int y, void* input)
	{
		char tmp ;
		tmp = ((char*)input)[0] ;
		::surf2Dwrite(tmp, surface, x, y) ;
	}
#endif
	// Generic entry point: dispatches on sizeof(T); placeholder first parameter.
	template <typename T>
	static inline __device__ void surf2Dwrite(void*, cudaSurfaceObject_t surface, int x, int y, T* input)
	{
#if (__CUDA_ARCH__ >= 300)
		sizedSurf2Dwrite<sizeof(T)>(surface, x, y, input) ;
#endif
	}
};
#endif
#pragma endregion
#ifndef ATOMICADD_PDD
#define ATOMICADD_PDD
#if __CUDACC_VER_MAJOR__ < 8
// Software double-precision atomicAdd for toolkits that predate the native
// one (CUDA 8 / SM60). Standard CAS retry loop over the 64-bit bit pattern;
// comparing as integers avoids a livelock when the stored value is NaN.
__device__ static inline double atomicAdd(double* address, double val)
{
    unsigned long long int* target = (unsigned long long int*)address;
    unsigned long long int seen = *target;
    unsigned long long int expected;
    do {
        expected = seen;
        double updated = val + __longlong_as_double(expected);
        seen = atomicCAS(target, expected, __double_as_longlong(updated));
    } while (seen != expected);
    return __longlong_as_double(seen);
}
#endif
#endif //ATOMICADD_PDD
#ifndef ATOMICMAX_PDD
#define ATOMICMAX_PDD
// Double-precision atomicMax built on atomicCAS over the 64-bit bit pattern.
// Exits early (without a CAS) as soon as the stored value is already >= val.
__device__ static inline double atomicMax(double* address, double val)
{
    unsigned long long int* target = (unsigned long long int*)address;
    unsigned long long int seen = *target;
    unsigned long long int expected;
    do {
        expected = seen;
        if (__longlong_as_double(expected) >= val)
            break; // current value already at least val: nothing to do
        seen = atomicCAS(target, expected, __double_as_longlong(val));
    } while (seen != expected);
    return __longlong_as_double(seen);
}
#endif //ATOMICMAX_PDD
// 8-lane integer vector; lane order in memory is x, y, z, w, x2, y2, z2, w2.
struct int8 {
	int x, y, z, w, x2,y2,z2,w2 ;
	// Default constructor leaves all lanes uninitialized (intentional).
	hyb_device hyb_inline int8() {}
	hyb_device hyb_inline int8(int xx, int yy, int zz, int ww, int xx2, int yy2, int zz2, int ww2) {
		x = xx; y = yy; z = zz; w = ww;
		x2 = xx2; y2 = yy2; z2 = zz2; w2 = ww2;
	}
};
// Lane-wise addition of two int8 vectors.
hyb_inline hyb_device static int8 operator+(const int8& l, const int8&r) {
    return int8(l.x  + r.x,  l.y  + r.y,  l.z  + r.z,  l.w  + r.w,
                l.x2 + r.x2, l.y2 + r.y2, l.z2 + r.z2, l.w2 + r.w2);
}
// 8-lane boolean result: bit i of mask holds the comparison outcome for lane i.
struct bool8 {
	unsigned char mask;
};
// 8-lane float vector; lane order in memory is x, y, z, w, x2, y2, z2, w2.
struct float8 {
	typedef bool8 masktype ;
	float x, y, z, w, x2, y2, z2, w2;
	// Default constructor leaves all lanes uninitialized (intentional).
	hyb_device hyb_inline float8() {}
	hyb_device hyb_inline float8(const float8& p)
	{
		x = p.x ;
		y = p.y ;
		z = p.z ;
		w = p.w ;
		x2 = p.x2 ;
		y2 = p.y2 ;
		z2 = p.z2 ;
		w2 = p.w2 ;
	}
	hyb_device hyb_inline float8(float ix, float iy, float iz, float iw, float ix2, float iy2, float iz2, float iw2)
	{
		x = ix ;
		y = iy ;
		z = iz ;
		w = iw ;
		x2 = ix2 ;
		y2 = iy2 ;
		z2 = iz2 ;
		w2 = iw2 ;
	}
};
// Lane-wise float8 arithmetic and comparisons. Comparisons produce a bool8
// whose bit i reflects lane i (lane order: x, y, z, w, x2, y2, z2, w2).
hyb_device hyb_inline static float8 operator*(const float8& l, const float8&r) {
    return float8(l.x  * r.x,  l.y  * r.y,  l.z  * r.z,  l.w  * r.w,
                  l.x2 * r.x2, l.y2 * r.y2, l.z2 * r.z2, l.w2 * r.w2);
}
// Vector times scalar (broadcast the scalar to every lane).
hyb_device hyb_inline static float8 operator*(const float8& l, const float&r) {
    return float8(l.x  * r, l.y  * r, l.z  * r, l.w  * r,
                  l.x2 * r, l.y2 * r, l.z2 * r, l.w2 * r);
}
hyb_device hyb_inline static float8 operator/(const float8& l, const float8& r) {
    return float8(l.x  / r.x,  l.y  / r.y,  l.z  / r.z,  l.w  / r.w,
                  l.x2 / r.x2, l.y2 / r.y2, l.z2 / r.z2, l.w2 / r.w2);
}
// Vector divided by scalar (broadcast).
hyb_device hyb_inline static float8 operator/(const float8& l, const float& r) {
    return float8(l.x  / r, l.y  / r, l.z  / r, l.w  / r,
                  l.x2 / r, l.y2 / r, l.z2 / r, l.w2 / r);
}
hyb_device hyb_inline static float8 operator-(const float8& l, const float8& r) {
    return float8(l.x  - r.x,  l.y  - r.y,  l.z  - r.z,  l.w  - r.w,
                  l.x2 - r.x2, l.y2 - r.y2, l.z2 - r.z2, l.w2 - r.w2);
}
hyb_device hyb_inline static bool8 operator<(const float8& l, const float8& r) {
    bool8 out;
    out.mask = (unsigned char)(
          ((l.x  < r.x ) << 0) | ((l.y  < r.y ) << 1)
        | ((l.z  < r.z ) << 2) | ((l.w  < r.w ) << 3)
        | ((l.x2 < r.x2) << 4) | ((l.y2 < r.y2) << 5)
        | ((l.z2 < r.z2) << 6) | ((l.w2 < r.w2) << 7));
    return out;
}
hyb_device hyb_inline static bool8 operator>(const float8& l, const float8& r) {
    bool8 out;
    out.mask = (unsigned char)(
          ((l.x  > r.x ) << 0) | ((l.y  > r.y ) << 1)
        | ((l.z  > r.z ) << 2) | ((l.w  > r.w ) << 3)
        | ((l.x2 > r.x2) << 4) | ((l.y2 > r.y2) << 5)
        | ((l.z2 > r.z2) << 6) | ((l.w2 > r.w2) << 7));
    return out;
}
// Vector compared against a broadcast scalar.
hyb_device hyb_inline static bool8 operator>(const float8& l, const float& r) {
    bool8 out;
    out.mask = (unsigned char)(
          ((l.x  > r) << 0) | ((l.y  > r) << 1)
        | ((l.z  > r) << 2) | ((l.w  > r) << 3)
        | ((l.x2 > r) << 4) | ((l.y2 > r) << 5)
        | ((l.z2 > r) << 6) | ((l.w2 > r) << 7));
    return out;
}
hyb_device hyb_inline static float8 operator+(const float8& l, const float8&r) {
    return float8(l.x  + r.x,  l.y  + r.y,  l.z  + r.z,  l.w  + r.w,
                  l.x2 + r.x2, l.y2 + r.y2, l.z2 + r.z2, l.w2 + r.w2);
}
// Vector plus scalar (broadcast).
hyb_device hyb_inline static float8 operator+(const float8& l, const float&r) {
    return float8(l.x  + r, l.y  + r, l.z  + r, l.w  + r,
                  l.x2 + r, l.y2 + r, l.z2 + r, l.w2 + r);
}
namespace hybridizer {
template<typename Vec, typename Mask>
hyb_device hyb_inline Vec select(const Mask& m, const Vec& l, const Vec& r);
template<>
hyb_device hyb_inline float8 select<float8, bool8>(const bool8& m, const float8& l, const float8& r) {
float8 res;
float* pres = (float*)&res;
float* pl = (float*)&l;
float* pr = (float*)&r;
int mask = 1;
for(int i = 0; i < 8; ++i) {
if(m.mask & mask) {
pres[i] = pl[i];
}
else {
pres[i] = pr[i];
}
mask <<= 1;
}
return res;
}
template<typename Vec, typename Scal>
hyb_device hyb_inline Vec insertElement(const Vec& a, Scal elem, int index);
template<>
hyb_device hyb_inline int8 insertElement(const int8& vec, int elem, int index) {
int8 result(vec);
int* pr = (int*)&result;
int* pv = (int*)&vec;
pr[index] = pv[index];
return result;
}
template<>
hyb_device hyb_inline float8 insertElement(const float8& vec, float elem, int index) {
float8 result(vec);
float* pr = (float*)&result;
float* pv = (float*)&vec;
pr[index] = pv[index];
return result;
}
template<>
hyb_device hyb_inline float4 insertElement(const float4& vec, float elem, int index) {
float4 result(vec);
float* pr = (float*)&result;
float* pv = (float*)&vec;
pr[index] = pv[index];
return result;
}
	// Returns lane 'index' of the vector (LLVM extractelement semantics).
	// No bounds check: index must be a valid lane number.
	template<typename Vec, typename Scal>
	hyb_device hyb_inline Scal extractElement(const Vec& vec, int index);
	template<>
	hyb_device hyb_inline int extractElement(const int8& vec, int index) {
		int* pv = (int*) &vec;
		return pv[index];
	}
	template<>
	hyb_device hyb_inline float extractElement(const float8& vec, int index) {
		float* pv = (float*) &vec;
		return pv[index];
	}
	template<>
	hyb_device hyb_inline float extractElement(const float4& vec, int index) {
		float* pv = (float*) &vec;
		return pv[index];
	}
template<typename VecOutput, typename VecInput, typename Mask>
hyb_device hyb_inline VecOutput shuffleVector(const VecInput& l, const VecInput& r, const Mask& m);
template<>
hyb_device hyb_inline int8 shuffleVector(const int8& l, const int8& r, const int8& m) {
int8 res;
int* pl = (int*)&l;
int* pr = (int*)&r;
int* pm = (int*)&m;
int* pres = (int*)&res;
#pragma unroll
for(int i = 0; i < 8; ++i) {
int index = pm[i];
if(index >= 0 && index < 8)
pres[i] = pl[index];
else if(index >= 0)
pres[i] = pr[index];
}
return res;
}
template<>
hyb_device hyb_inline float8 shuffleVector(const float8& l, const float8& r, const int8& m) {
float8 res;
float* pl = (float*)&l;
float* pr = (float*)&r;
int* pm = (int*)&m;
float* pres = (float*)&res;
#pragma unroll
for(int i = 0; i < 8; ++i) {
int index = pm[i];
if(index >= 0 && index < 8)
pres[i] = pl[index];
else if(index >= 0)
pres[i] = pr[index];
}
return res;
}
}
// Plain-load fallbacks for __ldg (see the commented-out __CUDA_ARCH__ >= 35
// dispatch further below): simply dereference.
static inline __device__ float4 hybrid_ldg(float4* x) { return *x ; }
static inline __device__ double hybrid_ldg(double* x) { return *x ; }
// Component-wise float4 arithmetic, with scalar broadcast on either side.
__device__ static inline float4 operator+(float4 a, float b)
{
    return make_float4(a.x + b, a.y + b, a.z + b, a.w + b);
}
__device__ static inline float4 operator*(float4 a, float b)
{
    return make_float4(a.x * b, a.y * b, a.z * b, a.w * b);
}
__device__ static inline float4 operator+(float a, float4 b)
{
    return make_float4(a + b.x, a + b.y, a + b.z, a + b.w);
}
__device__ static inline float4 operator*(float a, float4 b)
{
    return make_float4(a * b.x, a * b.y, a * b.z, a * b.w);
}
__device__ static inline float4 operator+(float4 a, float4 b)
{
    return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
__device__ static inline float4 operator*(float4 a, float4 b)
{
    return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w);
}
/*
#if __CUDA_ARCH__ < 35
__device__ float4 hybrid_ldg(float4* x) { return *x ; }
#else
extern "C" __device__ float4 _ldg(float4*);
#define hybrid_ldg __ldg
#endif
*/
namespace hybridizer {
// Marshaling mirror of a .NET delegate: instance pointer + function pointer,
// each padded to 8 bytes so the layout matches the managed side.
template <typename F> struct hybdelegate {
union {hybridobject* instance; char padding_instance[8]; };
union {F functor; char padding_functor[8]; };
// compare to null, and assign to null
hyb_device hyb_inline hybdelegate<F>& operator= (F func)
{
functor = func ;
return *this ;
}
hyb_device hyb_inline bool operator== (F func)
{
return func == functor ;
}
};
// Mirror of System.DateTime: get_Ticks masks off the two high bits
// (0x3fffffffffffffff) — NOTE(review): presumably those bits carry the
// DateTimeKind as in .NET's internal encoding; confirm against the marshaler.
struct hybdatetime {
uint64_t ticks;
hyb_device hyb_inline int64_t get_Ticks() { return (int64_t) (ticks & 0x3fffffffffffffff); }
hyb_device hyb_inline bool operator==(hybdatetime t) { return t.ticks == ticks; }
};
}
#pragma region pointer arithmetics
namespace hybridizer {
// Typed pointer-arithmetic helper generated from LLVM lowering: wraps
// "base pointer + byte offset" with typed load/store accessors.
template <typename T>
struct nativearrayindexer
{
T* _ptr ;
// Builds an indexer at byte offset 'index' from 'ptr' (offset is in bytes, not elements).
hyb_device hyb_inline static nativearrayindexer<T> build (void* ptr, size_t index)
{
nativearrayindexer<T> res ;
res._ptr = (T*) (((char*)ptr) + index) ;
return res ;
}
hyb_device hyb_inline static void store (const nativearrayindexer<T>* ptr, const T& value)
{
*(ptr->_ptr) = value ;
}
hyb_device hyb_inline static T* getpointer (const nativearrayindexer<T>& ptr)
{
return ptr._ptr;
}
// Loads the pointee and converts it to U via a C-style cast.
template <typename U>
hyb_device hyb_inline static U load (const nativearrayindexer<T>* ptr)
{
return (U)(*(ptr->_ptr));
}
};
// in the special case of T = void* (which happens when code is generated from llvm), we just want to be able to
// - build
// - get the pointer and
// - cast to another type.
// load and store would have no meaning, so implementation is not provided
template <>
struct nativearrayindexer< void > {
void* _ptr;
hyb_device hyb_inline static nativearrayindexer<void> build (void* ptr, size_t index)
{
nativearrayindexer<void> res ;
res._ptr = (void*) (((char*)ptr) + index) ;
return res ;
}
hyb_device hyb_inline static void* getpointer (const nativearrayindexer<void*>& ptr)
{
return ptr._ptr;
}
// Reinterpret this void indexer as a typed one (same bytes, different view).
template <typename U>
hyb_device hyb_inline operator nativearrayindexer<U>()
{
return *(nativearrayindexer<U>*)(void*)this;
}
};
}
#pragma endregion
#pragma region fixed buffers
namespace hybridizer
{
// C# fixed-size buffer ("fixed T buf[count]"): the union makes the first
// element and the whole array share storage, matching the managed layout.
template<typename T, int count>
struct fixedbuffer
{
union
{
T FixedElementField ;
T __data [count] ;
} ;
} ;
}
#pragma endregion
#pragma region llvm memset/memcpy
namespace hybridizer {
// memset/memcpy lowered from the LLVM intrinsics, specialized by alignment.
// NOTE(review): the aligned specializations copy whole 16-byte (or 4-byte)
// chunks and silently drop any remainder — presumably the code generator only
// emits them for sizes divisible by the chunk; confirm against the generator.
template<int align> // alignment in bytes
hyb_device hyb_inline void memseti32(char* ptr, char val, int size) ;
template<>
hyb_device hyb_inline void memseti32<32>(char* ptr, char val, int size)
{
if (val == 0)
{
int4 ival; ival.x = ival.y = ival.z = ival.w = 0 ;
int4* aptr = (int4*)ptr ;
#pragma unroll
for (int i = 0 ; i < size / 16 ; ++i)
{
aptr[i] = ival ;
}
} else {
// replicate the byte into all four bytes of an int, then fill by int4
int iival = val | (((int)val) << 8) | (((int)val) << 16)| (((int)val) << 24);
int4 ival; ival.x = ival.y = ival.z = ival.w = iival ;
int4* aptr = (int4*)ptr ;
#pragma unroll
for (int i = 0 ; i < size / 16 ; ++i)
{
aptr[i] = ival ;
}
}
}
// Generic (unknown-alignment) fallback: defer to the runtime memset.
template<int align> // alignment in bytes
hyb_device hyb_inline void memseti32(char* ptr, char val, int size)
{
memset(ptr, val, size);
}
template<int align>
hyb_device hyb_inline void memcpyi32(char* ptr, char* src, int size) ;
template<>
hyb_device hyb_inline void memcpyi32<32>(char* dest, char* src, int size)
{
int4* adest = (int4*)dest ;
int4* asrc = (int4*)src ;
#pragma unroll
for (int i = 0 ; i < size / 16 ; ++i)
{
adest[i] = asrc[i] ;
}
}
template<>
hyb_device hyb_inline void memcpyi32<4>(char* dest, char* src, int size)
{
int* adest = (int*)dest ;
int* asrc = (int*)src ;
#pragma unroll
for (int i = 0 ; i < size / 4 ; ++i)
{
adest[i] = asrc[i] ;
}
}
// Generic fallback: defer to the runtime memcpy.
template<int align>
hyb_device hyb_inline void memcpyi32(char* ptr, char* src, int size)
{
memcpy(ptr, src, size);
}
// 64-bit-size variants of the copy above.
template<int align>
hyb_device hyb_inline void memcpyi64(char* ptr, char* src, int64_t size) ;
template<>
hyb_device hyb_inline void memcpyi64<32>(char* dest, char* src, int64_t size)
{
int4* adest = (int4*)dest ;
int4* asrc = (int4*)src ;
#pragma unroll
for (int64_t i = 0 ; i < size / 16L ; ++i)
{
adest[i] = asrc[i] ;
}
}
template<>
hyb_device hyb_inline void memcpyi64<4>(char* dest, char* src, int64_t size)
{
int* adest = (int*)dest ;
int* asrc = (int*)src ;
#pragma unroll
for (int64_t i = 0 ; i < size / 4L ; ++i)
{
adest[i] = asrc[i] ;
}
}
template<int align>
hyb_device hyb_inline void memcpyi64(char* ptr, char* src, int64_t size)
{
memcpy(ptr, src, size);
}
}
// Entry points used by generated code; 'isvolatile' is accepted but ignored.
#define __hybridizer_memseti32(ptr,val,count,align,isvolatile) hybridizer::memseti32<align>(ptr,val,count)
#define __hybridizer_memcpyi32(dest,src,len,align,isvolatile) hybridizer::memcpyi32<align>(dest,src,len)
#define __hybridizer_memcpyi64(dest,src,len,align,isvolatile) hybridizer::memcpyi64<align>(dest,src,len)
#pragma endregion
#pragma region vector load/store
namespace hybridizer {
// Alignment-specialized load of a 32-byte float8, lowered to two float4
// transactions when the pointer is known to be 32-byte aligned.
template<int alignment>
hyb_inline hyb_device float8 loadfloat8(const float8* ptr);
template<>
hyb_inline hyb_device float8 loadfloat8<32>(const float8* ptr) {
float8 res;
float4 low = ((float4*)(ptr))[0];
float4 high = ((float4*)(ptr))[1];
res.x = low.x;
res.y = low.y;
res.z = low.z;
res.w = low.w;
res.x2 = high.x;
res.y2 = high.y;
res.z2 = high.z;
res.w2 = high.w;
return res;
}
// Generic fallback: plain aggregate copy.
template<int alignment>
hyb_inline hyb_device float8 loadfloat8(const float8* ptr) {
return *ptr;
}
// Matching store: two float4 stores when 32-byte aligned, plain copy otherwise.
template<int alignment>
hyb_inline hyb_device void storefloat8(float8* ptr, const float8& val);
template<>
hyb_inline hyb_device void storefloat8<32>(float8* ptr, const float8& val) {
float4* iptr = (float4*) ptr;
float4* ival = (float4*) &val;
iptr[0] = ival[0];
iptr[1] = ival[1];
}
template<int alignment>
hyb_inline hyb_device void storefloat8(float8* ptr, const float8& val) {
*ptr = val;
}
#define __hybridizer_load_float8(ptr, alignment) hybridizer::loadfloat8<alignment>(ptr)
#define __hybridizer_store_float8(ptr, val, alignment) hybridizer::storefloat8<alignment>(ptr, val)
}
#pragma endregion
#pragma region actions
// #ifndef HYBRIDIZER_NO_HOST
namespace hybridizer {
// Marshaling mirror of System.Action<...>: instance pointer, function pointer
// and a vectorized-variant slot, each padded to 8 bytes to match the managed layout.
// TODO?: something better than a function pointer but what??
#ifdef __cpp_variadic_templates
template<typename ...T>
struct action {
typedef void(*funcaction)(void* self, T... i);
union { void* _self; char padding1[8]; };
union { funcaction _funcptr; char padding2[8]; };
union { void* _funcptrvect; char padding3[8]; };
hyb_device hyb_inline action() {}
hyb_device hyb_inline action(void* self, void* func) : _self(self), _funcptr((funcaction)func) {}
hyb_device hyb_inline void invoke(void* self, T... i) {
_funcptr(self, i...);
}
hyb_device hyb_inline void invoke(T... i) {
_funcptr(_self, i...);
}
};
/// action2, action3 and so on are useless in cuda (since we have no vectorization issue). We just need action
template<typename ...T>
using action2 = action<T...>;
template<typename ...T>
using action3 = action<T...>;
template<typename ...T>
using action4 = action<T...>;
#else // visual < 2015
// Non-variadic fallbacks for compilers without variadic templates.
template<typename T>
struct action {
typedef void(*funcaction)(void* self, T i);
union { void* _self; char padding1[8]; };
union { funcaction _funcptr; char padding2[8]; };
union { void* _funcptrvect; char padding3[8]; };
hyb_device hyb_inline action() {}
hyb_device hyb_inline action(void* self, void* func) : _self(self), _funcptr((funcaction)func) {}
hyb_device hyb_inline void invoke(void* self, T i) {
_funcptr(self, i);
}
hyb_device hyb_inline void invoke(T i) {
_funcptr(_self, i);
}
};
template<typename T1, typename T2>
struct action2 {
typedef void(*funcaction)(void* self, T1 i1, T2 i2);
union { void* _self; char padding1[8]; };
union { funcaction _funcptr; char padding2[8]; };
union { void* _funcptrvect; char padding3[8]; };
hyb_device hyb_inline action2() {}
hyb_device hyb_inline action2(void* self, void* func) : _self(self), _funcptr((funcaction)func) {}
hyb_device hyb_inline void invoke(void* self, T1 i1, T2 i2) {
_funcptr(self, i1, i2);
}
hyb_device hyb_inline void invoke(T1 i1, T2 i2) {
_funcptr(_self, i1, i2);
}
};
template<typename T1, typename T2, typename T3>
struct action3 {
typedef void(*funcaction)(void* self, T1 i1, T2 i2, T3 i3);
union { void* _self; char padding1[8]; };
union { funcaction _funcptr; char padding2[8]; };
union { void* _funcptrvect; char padding3[8]; };
hyb_device hyb_inline action3() {}
hyb_device hyb_inline action3(void* self, void* func) : _self(self), _funcptr((funcaction)func) {}
hyb_device hyb_inline void invoke(void* self, T1 i1, T2 i2, T3 i3) {
_funcptr(self, i1, i2, i3);
}
hyb_device hyb_inline void invoke(T1 i1, T2 i2, T3 i3) {
_funcptr(_self, i1, i2, i3);
}
};
template<typename T1, typename T2, typename T3, typename T4>
struct action4 {
typedef void(*funcaction)(void* self, T1 i1, T2 i2, T3 i3, T4 i4);
union { void* _self; char padding1[8]; };
union { funcaction _funcptr; char padding2[8]; };
union { void* _funcptrvect; char padding3[8]; };
hyb_device hyb_inline action4() {}
hyb_device hyb_inline action4(void* self, void* func) : _self(self), _funcptr((funcaction)func) {}
hyb_device hyb_inline void invoke(void* self, T1 i1, T2 i2, T3 i3, T4 i4) {
_funcptr(self, i1, i2, i3, i4);
}
hyb_device hyb_inline void invoke(T1 i1, T2 i2, T3 i3, T4 i4) {
_funcptr(_self, i1, i2, i3, i4);
}
};
#endif
// static actions
#ifndef HYBRIDIZER_NO_HOST
template<typename T, hyb_device void (*func)(T)>
#else
template<typename T, void (*func)(T)>
#endif
struct action_static
{
hyb_device hyb_inline operator action<T> () const { return action<T>(NULL, invoke_ptr) ; } // nullptr not supported by nvcc <dummy>
hyb_device hyb_inline void invoke (T t) { return func (t) ; }
hyb_device hyb_inline void invoke (void* self, T t) { return func (t) ; }
hyb_device hyb_inline static void invoke_ptr (void* self, T t) { return func (t) ; }
};
// Parallel.For lowering: a grid-stride loop — each thread starts at its global
// index (plus 'start') and strides by the total number of launched threads.
template <typename T>
hyb_device hyb_inline void parallelfor(void* self, int start, int stop, action<T>* action);
template <>
hyb_device hyb_inline void parallelfor<int>(void* self, int start, int stop, action<int>* action)
{
for(int i = __hybridizer_threadIdxX + __hybridizer_blockIdxX * __hybridizer_blockDimX + start; i < stop; i += __hybridizer_blockDimX * __hybridizer_gridDimX) {
action->invoke(self, i);
}
}
// 2D variant: Y dimension over rows, X dimension over columns.
template <typename T1, typename T2>
hyb_device hyb_inline void parallelfor2D(void* self, int startX, int stopX, int startY, int stopY, action2<T1, T2>* action);
template<>
hyb_device hyb_inline void parallelfor2D(void* self, int startX, int stopX, int startY, int stopY, action2<int, int>* action) {
for (int i = __hybridizer_threadIdxY + __hybridizer_blockIdxY * __hybridizer_blockDimY + startY; i < stopY; i += __hybridizer_blockDimY * __hybridizer_gridDimY) {
for (int j = __hybridizer_threadIdxX + __hybridizer_blockIdxX * __hybridizer_blockDimX + startX; j < stopX; j += __hybridizer_blockDimX * __hybridizer_gridDimX) {
action->invoke(self, i, j);
}
}
}
// By-value overloads (action already carries its own 'self').
template <typename T>
hyb_device hyb_inline void parallelfor(int start, int stop, action<T> action);
template <>
hyb_device hyb_inline void parallelfor<int>(int start, int stop, action<int> action)
{
for(int i = __hybridizer_threadIdxX + __hybridizer_blockIdxX * __hybridizer_blockDimX + start; i < stop; i += __hybridizer_blockDimX * __hybridizer_gridDimX) {
action.invoke(i);
}
}
template <typename T1, typename T2>
hyb_device hyb_inline void parallelfor2D(int startX, int stopX, int startY, int stopY, action2<T1, T2> action);
template <>
hyb_device hyb_inline void parallelfor2D<int>(int startX, int stopX, int startY, int stopY, action2<int, int> action)
{
for(int i = __hybridizer_threadIdxY + __hybridizer_blockIdxY * __hybridizer_blockDimY + startY; i < stopY; i += __hybridizer_blockDimY * __hybridizer_gridDimY) {
for(int j = __hybridizer_threadIdxX + __hybridizer_blockIdxX * __hybridizer_blockDimX + startX; j < stopX; j += __hybridizer_blockDimX * __hybridizer_gridDimX) {
action.invoke(i, j);
}
}
}
}
// #endif
#pragma endregion
#pragma region Func<...>
#ifndef HYBRIDIZER_NO_HOST
namespace hybridizer
{
// Marshaling mirrors of System.Func<...>: instance pointer, function pointer
// and a vectorized-variant slot, each padded to 8 bytes to match the managed layout.
template<typename T, typename U, typename V, typename W, typename retV>
struct func4
{
typedef retV (*funcaction)(void* self, T i, U j, V k, W l);
union { void* _self; char padding1[8]; };
union { funcaction _funcptr ; char padding2[8]; };
union { void* _funcptrvect ; char padding3[8]; };
hyb_device hyb_inline func4 () {}
hyb_device hyb_inline func4 (void* self, void* func) : _self(self), _funcptr ((funcaction)func) { }
hyb_device hyb_inline retV invoke(void* self, T i, U j, V k, W l) const {
return _funcptr(self, i, j, k, l);
}
hyb_device hyb_inline retV invoke(T i, U j, V k, W l) const {
return _funcptr(_self, i, j, k, l);
}
} ;
// Statically-bound four-argument Func.
// FIX: the function-pointer template parameter previously had type
// retV (*)(T,U,V) — three parameters — while every invoke call passes four
// arguments (t,u,v,w); it must take (T,U,V,W) like func3_static takes (T,U,V).
template<typename T, typename U, typename V, typename W, typename retV, hyb_device retV (*funcptr)(T,U,V,W)>
struct func4_static
{
// http://www.cplusplus.com/forum/beginner/11866/
hyb_device hyb_inline operator func4<T,U,V,W,retV> () const { return func4<T,U,V,W,retV>(NULL, invoke_ptr) ; }
hyb_device hyb_inline retV invoke (T t, U u, V v, W w) { return funcptr (t,u,v, w) ; }
hyb_device hyb_inline retV invoke (void* self, T t, U u, V v, W w) { return funcptr (t,u,v,w) ; }
hyb_device hyb_inline static retV invoke_ptr (void* self, T t, U u, V v, W w) { return funcptr (t,u,v,w) ; }
} ;
template<typename T, typename U, typename V, typename retV>
struct func3
{
typedef retV (*funcaction)(void* self, T i, U j, V k);
union { void* _self; char padding1[8]; };
union { funcaction _funcptr ; char padding2[8]; };
union { void* _funcptrvect ; char padding3[8]; };
hyb_device hyb_inline func3 () {}
hyb_device hyb_inline func3 (void* self, void* func) : _self(self), _funcptr ((funcaction)func) { }
hyb_device hyb_inline retV invoke(void* self, T i, U j, V k) {
return _funcptr(self, i, j, k);
}
hyb_device hyb_inline retV invoke(T i, U j, V k) {
return _funcptr(_self, i, j, k);
}
} ;
template<typename T, typename U, typename V, typename retV, hyb_device retV (*funcptr)(T,U,V)>
struct func3_static
{
// http://www.cplusplus.com/forum/beginner/11866/
hyb_device hyb_inline operator func3<T,U,V,retV> () const { return func3<T,U,V,retV>(NULL, invoke_ptr) ; }
hyb_device hyb_inline retV invoke (T t, U u, V v) { return funcptr (t,u,v) ; }
hyb_device hyb_inline retV invoke (void* self, T t, U u, V v) { return funcptr (t,u,v) ; }
hyb_device hyb_inline static retV invoke_ptr (void* self, T t, U u, V v) { return funcptr (t,u,v) ; }
} ;
template<typename T, typename U, typename retV>
struct func2
{
typedef retV (*funcaction)(void* self, T i, U j);
union { void* _self; char padding1[8]; };
union { funcaction _funcptr ; char padding2[8]; };
union { void* _funcptrvect ; char padding3[8]; };
hyb_device hyb_inline func2 () {}
hyb_device hyb_inline func2 (void* self, void* func) : _self(self), _funcptr ((funcaction)func) { }
hyb_device hyb_inline retV invoke(void* self, T i, U j) {
return _funcptr(self, i, j);
}
hyb_device hyb_inline retV invoke(T i, U j) {
return _funcptr(_self, i, j);
}
} ;
template<typename T, typename U, typename retV, hyb_device retV (*funcptr)(T,U)>
struct func2_static
{
// http://www.cplusplus.com/forum/beginner/11866/
hyb_device hyb_inline operator func2<T,U,retV> () const { return func2<T,U,retV>(NULL, invoke_ptr) ; }
hyb_device hyb_inline retV invoke (T t, U u) { return funcptr (t,u) ; }
hyb_device hyb_inline retV invoke (void* self, T t, U u) { return funcptr (t,u) ; }
hyb_device hyb_inline static retV invoke_ptr (void* self, T t, U u) { return funcptr (t,u) ; }
} ;
// Statically-bound Func with an explicit captured-state pointer.
template<typename S, typename T, typename U, typename retV, hyb_device retV(*funcptr)(S*, T, U)>
struct func2_capture_static
{
hyb_device hyb_inline operator func2<T, U, retV>() const { return func2<T, U, retV>(NULL, invoke_ptr); }
hyb_device hyb_inline retV invoke(T t, U u) { return funcptr(NULL, t, u); }
hyb_device hyb_inline retV invoke(S* self, T t, U u) { return funcptr(self, t, u); }
hyb_device hyb_inline static retV invoke_ptr(S* self, T t, U u) { return funcptr(self, t, u); }
};
template<typename T, typename retV>
struct func1
{
typedef retV (*funcaction)(void* self, T i);
union { void* _self; char padding1[8]; };
union { funcaction _funcptr ; char padding2[8]; };
union { void* _funcptrvect ; char padding3[8]; };
// NOTE(review): returns an uninitialized func1 — presumably only its address
// matters at the call sites the generator emits; confirm before relying on it.
static hyb_device hyb_inline func1<T, retV> null_ptr() {
func1 res;
return res;
}
hyb_device hyb_inline operator void*() { return _self; }
hyb_device hyb_inline func1 () {}
hyb_device hyb_inline func1 (void* self, void* func) : _self(self), _funcptr ((funcaction)func) { }
hyb_device hyb_inline retV invoke(void* self, T i) {
return _funcptr(self, i);
}
hyb_device hyb_inline retV invoke(T i) {
return _funcptr(_self, i);
}
};
template<typename T, typename retV, hyb_device retV (*funcptr)(T)>
struct func1_static
{
// http://www.cplusplus.com/forum/beginner/11866/
hyb_device hyb_inline operator func1<T,retV> () const { return func1<T,retV>(NULL, invoke_ptr) ; }
hyb_device hyb_inline retV invoke (T t) { return funcptr (t) ; }
hyb_device hyb_inline retV invoke (void* self, T t) { return funcptr (t) ; }
hyb_device hyb_inline static retV invoke_ptr (void* self, T t) { return funcptr (t) ; }
} ;
}
#endif
#pragma endregion
namespace hybridizer {
// Mirror of System.Nullable<T>. NOTE(review): the padding expression
// (sizeof(T) - 4) % 4 + 4 presumably reproduces the managed layout of the
// hasValue flag (it uses unsigned wrap-around when sizeof(T) < 4) — confirm
// against the marshaler before changing.
template <typename T> struct nullable {
T data;
union {
bool hasValue;
char __padding[(sizeof(T) - 4) % 4 + 4];
};
hyb_device hyb_inline bool get_HasValue() { return hasValue; }
hyb_device hyb_inline T get_Value() { return data; }
};
// Mirror of System.String as a bare char pointer; equality compares the
// pointers themselves (reference identity), not string contents.
struct string {
const char* data;
#if !defined(_WIN32)
hyb_inline hyb_device string() {}
hyb_inline hyb_device string(const char* p) : data(p) {}
#endif
#if !defined(HYBRIDIZER_NO_HOST)
friend hyb_inline hyb_device int operator==(const hybridizer::string& s1, const hybridizer::string& s2) {
return s1.data == s2.data;
}
#endif
hyb_inline hyb_device operator const char*() {return data;}
};
// Console.WriteLine equivalent; the stream argument is accepted but ignored.
#if !defined(HYBRIDIZER_NO_HOST)
hyb_device hyb_inline static void hyprintfline(FILE* f, const string& message) {
printf("%s\n", message.data);
}
#else
hyb_device hyb_inline static void hyprintfline(void* f, const string& message) {
printf("%s\n", message.data);
}
#endif
}
namespace hybridizer
{
// Integer wrapper marking an index the generator has proven aligned;
// behaves like its underlying T for comparison and conversion.
template <typename T>
struct alignedindex
{
T inner ;
static hyb_device hyb_inline alignedindex <T> op_Implicit (T t) { alignedindex <T> res ; res.inner = t ; return res ; }
hyb_device hyb_inline T get_Inner () { return inner ; }
// hyb_device hyb_inline alignedindex<T> operator +(T rht) const { alignedindex<T> res ; res.inner = inner + rht ; return res ; }
hyb_device hyb_inline alignedindex<T> () {}
hyb_device hyb_inline alignedindex<T> (T i) { inner = i ; }
hyb_device hyb_inline operator int () const { return inner ; }
hyb_device hyb_inline bool operator<(T i) const { return inner < i ; }
hyb_device hyb_inline bool operator>(T i) const { return inner > i ; }
hyb_device hyb_inline bool operator<=(T i) const { return inner <= i ; }
hyb_device hyb_inline bool operator>=(T i) const { return inner >= i ; }
hyb_device hyb_inline bool operator==(T i) const { return inner == i ; }
hyb_device hyb_inline bool operator!=(T i) const { return inner != i ; }
} ;
// Pointer wrapper (padded to 8 bytes) exposing .NET-style indexer accessors.
template <typename T>
struct alignedstorage
{
union
{
T* inner ;
char __padding[8];
};
hyb_inline hyb_device T get_Item (const int & index) const
{
return inner[index];
}
hyb_inline hyb_device void set_Item (int index, T value) {
inner[index] = value;
}
} ;
// Same accessors over stack-allocated arrays (C# stackalloc lowering).
template <typename T>
struct stackarray
{
union
{
T* inner;
char __padding[8];
};
hyb_inline hyb_device T get_Item (const int & index) const
{
return inner[index];
}
hyb_inline hyb_device void set_Item (int index, const T& value) {
inner[index] = value;
}
} ;
}
namespace hybridizer
{
// Exchanges the pointees of a and b (Hybridizer's std::swap equivalent).
template<typename T>
hyb_inline hyb_device void swap (T* a, T* b)
{
T tmp = *a ;
*a = *b ;
*b = tmp ;
}
}
namespace hybridizer {
// Reinterprets the object representation of 'f' as a To (C# Unsafe.As / LLVM
// bitcast lowering). Uses memcpy rather than the original type-punning pointer
// cast: the cast form violates strict aliasing (undefined behavior), while the
// memcpy form is well defined and compiles to the same code. As with the
// original, sizeof(To) bytes must be readable starting at &f.
template<typename From, typename To>
hyb_inline hyb_device To bitcast(const From& f)
{
To t;
memcpy(&t, &f, sizeof(To));
return t;
}
}
#ifdef HYB_CUDA_HALF
#include <hybridizer.cuda.half.cuh>
#endif
namespace hybridizer {
// True only for thread (0,0,0) of the block — used to run a path once per block.
hyb_inline hyb_device bool enforce_serial()
{
return (threadIdx.x | threadIdx.y | threadIdx.z) == 0;
}
}
// ----- /HYBRIDIZER_CUDA_CUH -----
#include <cstdio>
// generating GetTypeID function
#include <cstring> // for strcmp
extern "C" DLL_PUBLIC int HybridizerGetTypeID( const char* fullTypeName)
{
if (strcmp (fullTypeName, "Hybrid.Program") == 0) return 1000000 ;
if (strcmp (fullTypeName, "Hybridizer.Runtime.CUDAImports.IntResidentArray") == 0) return 1000001 ;
if (strcmp (fullTypeName, "Hybridizer.Runtime.CUDAImports.ResidentArrayStatus") == 0) return 1000002 ;
if (strcmp (fullTypeName, "System.Action<System.Int32>") == 0) return 1000003 ;
if (strcmp (fullTypeName, "System.Nullable<System.Int64>") == 0) return 1000004 ;
if (strcmp (fullTypeName, "System.Object") == 0) return 1000005 ;
if (strcmp (fullTypeName, "System.Threading.Tasks.Parallel") == 0) return 1000006 ;
if (strcmp (fullTypeName, "System.Threading.Tasks.ParallelLoopResult") == 0) return 1000007 ;
return 0 ;
}
extern "C" DLL_PUBLIC const char* HybridizerGetTypeFromID( const int typeId)
{
if (typeId == 1000000) return "Hybrid.Program" ;
if (typeId == 1000001) return "Hybridizer.Runtime.CUDAImports.IntResidentArray" ;
if (typeId == 1000002) return "Hybridizer.Runtime.CUDAImports.ResidentArrayStatus" ;
if (typeId == 1000003) return "System.Action<System.Int32>" ;
if (typeId == 1000004) return "System.Nullable<System.Int64>" ;
if (typeId == 1000005) return "System.Object" ;
if (typeId == 1000006) return "System.Threading.Tasks.Parallel" ;
if (typeId == 1000007) return "System.Threading.Tasks.ParallelLoopResult" ;
return "" ;
}
extern "C" DLL_PUBLIC int HybridizerGetShallowSize (const char* fullTypeName)
{
#ifdef __TYPE_DECL__Hybrid_Program___
if (strcmp (fullTypeName, "Hybrid.Program") == 0) return 8 ;
#endif
#ifdef __TYPE_DECL__Hybridizer_Runtime_CUDAImports_IntResidentArray___
if (strcmp (fullTypeName, "Hybridizer.Runtime.CUDAImports.IntResidentArray") == 0) return 32 ;
#endif
#ifdef __TYPE_DECL_int__
if (strcmp (fullTypeName, "Hybridizer.Runtime.CUDAImports.ResidentArrayStatus") == 0) return 4 ;
#endif
#ifdef __TYPE_DECL_hybridizer_action__T____
if (strcmp (fullTypeName, "System.Action<System.Int32>") == 0) return 16 ;
#endif
#ifdef __TYPE_DECL_hybridizer_nullable__T____
if (strcmp (fullTypeName, "System.Nullable<System.Int64>") == 0) return 16 ;
#endif
#ifdef __TYPE_DECL_hybridizer_hybridobject___
if (strcmp (fullTypeName, "System.Object") == 0) return 8 ;
#endif
#ifdef __TYPE_DECL__System_Threading_Tasks_ParallelLoopResult__
if (strcmp (fullTypeName, "System.Threading.Tasks.ParallelLoopResult") == 0) return 24 ;
#endif
return 0 ;
}
// Get various Hybridizer properties at runtime
// Runtime-queryable build properties of this Hybridizer module.
struct __hybridizer_properties {
int32_t UseHybridArrays;   // 1 if arrays are marshaled as hybrid arrays
int32_t Flavor;            // code-generation flavor id (1 = CUDA here)
int32_t CompatibilityMode; // nonzero enables legacy compatibility behavior
int32_t _dummy;            // explicit padding to 16 bytes
};
extern "C" DLL_PUBLIC __hybridizer_properties __HybridizerGetProperties () {
__hybridizer_properties res;
res.UseHybridArrays = 0;
res.Flavor = 1;
res.CompatibilityMode = 0;
return res ;
}
#include <cuda.h>
// Lazily-loaded CUDA driver-API module wrapping the embedded cubin image;
// module_data doubles as the "already loaded" flag checked by the wrappers below.
struct HybridModule
{
void* module_data ;
CUmodule module ;
} ;
// Embedded cubin image emitted by the Hybridizer toolchain (defined elsewhere).
extern char __hybridizer_cubin_module_data [] ;
// Process-wide module cache shared by all kernel wrappers (not thread-safe).
static HybridModule __hybridizer__gs_module = { 0 };
#pragma region Wrappers definitions
extern "C" DLL_PUBLIC int Hybridx46Programx46Create_pre_basis_ExternCWrapper_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared, Hybridizer::Runtime::CUDAImports::IntResidentArray* const equation, Hybridizer::Runtime::CUDAImports::IntResidentArray* const pre_basis_main, int N)
{
CUresult cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = cuModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
if (cures != CUDA_SUCCESS) return (int)cures ;
}
CUfunction __hybridizer__cufunc ;
cures = cuModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Hybridx46Programx46Create_pre_basis") ;
if (cures != CUDA_SUCCESS) return (int)cures ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
void* __hybridizer_launch_config[5] =
{
(void*)&__hybridizer_runtime,
(void*)&equation,
(void*)&pre_basis_main,
(void*)&N,
(void*)0
} ;
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = cuLaunchKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, 0, __hybridizer_launch_config, 0) ;
if (cures != CUDA_SUCCESS) return (int)cures ;
int cudaLaunchRes = (int)::cudaPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
int __synchronizeRes = (int)::cudaDeviceSynchronize () ;
return __synchronizeRes ;
}
extern "C" DLL_PUBLIC int Hybridx46Programx46Substitute_ExternCWrapper_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared, int* const equation, Hybridizer::Runtime::CUDAImports::IntResidentArray* const pre_basis, Hybridizer::Runtime::CUDAImports::IntResidentArray* const result, int equationLength, int pre_basisLengthAxis0)
{
CUresult cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = cuModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
if (cures != CUDA_SUCCESS) return (int)cures ;
}
CUfunction __hybridizer__cufunc ;
cures = cuModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Hybridx46Programx46Substitute") ;
if (cures != CUDA_SUCCESS) return (int)cures ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
void* __hybridizer_launch_config[7] =
{
(void*)&__hybridizer_runtime,
(void*)&equation,
(void*)&pre_basis,
(void*)&result,
(void*)&equationLength,
(void*)&pre_basisLengthAxis0,
(void*)0
} ;
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = cuLaunchKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, 0, __hybridizer_launch_config, 0) ;
if (cures != CUDA_SUCCESS) return (int)cures ;
int cudaLaunchRes = (int)::cudaPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
int __synchronizeRes = (int)::cudaDeviceSynchronize () ;
return __synchronizeRes ;
}
extern "C" DLL_PUBLIC int Hybridx46Programx46Multiply_pre_basis_ExternCWrapper_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared, Hybridizer::Runtime::CUDAImports::IntResidentArray* const big_pre_basis, Hybridizer::Runtime::CUDAImports::IntResidentArray* const small_pre_basis, Hybridizer::Runtime::CUDAImports::IntResidentArray* const result, int big_pre_basisLengthAxis1, int small_pre_basisLengthAxis0, int small_pre_basisLengthAxis1)
{
CUresult cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = cuModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
if (cures != CUDA_SUCCESS) return (int)cures ;
}
CUfunction __hybridizer__cufunc ;
cures = cuModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Hybridx46Programx46Multiply_pre_basis") ;
if (cures != CUDA_SUCCESS) return (int)cures ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
void* __hybridizer_launch_config[8] =
{
(void*)&__hybridizer_runtime,
(void*)&big_pre_basis,
(void*)&small_pre_basis,
(void*)&result,
(void*)&big_pre_basisLengthAxis1,
(void*)&small_pre_basisLengthAxis0,
(void*)&small_pre_basisLengthAxis1,
(void*)0
} ;
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = cuLaunchKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, 0, __hybridizer_launch_config, 0) ;
if (cures != CUDA_SUCCESS) return (int)cures ;
int cudaLaunchRes = (int)::cudaPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
int __synchronizeRes = (int)::cudaDeviceSynchronize () ;
return __synchronizeRes ;
}
extern "C" DLL_PUBLIC int Hybridx46Programx46Simplify_ExternCWrapper_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared, Hybridizer::Runtime::CUDAImports::IntResidentArray* const ar, Hybridizer::Runtime::CUDAImports::IntResidentArray* const gcds, int arLengthAxis0, int arLengthAxis1)
{
CUresult cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = cuModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
if (cures != CUDA_SUCCESS) return (int)cures ;
}
CUfunction __hybridizer__cufunc ;
cures = cuModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Hybridx46Programx46Simplify") ;
if (cures != CUDA_SUCCESS) return (int)cures ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
void* __hybridizer_launch_config[6] =
{
(void*)&__hybridizer_runtime,
(void*)&ar,
(void*)&gcds,
(void*)&arLengthAxis0,
(void*)&arLengthAxis1,
(void*)0
} ;
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = cuLaunchKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, 0, __hybridizer_launch_config, 0) ;
if (cures != CUDA_SUCCESS) return (int)cures ;
int cudaLaunchRes = (int)::cudaPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
int __synchronizeRes = (int)::cudaDeviceSynchronize () ;
return __synchronizeRes ;
}
#pragma endregion
|
87c16f1d7227f91314abc047e0fbfe23bbe64a73.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
// Aborts with file/line and the HIP error string when a runtime call fails.
// (No comments inside the macro: '//' would swallow the line-continuation '\'.)
#define CHECK(call)\
{\
const hipError_t error = call;\
if (error != hipSuccess)\
{\
printf("Error: %s:%d, ", __FILE__, __LINE__);\
printf("code:%d, reason: %s", error, hipGetErrorString(error));\
exit(-10 * error);\
}\
}\
// Fills h_A with nxy random floats in [0, 25.5].
// Seeds the C PRNG only once per process: the original re-seeded with
// time(NULL) on every call, so two matrices initialized within the same
// second received identical values.
void initHostMatrix(float *h_A, int nxy)
{
    static int seeded = 0;
    if (!seeded)
    {
        seeded = 1;
        srand((unsigned int) time(NULL));
    }
    for (int i = 0; i < nxy; ++i)
    {
        h_A[i] = (float) (rand() & 0xff) / 10.0f;
    }
}
// Prints an ny-by-nx row-major int matrix, tab-separated, one row per line.
void printMatrix(int *h_A, int nx, int ny)
{
    const int *row = h_A;
    for (int i = 0; i < ny; ++i, row += nx) {
        for (int j = 0; j < nx; ++j) {
            printf("%d\t", row[j]);
        }
        printf("\n");
    }
}
// Debug kernel: prints each thread's indices and the matrix element it maps to.
// Guarded so threads outside the nx-by-ny matrix neither print nor read memory:
// the original indexed d_A out of bounds whenever the grid overshot the matrix.
__global__ void printThreadIndex(int *d_A, const int nx, const int ny)
{
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    if (ix >= nx || iy >= ny) return;
    unsigned int idx = iy * nx + ix;
    printf("thread_id (%d, %d), block_id(%d, %d), coordinate(%d, %d), "
    "global index %d value %d\n", threadIdx.x, threadIdx.y,
    blockIdx.x, blockIdx.y, ix, iy, idx, d_A[idx]);
}
// CPU reference: C = A + B element-wise over an ny-by-nx row-major matrix.
void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny)
{
    const int total = nx * ny;
    for (int k = 0; k < total; ++k) {
        C[k] = A[k] + B[k];
    }
}
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double getCpuSec()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1e-6;
}
// GPU kernel: C = A + B over an x-by-y row-major matrix, one thread per element.
// Guarded so grids that overshoot the matrix do not read/write out of bounds
// (the original had no bounds check and relied on exact grid division).
__global__ void sumMatrixOnDevice2D(float *A, float *B, float *C, const int x, const int y)
{
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
    if (ix < (unsigned int)x && iy < (unsigned int)y)
    {
        unsigned int idx = iy * x + ix;
        C[idx] = A[idx] + B[idx];
    }
}
// Host driver: fills two random 1024x1024 matrices, sums them on the CPU and
// on the GPU (2D grid of 32x16 blocks), times both, and prints corner elements
// of each result for a quick visual check.
int main(void)
{
    // get device info
    int dev = 0;
    hipDeviceProp_t deviceProp;
    CHECK(hipGetDeviceProperties(&deviceProp, dev));
    printf("using device %d : %s\n", dev, deviceProp.name);
    CHECK(hipSetDevice(dev));
    // set matrix dimension
    int nx = 1024;
    int ny = 1024;
    int nxy = nx * ny;
    int nBytes = nxy * sizeof(float);
    // malloc host memory
    float *h_A, *h_B, *h_C, *res;
    h_A = (float*)malloc(nBytes);
    h_B = (float*)malloc(nBytes);
    h_C = (float*)malloc(nBytes);
    res = (float*)malloc(nBytes);
    // init host matrices
    initHostMatrix(h_A, nxy);
    initHostMatrix(h_B, nxy);
    memset(h_C, 0, nBytes);
    memset(res, 0, nBytes);
    // CPU reference and timing
    double start = getCpuSec();
    sumMatrixOnHost(h_A, h_B, h_C, nx, ny);
    printf("CPU: %lf\n", getCpuSec() - start);
    // device allocations and uploads are now error-checked (they were not)
    float *d_A, *d_B, *d_C;
    CHECK(hipMalloc((float **)&d_A, nBytes));
    CHECK(hipMalloc((float **)&d_B, nBytes));
    CHECK(hipMalloc((float **)&d_C, nBytes));
    CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
    CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));
    // set up execution configuration (ceil-div so any nx/ny is covered)
    int dimx = 32;
    int dimy = 16;
    dim3 block(dimx, dimy);
    dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
    printf("block: (%d, %d), grid: (%d, %d)\n", block.x, block.y, grid.x, grid.y);
    start = getCpuSec();
    hipLaunchKernelGGL(( sumMatrixOnDevice2D), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nx, ny);
    // synchronize BEFORE stopping the timer: the launch is asynchronous, so the
    // original measured only launch latency, not kernel execution time
    CHECK(hipDeviceSynchronize());
    printf("GPU: %lf\n", getCpuSec() - start);
    CHECK(hipMemcpy(res, d_C, nBytes, hipMemcpyDeviceToHost));
    printf("%f, %f\n%f, %f\n", h_C[0], res[0], h_C[nxy - 1], res[nxy - 1]);
    // free device and host memory
    CHECK(hipFree(d_A));
    CHECK(hipFree(d_B));
    CHECK(hipFree(d_C));
    free(h_A);
    free(h_B);
    free(h_C);
    free(res);
    // reset device
    hipDeviceReset();
    return 0;
}
| 87c16f1d7227f91314abc047e0fbfe23bbe64a73.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#define CHECK(call)\
{\
const cudaError_t error = call;\
if (error != cudaSuccess)\
{\
printf("Error: %s:%d, ", __FILE__, __LINE__);\
printf("code:%d, reason: %s", error, cudaGetErrorString(error));\
exit(-10 * error);\
}\
}\
// Fills h_A with nxy pseudo-random values in [0, 25.5], seeding the C
// PRNG from the current time so each run sees different data.
void initHostMatrix(float *h_A, int nxy)
{
    time_t t;
    srand((unsigned int) time(&t));
    int i = 0;
    while (i < nxy) {
        // Keep only the low byte of rand() (0..255), then scale to 0..25.5.
        h_A[i] = (float) (rand() & 0xff) / 10.0f;
        ++i;
    }
}
// Prints an ny-by-nx row-major integer matrix, one tab-separated row per
// line.  Debug helper only.  NOTE(review): takes int* while the data in
// this program is float -- usable only on integer matrices; confirm.
void printMatrix(int *h_A, int nx, int ny)
{
    for (int i = 0; i < ny; ++i) {
        for (int j = 0; j < nx; ++j) {
            printf("%d\t", h_A[i * nx + j]);
        }
        printf("\n");
    }
}
// Debug kernel: each thread prints its thread/block ids, its (ix, iy)
// matrix coordinate, the flattened global index, and the value there.
// NOTE(review): no bounds guard -- only safe when the grid exactly
// covers the nx-by-ny matrix; confirm before re-enabling the call site.
__global__ void printThreadIndex(int *d_A, const int nx, const int ny)
{
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int idx = iy * nx + ix;
    printf("thread_id (%d, %d), block_id(%d, %d), coordinate(%d, %d), "
           "global index %d value %d\n", threadIdx.x, threadIdx.y,
           blockIdx.x, blockIdx.y, ix, iy, idx, d_A[idx]);
}
// CPU reference: element-wise C = A + B over an ny-by-nx row-major matrix.
void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny)
{
    // Row-major storage is contiguous, so the 2D sum collapses into a
    // single linear pass over all nx*ny elements.
    const int total = nx * ny;
    for (int idx = 0; idx < total; ++idx) {
        C[idx] = A[idx] + B[idx];
    }
}
// Wall-clock time in seconds (microsecond resolution, via gettimeofday);
// used to time both the CPU and GPU paths in main().
double getCpuSec()
{
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec * 1e-6);
}
// Element-wise matrix sum C = A + B on the device; one thread per
// element, launched on a 2D grid of 2D blocks.  x = matrix width (nx),
// y = matrix height (ny), both in elements.
__global__ void sumMatrixOnDevice2D(float *A, float *B, float *C, const int x, const int y)
{
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
    // Guard out-of-range threads: the host rounds the grid up with a
    // ceiling division, so without this check the tail blocks would
    // read/write past the matrix whenever a dimension is not a multiple
    // of the block size.
    if (ix < (unsigned int)x && iy < (unsigned int)y) {
        unsigned int idx = iy * x + ix;
        C[idx] = A[idx] + B[idx];
    }
}
// Host driver: sums two 1024x1024 matrices on the CPU and the GPU,
// times both paths, and spot-checks that the results agree.
int main(void)
{
    // Select device 0 and report its name.
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("using device %d : %s\n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));
    // Matrix dimensions.
    int nx = 1024;
    int ny = 1024;
    int nxy = nx * ny;
    int nBytes = nxy * sizeof(float);
    // Host buffers: inputs h_A/h_B, CPU result h_C, GPU result res.
    float *h_A, *h_B, *h_C, *res;
    h_A = (float*)malloc(nBytes);
    h_B = (float*)malloc(nBytes);
    h_C = (float*)malloc(nBytes);
    res = (float*)malloc(nBytes);
    initHostMatrix(h_A, nxy);
    initHostMatrix(h_B, nxy);
    memset(h_C, 0, nBytes);
    memset(res, 0, nBytes);
    // Time the CPU reference sum.
    double start = getCpuSec();
    sumMatrixOnHost(h_A, h_B, h_C, nx, ny);
    printf("CPU: %lf\n", getCpuSec() - start);
    // Device buffers and uploads (now error-checked like the other calls).
    float *d_A, *d_B, *d_C;
    CHECK(cudaMalloc((float **)&d_A, nBytes));
    CHECK(cudaMalloc((float **)&d_B, nBytes));
    CHECK(cudaMalloc((float **)&d_C, nBytes));
    CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
    // Launch configuration: 32x16 blocks, grid rounded up (ceil-div).
    int dimx = 32;
    int dimy = 16;
    dim3 block(dimx, dimy);
    dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
    printf("block: (%d, %d), grid: (%d, %d)\n", block.x, block.y, grid.x, grid.y);
    start = getCpuSec();
    sumMatrixOnDevice2D<<<grid, block>>>(d_A, d_B, d_C, nx, ny);
    CHECK(cudaGetLastError());      // catch bad launch configurations
    // BUG FIX: kernel launches are asynchronous -- synchronize BEFORE
    // stopping the timer, otherwise only the launch overhead is measured.
    CHECK(cudaDeviceSynchronize());
    printf("GPU: %lf\n", getCpuSec() - start);
    CHECK(cudaMemcpy(res, d_C, nBytes, cudaMemcpyDeviceToHost));
    // Spot-check first/last elements of the CPU vs GPU results.
    printf("%f, %f\n%f, %f\n", h_C[0], res[0], h_C[nxy - 1], res[nxy - 1]);
    // Release device and host memory.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
    free(res);
    cudaDeviceReset();
    return 0;
}
|
6da0636815b432151fb5b3fd735f07ffac6db783.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zlascl.cu normal z -> s, Fri Jan 30 19:00:09 2015
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
// Scale the full m-by-n matrix A by 'mul'.
// Grid: ceil(m/NB) blocks of NB threads; each thread owns one row (ind)
// and multiplies every column entry, stepping lda between columns.
__global__ void
slascl_full(int m, int n, float mul, float* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;  // global row index
    A += ind;
    if (ind < m) {
        for(int j=0; j < n; j++ )
            A[j*lda] *= mul;
    }
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
// Scale the lower-triangular part of A by 'mul': row 'ind' is scaled
// only over columns 0..min(ind, n-1), i.e. up to and including the
// diagonal.  One thread per row, NB rows per block.
__global__ void
slascl_lower(int m, int n, float mul, float* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;
    int break_d = (ind < n) ? ind : n-1;   // clamp diagonal column to matrix width
    A += ind;
    if (ind < m) {
        for(int j=0; j <= break_d; j++ )
            A[j*lda] *= mul;
    }
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
// Scale the upper-triangular part of A by 'mul': row 'ind' is scaled
// from the last column down to the diagonal (columns n-1 .. ind).
// One thread per row, NB rows per block.
__global__ void
slascl_upper(int m, int n, float mul, float* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;
    A += ind;
    if (ind < m) {
        for(int j=n-1; j >= ind; j--)
            A[j*lda] *= mul;
    }
}
/**
Purpose
-------
SLASCL multiplies the M by N real matrix A by the real scalar
CTO/CFROM. This is done without over/underflow as long as the final
result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that
A may be full, upper triangular, lower triangular.
Arguments
---------
\param[in]
type magma_type_t
TYPE indices the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
\param[in]
kl INTEGER
Unused, for LAPACK compatability.
\param[in]
ku KU is INTEGER
Unused, for LAPACK compatability.
\param[in]
cfrom REAL
\param[in]
cto REAL
\n
The matrix A is multiplied by CTO/CFROM. A(I,J) is computed
without over/underflow if the final result CTO*A(I,J)/CFROM
can be represented without over/underflow.
CFROM must be nonzero. CFROM and CTO must not be NAN.
\param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
\param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
\param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix to be multiplied by CTO/CFROM. See TYPE for the
storage type.
\param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
\param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
// Scales the m-by-n matrix dA by cto/cfrom on 'queue', guarding against
// over/underflow exactly as LAPACK slascl does: the ratio is applied as
// a sequence of safe partial factors (smlnum or bignum), one kernel
// launch per factor, until the full ratio has been absorbed.
extern "C" void
magmablas_slascl_q(
    magma_type_t type, magma_int_t kl, magma_int_t ku,
    float cfrom, float cto,
    magma_int_t m, magma_int_t n,
    magmaFloat_ptr dA, magma_int_t ldda,
    magma_queue_t queue,
    magma_int_t *info )
{
    // Argument validation (kl/ku are accepted for LAPACK compatibility
    // but unused).
    *info = 0;
    if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
        *info = -1;
    else if ( cfrom == 0 || isnan(cfrom) )
        *info = -4;
    else if ( isnan(cto) )
        *info = -5;
    else if ( m < 0 )
        *info = -6;
    else if ( n < 0 )
        *info = -3;   // NOTE(review): n is argument 7; -3 looks inconsistent with the neighboring codes -- confirm against LAPACK slascl
    else if ( ldda < max(1,m) )
        *info = -7;
    if (*info != 0) {
        magma_xerbla( __func__, -(*info) );
        return;  //info;
    }
    // One thread per row, NB rows per block.
    dim3 grid( (m + NB - 1)/NB );
    dim3 threads( NB );
    float smlnum, bignum, cfromc, ctoc, cto1, cfrom1, mul;
    magma_int_t done = false;
    // Uses over/underflow procedure from LAPACK slascl
    // Get machine parameters
    smlnum = lapackf77_slamch("s");
    bignum = 1 / smlnum;
    cfromc = cfrom;
    ctoc = cto;
    int cnt = 0;
    while( ! done ) {
        // Pick the largest factor that cannot over/underflow; repeat
        // until the remaining ratio can be applied in one step.
        cfrom1 = cfromc*smlnum;
        if( cfrom1 == cfromc ) {
            // cfromc is an inf.  Multiply by a correctly signed zero for
            // finite ctoc, or a nan if ctoc is infinite.
            mul = ctoc / cfromc;
            done = true;
            cto1 = ctoc;
        }
        else {
            cto1 = ctoc / bignum;
            if( cto1 == ctoc ) {
                // ctoc is either 0 or an inf.  In both cases, ctoc itself
                // serves as the correct multiplication factor.
                mul = ctoc;
                done = true;
                cfromc = 1;
            }
            else if( fabs(cfrom1) > fabs(ctoc) && ctoc != 0 ) {
                mul = smlnum;
                done = false;
                cfromc = cfrom1;
            }
            else if( fabs(cto1) > fabs(cfromc) ) {
                mul = bignum;
                done = false;
                ctoc = cto1;
            }
            else {
                mul = ctoc / cfromc;
                done = true;
            }
        }
        // Apply this partial factor to the selected storage shape.
        if (type == MagmaLower) {
            hipLaunchKernelGGL(( slascl_lower) , dim3(grid), dim3(threads), 0, queue , m, n, mul, dA, ldda);
        }
        else if (type == MagmaUpper) {
            hipLaunchKernelGGL(( slascl_upper) , dim3(grid), dim3(threads), 0, queue , m, n, mul, dA, ldda);
        }
        else if (type == MagmaFull) {
            hipLaunchKernelGGL(( slascl_full) , dim3(grid), dim3(threads), 0, queue , m, n, mul, dA, ldda);
        }
        cnt += 1;
    }
}
/**
@see magmablas_slascl_q
@ingroup magma_saux2
********************************************************************/
// Convenience wrapper: same as magmablas_slascl_q but runs on the
// global magma_stream.
extern "C" void
magmablas_slascl(
    magma_type_t type, magma_int_t kl, magma_int_t ku,
    float cfrom, float cto,
    magma_int_t m, magma_int_t n,
    magmaFloat_ptr dA, magma_int_t ldda,
    magma_int_t *info )
{
    magmablas_slascl_q( type, kl, ku, cfrom, cto, m, n, dA, ldda, magma_stream, info );
}
| 6da0636815b432151fb5b3fd735f07ffac6db783.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zlascl.cu normal z -> s, Fri Jan 30 19:00:09 2015
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
// Scale the full m-by-n matrix A by 'mul'.
// Grid: ceil(m/NB) blocks of NB threads; each thread owns one row (ind)
// and multiplies every column entry, stepping lda between columns.
__global__ void
slascl_full(int m, int n, float mul, float* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;  // global row index
    A += ind;
    if (ind < m) {
        for(int j=0; j < n; j++ )
            A[j*lda] *= mul;
    }
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
// Scale the lower-triangular part of A by 'mul': row 'ind' is scaled
// only over columns 0..min(ind, n-1), i.e. up to and including the
// diagonal.  One thread per row, NB rows per block.
__global__ void
slascl_lower(int m, int n, float mul, float* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;
    int break_d = (ind < n) ? ind : n-1;   // clamp diagonal column to matrix width
    A += ind;
    if (ind < m) {
        for(int j=0; j <= break_d; j++ )
            A[j*lda] *= mul;
    }
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
// Scale the upper-triangular part of A by 'mul': row 'ind' is scaled
// from the last column down to the diagonal (columns n-1 .. ind).
// One thread per row, NB rows per block.
__global__ void
slascl_upper(int m, int n, float mul, float* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;
    A += ind;
    if (ind < m) {
        for(int j=n-1; j >= ind; j--)
            A[j*lda] *= mul;
    }
}
/**
Purpose
-------
SLASCL multiplies the M by N real matrix A by the real scalar
CTO/CFROM. This is done without over/underflow as long as the final
result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that
A may be full, upper triangular, lower triangular.
Arguments
---------
\param[in]
type magma_type_t
TYPE indices the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
\param[in]
kl INTEGER
Unused, for LAPACK compatability.
\param[in]
ku KU is INTEGER
Unused, for LAPACK compatability.
\param[in]
cfrom REAL
\param[in]
cto REAL
\n
The matrix A is multiplied by CTO/CFROM. A(I,J) is computed
without over/underflow if the final result CTO*A(I,J)/CFROM
can be represented without over/underflow.
CFROM must be nonzero. CFROM and CTO must not be NAN.
\param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
\param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
\param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix to be multiplied by CTO/CFROM. See TYPE for the
storage type.
\param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
\param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slascl_q(
magma_type_t type, magma_int_t kl, magma_int_t ku,
float cfrom, float cto,
magma_int_t m, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( cfrom == 0 || isnan(cfrom) )
*info = -4;
else if ( isnan(cto) )
*info = -5;
else if ( m < 0 )
*info = -6;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -7;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( (m + NB - 1)/NB );
dim3 threads( NB );
float smlnum, bignum, cfromc, ctoc, cto1, cfrom1, mul;
magma_int_t done = false;
// Uses over/underflow procedure from LAPACK slascl
// Get machine parameters
smlnum = lapackf77_slamch("s");
bignum = 1 / smlnum;
cfromc = cfrom;
ctoc = cto;
int cnt = 0;
while( ! done ) {
cfrom1 = cfromc*smlnum;
if( cfrom1 == cfromc ) {
// cfromc is an inf. Multiply by a correctly signed zero for
// finite ctoc, or a nan if ctoc is infinite.
mul = ctoc / cfromc;
done = true;
cto1 = ctoc;
}
else {
cto1 = ctoc / bignum;
if( cto1 == ctoc ) {
// ctoc is either 0 or an inf. In both cases, ctoc itself
// serves as the correct multiplication factor.
mul = ctoc;
done = true;
cfromc = 1;
}
else if( fabs(cfrom1) > fabs(ctoc) && ctoc != 0 ) {
mul = smlnum;
done = false;
cfromc = cfrom1;
}
else if( fabs(cto1) > fabs(cfromc) ) {
mul = bignum;
done = false;
ctoc = cto1;
}
else {
mul = ctoc / cfromc;
done = true;
}
}
if (type == MagmaLower) {
slascl_lower <<< grid, threads, 0, queue >>> (m, n, mul, dA, ldda);
}
else if (type == MagmaUpper) {
slascl_upper <<< grid, threads, 0, queue >>> (m, n, mul, dA, ldda);
}
else if (type == MagmaFull) {
slascl_full <<< grid, threads, 0, queue >>> (m, n, mul, dA, ldda);
}
cnt += 1;
}
}
/**
@see magmablas_slascl_q
@ingroup magma_saux2
********************************************************************/
// Convenience wrapper: same as magmablas_slascl_q but runs on the
// global magma_stream.
extern "C" void
magmablas_slascl(
    magma_type_t type, magma_int_t kl, magma_int_t ku,
    float cfrom, float cto,
    magma_int_t m, magma_int_t n,
    magmaFloat_ptr dA, magma_int_t ldda,
    magma_int_t *info )
{
    magmablas_slascl_q( type, kl, ku, cfrom, cto, m, n, dA, ldda, magma_stream, info );
}
|
7f5ffb79fba5292ac63bd50f78ba89c1905a300e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <fcntl.h>
#include <float.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#define BLOCK_X 16
#define BLOCK_Y 16
#define PI 3.1415926535897932
const int threads_per_block = 512;
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
// Wall-clock time in microseconds since the epoch (gettimeofday based).
long long get_time() {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return (tv.tv_sec * 1000000) + tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
// Seconds between two microsecond timestamps produced by get_time().
double elapsed_time(long long start_time, long long end_time) {
  const double usec_per_sec = 1000.0 * 1000.0;
  return (double)(end_time - start_time) / usec_per_sec;
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
// Aborts the process with a diagnostic message if a runtime API call
// returned anything other than success.
void check_error(hipError_t e) {
  if (e != hipSuccess) {
    printf("\nCUDA error: %s\n", hipGetErrorString(e));
    exit(1);
  }
}
// Debug helper: copies a device array of 'size' doubles to the host and
// prints every element.  NOTE(review): the memcpy result is unchecked,
// so garbage is printed if the copy fails -- consider check_error().
void cuda_print_double_array(double *array_GPU, size_t size) {
  // allocate temporary array for printing
  double *mem = (double *)malloc(sizeof(double) * size);
  // transfer data from device
  hipMemcpy(mem, array_GPU, sizeof(double) * size, hipMemcpyDeviceToHost);
  printf("PRINTING ARRAY VALUES\n");
  // print values in memory
  for (size_t i = 0; i < size; ++i) {
    printf("[%lu]:%0.6f\n", i, mem[i]);
  }
  printf("FINISHED PRINTING ARRAY VALUES\n");
  // clean up memory
  free(mem);
  mem = NULL;
}
/********************************
* CALC LIKELIHOOD SUM
* DETERMINES THE LIKELIHOOD SUM BASED ON THE FORMULA: SUM( (IK[IND] - 100)^2 -
*(IK[IND] - 228)^2)/ 100 param 1 I 3D matrix param 2 current ind array param 3
*length of ind array returns a double representing the sum
********************************/
// Likelihood contribution of particle 'index': for each of its numOnes
// sampled pixel indices, ((I-100)^2 - (I-228)^2) / 50, summed.  100 and
// 228 are the background/foreground intensities set by videoSequence().
__device__ double calcLikelihoodSum(unsigned char *I, int *ind, int numOnes,
                                    int index) {
  double likelihoodSum = 0.0;
  int x;
  for (x = 0; x < numOnes; x++)
    likelihoodSum += (pow((double)(I[ind[index * numOnes + x]] - 100), 2) -
                      pow((double)(I[ind[index * numOnes + x]] - 228), 2)) /
                     50.0;
  return likelihoodSum;
}
/****************************
CDF CALCULATE
CALCULATES CDF
param1 CDF
param2 weights
param3 Nparticles
*****************************/
// In-place prefix sum of weights -> cumulative distribution function.
// Sequential; called from a single thread (see normalize_weights_kernel).
__device__ void cdfCalc(double *CDF, double *weights, int Nparticles) {
  int x;
  CDF[0] = weights[0];
  for (x = 1; x < Nparticles; x++) {
    CDF[x] = weights[x] + CDF[x - 1];
  }
}
/*****************************
* RANDU
* GENERATES A UNIFORM DISTRIBUTION
* returns a double representing a randomily generated number from a uniform
*distribution with range [0, 1)
******************************/
// Device-side uniform RNG: advances the per-particle LCG state
// seed[index] in place and returns a value in [0, 1).  Local M/A/C
// shadow the host globals with the same GCC LCG constants.
__device__ double d_randu(int *seed, int index) {
  int M = INT_MAX;
  int A = 1103515245;
  int C = 12345;
  int num = A * seed[index] + C;
  seed[index] = num % M;
  // num can overflow and go negative; fabs keeps the result in [0, 1).
  // NOTE(review): signed int overflow is UB in C++ -- this matches the
  // host randu(), but confirm it is acceptable for this code base.
  return fabs(seed[index] / ((double)M));
}
/**
 * Generates a uniformly distributed random number using the provided seed and
 * GCC's settings for the Linear Congruential Generator (LCG)
 * @see http://en.wikipedia.org/wiki/Linear_congruential_generator
 * @note This function is thread-safe
 * @param seed The seed array
 * @param index The specific index of the seed to be advanced
 * @return a uniformly distributed number [0, 1)
 */
double randu(int *seed, int index) {
  int num = A * seed[index] + C;
  seed[index] = num % M;
  return fabs(seed[index] / ((double)M));
}
/**
* Generates a normally distributed random number using the Box-Muller
* transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller
* algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing
* value for normal random distribution
*/
double randn(int *seed, int index) {
  /* Box-Muller: two uniform draws -> one standard-normal sample */
  double u = randu(seed, index);
  double v = randu(seed, index);
  double cosine = cos(2 * PI * v);
  double rt = -2 * log(u);   // NOTE(review): log(0) if randu ever returns exactly 0 -- confirm
  return sqrt(rt) * cosine;
}
// Host-side duplicate of randn() kept for verification; uses a local pi
// constant instead of the PI macro but is otherwise identical.
double test_randn(int *seed, int index) {
  // Box-Muller algorithm
  double pi = 3.14159265358979323846;
  double u = randu(seed, index);
  double v = randu(seed, index);
  double cosine = cos(2 * pi * v);
  double rt = -2 * log(u);
  return sqrt(rt) * cosine;
}
// Device-side Box-Muller normal sample; mirrors host randn() but draws
// its uniforms from the per-particle d_randu state.
__device__ double d_randn(int *seed, int index) {
  // Box-Muller algorithm
  double pi = 3.14159265358979323846;
  double u = d_randu(seed, index);
  double v = d_randu(seed, index);
  double cosine = cos(2 * pi * v);
  double rt = -2 * log(u);
  return sqrt(rt) * cosine;
}
/****************************
UPDATE WEIGHTS
UPDATES WEIGHTS
param1 weights
param2 likelihood
param3 Nparcitles
****************************/
// Multiplies each weight by exp(likelihood) and returns the new total
// weight so the caller can normalize.  Sequential over all particles.
__device__ double updateWeights(double *weights, double *likelihood,
                                int Nparticles) {
  int x;
  double sum = 0;
  for (x = 0; x < Nparticles; x++) {
    weights[x] = weights[x] * exp(likelihood[x]);
    sum += weights[x];
  }
  return sum;
}
// Binary search over the non-decreasing CDF within [beginIndex,
// endIndex] for the first entry >= 'value'.  Returns its index, or -1
// when the range is empty or the search falls through.
__device__ int findIndexBin(double *CDF, int beginIndex, int endIndex,
                            double value) {
  if (endIndex < beginIndex)
    return -1;
  int middleIndex;
  while (endIndex > beginIndex) {
    middleIndex = beginIndex + ((endIndex - beginIndex) / 2);
    if (CDF[middleIndex] >= value) {
      if (middleIndex == 0)
        return middleIndex;
      else if (CDF[middleIndex - 1] < value)
        return middleIndex;   // predecessor is below: this is the first match
      else if (CDF[middleIndex - 1] == value) {
        // walk left over a run of equal values to its first occurrence
        while (CDF[middleIndex] == value && middleIndex >= 0)
          middleIndex--;
        middleIndex++;
        return middleIndex;
      }
    }
    if (CDF[middleIndex] > value)
      endIndex = middleIndex - 1;
    else
      beginIndex = middleIndex + 1;
  }
  return -1;
}
/** added this function. was missing in original double version.
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value
* > input value
*/
// Rounds a double to the nearest integer value (half rounds up).
// BUG FIX: the original returned `newValue++` -- post-increment yields
// the OLD value, so inputs with mantissa >= .5 were truncated instead of
// rounded up.  Now returns newValue + 1, matching host roundDouble().
__device__ double dev_round_double(double value) {
  int newValue = (int)(value);
  if (value - newValue < .5)
    return newValue;
  else
    return newValue + 1;
}
/*****************************
* CUDA Find Index Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: weights
* param8: Nparticles
*****************************/
// Resampling step: each thread (one per particle) linearly scans the
// CDF for the first entry >= its stratified sample u[i] and copies that
// particle's position into (xj, yj).  O(Nparticles) work per thread.
__global__ void find_index_kernel(double *arrayX, double *arrayY, double *CDF,
                                  double *u, double *xj, double *yj,
                                  double *weights, int Nparticles) {
  int block_id = blockIdx.x;
  int i = blockDim.x * block_id + threadIdx.x;
  if (i < Nparticles) {
    int index = -1;
    int x;
    for (x = 0; x < Nparticles; x++) {
      if (CDF[x] >= u[i]) {
        index = x;
        break;
      }
    }
    if (index == -1) {
      index = Nparticles - 1;   // u[i] beyond the last CDF entry
    }
    xj[i] = arrayX[index];
    yj[i] = arrayY[index];
    // weights[i] = 1 / ((double) (Nparticles)); //moved this code to the
    // beginning of likelihood kernel
  }
  __syncthreads();   // no observable effect here (kernel ends); kept from original
}
// Normalizes all weights by the total in partial_sums[0]; then the
// global thread 0 builds the CDF sequentially and draws the base sample
// u[0]; finally each particle gets its stratified sample u1 + i/N.
// NOTE(review): __syncthreads() only synchronizes within a block, so
// when gridDim.x > 1 other blocks may read u[0]/CDF before thread 0 has
// written them -- confirm whether a grid-wide sync is needed here.
__global__ void normalize_weights_kernel(double *weights, int Nparticles,
                                         double *partial_sums, double *CDF,
                                         double *u, int *seed) {
  int block_id = blockIdx.x;
  int i = blockDim.x * block_id + threadIdx.x;
  __shared__ double u1, sumWeights;
  if (0 == threadIdx.x)
    sumWeights = partial_sums[0];   // per-block copy of the global sum
  __syncthreads();
  if (i < Nparticles) {
    weights[i] = weights[i] / sumWeights;
  }
  __syncthreads();
  if (i == 0) {
    cdfCalc(CDF, weights, Nparticles);
    u[0] =
        (1 / ((double)(Nparticles))) *
        d_randu(
            seed,
            i); // do this to allow all threads in all blocks to use the same u1
  }
  __syncthreads();
  if (0 == threadIdx.x)
    u1 = u[0];
  __syncthreads();
  if (i < Nparticles) {
    u[i] = u1 + i / ((double)(Nparticles));
  }
}
// Final reduction: the single global thread 0 adds all per-block
// partial sums into partial_sums[0].  Intentionally serial.
__global__ void sum_kernel(double *partial_sums, int Nparticles) {
  int block_id = blockIdx.x;
  int i = blockDim.x * block_id + threadIdx.x;
  if (i == 0) {
    int x;
    double sum = 0.0;
    int num_blocks = ceil((double)Nparticles / (double)threads_per_block);
    for (x = 0; x < num_blocks; x++) {
      sum += partial_sums[x];
    }
    partial_sums[0] = sum;
  }
}
/*****************************
* CUDA Likelihood Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param2.5: CDF
* param3: ind
* param4: objxy
* param5: likelihood
* param6: I
* param6.5: u
* param6.75: weights
* param7: Nparticles
* param8: countOnes
* param9: max_size
* param10: k
* param11: IszY
* param12: Nfr
*****************************/
// One thread per particle: (1) adopt the resampled position and reset
// the weight, (2) propagate with Gaussian drift+noise, (3) sample the
// video at the particle's countOnes disk offsets and fold the
// likelihood into the weight, (4) block-reduce the weights into
// partial_sums.  The shared 'buffer' holds 512 entries, matching
// threads_per_block; the tree reduction assumes a power-of-two block.
__global__ void likelihood_kernel(double *arrayX, double *arrayY, double *xj,
                                  double *yj, double *CDF, int *ind, int *objxy,
                                  double *likelihood, unsigned char *I,
                                  double *u, double *weights, int Nparticles,
                                  int countOnes, int max_size, int k, int IszY,
                                  int Nfr, int *seed, double *partial_sums) {
  int block_id = blockIdx.x;
  int i = blockDim.x * block_id + threadIdx.x;
  int y;
  int indX, indY;
  __shared__ double buffer[512];
  if (i < Nparticles) {
    arrayX[i] = xj[i];
    arrayY[i] = yj[i];
    weights[i] =
        1 / ((double)(Nparticles)); // Donnie - moved this line from end of
                                    // find_index_kernel to prevent all weights
                                    // from being reset before calculating
                                    // position on final iteration.
    // Motion model: constant drift (+1, -2) plus Gaussian noise.
    arrayX[i] = arrayX[i] + 1.0 + 5.0 * d_randn(seed, i);
    arrayY[i] = arrayY[i] - 2.0 + 2.0 * d_randn(seed, i);
  }
  __syncthreads();
  if (i < Nparticles) {
    for (y = 0; y < countOnes; y++) {
      // added dev_round_double() to be consistent with roundDouble
      indX = dev_round_double(arrayX[i]) + objxy[y * 2 + 1];
      indY = dev_round_double(arrayY[i]) + objxy[y * 2];
      // Out-of-range sample indices are redirected to 0 rather than OOB.
      ind[i * countOnes + y] = abs(indX * IszY * Nfr + indY * Nfr + k);
      if (ind[i * countOnes + y] >= max_size)
        ind[i * countOnes + y] = 0;
    }
    likelihood[i] = calcLikelihoodSum(I, ind, countOnes, i);
    likelihood[i] = likelihood[i] / countOnes;
    weights[i] =
        weights[i] * exp(likelihood[i]); // Donnie Newell - added the missing
                                         // exponential function call
  }
  // Pre-zero the whole buffer so inactive tail threads contribute 0.
  buffer[threadIdx.x] = 0.0;
  __syncthreads();
  if (i < Nparticles) {
    buffer[threadIdx.x] = weights[i];
  }
  __syncthreads();
  // Tree reduction over the block; the pre-zeroed buffer makes partial
  // tail blocks safe despite the original's comment to the contrary.
  for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (threadIdx.x < s) {
      buffer[threadIdx.x] += buffer[threadIdx.x + s];
    }
    __syncthreads();
  }
  if (threadIdx.x == 0) {
    partial_sums[blockIdx.x] = buffer[0];
  }
  __syncthreads();
}
/**
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value
* > input value
*/
/**
 * Rounds a double to the nearest integer value (half rounds up).
 * BUG FIX: the original returned `newValue++` -- post-increment yields
 * the OLD value, so inputs with mantissa >= .5 were truncated instead
 * of rounded up.  Now returns newValue + 1.
 */
double roundDouble(double value) {
  int newValue = (int)(value);
  if (value - newValue < .5)
    return newValue;
  else
    return newValue + 1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the
* testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
/**
 * Replaces every occurrence of testValue in the 3D array with newValue.
 * @param testValue The value to be replaced
 * @param newValue The value to replace testValue with
 * @param array3D The image volume
 * @param dimX The x dimension of the frame
 * @param dimY The y dimension of the frame
 * @param dimZ The number of frames
 */
void setIf(int testValue, int newValue, unsigned char *array3D, int *dimX,
           int *dimY, int *dimZ) {
  // The volume is stored contiguously, so one linear sweep visits
  // exactly the same cells as the original x/y/z triple loop.
  int total = (*dimX) * (*dimY) * (*dimZ);
  for (int i = 0; i < total; i++) {
    if (array3D[i] == testValue)
      array3D[i] = newValue;
  }
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal
* distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
// Adds Gaussian noise (scaled by 5, via host randn with seed slot 0) to
// every voxel, truncating through the unsigned char cast.
void addNoise(unsigned char *array3D, int *dimX, int *dimY, int *dimZ,
              int *seed) {
  int x, y, z;
  for (x = 0; x < *dimX; x++) {
    for (y = 0; y < *dimY; y++) {
      for (z = 0; z < *dimZ; z++) {
        array3D[x * *dimY * *dimZ + y * *dimZ + z] =
            array3D[x * *dimY * *dimZ + y * *dimZ + z] +
            (unsigned char)(5 * randn(seed, 0));
      }
    }
  }
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
/**
 * Fills a diameter x diameter (diameter = 2*radius - 1) matrix with a
 * binary disk mask: 1 inside the radius, 0 outside.
 * BUG FIX: the original only wrote the 1s, leaving the outside cells as
 * whatever malloc returned; the caller (particleFilter) then counted and
 * read those uninitialized cells.  Every cell is now explicitly written.
 * @param disk The pointer to the disk to be made
 * @param radius The radius of the disk to be made
 */
void strelDisk(int *disk, int radius) {
  int diameter = radius * 2 - 1;
  int x, y;
  for (x = 0; x < diameter; x++) {
    for (y = 0; y < diameter; y++) {
      double distance = sqrt(pow((double)(x - radius + 1), 2) +
                             pow((double)(y - radius + 1), 2));
      disk[x * diameter + y] = (distance < radius) ? 1 : 0;
    }
  }
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
// Stamps a disk of radius 'error' around (posX, posY) into frame posZ:
// every in-bounds voxel strictly closer than 'error' is set to 1.
void dilate_matrix(unsigned char *matrix, int posX, int posY, int posZ,
                   int dimX, int dimY, int dimZ, int error) {
  // Clamp the scan window to the frame bounds.
  int startX = posX - error;
  while (startX < 0)
    startX++;
  int startY = posY - error;
  while (startY < 0)
    startY++;
  int endX = posX + error;
  while (endX > dimX)
    endX--;
  int endY = posY + error;
  while (endY > dimY)
    endY--;
  int x, y;
  for (x = startX; x < endX; x++) {
    for (y = startY; y < endY; y++) {
      double distance =
          sqrt(pow((double)(x - posX), 2) + pow((double)(y - posY), 2));
      if (distance < error)
        matrix[x * dimY * dimZ + y * dimZ + posZ] = 1;
    }
  }
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
// Morphological dilation: for every 1-voxel in 'matrix', stamps a disk
// of radius 'error' into 'newMatrix'; the source matrix is untouched.
void imdilate_disk(unsigned char *matrix, int dimX, int dimY, int dimZ,
                   int error, unsigned char *newMatrix) {
  int x, y, z;
  for (z = 0; z < dimZ; z++) {
    for (x = 0; x < dimX; x++) {
      for (y = 0; y < dimY; y++) {
        if (matrix[x * dimY * dimZ + y * dimZ + z] == 1) {
          dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
        }
      }
    }
  }
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
/**
 * Records, for every 1 in the disk mask 'se', its offset from the disk
 * center into 'neighbors': even slot = column offset, odd slot = row
 * offset (numOnes is accepted for symmetry with the caller but unused,
 * as in the original).
 */
void getneighbors(int *se, int numOnes, int *neighbors, int radius) {
  int diameter = radius * 2 - 1;
  int center = radius - 1;
  int count = 0;
  for (int row = 0; row < diameter; row++) {
    for (int col = 0; col < diameter; col++) {
      if (se[row * diameter + col]) {
        neighbors[count * 2] = col - center;
        neighbors[count * 2 + 1] = row - center;
        count++;
      }
    }
  }
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity is known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
/**
 * Builds the synthetic test video in-place: a dilated disk target moving
 * linearly from the frame center, on background intensity 100 and
 * foreground 228, corrupted with zero-mean Gaussian noise.
 * BUG FIX: the dilation scratch buffer was malloc'd while only the 1s
 * were ever written into it, yet the whole buffer was copied back over
 * I -- every non-target voxel came from uninitialized memory.  calloc
 * zeroes it so those voxels become background via setIf(0, 100, ...).
 * @param I The video itself
 * @param IszX The x dimension of the video
 * @param IszY The y dimension of the video
 * @param Nfr The number of frames of the video
 * @param seed The seed array used for number generation
 */
void videoSequence(unsigned char *I, int IszX, int IszY, int Nfr, int *seed) {
  int k;
  int max_size = IszX * IszY * Nfr;
  /*get object centers*/
  int x0 = (int)roundDouble(IszY / 2.0);
  int y0 = (int)roundDouble(IszX / 2.0);
  I[x0 * IszY * Nfr + y0 * Nfr + 0] = 1;
  /*move point: +1 in x, -2 in y per frame; out-of-range wraps to 0*/
  int xk, yk, pos;
  for (k = 1; k < Nfr; k++) {
    xk = abs(x0 + (k - 1));
    yk = abs(y0 - 2 * (k - 1));
    pos = yk * IszY * Nfr + xk * Nfr + k;
    if (pos >= max_size)
      pos = 0;
    I[pos] = 1;
  }
  /*dilate matrix (zero-initialized scratch -- see header)*/
  unsigned char *newMatrix =
      (unsigned char *)calloc((size_t)IszX * IszY * Nfr, sizeof(unsigned char));
  imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
  int x, y;
  for (x = 0; x < IszX; x++) {
    for (y = 0; y < IszY; y++) {
      for (k = 0; k < Nfr; k++) {
        I[x * IszY * Nfr + y * Nfr + k] =
            newMatrix[x * IszY * Nfr + y * Nfr + k];
      }
    }
  }
  free(newMatrix);
  /*define background, add noise*/
  setIf(0, 100, I, &IszX, &IszY, &Nfr);
  setIf(1, 228, I, &IszX, &IszY, &Nfr);
  /*add noise*/
  addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Finds the first element in the CDF that is greater than or equal to the
* provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the
* last index
*/
/**
 * Sequentially scans the CDF for the first entry >= 'value'.
 * @return its index, or lengthCDF - 1 when no entry qualifies.
 */
int findIndex(double *CDF, int lengthCDF, double value) {
  for (int i = 0; i < lengthCDF; i++) {
    if (CDF[i] >= value)
      return i;
  }
  // value exceeds every CDF entry: fall back to the last index.
  return lengthCDF - 1;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In
* addition, it references a provided MATLAB function which takes the video, the
* objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(unsigned char *I, int IszX, int IszY, int Nfr, int *seed,
                    int Nparticles) {
  int max_size = IszX * IszY * Nfr;
  // original particle centroid
  double xe = roundDouble(IszY / 2.0);
  double ye = roundDouble(IszX / 2.0);
  // expected object locations, compared to center
  int radius = 5;
  int diameter = radius * 2 - 1;
  int *disk = (int *)malloc(diameter * diameter * sizeof(int));
  strelDisk(disk, radius);
  int countOnes = 0;
  int x, y;
  for (x = 0; x < diameter; x++) {
    for (y = 0; y < diameter; y++) {
      if (disk[x * diameter + y] == 1)
        countOnes++;
    }
  }
  int *objxy = (int *)malloc(countOnes * 2 * sizeof(int));
  getneighbors(disk, countOnes, objxy, radius);
  // initial weights are all equal (1/Nparticles)
  double *weights = (double *)malloc(sizeof(double) * Nparticles);
  for (x = 0; x < Nparticles; x++) {
    weights[x] = 1 / ((double)(Nparticles));
  }
  // initial likelihood to 0.0
  double *likelihood = (double *)malloc(sizeof(double) * Nparticles);
  double *arrayX = (double *)malloc(sizeof(double) * Nparticles);
  double *arrayY = (double *)malloc(sizeof(double) * Nparticles);
  double *xj = (double *)malloc(sizeof(double) * Nparticles);
  double *yj = (double *)malloc(sizeof(double) * Nparticles);
  double *CDF = (double *)malloc(sizeof(double) * Nparticles);
  // GPU copies of arrays
  double *arrayX_GPU;
  double *arrayY_GPU;
  double *xj_GPU;
  double *yj_GPU;
  double *CDF_GPU;
  double *likelihood_GPU;
  unsigned char *I_GPU;
  double *weights_GPU;
  int *objxy_GPU;
  int *ind = (int *)malloc(sizeof(int) * countOnes * Nparticles);
  int *ind_GPU;
  double *u = (double *)malloc(sizeof(double) * Nparticles);
  double *u_GPU;
  int *seed_GPU;
  double *partial_sums;
  // GPU memory allocation
  check_error(hipMalloc((void **)&arrayX_GPU, sizeof(double) * Nparticles));
  check_error(hipMalloc((void **)&arrayY_GPU, sizeof(double) * Nparticles));
  check_error(hipMalloc((void **)&xj_GPU, sizeof(double) * Nparticles));
  check_error(hipMalloc((void **)&yj_GPU, sizeof(double) * Nparticles));
  check_error(hipMalloc((void **)&CDF_GPU, sizeof(double) * Nparticles));
  check_error(hipMalloc((void **)&u_GPU, sizeof(double) * Nparticles));
  check_error(
      hipMalloc((void **)&likelihood_GPU, sizeof(double) * Nparticles));
  // set likelihood to zero
  check_error(
      hipMemset((void *)likelihood_GPU, 0, sizeof(double) * Nparticles));
  check_error(hipMalloc((void **)&weights_GPU, sizeof(double) * Nparticles));
  check_error(
      hipMalloc((void **)&I_GPU, sizeof(unsigned char) * IszX * IszY * Nfr));
  check_error(hipMalloc((void **)&objxy_GPU, sizeof(int) * 2 * countOnes));
  check_error(
      hipMalloc((void **)&ind_GPU, sizeof(int) * countOnes * Nparticles));
  check_error(hipMalloc((void **)&seed_GPU, sizeof(int) * Nparticles));
  check_error(hipMalloc((void **)&partial_sums, sizeof(double) * Nparticles));
  // Donnie - this loop is different because in this kernel, arrayX and arrayY
  // are set equal to xj before every iteration, so effectively, arrayX and
  // arrayY will be set to xe and ye before the first iteration.
  for (x = 0; x < Nparticles; x++) {
    xj[x] = xe;
    yj[x] = ye;
  }
  int k;
  // start send
  long long send_start = get_time();
  check_error(hipMemcpy(I_GPU, I, sizeof(unsigned char) * IszX * IszY * Nfr,
                        hipMemcpyHostToDevice));
  check_error(hipMemcpy(objxy_GPU, objxy, sizeof(int) * 2 * countOnes,
                        hipMemcpyHostToDevice));
  check_error(hipMemcpy(weights_GPU, weights, sizeof(double) * Nparticles,
                        hipMemcpyHostToDevice));
  check_error(hipMemcpy(xj_GPU, xj, sizeof(double) * Nparticles,
                        hipMemcpyHostToDevice));
  check_error(hipMemcpy(yj_GPU, yj, sizeof(double) * Nparticles,
                        hipMemcpyHostToDevice));
  check_error(hipMemcpy(seed_GPU, seed, sizeof(int) * Nparticles,
                        hipMemcpyHostToDevice));
  long long send_end = get_time();
  printf("TIME TO SEND TO GPU: %f\n", elapsed_time(send_start, send_end));
  int num_blocks = ceil((double)Nparticles / (double)threads_per_block);
  // Per frame: propagate + likelihood, reduce weight sum, normalize + build
  // CDF, then systematic resample.
  for (k = 1; k < Nfr; k++) {
    hipLaunchKernelGGL(( likelihood_kernel), dim3(num_blocks), dim3(threads_per_block), 0, 0,
        arrayX_GPU, arrayY_GPU, xj_GPU, yj_GPU, CDF_GPU, ind_GPU, objxy_GPU,
        likelihood_GPU, I_GPU, u_GPU, weights_GPU, Nparticles, countOnes,
        max_size, k, IszY, Nfr, seed_GPU, partial_sums);
    hipLaunchKernelGGL(( sum_kernel), dim3(num_blocks), dim3(threads_per_block), 0, 0, partial_sums, Nparticles);
    hipLaunchKernelGGL(( normalize_weights_kernel), dim3(num_blocks), dim3(threads_per_block), 0, 0,
        weights_GPU, Nparticles, partial_sums, CDF_GPU, u_GPU, seed_GPU);
    hipLaunchKernelGGL(( find_index_kernel), dim3(num_blocks), dim3(threads_per_block), 0, 0,
        arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, weights_GPU,
        Nparticles);
  } // end loop
  // block till kernels are finished
  hipDeviceSynchronize();
  long long back_time = get_time();
  hipFree(xj_GPU);
  hipFree(yj_GPU);
  hipFree(CDF_GPU);
  hipFree(u_GPU);
  hipFree(likelihood_GPU);
  hipFree(I_GPU);
  hipFree(objxy_GPU);
  hipFree(ind_GPU);
  hipFree(seed_GPU);
  hipFree(partial_sums);
  long long free_time = get_time();
  check_error(hipMemcpy(arrayX, arrayX_GPU, sizeof(double) * Nparticles,
                        hipMemcpyDeviceToHost));
  long long arrayX_time = get_time();
  check_error(hipMemcpy(arrayY, arrayY_GPU, sizeof(double) * Nparticles,
                        hipMemcpyDeviceToHost));
  long long arrayY_time = get_time();
  check_error(hipMemcpy(weights, weights_GPU, sizeof(double) * Nparticles,
                        hipMemcpyDeviceToHost));
  long long back_end_time = get_time();
  printf("GPU Execution: %lf\n", elapsed_time(send_end, back_time));
  printf("FREE TIME: %lf\n", elapsed_time(back_time, free_time));
  printf("TIME TO SEND BACK: %lf\n", elapsed_time(back_time, back_end_time));
  printf("SEND ARRAY X BACK: %lf\n", elapsed_time(free_time, arrayX_time));
  printf("SEND ARRAY Y BACK: %lf\n", elapsed_time(arrayX_time, arrayY_time));
  printf("SEND WEIGHTS BACK: %lf\n", elapsed_time(arrayY_time, back_end_time));
  xe = 0;
  ye = 0;
  // estimate the object location by expected values
  for (x = 0; x < Nparticles; x++) {
    xe += arrayX[x] * weights[x];
    ye += arrayY[x] * weights[x];
  }
  printf("XE: %lf\n", xe);
  printf("YE: %lf\n", ye);
  double distance = sqrt(pow((double)(xe - (int)roundDouble(IszY / 2.0)), 2) +
                         pow((double)(ye - (int)roundDouble(IszX / 2.0)), 2));
  printf("%lf\n", distance);
  // GPU freeing of memory
  hipFree(weights_GPU);
  hipFree(arrayY_GPU);
  hipFree(arrayX_GPU);
  // free regular memory
  free(likelihood);
  free(arrayX);
  free(arrayY);
  free(xj);
  free(yj);
  free(CDF);
  free(ind);
  free(u);
  // LEAK FIX: these three host buffers were allocated above but never
  // released in the original implementation.
  free(disk);
  free(objxy);
  free(weights);
}
/**
 * Entry point: parses -x/-y/-z/-np arguments, synthesizes the test video,
 * and runs the particle filter, printing per-phase timings.
 */
int main(int argc, char *argv[]) {
  const char *usage =
      "double.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
  // check number of arguments
  if (argc != 9) {
    printf("%s\n", usage);
    return 0;
  }
  // check args deliminators
  if (strcmp(argv[1], "-x") || strcmp(argv[3], "-y") || strcmp(argv[5], "-z") ||
      strcmp(argv[7], "-np")) {
    printf("%s\n", usage);
    return 0;
  }
  int IszX, IszY, Nfr, Nparticles;
  // BUG FIX: sscanf returns the number of converted items (0 on a failed
  // match; EOF only on empty input), so test for != 1, not == EOF.
  if (sscanf(argv[2], "%d", &IszX) != 1) {
    printf("ERROR: dimX input is incorrect");
    return 0;
  }
  if (IszX <= 0) {
    printf("dimX must be > 0\n");
    return 0;
  }
  if (sscanf(argv[4], "%d", &IszY) != 1) {
    printf("ERROR: dimY input is incorrect");
    return 0;
  }
  if (IszY <= 0) {
    printf("dimY must be > 0\n");
    return 0;
  }
  if (sscanf(argv[6], "%d", &Nfr) != 1) {
    printf("ERROR: Number of frames input is incorrect");
    return 0;
  }
  if (Nfr <= 0) {
    printf("number of frames must be > 0\n");
    return 0;
  }
  if (sscanf(argv[8], "%d", &Nparticles) != 1) {
    printf("ERROR: Number of particles input is incorrect");
    return 0;
  }
  if (Nparticles <= 0) {
    printf("Number of particles must be > 0\n");
    return 0;
  }
  // establish seed
  // NOTE(review): seed[0] is always 0 because of the `* i` — confirm this is
  // the intended benchmark behavior before changing it.
  int *seed = (int *)malloc(sizeof(int) * Nparticles);
  int i;
  for (i = 0; i < Nparticles; i++)
    seed[i] = time(0) * i;
  // malloc matrix
  unsigned char *I =
      (unsigned char *)malloc(sizeof(unsigned char) * IszX * IszY * Nfr);
  long long start = get_time();
  // call video sequence
  videoSequence(I, IszX, IszY, Nfr, seed);
  long long endVideoSequence = get_time();
  printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
  // call particle filter
  particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
  long long endParticleFilter = get_time();
  printf("PARTICLE FILTER TOOK %f\n",
         elapsed_time(endVideoSequence, endParticleFilter));
  printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
  free(seed);
  free(I);
  return 0;
}
| 7f5ffb79fba5292ac63bd50f78ba89c1905a300e.cu | #include <fcntl.h>
#include <float.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#define BLOCK_X 16
#define BLOCK_Y 16
#define PI 3.1415926535897932
const int threads_per_block = 512;
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
/* Returns the current wall-clock time in microseconds (gettimeofday). */
long long get_time() {
  struct timeval now;
  gettimeofday(&now, NULL);
  return (now.tv_sec * 1000000) + now.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
/* Converts a microsecond interval (from get_time) into seconds. */
double elapsed_time(long long start_time, long long end_time) {
  long long usec = end_time - start_time;
  return ((double)usec) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
// Aborts the whole process with a readable message when a CUDA runtime call
// returned anything other than cudaSuccess. Wrap every cuda* API call in this.
void check_error(cudaError e) {
  if (e != cudaSuccess) {
    printf("\nCUDA error: %s\n", cudaGetErrorString(e));
    exit(1);
  }
}
// Debug helper: copies `size` doubles from device memory to a temporary host
// buffer and prints them. The blocking cudaMemcpy also serves as a sync point.
// NOTE(review): the cudaMemcpy return value is unchecked — consider routing it
// through check_error().
void cuda_print_double_array(double *array_GPU, size_t size) {
  // allocate temporary array for printing
  double *mem = (double *)malloc(sizeof(double) * size);
  // transfer data from device
  cudaMemcpy(mem, array_GPU, sizeof(double) * size, cudaMemcpyDeviceToHost);
  printf("PRINTING ARRAY VALUES\n");
  // print values in memory
  for (size_t i = 0; i < size; ++i) {
    printf("[%lu]:%0.6f\n", i, mem[i]);
  }
  printf("FINISHED PRINTING ARRAY VALUES\n");
  // clean up memory
  free(mem);
  mem = NULL;
}
/********************************
* CALC LIKELIHOOD SUM
* DETERMINES THE LIKELIHOOD SUM BASED ON THE FORMULA: SUM( (IK[IND] - 100)^2 -
*(IK[IND] - 228)^2)/ 100 param 1 I 3D matrix param 2 current ind array param 3
*length of ind array returns a double representing the sum
********************************/
// Sums ((I[k] - 100)^2 - (I[k] - 228)^2) / 50 over the template footprint of
// one particle; `ind` holds the flattened pixel indices, numOnes per particle.
// NOTE(review): the comment block above says the divisor is 100 but the code
// divides by 50.0 — confirm which scale is intended.
__device__ double calcLikelihoodSum(unsigned char *I, int *ind, int numOnes,
                                    int index) {
  double likelihoodSum = 0.0;
  int x;
  for (x = 0; x < numOnes; x++)
    likelihoodSum += (pow((double)(I[ind[index * numOnes + x]] - 100), 2) -
                      pow((double)(I[ind[index * numOnes + x]] - 228), 2)) /
                     50.0;
  return likelihoodSum;
}
/****************************
CDF CALCULATE
CALCULATES CDF
param1 CDF
param2 weights
param3 Nparticles
*****************************/
// Sequential inclusive prefix sum of the weights into CDF.
// Intended to be executed by a single thread (see normalize_weights_kernel).
__device__ void cdfCalc(double *CDF, double *weights, int Nparticles) {
  int x;
  CDF[0] = weights[0];
  for (x = 1; x < Nparticles; x++) {
    CDF[x] = weights[x] + CDF[x - 1];
  }
}
/*****************************
* RANDU
* GENERATES A UNIFORM DISTRIBUTION
* returns a double representing a randomily generated number from a uniform
*distribution with range [0, 1)
******************************/
// Device-side LCG uniform draw in [0, 1); each particle index owns an
// independent stream in seed[]. Constants mirror the host-side globals.
__device__ double d_randu(int *seed, int index) {
  int M = INT_MAX;
  int A = 1103515245;
  int C = 12345;
  int num = A * seed[index] + C;
  seed[index] = num % M;
  // fabs(): `A * seed + C` can wrap to a negative int, leaving a negative
  // state; mirror it into [0, 1) instead of returning a negative value.
  return fabs(seed[index] / ((double)M));
}
/**
 * Generates a uniformly distributed random number using the provided seed and
 * GCC's settings for the Linear Congruential Generator (LCG)
 * @see http://en.wikipedia.org/wiki/Linear_congruential_generator
 * @note This function is thread-safe
 * @param seed The seed array
 * @param index The specific index of the seed to be advanced
 * @return a uniformly distributed number [0, 1)
 */
double randu(int *seed, int index) {
  // Host-side twin of d_randu, using the file-scope A/C/M globals.
  int num = A * seed[index] + C;
  seed[index] = num % M;
  return fabs(seed[index] / ((double)M));
}
/**
* Generates a normally distributed random number using the Box-Muller
* transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller
* algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing
* value for normal random distribution
*/
double randn(int *seed, int index) {
  // Box-Muller transform: two uniform draws yield one standard-normal
  // deviate. The two randu() calls advance the same per-index LCG stream.
  double u1 = randu(seed, index);
  double u2 = randu(seed, index);
  double angle_term = cos(2 * PI * u2);
  double radial_term = sqrt(-2 * log(u1));
  return radial_term * angle_term;
}
double test_randn(int *seed, int index) {
  // Sanity-check variant of randn(): identical Box-Muller math, but with a
  // locally defined pi instead of the file-scope PI macro.
  const double pi = 3.14159265358979323846;
  double u1 = randu(seed, index);
  double u2 = randu(seed, index);
  double radial_term = sqrt(-2 * log(u1));
  return radial_term * cos(2 * pi * u2);
}
// Device-side standard-normal deviate via the Box-Muller transform; consumes
// two uniforms from this particle's LCG stream (d_randu advances the state).
__device__ double d_randn(int *seed, int index) {
  // Box-Muller algorithm
  double pi = 3.14159265358979323846;
  double u = d_randu(seed, index);
  double v = d_randu(seed, index);
  double cosine = cos(2 * pi * v);
  double rt = -2 * log(u);
  return sqrt(rt) * cosine;
}
/****************************
UPDATE WEIGHTS
UPDATES WEIGHTS
param1 weights
param2 likelihood
param3 Nparcitles
****************************/
// Scales each weight by exp(likelihood[x]) and returns the new total weight
// (the caller uses the sum for normalization). Sequential over all particles,
// so it is meant to run on a single thread.
__device__ double updateWeights(double *weights, double *likelihood,
                                int Nparticles) {
  int x;
  double sum = 0;
  for (x = 0; x < Nparticles; x++) {
    weights[x] = weights[x] * exp(likelihood[x]);
    sum += weights[x];
  }
  return sum;
}
// Binary search over CDF[beginIndex..endIndex] for the first entry >= value;
// returns -1 when the range is empty or the search fails. When a run of
// entries equals `value`, walks left to the first one.
__device__ int findIndexBin(double *CDF, int beginIndex, int endIndex,
                            double value) {
  if (endIndex < beginIndex)
    return -1;
  int middleIndex;
  while (endIndex > beginIndex) {
    middleIndex = beginIndex + ((endIndex - beginIndex) / 2);
    if (CDF[middleIndex] >= value) {
      if (middleIndex == 0)
        return middleIndex;
      else if (CDF[middleIndex - 1] < value)
        return middleIndex;
      else if (CDF[middleIndex - 1] == value) {
        // BUG FIX: test the bound BEFORE dereferencing so that CDF[-1] is
        // never read (the original evaluated CDF[middleIndex] first and could
        // read one element past the start of the array).
        while (middleIndex >= 0 && CDF[middleIndex] == value)
          middleIndex--;
        middleIndex++;
        return middleIndex;
      }
    }
    if (CDF[middleIndex] > value)
      endIndex = middleIndex - 1;
    else
      beginIndex = middleIndex + 1;
  }
  return -1;
}
/** added this function. was missing in original double version.
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value
* > input value
*/
/** Device-side round-to-nearest (ties round up); mirrors host roundDouble.
 * @return the truncated value when the fraction is < .5, otherwise value + 1
 */
__device__ double dev_round_double(double value) {
  int newValue = (int)(value);
  if (value - newValue < .5f)
    return newValue;
  // BUG FIX: the original `return newValue++` discarded the post-increment,
  // so fractions >= .5 were rounded DOWN instead of up.
  return newValue + 1;
}
/*****************************
* CUDA Find Index Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: weights
* param8: Nparticles
*****************************/
// Systematic resampling step: each thread scans the CDF for the first entry
// >= its stratified sample u[i] and copies that ancestor particle's position
// into (xj, yj). Launched as a 1-D grid, one thread per particle.
__global__ void find_index_kernel(double *arrayX, double *arrayY, double *CDF,
                                  double *u, double *xj, double *yj,
                                  double *weights, int Nparticles) {
  int block_id = blockIdx.x;
  int i = blockDim.x * block_id + threadIdx.x;
  if (i < Nparticles) {
    int index = -1;
    int x;
    // Linear scan (O(Nparticles) per thread); findIndexBin exists as a
    // binary-search alternative but is unused here.
    for (x = 0; x < Nparticles; x++) {
      if (CDF[x] >= u[i]) {
        index = x;
        break;
      }
    }
    if (index == -1) {
      // u[i] exceeded every CDF entry: clamp to the last particle.
      index = Nparticles - 1;
    }
    xj[i] = arrayX[index];
    yj[i] = arrayY[index];
    // weights[i] = 1 / ((double) (Nparticles)); //moved this code to the
    // beginning of likelihood kernel
  }
  // NOTE(review): this trailing barrier has no shared state to protect and
  // appears unnecessary — confirm before removing.
  __syncthreads();
}
// Normalizes the weights by the total in partial_sums[0] (produced by
// sum_kernel), then has global thread 0 build the CDF and draw the stratified
// sampling offset u[0]; finally every thread fills its own u[i].
// NOTE(review): u[0] and the CDF are written by thread 0 of block 0 but read
// by threads of OTHER blocks with only block-level barriers in between —
// this relies on inter-block ordering that __syncthreads() does not provide;
// confirm against the original benchmark's assumptions.
__global__ void normalize_weights_kernel(double *weights, int Nparticles,
                                         double *partial_sums, double *CDF,
                                         double *u, int *seed) {
  int block_id = blockIdx.x;
  int i = blockDim.x * block_id + threadIdx.x;
  __shared__ double u1, sumWeights;
  // One lane per block caches the global weight sum in shared memory.
  if (0 == threadIdx.x)
    sumWeights = partial_sums[0];
  __syncthreads();
  if (i < Nparticles) {
    weights[i] = weights[i] / sumWeights;
  }
  __syncthreads();
  if (i == 0) {
    // Single-threaded prefix sum over all normalized weights.
    cdfCalc(CDF, weights, Nparticles);
    u[0] =
        (1 / ((double)(Nparticles))) *
        d_randu(
            seed,
            i); // do this to allow all threads in all blocks to use the same u1
  }
  __syncthreads();
  if (0 == threadIdx.x)
    u1 = u[0];
  __syncthreads();
  if (i < Nparticles) {
    // Stratified samples: evenly spaced offsets from the shared base u1.
    u[i] = u1 + i / ((double)(Nparticles));
  }
}
// Folds the per-block partial weight sums into partial_sums[0].
// Performed serially by global thread 0 (O(num_blocks)); all other threads
// exit immediately. Relies on the file-scope threads_per_block constant
// matching the launch configuration.
__global__ void sum_kernel(double *partial_sums, int Nparticles) {
  int block_id = blockIdx.x;
  int i = blockDim.x * block_id + threadIdx.x;
  if (i == 0) {
    int x;
    double sum = 0.0;
    int num_blocks = ceil((double)Nparticles / (double)threads_per_block);
    for (x = 0; x < num_blocks; x++) {
      sum += partial_sums[x];
    }
    partial_sums[0] = sum;
  }
}
/*****************************
* CUDA Likelihood Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param2.5: CDF
* param3: ind
* param4: objxy
* param5: likelihood
* param6: I
* param6.5: u
* param6.75: weights
* param7: Nparticles
* param8: countOnes
* param9: max_size
* param10: k
* param11: IszY
* param12: Nfr
*****************************/
// Per-frame particle update: copies the resampled positions (xj, yj) into
// (arrayX, arrayY), applies the stochastic motion model, evaluates the image
// likelihood over the template footprint, rescales the weight, and reduces
// the block's weights into partial_sums[blockIdx.x] via shared memory.
// Launch: 1-D grid, blockDim.x must equal 512 (buffer[] is hard-sized) and
// be a power of two for the tree reduction below.
__global__ void likelihood_kernel(double *arrayX, double *arrayY, double *xj,
                                  double *yj, double *CDF, int *ind, int *objxy,
                                  double *likelihood, unsigned char *I,
                                  double *u, double *weights, int Nparticles,
                                  int countOnes, int max_size, int k, int IszY,
                                  int Nfr, int *seed, double *partial_sums) {
  int block_id = blockIdx.x;
  int i = blockDim.x * block_id + threadIdx.x;
  int y;
  int indX, indY;
  __shared__ double buffer[512];
  if (i < Nparticles) {
    arrayX[i] = xj[i];
    arrayY[i] = yj[i];
    weights[i] =
        1 / ((double)(Nparticles)); // Donnie - moved this line from end of
                                    // find_index_kernel to prevent all weights
                                    // from being reset before calculating
                                    // position on final iteration.
    // Motion model: constant drift plus Gaussian noise.
    arrayX[i] = arrayX[i] + 1.0 + 5.0 * d_randn(seed, i);
    arrayY[i] = arrayY[i] - 2.0 + 2.0 * d_randn(seed, i);
  }
  __syncthreads();
  if (i < Nparticles) {
    for (y = 0; y < countOnes; y++) {
      // added dev_round_double() to be consistent with roundDouble
      indX = dev_round_double(arrayX[i]) + objxy[y * 2 + 1];
      indY = dev_round_double(arrayY[i]) + objxy[y * 2];
      // Flattened pixel index for frame k; out-of-range indices wrap to 0.
      ind[i * countOnes + y] = abs(indX * IszY * Nfr + indY * Nfr + k);
      if (ind[i * countOnes + y] >= max_size)
        ind[i * countOnes + y] = 0;
    }
    likelihood[i] = calcLikelihoodSum(I, ind, countOnes, i);
    likelihood[i] = likelihood[i] / countOnes;
    weights[i] =
        weights[i] * exp(likelihood[i]); // Donnie Newell - added the missing
                                         // exponential function call
  }
  // Zero-fill so lanes past Nparticles contribute 0 to the reduction below
  // (this makes the partially-filled last block safe).
  buffer[threadIdx.x] = 0.0;
  __syncthreads();
  if (i < Nparticles) {
    buffer[threadIdx.x] = weights[i];
  }
  __syncthreads();
  // Shared-memory tree reduction of the block's weights.
  for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (threadIdx.x < s) {
      buffer[threadIdx.x] += buffer[threadIdx.x + s];
    }
    __syncthreads();
  }
  if (threadIdx.x == 0) {
    partial_sums[blockIdx.x] = buffer[0];
  }
  __syncthreads();
}
/**
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value
* > input value
*/
/**
 * Rounds a double to the nearest whole number (ties round up).
 * @param value the value to round
 * @return the truncated value when the fraction is < .5, otherwise value + 1
 */
double roundDouble(double value) {
  int newValue = (int)(value);
  if (value - newValue < .5)
    return newValue;
  // BUG FIX: the original `return newValue++` discarded the post-increment,
  // so fractions >= .5 were rounded DOWN instead of up.
  return newValue + 1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the
* testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, unsigned char *array3D, int *dimX,
           int *dimY, int *dimZ) {
  // Replace every voxel equal to testValue with newValue across all frames.
  const int X = *dimX, Y = *dimY, Z = *dimZ;
  for (int x = 0; x < X; x++) {
    for (int y = 0; y < Y; y++) {
      unsigned char *row = array3D + (x * Y + y) * Z;
      for (int z = 0; z < Z; z++) {
        if (row[z] == testValue)
          row[z] = newValue;
      }
    }
  }
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal
* distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(unsigned char *array3D, int *dimX, int *dimY, int *dimZ,
              int *seed) {
  // Corrupt every voxel with zero-mean Gaussian noise (sigma = 5). All draws
  // come from seed slot 0, matching the original sequential generator.
  const int X = *dimX, Y = *dimY, Z = *dimZ;
  for (int x = 0; x < X; x++) {
    for (int y = 0; y < Y; y++) {
      for (int z = 0; z < Z; z++) {
        int idx = x * Y * Z + y * Z + z;
        array3D[idx] = array3D[idx] + (unsigned char)(5 * randn(seed, 0));
      }
    }
  }
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
/**
 * Fills a diameter x diameter matrix with the disk structuring element:
 * 1 inside the radius, 0 outside.
 * @param disk caller-allocated buffer of (2*radius-1)^2 ints
 * @param radius the radius of the disk
 */
void strelDisk(int *disk, int radius) {
  int diameter = radius * 2 - 1;
  int x, y;
  for (x = 0; x < diameter; x++) {
    for (y = 0; y < diameter; y++) {
      double distance = sqrt(pow((double)(x - radius + 1), 2) +
                             pow((double)(y - radius + 1), 2));
      // BUG FIX: cells outside the disk were never written (the caller
      // mallocs the buffer), so the subsequent countOnes scan in
      // particleFilter read uninitialized memory. Write 0 explicitly.
      disk[x * diameter + y] = (distance < radius) ? 1 : 0;
    }
  }
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(unsigned char *matrix, int posX, int posY, int posZ,
                   int dimX, int dimY, int dimZ, int error) {
  // Clamp the dilation window to the frame bounds.
  int startX = (posX - error < 0) ? 0 : posX - error;
  int startY = (posY - error < 0) ? 0 : posY - error;
  int endX = (posX + error > dimX) ? dimX : posX + error;
  int endY = (posY + error > dimY) ? dimY : posY + error;
  // Stamp every pixel strictly inside the error radius in frame posZ.
  for (int x = startX; x < endX; x++) {
    for (int y = startY; y < endY; y++) {
      double distance =
          sqrt(pow((double)(x - posX), 2) + pow((double)(y - posY), 2));
      if (distance < error)
        matrix[x * dimY * dimZ + y * dimZ + posZ] = 1;
    }
  }
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
// Morphological dilation: for every set voxel in `matrix`, stamps a disk of
// radius `error` into `newMatrix` at the same frame. `newMatrix` must be
// pre-zeroed by the caller (dilate_matrix only writes 1s).
void imdilate_disk(unsigned char *matrix, int dimX, int dimY, int dimZ,
                   int error, unsigned char *newMatrix) {
  int x, y, z;
  for (z = 0; z < dimZ; z++) {
    for (x = 0; x < dimX; x++) {
      for (y = 0; y < dimY; y++) {
        if (matrix[x * dimY * dimZ + y * dimZ + z] == 1) {
          dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
        }
      }
    }
  }
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int *se, int numOnes, int *neighbors, int radius) {
  // For every set cell of the structuring element, record its (y, x) offset
  // from the disk center as a pair in `neighbors` (y offset first).
  // `numOnes` is kept for interface compatibility; the scan itself counts.
  int diameter = radius * 2 - 1;
  int center = radius - 1;
  int out = 0;
  for (int row = 0; row < diameter; row++) {
    for (int col = 0; col < diameter; col++) {
      if (se[row * diameter + col]) {
        neighbors[out * 2] = col - center;
        neighbors[out * 2 + 1] = row - center;
        out++;
      }
    }
  }
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity is known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(unsigned char *I, int IszX, int IszY, int Nfr, int *seed) {
  int k;
  int max_size = IszX * IszY * Nfr;
  /*get object centers*/
  int x0 = (int)roundDouble(IszY / 2.0);
  int y0 = (int)roundDouble(IszX / 2.0);
  I[x0 * IszY * Nfr + y0 * Nfr + 0] = 1;
  /*move point linearly: +1 in x, -2 in y per frame*/
  int xk, yk, pos;
  for (k = 1; k < Nfr; k++) {
    xk = abs(x0 + (k - 1));
    yk = abs(y0 - 2 * (k - 1));
    // NOTE(review): the row stride here uses IszY even though xk/yk mix both
    // dimensions — matches the original benchmark; confirm for non-square
    // frames before changing.
    pos = yk * IszY * Nfr + xk * Nfr + k;
    if (pos >= max_size)
      pos = 0; // out-of-frame positions wrap to index 0
    I[pos] = 1;
  }
  /*dilate the single-pixel track into a disk of radius 5*/
  unsigned char *newMatrix =
      (unsigned char *)malloc(sizeof(unsigned char) * IszX * IszY * Nfr);
  imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
  int x, y;
  // Copy the dilated result back into I.
  for (x = 0; x < IszX; x++) {
    for (y = 0; y < IszY; y++) {
      for (k = 0; k < Nfr; k++) {
        I[x * IszY * Nfr + y * Nfr + k] =
            newMatrix[x * IszY * Nfr + y * Nfr + k];
      }
    }
  }
  free(newMatrix);
  /*define background (100) and foreground (228) intensities, add noise*/
  setIf(0, 100, I, &IszX, &IszY, &Nfr);
  setIf(1, 228, I, &IszX, &IszY, &Nfr);
  /*add noise*/
  addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Finds the first element in the CDF that is greater than or equal to the
* provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the
* last index
*/
/* Linear search: return the first index whose CDF entry reaches `value`
 * (see doxygen block above); clamps to the final bin when never reached. */
int findIndex(double *CDF, int lengthCDF, double value) {
  int i = 0;
  while (i < lengthCDF) {
    if (CDF[i] >= value)
      return i;
    i++;
  }
  return lengthCDF - 1;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In
* addition, it references a provided MATLAB function which takes the video, the
* objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(unsigned char *I, int IszX, int IszY, int Nfr, int *seed,
                    int Nparticles) {
  int max_size = IszX * IszY * Nfr;
  // original particle centroid
  double xe = roundDouble(IszY / 2.0);
  double ye = roundDouble(IszX / 2.0);
  // expected object locations, compared to center
  int radius = 5;
  int diameter = radius * 2 - 1;
  int *disk = (int *)malloc(diameter * diameter * sizeof(int));
  strelDisk(disk, radius);
  int countOnes = 0;
  int x, y;
  for (x = 0; x < diameter; x++) {
    for (y = 0; y < diameter; y++) {
      if (disk[x * diameter + y] == 1)
        countOnes++;
    }
  }
  int *objxy = (int *)malloc(countOnes * 2 * sizeof(int));
  getneighbors(disk, countOnes, objxy, radius);
  // initial weights are all equal (1/Nparticles)
  double *weights = (double *)malloc(sizeof(double) * Nparticles);
  for (x = 0; x < Nparticles; x++) {
    weights[x] = 1 / ((double)(Nparticles));
  }
  // initial likelihood to 0.0
  double *likelihood = (double *)malloc(sizeof(double) * Nparticles);
  double *arrayX = (double *)malloc(sizeof(double) * Nparticles);
  double *arrayY = (double *)malloc(sizeof(double) * Nparticles);
  double *xj = (double *)malloc(sizeof(double) * Nparticles);
  double *yj = (double *)malloc(sizeof(double) * Nparticles);
  double *CDF = (double *)malloc(sizeof(double) * Nparticles);
  // GPU copies of arrays
  double *arrayX_GPU;
  double *arrayY_GPU;
  double *xj_GPU;
  double *yj_GPU;
  double *CDF_GPU;
  double *likelihood_GPU;
  unsigned char *I_GPU;
  double *weights_GPU;
  int *objxy_GPU;
  int *ind = (int *)malloc(sizeof(int) * countOnes * Nparticles);
  int *ind_GPU;
  double *u = (double *)malloc(sizeof(double) * Nparticles);
  double *u_GPU;
  int *seed_GPU;
  double *partial_sums;
  // CUDA memory allocation
  check_error(cudaMalloc((void **)&arrayX_GPU, sizeof(double) * Nparticles));
  check_error(cudaMalloc((void **)&arrayY_GPU, sizeof(double) * Nparticles));
  check_error(cudaMalloc((void **)&xj_GPU, sizeof(double) * Nparticles));
  check_error(cudaMalloc((void **)&yj_GPU, sizeof(double) * Nparticles));
  check_error(cudaMalloc((void **)&CDF_GPU, sizeof(double) * Nparticles));
  check_error(cudaMalloc((void **)&u_GPU, sizeof(double) * Nparticles));
  check_error(
      cudaMalloc((void **)&likelihood_GPU, sizeof(double) * Nparticles));
  // set likelihood to zero
  check_error(
      cudaMemset((void *)likelihood_GPU, 0, sizeof(double) * Nparticles));
  check_error(cudaMalloc((void **)&weights_GPU, sizeof(double) * Nparticles));
  check_error(
      cudaMalloc((void **)&I_GPU, sizeof(unsigned char) * IszX * IszY * Nfr));
  check_error(cudaMalloc((void **)&objxy_GPU, sizeof(int) * 2 * countOnes));
  check_error(
      cudaMalloc((void **)&ind_GPU, sizeof(int) * countOnes * Nparticles));
  check_error(cudaMalloc((void **)&seed_GPU, sizeof(int) * Nparticles));
  check_error(cudaMalloc((void **)&partial_sums, sizeof(double) * Nparticles));
  // Donnie - this loop is different because in this kernel, arrayX and arrayY
  // are set equal to xj before every iteration, so effectively, arrayX and
  // arrayY will be set to xe and ye before the first iteration.
  for (x = 0; x < Nparticles; x++) {
    xj[x] = xe;
    yj[x] = ye;
  }
  int k;
  // start send
  long long send_start = get_time();
  check_error(cudaMemcpy(I_GPU, I, sizeof(unsigned char) * IszX * IszY * Nfr,
                         cudaMemcpyHostToDevice));
  check_error(cudaMemcpy(objxy_GPU, objxy, sizeof(int) * 2 * countOnes,
                         cudaMemcpyHostToDevice));
  check_error(cudaMemcpy(weights_GPU, weights, sizeof(double) * Nparticles,
                         cudaMemcpyHostToDevice));
  check_error(cudaMemcpy(xj_GPU, xj, sizeof(double) * Nparticles,
                         cudaMemcpyHostToDevice));
  check_error(cudaMemcpy(yj_GPU, yj, sizeof(double) * Nparticles,
                         cudaMemcpyHostToDevice));
  check_error(cudaMemcpy(seed_GPU, seed, sizeof(int) * Nparticles,
                         cudaMemcpyHostToDevice));
  long long send_end = get_time();
  printf("TIME TO SEND TO GPU: %f\n", elapsed_time(send_start, send_end));
  int num_blocks = ceil((double)Nparticles / (double)threads_per_block);
  // Per frame: propagate + likelihood, reduce weight sum, normalize + build
  // CDF, then systematic resample.
  for (k = 1; k < Nfr; k++) {
    likelihood_kernel<<<num_blocks, threads_per_block>>>(
        arrayX_GPU, arrayY_GPU, xj_GPU, yj_GPU, CDF_GPU, ind_GPU, objxy_GPU,
        likelihood_GPU, I_GPU, u_GPU, weights_GPU, Nparticles, countOnes,
        max_size, k, IszY, Nfr, seed_GPU, partial_sums);
    sum_kernel<<<num_blocks, threads_per_block>>>(partial_sums, Nparticles);
    normalize_weights_kernel<<<num_blocks, threads_per_block>>>(
        weights_GPU, Nparticles, partial_sums, CDF_GPU, u_GPU, seed_GPU);
    find_index_kernel<<<num_blocks, threads_per_block>>>(
        arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, weights_GPU,
        Nparticles);
  } // end loop
  // block till kernels are finished
  cudaDeviceSynchronize();
  long long back_time = get_time();
  cudaFree(xj_GPU);
  cudaFree(yj_GPU);
  cudaFree(CDF_GPU);
  cudaFree(u_GPU);
  cudaFree(likelihood_GPU);
  cudaFree(I_GPU);
  cudaFree(objxy_GPU);
  cudaFree(ind_GPU);
  cudaFree(seed_GPU);
  cudaFree(partial_sums);
  long long free_time = get_time();
  check_error(cudaMemcpy(arrayX, arrayX_GPU, sizeof(double) * Nparticles,
                         cudaMemcpyDeviceToHost));
  long long arrayX_time = get_time();
  check_error(cudaMemcpy(arrayY, arrayY_GPU, sizeof(double) * Nparticles,
                         cudaMemcpyDeviceToHost));
  long long arrayY_time = get_time();
  check_error(cudaMemcpy(weights, weights_GPU, sizeof(double) * Nparticles,
                         cudaMemcpyDeviceToHost));
  long long back_end_time = get_time();
  printf("GPU Execution: %lf\n", elapsed_time(send_end, back_time));
  printf("FREE TIME: %lf\n", elapsed_time(back_time, free_time));
  printf("TIME TO SEND BACK: %lf\n", elapsed_time(back_time, back_end_time));
  printf("SEND ARRAY X BACK: %lf\n", elapsed_time(free_time, arrayX_time));
  printf("SEND ARRAY Y BACK: %lf\n", elapsed_time(arrayX_time, arrayY_time));
  printf("SEND WEIGHTS BACK: %lf\n", elapsed_time(arrayY_time, back_end_time));
  xe = 0;
  ye = 0;
  // estimate the object location by expected values
  for (x = 0; x < Nparticles; x++) {
    xe += arrayX[x] * weights[x];
    ye += arrayY[x] * weights[x];
  }
  printf("XE: %lf\n", xe);
  printf("YE: %lf\n", ye);
  double distance = sqrt(pow((double)(xe - (int)roundDouble(IszY / 2.0)), 2) +
                         pow((double)(ye - (int)roundDouble(IszX / 2.0)), 2));
  printf("%lf\n", distance);
  // CUDA freeing of memory
  cudaFree(weights_GPU);
  cudaFree(arrayY_GPU);
  cudaFree(arrayX_GPU);
  // free regular memory
  free(likelihood);
  free(arrayX);
  free(arrayY);
  free(xj);
  free(yj);
  free(CDF);
  free(ind);
  free(u);
  // LEAK FIX: these three host buffers were allocated above but never
  // released in the original implementation.
  free(disk);
  free(objxy);
  free(weights);
}
/**
 * Entry point: parses -x/-y/-z/-np arguments, synthesizes the test video,
 * and runs the CUDA particle filter, printing per-phase timings.
 */
int main(int argc, char *argv[]) {
  const char *usage =
      "double.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
  // check number of arguments
  if (argc != 9) {
    printf("%s\n", usage);
    return 0;
  }
  // check args deliminators
  if (strcmp(argv[1], "-x") || strcmp(argv[3], "-y") || strcmp(argv[5], "-z") ||
      strcmp(argv[7], "-np")) {
    printf("%s\n", usage);
    return 0;
  }
  int IszX, IszY, Nfr, Nparticles;
  // BUG FIX: sscanf returns the number of converted items (0 on a failed
  // match; EOF only on empty input), so test for != 1, not == EOF.
  if (sscanf(argv[2], "%d", &IszX) != 1) {
    printf("ERROR: dimX input is incorrect");
    return 0;
  }
  if (IszX <= 0) {
    printf("dimX must be > 0\n");
    return 0;
  }
  if (sscanf(argv[4], "%d", &IszY) != 1) {
    printf("ERROR: dimY input is incorrect");
    return 0;
  }
  if (IszY <= 0) {
    printf("dimY must be > 0\n");
    return 0;
  }
  if (sscanf(argv[6], "%d", &Nfr) != 1) {
    printf("ERROR: Number of frames input is incorrect");
    return 0;
  }
  if (Nfr <= 0) {
    printf("number of frames must be > 0\n");
    return 0;
  }
  if (sscanf(argv[8], "%d", &Nparticles) != 1) {
    printf("ERROR: Number of particles input is incorrect");
    return 0;
  }
  if (Nparticles <= 0) {
    printf("Number of particles must be > 0\n");
    return 0;
  }
  // establish seed
  // NOTE(review): seed[0] is always 0 because of the `* i` — confirm this is
  // the intended benchmark behavior before changing it.
  int *seed = (int *)malloc(sizeof(int) * Nparticles);
  int i;
  for (i = 0; i < Nparticles; i++)
    seed[i] = time(0) * i;
  // malloc matrix
  unsigned char *I =
      (unsigned char *)malloc(sizeof(unsigned char) * IszX * IszY * Nfr);
  long long start = get_time();
  // call video sequence
  videoSequence(I, IszX, IszY, Nfr, seed);
  long long endVideoSequence = get_time();
  printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
  // call particle filter
  particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
  long long endParticleFilter = get_time();
  printf("PARTICLE FILTER TOOK %f\n",
         elapsed_time(endVideoSequence, endParticleFilter));
  printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
  free(seed);
  free(I);
  return 0;
}
|
328ed7892ba2369febeba6b260c022c1113fcef7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_kernel [4][1];
static int dims_update_kernel_h [4][1] = {0};
//user function
// user function: fold the packed source terms into the conserved variables
__device__
void update_kernel_gpu(ACC<double> &rho_new,
                       ACC<double> &rhou_new,
                       ACC<double> &rhoE_new,
                       const ACC<double> &s) {
  // s carries the (density, momentum, energy) increments in components 0..2
  rho_new(0)  += s(0,0);
  rhou_new(0) += s(1,0);
  rhoE_new(0) += s(2,0);
}
// One thread per x-point. arg0-arg2 are 1-component datasets; arg3 packs
// 3 components per point (hence the *3 offset). size0 = number of points.
__global__ void ops_update_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
int size0 ){
// flat global x index of this thread
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
// shift each base pointer to this thread's point
arg0 += idx_x * 1*1;
arg1 += idx_x * 1*1;
arg2 += idx_x * 1*1;
arg3 += idx_x * 1*3;
// tail guard: the grid is rounded up to whole blocks by the host stub
if (idx_x < size0) {
ACC<double> argp0(arg0);
ACC<double> argp1(arg1);
ACC<double> argp2(arg2);
// 3-component accessor; the leading dimension comes from the constant
// dims table the host copies over before launch
const ACC<double> argp3(3, dims_update_kernel[3][0], arg3);
update_kernel_gpu(argp0, argp1, argp2, argp3);
}
}
// Host stub: derives the local 1D iteration range, refreshes the device
// dims table when dataset sizes change, computes device base pointers and
// launches ops_update_kernel; timing/MPI/checkpointing paths are optional.
#ifndef OPS_LAZY
void ops_par_loop_update_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
#else
// lazy path: the loop was enqueued earlier; unpack its descriptor
void ops_par_loop_update_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
// NOTE(review): "#if OPS_MPI" here but "#ifdef OPS_MPI" below — this
// branch is skipped if OPS_MPI is defined with no value; confirm intent.
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[4] = { arg0, arg1, arg2, arg3};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,4,range,13)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(13,"update_kernel");
OPS_kernels[13].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[1];
int end[1];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[1];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 4,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<1; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
// refresh the __constant__ dims table only when a dataset size changed
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
if (xdim0 != dims_update_kernel_h[0][0] || xdim1 != dims_update_kernel_h[1][0] || xdim2 != dims_update_kernel_h[2][0] || xdim3 != dims_update_kernel_h[3][0]) {
dims_update_kernel_h[0][0] = xdim0;
dims_update_kernel_h[1][0] = xdim1;
dims_update_kernel_h[2][0] = xdim2;
dims_update_kernel_h[3][0] = xdim3;
cutilSafeCall(hipMemcpyToSymbol( dims_update_kernel, dims_update_kernel_h, sizeof(dims_update_kernel)));
}
// 1D launch: grid rounded up to whole blocks (kernel guards the tail)
int x_size = MAX(0,end[0]-start[0]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, 1, 1);
dim3 tblock(OPS_block_size_x,1,1);
// per-point byte stride (SoA uses component size, AoS the element size)
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
char *p_a[4];
//set up initial pointers: device base + byte offset of the range start
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
p_a[3] = (char *)args[3].data_d + base3;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args,4,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[13].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0)
hipLaunchKernelGGL(( ops_update_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],x_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
// synchronize only when timing, so the kernel cost is attributed here
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[13].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[13].mpi_time += t2-t1;
OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
#ifdef OPS_LAZY
// Lazy wrapper: record the loop as a descriptor (identified by a
// djb2-style hash) and enqueue it instead of executing immediately.
void ops_par_loop_update_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 13;
// djb2 hash seeded with the kernel index, folded over range and dat ids
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 13;
// 1D loop: copy the 2 range entries (lo/hi)
for ( int i=0; i<2; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 4;
desc->args = (ops_arg*)malloc(4*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->function = ops_par_loop_update_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(13,"update_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
| 328ed7892ba2369febeba6b260c022c1113fcef7.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_kernel [4][1];
static int dims_update_kernel_h [4][1] = {0};
//user function
// user function: accumulate the packed source terms into the state
__device__
void update_kernel_gpu(ACC<double> &rho_new,
                       ACC<double> &rhou_new,
                       ACC<double> &rhoE_new,
                       const ACC<double> &s) {
  // read the three source components, then apply each increment
  const double d_rho  = s(0,0);
  const double d_rhou = s(1,0);
  const double d_rhoE = s(2,0);
  rho_new(0)  = rho_new(0)  + d_rho;
  rhou_new(0) = rhou_new(0) + d_rhou;
  rhoE_new(0) = rhoE_new(0) + d_rhoE;
}
// One thread per x-point. arg0-arg2 are 1-component datasets; arg3 packs
// 3 components per point (hence the *3 offset). size0 = number of points.
__global__ void ops_update_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
int size0 ){
// flat global x index of this thread
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
// shift each base pointer to this thread's point
arg0 += idx_x * 1*1;
arg1 += idx_x * 1*1;
arg2 += idx_x * 1*1;
arg3 += idx_x * 1*3;
// tail guard: the grid is rounded up to whole blocks by the host stub
if (idx_x < size0) {
ACC<double> argp0(arg0);
ACC<double> argp1(arg1);
ACC<double> argp2(arg2);
// 3-component accessor; the leading dimension comes from the constant
// dims table the host copies over before launch
const ACC<double> argp3(3, dims_update_kernel[3][0], arg3);
update_kernel_gpu(argp0, argp1, argp2, argp3);
}
}
// Host stub: derives the local 1D iteration range, refreshes the device
// dims table when dataset sizes change, computes device base pointers and
// launches ops_update_kernel; timing/MPI/checkpointing paths are optional.
#ifndef OPS_LAZY
void ops_par_loop_update_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
#else
// lazy path: the loop was enqueued earlier; unpack its descriptor
void ops_par_loop_update_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
// NOTE(review): "#if OPS_MPI" here but "#ifdef OPS_MPI" below — this
// branch is skipped if OPS_MPI is defined with no value; confirm intent.
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[4] = { arg0, arg1, arg2, arg3};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,4,range,13)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(13,"update_kernel");
OPS_kernels[13].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[1];
int end[1];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[1];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 4,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<1; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
// refresh the __constant__ dims table only when a dataset size changed
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
if (xdim0 != dims_update_kernel_h[0][0] || xdim1 != dims_update_kernel_h[1][0] || xdim2 != dims_update_kernel_h[2][0] || xdim3 != dims_update_kernel_h[3][0]) {
dims_update_kernel_h[0][0] = xdim0;
dims_update_kernel_h[1][0] = xdim1;
dims_update_kernel_h[2][0] = xdim2;
dims_update_kernel_h[3][0] = xdim3;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_kernel, dims_update_kernel_h, sizeof(dims_update_kernel)));
}
// 1D launch: grid rounded up to whole blocks (kernel guards the tail)
int x_size = MAX(0,end[0]-start[0]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, 1, 1);
dim3 tblock(OPS_block_size_x,1,1);
// per-point byte stride (SoA uses component size, AoS the element size)
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
char *p_a[4];
//set up initial pointers: device base + byte offset of the range start
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
p_a[3] = (char *)args[3].data_d + base3;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args,4,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[13].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0)
ops_update_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],x_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
// synchronize only when timing, so the kernel cost is attributed here
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[13].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[13].mpi_time += t2-t1;
OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
#ifdef OPS_LAZY
// Lazy wrapper: record the loop as a descriptor (identified by a
// djb2-style hash) and enqueue it instead of executing immediately.
void ops_par_loop_update_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 13;
// djb2 hash seeded with the kernel index, folded over range and dat ids
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 13;
// 1D loop: copy the 2 range entries (lo/hi)
for ( int i=0; i<2; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 4;
desc->args = (ops_arg*)malloc(4*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->function = ops_par_loop_update_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(13,"update_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
115050dffd1e1c140006abbbedefaa17bb0c5615.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Builds row idx of an N x N table: row 0 is all zeros; for idx > 0,
// wi[idx*N + i] = w[((idx*i) % N)*2 + 1].
// NOTE(review): w appears to hold interleaved (re,im) pairs of the N
// twiddle factors — only the odd (imaginary) slots are read; confirm
// the layout against the producer of w.
__global__ void twiddleImgKernel(float *wi, float *w, int N)
{
// one thread per output row
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i, index;
if (idx < N) {
if (idx == 0) {
// first row: every entry is zero
for (i = 0; i < N; i++)
wi[idx * N + i] = 0;
} else {
wi[idx * N + 0] = 0;
for (i = 1; i < N; i++) {
// product idx*i wraps modulo N
index = (idx * i) % N;
wi[idx * N + i] = w[index * 2 + 1];
}
}
}
} | 115050dffd1e1c140006abbbedefaa17bb0c5615.cu | #include "includes.h"
// Builds row idx of an N x N table: row 0 is all zeros; for idx > 0,
// wi[idx*N + i] = w[((idx*i) % N)*2 + 1].
// NOTE(review): w appears to hold interleaved (re,im) pairs of the N
// twiddle factors — only the odd (imaginary) slots are read; confirm
// the layout against the producer of w.
__global__ void twiddleImgKernel(float *wi, float *w, int N)
{
// one thread per output row
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i, index;
if (idx < N) {
if (idx == 0) {
// first row: every entry is zero
for (i = 0; i < N; i++)
wi[idx * N + i] = 0;
} else {
wi[idx * N + 0] = 0;
for (i = 1; i < N; i++) {
// product idx*i wraps modulo N
index = (idx * i) % N;
wi[idx * N + i] = w[index * 2 + 1];
}
}
}
} |
cf138ab4711a947199377e05240d871b686e6e3f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "./gpuerrchk.h"
__global__ void hello(char* arr, int *offset){
    // Only threads 0..4 participate: thread t shifts arr[t] by offset[t]
    // (main()'s offsets {-5,16,-8,-11,-78} turn "Hello" into "Cuda!").
    const unsigned tid = threadIdx.x;
    if (tid >= 5)
        return;
    arr[tid] = arr[tid] + offset[tid];
}
int main(){
    // Sizes must be compile-time constants so the string initializer below
    // is valid C++ (the original non-const N made "a" a VLA, which cannot
    // take an initializer, and char a[5]="Hello" leaves no room for '\0').
    const int N=5;
    char a[N+1]="Hello";           // N payload chars + NUL so printf("%s") is safe
    int b[N]={-5,16,-8,-11,-78};
    char *a_d;
    int *b_d;
    /*allocate on device (error-checked like the copies below;
      gpuErrchk is defined in "./gpuerrchk")*/
    gpuErrchk( hipMalloc(&a_d,N*sizeof(char)) );
    gpuErrchk( hipMalloc(&b_d,N*sizeof(int)) );
    /*print "Hello" on host*/
    printf("%s ",a);
    /*copy the character payload "a" and integer offsets "b" to device*/
    gpuErrchk( hipMemcpy(a_d,a,N*sizeof(char),hipMemcpyHostToDevice) );
    gpuErrchk( hipMemcpy(b_d,b,N*sizeof(int),hipMemcpyHostToDevice) );
    /*launch kernel with 1 block and 32 threads (transforms a_d using b_d)*/
    hipLaunchKernelGGL(( hello), dim3(1),dim3(32), 0, 0, a_d,b_d);
    //Error checking: launch-config errors, then async execution errors
    gpuErrchk( hipPeekAtLastError() );
    gpuErrchk( hipDeviceSynchronize() );
    //copy the transformed payload back; the NUL at a[N] is untouched
    gpuErrchk( hipMemcpy(a,a_d,N,hipMemcpyDeviceToHost) );
    //print "Cuda!" on host
    printf("%s\n",a);
    //free on device
    gpuErrchk( hipFree(a_d) );
    gpuErrchk( hipFree(b_d) );
    return 0;
}
| cf138ab4711a947199377e05240d871b686e6e3f.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include "./gpuerrchk.h"
__global__ void hello(char* arr, int *offset){
    // Lanes 0..4 each add their offset entry to the matching character of
    // "arr"; the remaining threads of the block do nothing.
    const int i = threadIdx.x;
    if (i < 5) {
        arr[i] += offset[i];
    }
}
int main(){
    // Sizes must be compile-time constants so the string initializer below
    // is valid C++ (the original non-const N made "a" a VLA, which cannot
    // take an initializer, and char a[5]="Hello" leaves no room for '\0').
    const int N=5;
    char a[N+1]="Hello";           // N payload chars + NUL so printf("%s") is safe
    int b[N]={-5,16,-8,-11,-78};
    char *a_d;
    int *b_d;
    /*allocate on device (error-checked like the copies below;
      gpuErrchk is defined in "./gpuerrchk")*/
    gpuErrchk( cudaMalloc(&a_d,N*sizeof(char)) );
    gpuErrchk( cudaMalloc(&b_d,N*sizeof(int)) );
    /*print "Hello" on host*/
    printf("%s ",a);
    /*copy the character payload "a" and integer offsets "b" to device*/
    gpuErrchk( cudaMemcpy(a_d,a,N*sizeof(char),cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(b_d,b,N*sizeof(int),cudaMemcpyHostToDevice) );
    /*launch kernel with 1 block and 32 threads (transforms a_d using b_d)*/
    hello<<<1,32>>>(a_d,b_d);
    //Error checking: launch-config errors, then async execution errors
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaDeviceSynchronize() );
    //copy the transformed payload back; the NUL at a[N] is untouched
    gpuErrchk( cudaMemcpy(a,a_d,N,cudaMemcpyDeviceToHost) );
    //print "Cuda!" on host
    printf("%s\n",a);
    //free on device
    gpuErrchk( cudaFree(a_d) );
    gpuErrchk( cudaFree(b_d) );
    return 0;
}
|
00b9325aaa457e290b6fe13f0969789ea602d7ca.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrix_log.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    // Benchmark driver: for each matrix size x block shape, time 1000
    // launches of matrix_log after a warm-up phase.
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            double *A = NULL;
            // BUGFIX: allocate XSIZE*YSIZE *doubles* — the original passed a
            // byte count of XSIZE*YSIZE, an 8x undersized buffer.
            hipMalloc(&A, XSIZE * YSIZE * sizeof(double));
            double *C = NULL;
            hipMalloc(&C, XSIZE * YSIZE * sizeof(double));
            unsigned int size = 1;
            // round the grid up to whole blocks
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // force context creation before timing
            hipLaunchKernelGGL(( matrix_log), dim3(gridBlock),dim3(threadBlock), 0, 0, A,C,size);
            hipDeviceSynchronize();
            // warm-up launches
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( matrix_log), dim3(gridBlock),dim3(threadBlock), 0, 0, A,C,size);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( matrix_log), dim3(gridBlock),dim3(threadBlock), 0, 0, A,C,size);
            }
            // BUGFIX: launches are asynchronous — without this sync the
            // measured interval covered only the enqueue cost.
            hipDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // release per-configuration buffers (the original leaked them)
            hipFree(A);
            hipFree(C);
        }
    }
} | 00b9325aaa457e290b6fe13f0969789ea602d7ca.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matrix_log.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    // Benchmark driver: for each matrix size x block shape, time 1000
    // launches of matrix_log after a warm-up phase.
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            double *A = NULL;
            // BUGFIX: allocate XSIZE*YSIZE *doubles* — the original passed a
            // byte count of XSIZE*YSIZE, an 8x undersized buffer.
            cudaMalloc(&A, XSIZE * YSIZE * sizeof(double));
            double *C = NULL;
            cudaMalloc(&C, XSIZE * YSIZE * sizeof(double));
            unsigned int size = 1;
            // round the grid up to whole blocks
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force context creation before timing
            matrix_log<<<gridBlock,threadBlock>>>(A,C,size);
            cudaDeviceSynchronize();
            // warm-up launches
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                matrix_log<<<gridBlock,threadBlock>>>(A,C,size);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                matrix_log<<<gridBlock,threadBlock>>>(A,C,size);
            }
            // BUGFIX: launches are asynchronous — without this sync the
            // measured interval covered only the enqueue cost.
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // release per-configuration buffers (the original leaked them)
            cudaFree(A);
            cudaFree(C);
        }
    }
} |
b651698c13c7e5a39889f7794bf6ca9ddde10241.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***
* Keri Anderson
* CS4230 Assignment 6
* Due Friday, Nov 22, 2013
*
*
* Parallelize Sparse Matrix Vector Mutliplication
* on a GPU using CUDA
*
* - Use lab1-x.eng.utah.edu, where x can be 1, 2, 3, 4 ...
* - The makefile provided should be used to compile
* the program. Use "make" command
* - To run: ./clean_fMRI
*
* You might have to set the LD_LIBRARY_PATH
*
* -export
* LD_LIBRARY_PATH = /usr/local/apps/cuda/3.2/cuda/lib64 (if using bash)
*
* or
*
* setenv LD_LIBRARY_PATH /usr/local/apps/cuda/3.2/cuda/lib64
*
* Expect speedup to be around 2
*
*
*/
#include <stdio.h>
#include <cutil.h>
extern int hipMalloc();
/***********************************************************
*
* GLOBAL FUNCTIONS
*
**********************************************************/
// SpMV kernel: one thread computes one row of t = A*b, with A in CSR form
// (ptr = row offsets, indices = column indices, data = values). The global
// thread id is the row index, e.g. block 2 / thread 3 with 4-thread blocks
// handles row 2*4+3 = 11.
// NOTE(review): there is no "i < nr" bounds guard — the host must launch
// exactly nr threads (it uses nr/100 blocks of 100 threads, so nr must be
// divisible by 100); confirm before reusing with other launch shapes.
__global__ void sparse_GPU(float *data, float *t, float *b, int *ptr,
int *indices) {
//parallelize the outer loop: one row per thread
int i = blockIdx.x *blockDim.x + threadIdx.x;
int j;
// dot product of row i's nonzeros with the dense vector b
for (j = ptr[i]; j<ptr[i+1]; j++) {
t[i] = t[i] + data[j] * b[indices[j]];
}//end for j
}//end global sparse_GPU
/***********************************************************
*
* MAIN
*
**********************************************************/
int main (int argc, char **argv) {
  // Reads a MatrixMarket-style sparse matrix, runs SpMV sequentially and on
  // the GPU, and compares results/timings.
  FILE *fp;
  char line[1024];
  int *ptr, *indices;
  float *data, *b, *t;
  float *d_a, *d_b, *d_c, *h_a;
  int *d_ptr, *d_indices;
  int i,j;
  int n;  // number of nonzero elements in data
  int nr; // number of rows in matrix
  int nc; // number of columns in matrix

  // Open input file and read to end of comments
  if (argc !=2) abort();
  if ((fp = fopen(argv[1], "r")) == NULL) {
    abort();
  }
  fgets(line, 128, fp);
  while (line[0] == '%') {
    fgets(line, 128, fp);
  }

  // Read number of rows (nr), columns (nc) and nonzeros (n); allocate the
  // CSR arrays plus the input/output vectors.
  sscanf(line,"%d %d %d\n", &nr, &nc, &n);
  ptr = (int *) malloc ((nr+1)*sizeof(int)); // nr+1: last entry marks the end of the last row
  indices = (int *) malloc(n*sizeof(int));
  data = (float *) malloc(n*sizeof(float));
  b = (float *) malloc(nc*sizeof(float));
  t = (float *) malloc(nr*sizeof(float));

  // Read data in coordinate format and build the CSR representation:
  // ptr[r] = offset into indices/data where row r begins.
  int lastr=0;
  for (i=0; i<n; i++) {
    int r;
    fscanf(fp,"%d %d %f\n", &r, &(indices[i]), &(data[i]));
    indices[i]--; // start numbering at 0
    if (r!=lastr) {
      ptr[r-1] = i;
      lastr = r;
    }
  }
  ptr[nr] = n;

  // initialize t to 0 and b with random data
  for (i=0; i<nr; i++) {
    t[i] = 0.0;
  }
  for (i=0; i<nc; i++) {
    b[i] = (float) rand()/1111111111;
  }

  // create CUDA event handles for timing purposes
  hipEvent_t start_event, stop_event;
  float elapsed_time_seq, elapsed_time_gpu;
  CUDA_SAFE_CALL( hipEventCreate(&start_event) );
  CUDA_SAFE_CALL( hipEventCreate(&stop_event) );
  hipEventRecord(start_event, 0);

  // MAIN COMPUTATION, SEQUENTIAL VERSION
  for (i=0; i<nr; i++) {
    for (j = ptr[i]; j<ptr[i+1]; j++) {
      t[i] = t[i] + data[j] * b[indices[j]];
    }
  }
  hipEventRecord(stop_event, 0);
  hipEventSynchronize(stop_event);
  CUDA_SAFE_CALL( hipEventElapsedTime(&elapsed_time_seq,start_event, stop_event) ); // BUGFIX: statement was missing its ';'

  // The GPU result is copied back into h_a ('h_' = host memory).
  h_a = (float *) malloc(nr*sizeof(float));
  // events are reusable — no need to re-create them (the original leaked a
  // second pair here)
  hipEventRecord(start_event, 0);

  // device allocation; the int arrays use sizeof(int) (the original used
  // sizeof(float) throughout — same byte count on common ABIs, but the
  // type-correct sizes are used here)
  hipMalloc((void**) &d_a, n*sizeof(float));
  hipMalloc((void**) &d_b, nc*sizeof(float));
  hipMalloc((void**) &d_c, nr*sizeof(float));
  hipMalloc((void**) &d_ptr, (nr+1)*sizeof(int));
  hipMalloc((void**) &d_indices, n*sizeof(int));

  hipMemcpy(d_a, data, n*sizeof(float), hipMemcpyHostToDevice);
  hipMemcpy(d_b, b, nc*sizeof(float), hipMemcpyHostToDevice);
  // BUGFIX: the kernel accumulates into d_c, so it must start at zero.
  // The original copied the *uninitialized* h_a buffer into d_c.
  hipMemset(d_c, 0, nr*sizeof(float));
  hipMemcpy(d_ptr, ptr, (nr+1)*sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(d_indices, indices, n*sizeof(int), hipMemcpyHostToDevice);

  // One thread per row: grid*block must equal nr (nr is assumed divisible
  // by 100 — sparse_GPU has no bounds guard).
  dim3 gridDim((nr/100), 1);
  dim3 blockDim(100, 1, 1);

  hipLaunchKernelGGL(( sparse_GPU), dim3(gridDim), dim3(blockDim), 0, 0, d_a, d_c, d_b, d_ptr, d_indices);

  // copy back only the result vector
  hipMemcpy(h_a, d_c, nr*sizeof(float), hipMemcpyDeviceToHost);
  hipDeviceSynchronize();
  hipEventRecord(stop_event, 0);
  hipEventSynchronize(stop_event);
  CUDA_SAFE_CALL( hipEventElapsedTime(&elapsed_time_gpu,start_event, stop_event) );

  CUTBoolean res = cutComparefe( h_a, t, nr, 1.0);
  if (res == 1) {
    printf("VALID!\n Sequential Time: %.2f msec\n Parallel Time: %.2f msec\n Speedup = %.2f\n", elapsed_time_seq, elapsed_time_gpu, elapsed_time_seq/elapsed_time_gpu);
  }// end if res == 1
  else {
    printf("INVALID...\n");
    for (i=0; i<nr; i++) {
      // BUGFIX: use fabsf — integer abs() truncated the float difference
      if (fabsf(h_a[i]-t[i]) > 1.0f) {
        printf("i=%d, h_a[i]=%f, t[i]=%f\n", i, h_a[i], t[i]);
        break;
      }//end if
    }//end for i
  }//end else

  // destroy timing events (the original never released them)
  CUDA_SAFE_CALL(hipEventDestroy(start_event));
  CUDA_SAFE_CALL(hipEventDestroy(stop_event));
  //CPU free
  free(h_a);
  free(ptr);
  free(indices);
  free(data);
  free(b);
  free(t);
  //GPU free
  CUDA_SAFE_CALL(hipFree(d_a));
  CUDA_SAFE_CALL(hipFree(d_b));
  CUDA_SAFE_CALL(hipFree(d_c));
  CUDA_SAFE_CALL(hipFree(d_ptr));
  CUDA_SAFE_CALL(hipFree(d_indices));
}//end main
| b651698c13c7e5a39889f7794bf6ca9ddde10241.cu | /***
* Keri Anderson
* CS4230 Assignment 6
* Due Friday, Nov 22, 2013
*
*
* Parallelize Sparse Matrix Vector Mutliplication
* on a GPU using CUDA
*
* - Use lab1-x.eng.utah.edu, where x can be 1, 2, 3, 4 ...
* - The makefile provided should be used to compile
* the program. Use "make" command
* - To run: ./clean_fMRI
*
* You might have to set the LD_LIBRARY_PATH
*
* -export
* LD_LIBRARY_PATH = /usr/local/apps/cuda/3.2/cuda/lib64 (if using bash)
*
* or
*
* setenv LD_LIBRARY_PATH /usr/local/apps/cuda/3.2/cuda/lib64
*
* Expect speedup to be around 2
*
*
*/
#include <stdio.h>
#include <cutil.h>
extern int cudaMalloc();
/***********************************************************
*
* GLOBAL FUNCTIONS
*
**********************************************************/
// SpMV kernel: one thread computes one row of t = A*b, with A in CSR form
// (ptr = row offsets, indices = column indices, data = values). The global
// thread id is the row index, e.g. block 2 / thread 3 with 4-thread blocks
// handles row 2*4+3 = 11.
// NOTE(review): there is no "i < nr" bounds guard — the host must launch
// exactly nr threads; confirm before reusing with other launch shapes.
__global__ void sparse_GPU(float *data, float *t, float *b, int *ptr,
int *indices) {
//parallelize the outer loop: one row per thread
int i = blockIdx.x *blockDim.x + threadIdx.x;
int j;
// dot product of row i's nonzeros with the dense vector b
for (j = ptr[i]; j<ptr[i+1]; j++) {
t[i] = t[i] + data[j] * b[indices[j]];
}//end for j
}//end global sparse_GPU
/***********************************************************
*
* MAIN
*
**********************************************************/
main (int argc, char **argv) {
FILE *fp;
char line[1024];
int *ptr, *indices;
float *data, *b, *t;
float *d_a, *d_b, *d_c, *h_a;
int *d_ptr, *d_indices;
int i,j;
int n; // number of nonzero elements in data
int nr; // number of rows in matrix
int nc; // number of columns in matrix
// Open input file and read to end of comments
if (argc !=2) abort();
if ((fp = fopen(argv[1], "r")) == NULL) {
abort();
}
fgets(line, 128, fp);
while (line[0] == '%') {
fgets(line, 128, fp);
}
// Read number of rows (nr), number of columns (nc) and
// number of elements and allocate memory for ptr, indices, data, b and t.
sscanf(line,"%d %d %d\n", &nr, &nc, &n);
ptr = (int *) malloc ((nr+1)*sizeof(int)); // nr+1 because the last entry holds
indices = (int *) malloc(n*sizeof(int)); // info about where last row ends
data = (float *) malloc(n*sizeof(float));
b = (float *) malloc(nc*sizeof(float));
t = (float *) malloc(nr*sizeof(float));
// The matrix will be loaded in CSR Format
// ptr array: holds the 'row' information
// indices: holds the 'col' information
// data: holds the actual values.
// example: suppose we had sparse matrix
// 2 0 0 5
// 0 3 0 0
// 1 0 1 0
// 0 0 0 7
// Then our CSR representation would be
// ptr indices data note: for the ptr array, the first entry
// [0] [0] [2] represents row '0' and where in the
// [2] [3] [5] indices array the row 0 elements start.
// [3] [1] [3] The sencond entry represents row 1
// [5] [0] [1] and the value there is where the elements
// [6] [2] [1] of row 1 start: in this case, 2, since
// [3] [7] we had 2 elements in row 0.
// Read data in coordinate format and initialize sparse matrix
int lastr=0;
for (i=0; i<n; i++) {
int r;
fscanf(fp,"%d %d %f\n", &r, &(indices[i]), &(data[i]));
indices[i]--; // start numbering at 0
if (r!=lastr) {
ptr[r-1] = i;
lastr = r;
}
}
ptr[nr] = n;
// initialize t to 0 and b with random data
for (i=0; i<nr; i++) {
t[i] = 0.0;
}
for (i=0; i<nc; i++) {
b[i] = (float) rand()/1111111111;
}
// create CUDA event handles for timing purposes
cudaEvent_t start_event, stop_event;
float elapsed_time_seq, elapsed_time_gpu;
CUDA_SAFE_CALL( cudaEventCreate(&start_event) );
CUDA_SAFE_CALL( cudaEventCreate(&stop_event) );
cudaEventRecord(start_event, 0);
// MAIN COMPUTATION, SEQUENTIAL VERSION
for (i=0; i<nr; i++) {
for (j = ptr[i]; j<ptr[i+1]; j++) {
t[i] = t[i] + data[j] * b[indices[j]];
}
}
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
CUDA_SAFE_CALL( cudaEventElapsedTime(&elapsed_time_seq,start_event, stop_event) )
// The result from the GPU should be copied to h_a
// By convention, 'h_' is used to indiate memory
// allocated on the Host CPU
h_a = (float *) malloc(nr*sizeof(float));
CUDA_SAFE_CALL( cudaEventCreate(&start_event) );
CUDA_SAFE_CALL( cudaEventCreate(&stop_event) );
cudaEventRecord(start_event, 0);
// ToDo: cuda memory allocation and copy
// cudaMalloc creates space on the GPU, with
// a memory-mapped pointer on the CPU host
cudaMalloc((void**) &d_a, n*sizeof(float));
cudaMalloc((void**) &d_b, nc*sizeof(float)); //number of cols
cudaMalloc((void**) &d_c, nr*sizeof(float)); //number of rows
cudaMalloc((void**) &d_ptr, (nr+1)*sizeof(float));
cudaMalloc((void**) &d_indices, n*sizeof(float));
//now copy
cudaMemcpy(d_a, data, n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, nc*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_c, h_a, nr*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_ptr, ptr, (nr+1)*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_indices, indices, n*sizeof(float), cudaMemcpyHostToDevice);
// ToDo: declare grid and block dimensions
// We want each thread to perform a row's calculation.
// Essentially, each thread will be performing a
// vector x vector computation (or dot product).
// Each block has a certain amount of threads.
// We want: number of blocks * number of threads (in each block) = total number of rows
// Example: suppose we had 16 rows. If we create 4 blocks with 4 threads in
// each block, then 4*4 = 16 and we will have enough threads.
// Each block would be assigned 4 rows of data to compute.
// dim3 is a CUDA type
// we are parallelizing 1 loop, so we only need one dimension
// in our case nr/100 will divide nicely
// grids are usually 2 dimensional
dim3 gridDim((nr/100), 1); //pass in 1, 1, for the 1 and z dimension so they are not being used
dim3 blockDim(100, 1, 1); //dim3 gridDim.x = 100; another way to express the assignment
// ToDo: call the sparse_GPU cuda kernel (this is the thread code)
sparse_GPU<<<gridDim, blockDim>>>(d_a, d_c, d_b, d_ptr, d_indices);
// ToDo: copy back the result
// We only need to copy back the results
cudaMemcpy(h_a, d_c, nr*sizeof(float), cudaMemcpyDeviceToHost); //these are the results
cudaThreadSynchronize();
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
CUDA_SAFE_CALL( cudaEventElapsedTime(&elapsed_time_gpu,start_event, stop_event) )
CUTBoolean res = cutComparefe( h_a, t, nr, 1.0);
if (res == 1) {
printf("VALID!\n Sequential Time: %.2f msec\n Parallel Time: %.2f msec\n Speedup = %.2f\n", elapsed_time_seq, elapsed_time_gpu, elapsed_time_seq/elapsed_time_gpu);
}// end if res == 1
else {
printf("INVALID...\n");
for (i=0; i<nr; i++) {
if (abs(h_a[i]-t[i]) > 1.0) {
printf("i=%d, h_a[i]=%f, t[i]=%f\n", i, h_a[i], t[i]);
break;
}//end if
}//end for i
}//end else
//CPU free
free(h_a);
free(ptr);
free(indices);
free(data);
free(b);
free(t);
//GPU free
CUDA_SAFE_CALL(cudaFree(d_a));
CUDA_SAFE_CALL(cudaFree(d_b));
CUDA_SAFE_CALL(cudaFree(d_c));
CUDA_SAFE_CALL(cudaFree(d_ptr));
CUDA_SAFE_CALL(cudaFree(d_indices));
}//end main
|
66ad0fb2a4914e9934914e0c87ed3fd307dfb825.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cosmictiger/rockstar.hpp>
#include <cosmictiger/vector.hpp>
#include <cosmictiger/array.hpp>
// Warp-cooperative exclusive prefix sum of `index` across 32 lanes.
// On return, `index` holds the sum of the values contributed by lower lanes
// (exclusive scan) and the return value is the warp-wide total.
// Uses threadIdx.x as the lane id, so it assumes the block is a single
// warp (blockDim.x == warpSize) -- TODO confirm against the launch config.
__device__ int reduce_index(int& index) {
const auto tid = threadIdx.x;
// Hillis-Steele inclusive scan via up-shuffles.
for (int P = 1; P < warpSize; P *= 2) {
const auto tmp = __shfl_up_sync(0xFFFFFFFF, index, P);
if (tid >= P) {
index += tmp;
}
}
// The last lane now holds the total; broadcast it to every lane.
int count = __shfl_sync(0xFFFFFFFF, index, warpSize - 1);
// Shift right by one lane to turn the inclusive scan into an exclusive one.
auto tmp = __shfl_up_sync(0xFFFFFFFF, index, 1);
if (tid >= 1) {
index = tmp;
} else {
index = 0;
}
return count;
}
// Softened gravitational potential for each particle in this block's slice of
// `parts`, computed by walking the halo tree with an opening angle of 0.7
// (thetainv2). Well-separated nodes (and leaves) go to a monopole interaction
// list; close nodes are opened and their children revisited on the next level.
// Assumes one warp per block: threadIdx.x is used as the lane id by the
// warp-scan helper reduce_index.
// NOTE(review): vector<int> is a project container; resize()/swap() are
// assumed to be warp-cooperative and safe when executed by every thread --
// confirm against its implementation.
__global__ void rockstar_bh_kernel(halo_part* parts, halo_tree* trees, int nparts, float h) {
    const auto tid = threadIdx.x;
    const auto bid = blockIdx.x;
    // BUGFIX: the particle range is split across *blocks*, so the divisor must
    // be the grid size; the original read blockDim.x here, which sends blocks
    // past blockDim.x out of range whenever gridDim.x != blockDim.x.
    const auto gsz = gridDim.x;
    const float hinv = 1.0f / h;
    const float h2 = sqr(h);
    const float thetainv2 = sqr(1.f / 0.7f);
    const int begin = size_t(bid) * size_t(nparts) / size_t(gsz);
    const int end = size_t(bid + 1) * size_t(nparts) / size_t(gsz);
    vector<int> current_list;
    vector<int> next_list;
    vector<int> mono_list;
    for (int pi = begin; pi < end; pi++) {
        // Restart the traversal at the root for every particle.
        current_list.resize(1);
        current_list[0] = 0;
        next_list.resize(0);
        mono_list.resize(0);
        __syncthreads();
        const auto& x = parts[pi].x;
        // Self-interaction term of the softened kernel.
        parts[pi].phi = -PHI0 * hinv;
        while (current_list.size()) {
            for (int ci = tid; ci < current_list.size(); ci += warpSize) {
                const auto& tree_node = trees[current_list[ci]];
                const auto dx = tree_node.x[0] - x[0];
                const auto dy = tree_node.x[1] - x[1];
                const auto dz = tree_node.x[2] - x[2];
                const auto d2 = fmaf(dx, dx, fmaf(dy, dy, sqr(dz)));
                int near, far, index, count;
                // Opening criterion: "far" when well separated or a leaf;
                // otherwise open the node and visit its children.
                if ((sqr(tree_node.radius) * thetainv2 < d2) || (tree_node.children[0] == -1)) {
                    far = 1;
                    near = 0;
                } else {
                    far = 0;
                    near = 1;
                }
                // Warp-exclusive scan gives each near lane a unique slot.
                index = near;
                count = reduce_index(index);
                auto offset = next_list.size();
                next_list.resize(NCHILD * count + offset, vectorPOD);
                __syncthreads();
                if (near) {
                    next_list[offset + NCHILD * index + LEFT] = tree_node.children[LEFT];
                    next_list[offset + NCHILD * index + RIGHT] = tree_node.children[RIGHT];
                }
                index = far;
                count = reduce_index(index);
                offset = mono_list.size();
                mono_list.resize(count + offset, vectorPOD);
                __syncthreads();
                // BUGFIX: mono_list is sized by the count of *far* lanes and
                // `index` is the exclusive scan over the far flags, so the
                // guard must be `far`. The original tested `near`, which wrote
                // out of bounds and dropped every far interaction.
                if (far) {
                    mono_list[offset + index] = current_list[ci];
                }
            }
            // Descend one level. NOTE(review): next_list/mono_list are never
            // cleared inside this while loop, so unless this vector's swap()
            // empties its argument, stale entries get revisited -- confirm
            // against the vector implementation.
            current_list.swap(next_list);
            float phi = 0.f;
            for (int j = tid; j < mono_list.size(); j += warpSize) {
                const auto& tree_node = trees[mono_list[j]];
                const float dx = x[0] - tree_node.x[0];
                const float dy = x[1] - tree_node.x[1];
                const float dz = x[2] - tree_node.x[2];
                const float r2 = fmaf(dx, dx, fmaf(dy, dy, sqr(dz)));
                float rinv;
                if (r2 >= h2) {
                    // Outside the softening length: plain 1/r.
                    rinv = rsqrtf(r2);
                } else {
                    // Inside: polynomial softened kernel in q = r/h.
                    const float q = sqrtf(r2) * hinv;
                    const float q2 = sqr(q);
                    rinv = -5.0f / 16.0f;
                    rinv = fmaf(rinv, q2, 21.0f / 16.0f);
                    rinv = fmaf(rinv, q2, -35.0f / 16.0f);
                    rinv = fmaf(rinv, q2, 35.0f / 16.0f);
                    rinv *= hinv;
                }
                phi -= rinv;
            }
            // Warp reduction of the partial potentials; lane 0 commits the sum.
            for (int P = warpSize / 2; P >= 1; P /= 2) {
                phi += __shfl_down_sync(0xffffffff, phi, P);
            }
            if (tid == 0) {
                parts[pi].phi += phi;
            }
        }
    }
}
| 66ad0fb2a4914e9934914e0c87ed3fd307dfb825.cu | #include <cosmictiger/rockstar.hpp>
#include <cosmictiger/vector.hpp>
#include <cosmictiger/array.hpp>
// Warp-cooperative exclusive prefix sum of `index` across 32 lanes.
// On return, `index` holds the sum of the values contributed by lower lanes
// (exclusive scan) and the return value is the warp-wide total.
// Uses threadIdx.x as the lane id, so it assumes the block is a single
// warp (blockDim.x == warpSize) -- TODO confirm against the launch config.
__device__ int reduce_index(int& index) {
const auto tid = threadIdx.x;
// Hillis-Steele inclusive scan via up-shuffles.
for (int P = 1; P < warpSize; P *= 2) {
const auto tmp = __shfl_up_sync(0xFFFFFFFF, index, P);
if (tid >= P) {
index += tmp;
}
}
// The last lane now holds the total; broadcast it to every lane.
int count = __shfl_sync(0xFFFFFFFF, index, warpSize - 1);
// Shift right by one lane to turn the inclusive scan into an exclusive one.
auto tmp = __shfl_up_sync(0xFFFFFFFF, index, 1);
if (tid >= 1) {
index = tmp;
} else {
index = 0;
}
return count;
}
// Softened gravitational potential for each particle in this block's slice of
// `parts`, computed by walking the halo tree with an opening angle of 0.7
// (thetainv2). Well-separated nodes (and leaves) go to a monopole interaction
// list; close nodes are opened and their children revisited on the next level.
// Assumes one warp per block: threadIdx.x is used as the lane id by the
// warp-scan helper reduce_index.
// NOTE(review): vector<int> is a project container; resize()/swap() are
// assumed to be warp-cooperative and safe when executed by every thread --
// confirm against its implementation.
__global__ void rockstar_bh_kernel(halo_part* parts, halo_tree* trees, int nparts, float h) {
    const auto tid = threadIdx.x;
    const auto bid = blockIdx.x;
    // BUGFIX: the particle range is split across *blocks*, so the divisor must
    // be the grid size; the original read blockDim.x here, which sends blocks
    // past blockDim.x out of range whenever gridDim.x != blockDim.x.
    const auto gsz = gridDim.x;
    const float hinv = 1.0f / h;
    const float h2 = sqr(h);
    const float thetainv2 = sqr(1.f / 0.7f);
    const int begin = size_t(bid) * size_t(nparts) / size_t(gsz);
    const int end = size_t(bid + 1) * size_t(nparts) / size_t(gsz);
    vector<int> current_list;
    vector<int> next_list;
    vector<int> mono_list;
    for (int pi = begin; pi < end; pi++) {
        // Restart the traversal at the root for every particle.
        current_list.resize(1);
        current_list[0] = 0;
        next_list.resize(0);
        mono_list.resize(0);
        __syncthreads();
        const auto& x = parts[pi].x;
        // Self-interaction term of the softened kernel.
        parts[pi].phi = -PHI0 * hinv;
        while (current_list.size()) {
            for (int ci = tid; ci < current_list.size(); ci += warpSize) {
                const auto& tree_node = trees[current_list[ci]];
                const auto dx = tree_node.x[0] - x[0];
                const auto dy = tree_node.x[1] - x[1];
                const auto dz = tree_node.x[2] - x[2];
                const auto d2 = fmaf(dx, dx, fmaf(dy, dy, sqr(dz)));
                int near, far, index, count;
                // Opening criterion: "far" when well separated or a leaf;
                // otherwise open the node and visit its children.
                if ((sqr(tree_node.radius) * thetainv2 < d2) || (tree_node.children[0] == -1)) {
                    far = 1;
                    near = 0;
                } else {
                    far = 0;
                    near = 1;
                }
                // Warp-exclusive scan gives each near lane a unique slot.
                index = near;
                count = reduce_index(index);
                auto offset = next_list.size();
                next_list.resize(NCHILD * count + offset, vectorPOD);
                __syncthreads();
                if (near) {
                    next_list[offset + NCHILD * index + LEFT] = tree_node.children[LEFT];
                    next_list[offset + NCHILD * index + RIGHT] = tree_node.children[RIGHT];
                }
                index = far;
                count = reduce_index(index);
                offset = mono_list.size();
                mono_list.resize(count + offset, vectorPOD);
                __syncthreads();
                // BUGFIX: mono_list is sized by the count of *far* lanes and
                // `index` is the exclusive scan over the far flags, so the
                // guard must be `far`. The original tested `near`, which wrote
                // out of bounds and dropped every far interaction.
                if (far) {
                    mono_list[offset + index] = current_list[ci];
                }
            }
            // Descend one level. NOTE(review): next_list/mono_list are never
            // cleared inside this while loop, so unless this vector's swap()
            // empties its argument, stale entries get revisited -- confirm
            // against the vector implementation.
            current_list.swap(next_list);
            float phi = 0.f;
            for (int j = tid; j < mono_list.size(); j += warpSize) {
                const auto& tree_node = trees[mono_list[j]];
                const float dx = x[0] - tree_node.x[0];
                const float dy = x[1] - tree_node.x[1];
                const float dz = x[2] - tree_node.x[2];
                const float r2 = fmaf(dx, dx, fmaf(dy, dy, sqr(dz)));
                float rinv;
                if (r2 >= h2) {
                    // Outside the softening length: plain 1/r.
                    rinv = rsqrtf(r2);
                } else {
                    // Inside: polynomial softened kernel in q = r/h.
                    const float q = sqrtf(r2) * hinv;
                    const float q2 = sqr(q);
                    rinv = -5.0f / 16.0f;
                    rinv = fmaf(rinv, q2, 21.0f / 16.0f);
                    rinv = fmaf(rinv, q2, -35.0f / 16.0f);
                    rinv = fmaf(rinv, q2, 35.0f / 16.0f);
                    rinv *= hinv;
                }
                phi -= rinv;
            }
            // Warp reduction of the partial potentials; lane 0 commits the sum.
            for (int P = warpSize / 2; P >= 1; P /= 2) {
                phi += __shfl_down_sync(0xffffffff, phi, P);
            }
            if (tid == 0) {
                parts[pi].phi += phi;
            }
        }
    }
}
|
99b2aff75b8050bb0f0461c35a9533cb3a3ac279.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
using namespace std;
// Naive square matrix multiply P = M * N (row-major): one block per output
// row, one thread per output column. `width` is a device pointer holding the
// matrix dimension.
__global__ void matmul(int* M, int* N, int* P, int* width)
{
    const int w = *width;
    const int row = blockIdx.x;   // block index selects the row
    const int col = threadIdx.x;  // thread index selects the column

    // Accumulate the dot product of row `row` of M with column `col` of N in
    // a register, then store it once (A[r][c] maps to A[r*w + c]).
    int acc = 0;
    for (int k = 0; k < w; ++k) {
        acc += M[row * w + k] * N[k * w + col];
    }
    P[row * w + col] = acc;
}
//d_xyz in my code means xyz is on the device
// Host driver: multiplies two fixed 4x4 matrices on the GPU (hip) and prints
// the product. Launch shape: one block per row, one thread per column.
int main()
{
    const int width = 4;  // dimension of the square matrices
    int* d_width;
    hipMalloc(&d_width, sizeof(int));
    // Copy the dimension to the device (the kernel reads it by pointer).
    hipMemcpy(d_width, &width, sizeof(int), hipMemcpyHostToDevice);

    // Input matrices.
    int M[width][width] = {{5,7,9,10},
                           {2,3,3,8},
                           {8,10,2,3},
                           {3,3,4,8}
                          };
    int N[width][width] = {{3,10,12,18},
                           {12,1,4,9},
                           {9,10,12,2},
                           {3,12,4,10}};
    // Output matrix on the host; filled entirely by the kernel result.
    int P[width][width];

    int *d_M, *d_N, *d_P;
    hipMalloc(&d_M, sizeof(int)*width*width);
    hipMalloc(&d_N, sizeof(int)*width*width);
    hipMalloc(&d_P, sizeof(int)*width*width);
    // Copy the inputs to the GPU. (The original also copied the
    // *uninitialized* P host array to d_P, which was pointless: the kernel
    // overwrites every element.)
    hipMemcpy(d_M, M, sizeof(int)*width*width, hipMemcpyHostToDevice);
    hipMemcpy(d_N, N, sizeof(int)*width*width, hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( matmul), dim3(width), dim3(width), 0, 0, d_M, d_N, d_P, d_width);
    // Blocking copy: the kernel has finished once P is filled.
    hipMemcpy(P, d_P, sizeof(int)*width*width, hipMemcpyDeviceToHost);

    // Print the result ("\n" was previously written as "\\n", which printed a
    // literal backslash-n instead of a newline).
    cout << "The output is:\n";
    for (int i = 0; i < width; i++)
    {
        for (int j = 0; j < width; j++)
        {
            cout << P[i][j] << " ";
        }
        cout << "\n";
    }

    hipFree(d_M);
    hipFree(d_N);
    hipFree(d_P);
    hipFree(d_width);  // was leaked in the original
    return 0;
}
| 99b2aff75b8050bb0f0461c35a9533cb3a3ac279.cu | #include <iostream>
using namespace std;
// Naive square matrix multiply P = M * N (row-major): one block per output
// row, one thread per output column. `width` is a device pointer holding the
// matrix dimension.
__global__ void matmul(int* M, int* N, int* P, int* width)
{
    const int w = *width;
    const int row = blockIdx.x;   // block index selects the row
    const int col = threadIdx.x;  // thread index selects the column

    // Accumulate the dot product of row `row` of M with column `col` of N in
    // a register, then store it once (A[r][c] maps to A[r*w + c]).
    int acc = 0;
    for (int k = 0; k < w; ++k) {
        acc += M[row * w + k] * N[k * w + col];
    }
    P[row * w + col] = acc;
}
//d_xyz in my code means xyz is on the device
// Host driver: multiplies two fixed 4x4 matrices on the GPU and prints the
// product. Launch shape: one block per row, one thread per column.
int main()
{
    const int width = 4;  // dimension of the square matrices
    int* d_width;
    cudaMalloc(&d_width, sizeof(int));
    // Copy the dimension to the device (the kernel reads it by pointer).
    cudaMemcpy(d_width, &width, sizeof(int), cudaMemcpyHostToDevice);

    // Input matrices.
    int M[width][width] = {{5,7,9,10},
                           {2,3,3,8},
                           {8,10,2,3},
                           {3,3,4,8}
                          };
    int N[width][width] = {{3,10,12,18},
                           {12,1,4,9},
                           {9,10,12,2},
                           {3,12,4,10}};
    // Output matrix on the host; filled entirely by the kernel result.
    int P[width][width];

    int *d_M, *d_N, *d_P;
    cudaMalloc(&d_M, sizeof(int)*width*width);
    cudaMalloc(&d_N, sizeof(int)*width*width);
    cudaMalloc(&d_P, sizeof(int)*width*width);
    // Copy the inputs to the GPU. (The original also copied the
    // *uninitialized* P host array to d_P, which was pointless: the kernel
    // overwrites every element.)
    cudaMemcpy(d_M, M, sizeof(int)*width*width, cudaMemcpyHostToDevice);
    cudaMemcpy(d_N, N, sizeof(int)*width*width, cudaMemcpyHostToDevice);

    matmul<<<width, width>>>(d_M, d_N, d_P, d_width);
    // Blocking copy: the kernel has finished once P is filled.
    cudaMemcpy(P, d_P, sizeof(int)*width*width, cudaMemcpyDeviceToHost);

    // Print the result ("\n" was previously written as "\\n", which printed a
    // literal backslash-n instead of a newline).
    cout << "The output is:\n";
    for (int i = 0; i < width; i++)
    {
        for (int j = 0; j < width; j++)
        {
            cout << P[i][j] << " ";
        }
        cout << "\n";
    }

    cudaFree(d_M);
    cudaFree(d_N);
    cudaFree(d_P);
    cudaFree(d_width);  // was leaked in the original
    return 0;
}
|
84aa767a6934fad851564e35d6b0dabd444b224f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpuTranspose.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark harness (auto-generated style): sweeps preset matrix sizes and
// block shapes, timing 1000 launches of gpuTranspose for each configuration.
// argv[1] selects how many rows of matrices_ to sweep.
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // XSIZE*YSIZE float elements per buffer. BUGFIX: the original
            // allocated only XSIZE*YSIZE *bytes* -- a quarter of the size the
            // float buffers need for n = XSIZE*YSIZE elements.
            float *a = NULL;
            hipMalloc(&a, XSIZE * YSIZE * sizeof(float));
            float *b = NULL;
            hipMalloc(&b, XSIZE * YSIZE * sizeof(float));
            // m/n are forwarded to gpuTranspose; presumably matrix dims --
            // TODO confirm against gpuTranspose's signature.
            int m = 2;
            int n = XSIZE * YSIZE;
            // Round the grid up so it covers a non-divisible problem size.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0)
            {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // no-op call that forces context creation before timing
            // Warm-up launch, then a 10-iteration spin-up.
            hipLaunchKernelGGL(( gpuTranspose), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, m, n);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( gpuTranspose), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, m, n);
            }
            // Timed section: 1000 launches.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( gpuTranspose), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, m, n);
            }
            // Launches are asynchronous: wait for the batch so the interval
            // measures execution, not just enqueue time (missing originally).
            hipDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Release per-configuration buffers (originally leaked on every
            // iteration of the sweep).
            hipFree(a);
            hipFree(b);
        }
}} | 84aa767a6934fad851564e35d6b0dabd444b224f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpuTranspose.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark harness (auto-generated style): sweeps preset matrix sizes and
// block shapes, timing 1000 launches of gpuTranspose for each configuration.
// argv[1] selects how many rows of matrices_ to sweep.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // XSIZE*YSIZE float elements per buffer. BUGFIX: the original
            // allocated only XSIZE*YSIZE *bytes* -- a quarter of the size the
            // float buffers need for n = XSIZE*YSIZE elements.
            float *a = NULL;
            cudaMalloc(&a, XSIZE * YSIZE * sizeof(float));
            float *b = NULL;
            cudaMalloc(&b, XSIZE * YSIZE * sizeof(float));
            // m/n are forwarded to gpuTranspose; presumably matrix dims --
            // TODO confirm against gpuTranspose's signature.
            int m = 2;
            int n = XSIZE * YSIZE;
            // Round the grid up so it covers a non-divisible problem size.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0)
            {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // no-op call that forces context creation before timing
            // Warm-up launch, then a 10-iteration spin-up.
            gpuTranspose<<<gridBlock, threadBlock>>>(a, b, m, n);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                gpuTranspose<<<gridBlock, threadBlock>>>(a, b, m, n);
            }
            // Timed section: 1000 launches.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                gpuTranspose<<<gridBlock, threadBlock>>>(a, b, m, n);
            }
            // Launches are asynchronous: wait for the batch so the interval
            // measures execution, not just enqueue time (missing originally).
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Release per-configuration buffers (originally leaked on every
            // iteration of the sweep).
            cudaFree(a);
            cudaFree(b);
        }
}} |
1e48fa0d24112088bf1dcea29cdb51a392321a77.hip | // !!! This is a file automatically generated by hipify!!!
%%cu
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
using namespace std;
#define NF 100
#define NI (1<<24)
#define NO (NI - NF + 1)
__constant__ float d_flt[NF];
__device__ float d_gflt[NF];
// ############### COMMON ###############
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, hipGetErrorString(error)); \
exit(1); \
} \
}
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
inline double seconds()
{
    struct timeval tp;
    // The timezone argument is obsolete; NULL is the standard way to skip it.
    // (The original captured the return value in an unused local.)
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}
// Fill `arr` with `size` pseudo-random floats in [0, 1].
// The generator is reseeded with 0 on every call, so output is reproducible.
void initialArray(float *arr, int size)
{
    srand(0);
    float *stop = arr + size;
    for (float *slot = arr; slot != stop; ++slot)
    {
        *slot = (float)rand() / RAND_MAX;
    }
}
// ############### Host(CPU) ###############
// CPU reference for the 1-D "valid" convolution/correlation:
// out[i] = sum over j < NF of in[i+j] * flt[j], for all NO output positions.
void convOnHost(float *in, float *flt, float *out){
    for (int pos = 0; pos < NO; pos++){
        const float *window = in + pos;  // NF-wide input window at this output
        float acc = 0;
        for (int tap = 0; tap < NF; tap++){
            acc += window[tap] * flt[tap];
        }
        out[pos] = acc;
    }
}
// ############### Device(GPU) ###############
// GPU convolution variant 1: the running sum is accumulated directly in the
// output array in global memory, and the filter taps are read from the
// __device__ (global-memory) symbol d_gflt. One thread per output element;
// this variant deliberately does all its work through global memory so the
// three kernels can be compared.
__global__ void convUseGMEM(float *d_in, float *d_out){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < NO){
d_out[i] = 0; // use all global memory
for (int j = 0; j < NF; j++){
d_out[i] += d_gflt[j] * d_in[i + j]; // d_gflt for GMEM
}
}
}
// GPU convolution variant 2: same as convUseGMEM, but the filter taps come
// from the __constant__-memory symbol d_flt (all threads in a warp read the
// same tap each iteration, the broadcast case constant memory is built for).
// The running sum is still accumulated in global memory.
__global__ void convUseCMEM(float *d_in, float *d_out){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < NO){
d_out[i] = 0;
for (int j = 0; j < NF; j++){
d_out[i] += d_flt[j] * d_in[i + j]; // d_flt is the __constant__ copy of the filter
}
}
}
// GPU convolution variant 3: accumulate in a register (local variable s) and
// read the filter from the global-memory symbol d_gflt; only one global store
// per output element instead of one read-modify-write per tap.
__global__ void convUseRMEMAndGMEM(float *d_in, float *d_out){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < NO){
float s = 0; // s for RMEM
for (int j = 0; j < NF; j++){
s += d_gflt[j] * d_in[i + j]; // d_gflt for GMEM
}
d_out[i] = s;
}
}
// Element-wise comparison of the CPU and GPU results within `epsilon`.
// Prints the first mismatch and returns false; returns true when all match.
bool checkResult(float *hostRef, float *gpuRef, unsigned int size)
{
    const double epsilon = 1.0E-3;
    for (unsigned int i = 0; i < size; i++)
    {
        // Compute |host - gpu| by hand: the original used abs(), which can
        // resolve to the *integer* overload and truncate sub-1.0 differences
        // to 0, silently passing mismatched results.
        double diff = (double)hostRef[i] - (double)gpuRef[i];
        if (diff < 0.0) diff = -diff;
        if (diff > epsilon)
        {
            printf("Arrays do not match!\n");
            printf("host %5.2f gpu %5.2f at %d\n", hostRef[i], gpuRef[i], (int)i);
            return false;
        }
    }
    return true;
}
// Driver: builds the input and filter, runs the CPU reference, then times and
// validates the three GPU convolution variants (GMEM / CMEM / RMEM+GMEM).
int main()
{
    // Host buffers: input signal, filter taps, CPU reference, GPU result.
    float *in, *flt, *hostRes, *deviceRes;
    in = (float *) malloc(NI * sizeof(float));
    flt = (float *) malloc(NF * sizeof(float));
    hostRes = (float *) malloc(NO * sizeof(float));
    deviceRes = (float *) malloc(NO * sizeof(float));
    initialArray(in, NI);
    initialArray(flt, NF);

    // Device buffers; the filter is copied into both the __device__ (d_gflt)
    // and __constant__ (d_flt) symbols used by the kernel variants.
    float *d_in, *d_out;
    CHECK(hipMalloc(&d_in, NI * sizeof(float)));
    CHECK(hipMalloc(&d_out, NO * sizeof(float)));
    CHECK(hipMemcpy(d_in, in, NI * sizeof(float), hipMemcpyHostToDevice));
    CHECK(hipMemcpyToSymbol(d_gflt, flt, NF * sizeof(float)));
    CHECK(hipMemcpyToSymbol(d_flt, flt, NF * sizeof(float)));

    dim3 blockSize(512);
    dim3 gridSize((NO - 1) / blockSize.x + 1);  // ceil(NO / blockSize.x)
    double iStart, iElaps;

    // ##############################################
    // CPU reference.
    iStart = seconds();
    convOnHost(in, flt, hostRes);
    iElaps = seconds() - iStart;
    printf("convOnHost : %f sec\n", iElaps);

    // ##############################################
    // GMEM variant. Kernel launches are asynchronous, so the device must be
    // synchronized before reading the clock -- otherwise only the launch
    // overhead is measured (the original omitted this on all three variants).
    iStart = seconds();
    hipLaunchKernelGGL(( convUseGMEM), dim3(gridSize), dim3(blockSize), 0, 0, d_in, d_out);
    CHECK(hipDeviceSynchronize());
    iElaps = seconds() - iStart;
    printf("convUseGMEM : %f sec\n", iElaps);
    CHECK(hipMemcpy(deviceRes, d_out, NO * sizeof(float), hipMemcpyDeviceToHost));
    checkResult(hostRes, deviceRes, NO);

    // ##############################################
    // CMEM variant.
    iStart = seconds();
    hipLaunchKernelGGL(( convUseCMEM), dim3(gridSize), dim3(blockSize), 0, 0, d_in, d_out);
    CHECK(hipDeviceSynchronize());
    iElaps = seconds() - iStart;
    printf("convUseCMEM : %f sec\n", iElaps);
    CHECK(hipMemcpy(deviceRes, d_out, NO * sizeof(float), hipMemcpyDeviceToHost));
    checkResult(hostRes, deviceRes, NO);

    // ##############################################
    // Register + GMEM variant.
    iStart = seconds();
    hipLaunchKernelGGL(( convUseRMEMAndGMEM), dim3(gridSize), dim3(blockSize), 0, 0, d_in, d_out);
    CHECK(hipDeviceSynchronize());
    iElaps = seconds() - iStart;
    printf("convUseRMEMAndGMEM : %f sec\n", iElaps);
    CHECK(hipMemcpy(deviceRes, d_out, NO * sizeof(float), hipMemcpyDeviceToHost));
    checkResult(hostRes, deviceRes, NO);

    // Free device and host memory.
    CHECK(hipFree(d_in));
    CHECK(hipFree(d_out));
    free(in);
    free(flt);
    free(hostRes);
    free(deviceRes);
    return 0;
} | 1e48fa0d24112088bf1dcea29cdb51a392321a77.cu | %%cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <sys/time.h>
using namespace std;
#define NF 100
#define NI (1<<24)
#define NO (NI - NF + 1)
__constant__ float d_flt[NF];
__device__ float d_gflt[NF];
// ############### COMMON ###############
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
inline double seconds()
{
    struct timeval tp;
    // The timezone argument is obsolete; NULL is the standard way to skip it.
    // (The original captured the return value in an unused local.)
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}
// Fill `arr` with `size` pseudo-random floats in [0, 1].
// The generator is reseeded with 0 on every call, so output is reproducible.
void initialArray(float *arr, int size)
{
    srand(0);
    float *stop = arr + size;
    for (float *slot = arr; slot != stop; ++slot)
    {
        *slot = (float)rand() / RAND_MAX;
    }
}
// ############### Host(CPU) ###############
// CPU reference for the 1-D "valid" convolution/correlation:
// out[i] = sum over j < NF of in[i+j] * flt[j], for all NO output positions.
void convOnHost(float *in, float *flt, float *out){
    for (int pos = 0; pos < NO; pos++){
        const float *window = in + pos;  // NF-wide input window at this output
        float acc = 0;
        for (int tap = 0; tap < NF; tap++){
            acc += window[tap] * flt[tap];
        }
        out[pos] = acc;
    }
}
// ############### Device(GPU) ###############
// GPU convolution variant 1: the running sum is accumulated directly in the
// output array in global memory, and the filter taps are read from the
// __device__ (global-memory) symbol d_gflt. One thread per output element;
// this variant deliberately does all its work through global memory so the
// three kernels can be compared.
__global__ void convUseGMEM(float *d_in, float *d_out){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < NO){
d_out[i] = 0; // use all global memory
for (int j = 0; j < NF; j++){
d_out[i] += d_gflt[j] * d_in[i + j]; // d_gflt for GMEM
}
}
}
// GPU convolution variant 2: same as convUseGMEM, but the filter taps come
// from the __constant__-memory symbol d_flt (all threads in a warp read the
// same tap each iteration, the broadcast case constant memory is built for).
// The running sum is still accumulated in global memory.
__global__ void convUseCMEM(float *d_in, float *d_out){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < NO){
d_out[i] = 0;
for (int j = 0; j < NF; j++){
d_out[i] += d_flt[j] * d_in[i + j]; // d_flt is the __constant__ copy of the filter
}
}
}
// GPU convolution variant 3: accumulate in a register (local variable s) and
// read the filter from the global-memory symbol d_gflt; only one global store
// per output element instead of one read-modify-write per tap.
__global__ void convUseRMEMAndGMEM(float *d_in, float *d_out){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < NO){
float s = 0; // s for RMEM
for (int j = 0; j < NF; j++){
s += d_gflt[j] * d_in[i + j]; // d_gflt for GMEM
}
d_out[i] = s;
}
}
bool checkResult(float *hostRef, float *gpuRef, unsigned int size)
{
double epsilon = 1.0E-3;
bool isTrue = 1;
for (int i = 0; i < size; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
printf("Arrays do not match!\n");
printf("host %5.2f gpu %5.2f at %d\n", hostRef[i], gpuRef[i], i);
isTrue = 0;
return isTrue;
}
}
return isTrue;
}
// Driver: builds the input and filter, runs the CPU reference, then times and
// validates the three GPU convolution variants (GMEM / CMEM / RMEM+GMEM).
int main()
{
    // Host buffers: input signal, filter taps, CPU reference, GPU result.
    float *in, *flt, *hostRes, *deviceRes;
    in = (float *) malloc(NI * sizeof(float));
    flt = (float *) malloc(NF * sizeof(float));
    hostRes = (float *) malloc(NO * sizeof(float));
    deviceRes = (float *) malloc(NO * sizeof(float));
    initialArray(in, NI);
    initialArray(flt, NF);

    // Device buffers; the filter is copied into both the __device__ (d_gflt)
    // and __constant__ (d_flt) symbols used by the kernel variants.
    float *d_in, *d_out;
    CHECK(cudaMalloc(&d_in, NI * sizeof(float)));
    CHECK(cudaMalloc(&d_out, NO * sizeof(float)));
    CHECK(cudaMemcpy(d_in, in, NI * sizeof(float), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpyToSymbol(d_gflt, flt, NF * sizeof(float)));
    CHECK(cudaMemcpyToSymbol(d_flt, flt, NF * sizeof(float)));

    dim3 blockSize(512);
    dim3 gridSize((NO - 1) / blockSize.x + 1);  // ceil(NO / blockSize.x)
    double iStart, iElaps;

    // ##############################################
    // CPU reference.
    iStart = seconds();
    convOnHost(in, flt, hostRes);
    iElaps = seconds() - iStart;
    printf("convOnHost : %f sec\n", iElaps);

    // ##############################################
    // GMEM variant. Kernel launches are asynchronous, so the device must be
    // synchronized before reading the clock -- otherwise only the launch
    // overhead is measured (the original omitted this on all three variants).
    iStart = seconds();
    convUseGMEM<<<gridSize, blockSize>>>(d_in, d_out);
    CHECK(cudaDeviceSynchronize());
    iElaps = seconds() - iStart;
    printf("convUseGMEM : %f sec\n", iElaps);
    CHECK(cudaMemcpy(deviceRes, d_out, NO * sizeof(float), cudaMemcpyDeviceToHost));
    checkResult(hostRes, deviceRes, NO);

    // ##############################################
    // CMEM variant.
    iStart = seconds();
    convUseCMEM<<<gridSize, blockSize>>>(d_in, d_out);
    CHECK(cudaDeviceSynchronize());
    iElaps = seconds() - iStart;
    printf("convUseCMEM : %f sec\n", iElaps);
    CHECK(cudaMemcpy(deviceRes, d_out, NO * sizeof(float), cudaMemcpyDeviceToHost));
    checkResult(hostRes, deviceRes, NO);

    // ##############################################
    // Register + GMEM variant.
    iStart = seconds();
    convUseRMEMAndGMEM<<<gridSize, blockSize>>>(d_in, d_out);
    CHECK(cudaDeviceSynchronize());
    iElaps = seconds() - iStart;
    printf("convUseRMEMAndGMEM : %f sec\n", iElaps);
    CHECK(cudaMemcpy(deviceRes, d_out, NO * sizeof(float), cudaMemcpyDeviceToHost));
    checkResult(hostRes, deviceRes, NO);

    // Free device and host memory.
    CHECK(cudaFree(d_in));
    CHECK(cudaFree(d_out));
    free(in);
    free(flt);
    free(hostRes);
    free(deviceRes);
    return 0;
} |
86926e14c4fe8aa7030ae9cd61edd90a73ac9636.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include <ops/specials_cuda.h>
//////////////////////////////////////////////////////////////////////////
// Odd-even transposition (brick) sort of every TAD (tensor-along-dimension)
// of x, with the matching y values permuted alongside (key/value sort).
// X is the key type (compared), Y the value type. One block sorts one TAD at
// a time (grid-stride over TADs); within a block, threads compare/swap
// disjoint adjacent pairs, alternating odd and even phases for xTadLength
// passes, which guarantees the TAD is fully sorted.
template <typename X, typename Y>
__global__ void execOesTadKernelKey(void *vx, Nd4jLong const* xShapeInfo,
void *vy, Nd4jLong const* yShapeInfo,
int *dimension, int dimensionLength,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets,
bool descending) {
auto x = static_cast<X*>(vx);
auto y = static_cast<Y*>(vy);
// Shared copies of the TAD geometry, computed once by thread 0.
__shared__ int xLength;
__shared__ int xTadLength;
__shared__ int numTads;
if (threadIdx.x == 0) {
xLength = shape::length(xShapeInfo);
xTadLength = shape::length(tadShapeInfo);
numTads = xLength / xTadLength;
}
__syncthreads();
// Grid-stride over TADs: block b sorts TADs b, b+gridDim.x, ...
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
auto dx = x + tadOffsets[r];
auto dy = y + tadOffsets[r];
// this is general loop, we go uncached
int iterations = xTadLength;
for (int i = 0; i < iterations; i++) {
if (i % 2 == 0) {
// Even phase: compare pairs (2t, 2t+1).
for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) {
auto top = 2 * tid + 1;
if (top < xTadLength) {
auto t0 = shape::getIndexOffset(top - 1, tadShapeInfo);
auto t1 = shape::getIndexOffset(top, tadShapeInfo);
// Swap keys (and their paired values) when out of order.
if (!descending == (dx[t0] > dx[t1])) {
X dt0 = dx[t0];
dx[t0] = dx[t1];
dx[t1] = dt0;
Y dy0 = dy[t0];
dy[t0] = dy[t1];
dy[t1] = dy0;
}
}
}
} else {
// Odd phase: compare pairs (2t+1, 2t+2).
for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) {
auto top = 2 * tid + 2;
if (top < xTadLength) {
auto t0 = shape::getIndexOffset(top - 1, tadShapeInfo);
auto t1 = shape::getIndexOffset(top, tadShapeInfo);
if (!descending == (dx[t0] > dx[t1])) {
X dt0 = dx[t0];
dx[t0] = dx[t1];
dx[t1] = dt0;
Y dy0 = dy[t0];
dy[t0] = dy[t1];
dy[t1] = dy0;
}
}
}
}
// All pairs of this phase must finish before the next phase starts.
__syncthreads();
}
}
}
//////////////////////////////////////////////////////////////////////////
// Odd-even transposition (brick) sort of every TAD of x (keys only).
// One block sorts one TAD at a time (grid-stride over TADs). When the TAD
// fits into the block's dynamic shared memory budget (32 KB), the TAD is
// staged into shared memory, sorted there with direct linear indexing, and
// written back; otherwise it is sorted in place in global memory using
// shape-aware offsets.
template<typename T>
__global__ void execOesTadKernel(void *vx, Nd4jLong const* xShapeInfo,
int *dimension, int dimensionLength,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets,
bool descending) {
auto x = static_cast<T*>(vx);
// Shared-memory budget (bytes) used to decide whether a TAD can be cached;
// must match the dynamic shared memory passed at launch time.
const int sharedSize = 32768;
__shared__ int xLength;
__shared__ int xTadLength;
__shared__ int numTads;
__shared__ T *shmem;
__shared__ bool cached;
if (threadIdx.x == 0) {
xLength = shape::length(xShapeInfo);
xTadLength = shape::length(tadShapeInfo);
numTads = xLength / xTadLength;
extern __shared__ unsigned char shrd[];
shmem = (T *) shrd;
cached = xTadLength <= (sharedSize / sizeof(T));
}
__syncthreads();
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
auto dx = x + tadOffsets[r];
// this is general loop, we go uncached
int iterations = xTadLength;
if (cached) {
// Stage the TAD into shared memory; afterwards dx points at the
// shared copy, so element i is simply dx[i].
for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) {
auto t0 = shape::getIndexOffset(tid, tadShapeInfo);
shmem[tid] = dx[t0];
}
__syncthreads();
dx = shmem;
}
for (int i = 0; i < iterations; i++) {
if (i % 2 == 0) {
// Even phase: compare pairs (2t, 2t+1).
for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) {
auto top = 2 * tid + 1;
if (top < xTadLength) {
auto t0 = cached ? top - 1 : shape::getIndexOffset(top - 1, tadShapeInfo);
auto t1 = cached ? top : shape::getIndexOffset(top, tadShapeInfo);
if (!descending == (dx[t0] > dx[t1])) {
T dt0 = dx[t0];
dx[t0] = dx[t1];
dx[t1] = dt0;
}
}
}
} else {
// Odd phase: compare pairs (2t+1, 2t+2).
for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) {
auto top = 2 * tid + 2;
if (top < xTadLength) {
auto t0 = cached ? top - 1 : shape::getIndexOffset(top - 1, tadShapeInfo);
auto t1 = cached ? top : shape::getIndexOffset(top, tadShapeInfo);
if (!descending == (dx[t0] > dx[t1])) {
T dt0 = dx[t0];
dx[t0] = dx[t1];
dx[t1] = dt0;
}
}
}
}
// All pairs of this phase must finish before the next phase starts.
__syncthreads();
}
if (cached) {
// Write the sorted shared-memory copy back to global memory.
dx = x + tadOffsets[r];
for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) {
auto t0 = shape::getIndexOffset(tid, tadShapeInfo);
dx[t0] = shmem[tid];
}
}
}
}
//////////////////////////////////////////////////////////////////////////
// Host-side launch wrapper for the keys-only TAD sort kernel.
// launchDims: x = grid size, y = block size, z = dynamic shared memory bytes
// (must cover the 32 KB cache budget assumed inside the kernel).
template<typename T>
__host__ void oesTadGeneric(dim3 &launchDims, hipStream_t *stream,
void *vx, Nd4jLong const* xShapeInfo,
int *dimension, int dimensionLength,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets,
bool descending) {
hipLaunchKernelGGL(( execOesTadKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending);
}
// Host-side launch wrapper for the key/value TAD sort kernel (x sorted,
// y permuted alongside). launchDims as in oesTadGeneric.
template <typename X, typename Y>
__host__ void oesTadGenericKey(dim3 &launchDims, hipStream_t *stream,
void *vx, Nd4jLong const* xShapeInfo,
void *vy, Nd4jLong const* yShapeInfo,
int *dimension, int dimensionLength,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets,
bool descending) {
hipLaunchKernelGGL(( execOesTadKernelKey<X,Y>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, xShapeInfo, vy, yShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending);
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT oesTadGeneric, (dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong const* xShapeInfo, int *dimension, int dimensionLength, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets, bool descending), LIBND4J_TYPES);
BUILD_DOUBLE_TEMPLATE(template void ND4J_EXPORT oesTadGenericKey, (dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong const* xShapeInfo, void *vy, Nd4jLong const* yShapeInfo, int *dimension, int dimensionLength, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets, bool descending), LIBND4J_TYPES, LIBND4J_TYPES);
| 86926e14c4fe8aa7030ae9cd61edd90a73ac9636.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include <ops/specials_cuda.h>
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Y>
__global__ void execOesTadKernelKey(void *vx, Nd4jLong const* xShapeInfo,
void *vy, Nd4jLong const* yShapeInfo,
int *dimension, int dimensionLength,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets,
bool descending) {
auto x = static_cast<X*>(vx);
auto y = static_cast<Y*>(vy);
__shared__ int xLength;
__shared__ int xTadLength;
__shared__ int numTads;
if (threadIdx.x == 0) {
xLength = shape::length(xShapeInfo);
xTadLength = shape::length(tadShapeInfo);
numTads = xLength / xTadLength;
}
__syncthreads();
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
auto dx = x + tadOffsets[r];
auto dy = y + tadOffsets[r];
// this is general loop, we go uncached
int iterations = xTadLength;
for (int i = 0; i < iterations; i++) {
if (i % 2 == 0) {
for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) {
auto top = 2 * tid + 1;
if (top < xTadLength) {
auto t0 = shape::getIndexOffset(top - 1, tadShapeInfo);
auto t1 = shape::getIndexOffset(top, tadShapeInfo);
if (!descending == (dx[t0] > dx[t1])) {
X dt0 = dx[t0];
dx[t0] = dx[t1];
dx[t1] = dt0;
Y dy0 = dy[t0];
dy[t0] = dy[t1];
dy[t1] = dy0;
}
}
}
} else {
for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) {
auto top = 2 * tid + 2;
if (top < xTadLength) {
auto t0 = shape::getIndexOffset(top - 1, tadShapeInfo);
auto t1 = shape::getIndexOffset(top, tadShapeInfo);
if (!descending == (dx[t0] > dx[t1])) {
X dt0 = dx[t0];
dx[t0] = dx[t1];
dx[t1] = dt0;
Y dy0 = dy[t0];
dy[t0] = dy[t1];
dy[t1] = dy0;
}
}
}
}
__syncthreads();
}
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ void execOesTadKernel(void *vx, Nd4jLong const* xShapeInfo,
int *dimension, int dimensionLength,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets,
bool descending) {
auto x = static_cast<T*>(vx);
const int sharedSize = 32768;
__shared__ int xLength;
__shared__ int xTadLength;
__shared__ int numTads;
__shared__ T *shmem;
__shared__ bool cached;
if (threadIdx.x == 0) {
xLength = shape::length(xShapeInfo);
xTadLength = shape::length(tadShapeInfo);
numTads = xLength / xTadLength;
extern __shared__ unsigned char shrd[];
shmem = (T *) shrd;
cached = xTadLength <= (sharedSize / sizeof(T));
}
__syncthreads();
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
auto dx = x + tadOffsets[r];
// this is general loop, we go uncached
int iterations = xTadLength;
if (cached) {
for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) {
auto t0 = shape::getIndexOffset(tid, tadShapeInfo);
shmem[tid] = dx[t0];
}
__syncthreads();
dx = shmem;
}
for (int i = 0; i < iterations; i++) {
if (i % 2 == 0) {
for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) {
auto top = 2 * tid + 1;
if (top < xTadLength) {
auto t0 = cached ? top - 1 : shape::getIndexOffset(top - 1, tadShapeInfo);
auto t1 = cached ? top : shape::getIndexOffset(top, tadShapeInfo);
if (!descending == (dx[t0] > dx[t1])) {
T dt0 = dx[t0];
dx[t0] = dx[t1];
dx[t1] = dt0;
}
}
}
} else {
for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) {
auto top = 2 * tid + 2;
if (top < xTadLength) {
auto t0 = cached ? top - 1 : shape::getIndexOffset(top - 1, tadShapeInfo);
auto t1 = cached ? top : shape::getIndexOffset(top, tadShapeInfo);
if (!descending == (dx[t0] > dx[t1])) {
T dt0 = dx[t0];
dx[t0] = dx[t1];
dx[t1] = dt0;
}
}
}
}
__syncthreads();
}
if (cached) {
dx = x + tadOffsets[r];
for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) {
auto t0 = shape::getIndexOffset(tid, tadShapeInfo);
dx[t0] = shmem[tid];
}
}
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ void oesTadGeneric(dim3 &launchDims, cudaStream_t *stream,
void *vx, Nd4jLong const* xShapeInfo,
int *dimension, int dimensionLength,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets,
bool descending) {
execOesTadKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending);
}
template <typename X, typename Y>
__host__ void oesTadGenericKey(dim3 &launchDims, cudaStream_t *stream,
void *vx, Nd4jLong const* xShapeInfo,
void *vy, Nd4jLong const* yShapeInfo,
int *dimension, int dimensionLength,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets,
bool descending) {
execOesTadKernelKey<X,Y><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending);
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT oesTadGeneric, (dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong const* xShapeInfo, int *dimension, int dimensionLength, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets, bool descending), LIBND4J_TYPES);
BUILD_DOUBLE_TEMPLATE(template void ND4J_EXPORT oesTadGenericKey, (dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong const* xShapeInfo, void *vy, Nd4jLong const* yShapeInfo, int *dimension, int dimensionLength, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets, bool descending), LIBND4J_TYPES, LIBND4J_TYPES);
|
121d733949a8ec7842440e421b8fa7124cec273b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <inttypes.h>
#include "static_triangle_counting/cct.hpp"
#include "utils.hpp"
__device__ void conditionalWarpReduce(volatile triangle_t* sharedData,int blockSize,int dataLength){
if(blockSize >= dataLength){
if(threadIdx.x < (dataLength/2))
{sharedData[threadIdx.x] += sharedData[threadIdx.x+(dataLength/2)];}
__syncthreads();
}
}
__device__ void warpReduce(triangle_t* __restrict__ outDataPtr,
volatile triangle_t* __restrict__ sharedData,int blockSize){
conditionalWarpReduce(sharedData,blockSize,64);
conditionalWarpReduce(sharedData,blockSize,32);
conditionalWarpReduce(sharedData,blockSize,16);
conditionalWarpReduce(sharedData,blockSize,8);
conditionalWarpReduce(sharedData,blockSize,4);
if(threadIdx.x == 0)
{*outDataPtr= sharedData[0] + sharedData[1];}
__syncthreads();
}
__device__ void conditionalReduce(volatile triangle_t* __restrict__ sharedData,int blockSize,int dataLength){
if(blockSize >= dataLength){
if(threadIdx.x < (dataLength/2))
{sharedData[threadIdx.x] += sharedData[threadIdx.x+(dataLength/2)];}
__syncthreads();
}
if((blockSize < dataLength) && (blockSize > (dataLength/2))){
if(threadIdx.x+(dataLength/2) < blockSize){
sharedData[threadIdx.x] += sharedData[threadIdx.x+(dataLength/2)];
}
__syncthreads();
}
}
__device__ void blockReduce(triangle_t* __restrict__ outGlobalDataPtr,
volatile triangle_t* __restrict__ sharedData,int blockSize){
__syncthreads();
conditionalReduce(sharedData,blockSize,1024);
conditionalReduce(sharedData,blockSize,512);
conditionalReduce(sharedData,blockSize,256);
conditionalReduce(sharedData,blockSize,128);
warpReduce(outGlobalDataPtr, sharedData, blockSize);
__syncthreads();
}
__device__ void initialize(const vertexId_t diag_id, const length_t u_len, length_t v_len,
length_t* const __restrict__ u_min, length_t* const __restrict__ u_max,
length_t* const __restrict__ v_min, length_t* const __restrict__ v_max,
int* const __restrict__ found)
{
if (diag_id == 0){
*u_min=*u_max=*v_min=*v_max=0;
*found=1;
}
else if (diag_id < u_len){
*u_min=0; *u_max=diag_id;
*v_max=diag_id;*v_min=0;
}
else if (diag_id < v_len){
*u_min=0; *u_max=u_len;
*v_max=diag_id;*v_min=diag_id-u_len;
}
else{
*u_min=diag_id-v_len; *u_max=u_len;
*v_min=diag_id-u_len; *v_max=v_len;
}
}
__device__ void workPerThread(const length_t uLength, const length_t vLength,
const int threadsPerIntersection, const int threadId,
int * const __restrict__ outWorkPerThread, int * const __restrict__ outDiagonalId){
int totalWork = uLength + vLength;
int remainderWork = totalWork%threadsPerIntersection;
int workPerThread = totalWork/threadsPerIntersection;
int longDiagonals = (threadId > remainderWork) ? remainderWork:threadId;
int shortDiagonals = (threadId > remainderWork) ? (threadId - remainderWork):0;
*outDiagonalId = ((workPerThread+1)*longDiagonals) + (workPerThread*shortDiagonals);
*outWorkPerThread = workPerThread + (threadId < remainderWork);
}
__device__ void bSearch(unsigned int found, const vertexId_t diagonalId,
vertexId_t const * const __restrict__ uNodes, vertexId_t const * const __restrict__ vNodes,
length_t const * const __restrict__ uLength,
length_t * const __restrict__ outUMin, length_t * const __restrict__ outUMax,
length_t * const __restrict__ outVMin, length_t * const __restrict__ outVMax,
length_t * const __restrict__ outUCurr,
length_t * const __restrict__ outVCurr){
length_t length;
while(!found) {
*outUCurr = (*outUMin + *outUMax)>>1;
*outVCurr = diagonalId - *outUCurr;
if(*outVCurr >= *outVMax){
length = *outUMax - *outUMin;
if(length == 1){
found = 1;
continue;
}
}
unsigned int comp1 = uNodes[*outUCurr] > vNodes[*outVCurr-1];
unsigned int comp2 = uNodes[*outUCurr-1] > vNodes[*outVCurr];
if(comp1 && !comp2){
found = 1;
}
else if(comp1){
*outVMin = *outVCurr;
*outUMax = *outUCurr;
}
else{
*outVMax = *outVCurr;
*outUMin = *outUCurr;
}
}
if((*outVCurr >= *outVMax) && (length == 1) && (*outVCurr > 0) &&
(*outUCurr > 0) && (*outUCurr < (*uLength - 1))){
unsigned int comp1 = uNodes[*outUCurr] > vNodes[*outVCurr - 1];
unsigned int comp2 = uNodes[*outUCurr - 1] > vNodes[*outVCurr];
if(!comp1 && !comp2){(*outUCurr)++; (*outVCurr)--;}
}
}
__device__ int fixStartPoint(const length_t uLength, const length_t vLength,
length_t * const __restrict__ uCurr, length_t * const __restrict__ vCurr,
vertexId_t const * const __restrict__ uNodes, vertexId_t const * const __restrict__ vNodes){
unsigned int uBigger = (*uCurr > 0) && (*vCurr < vLength) && (uNodes[*uCurr-1] == vNodes[*vCurr]);
unsigned int vBigger = (*vCurr > 0) && (*uCurr < uLength) && (vNodes[*vCurr-1] == uNodes[*uCurr]);
*uCurr += vBigger;
*vCurr += uBigger;
return (uBigger + vBigger);
}
template <bool uMasked, bool vMasked, bool subtract, bool upd3rdV>
__device__ void intersectCount(const length_t uLength, const length_t vLength,
vertexId_t const * const __restrict__ uNodes, vertexId_t const * const __restrict__ vNodes,
length_t * const __restrict__ uCurr, length_t * const __restrict__ vCurr,
int * const __restrict__ workIndex, int * const __restrict__ workPerThread,
int * const __restrict__ triangles, int found, triangle_t * const __restrict__ outPutTriangles,
vertexId_t const * const __restrict__ uMask, vertexId_t const * const __restrict__ vMask)
{
if((*uCurr < uLength) && (*vCurr < vLength)){
int comp;
int vmask;
int umask;
while(*workIndex < *workPerThread){
vmask = (vMasked) ? vMask[*vCurr] : 0;
umask = (uMasked) ? uMask[*uCurr] : 0;
comp = uNodes[*uCurr] - vNodes[*vCurr];
*triangles += (comp == 0 && !umask && !vmask);
if (upd3rdV && comp == 0 && !umask && !vmask)
if (subtract) atomicSub(outPutTriangles + uNodes[*uCurr], 1);
else atomicAdd(outPutTriangles + uNodes[*uCurr], 1);
*uCurr += (comp <= 0 && !vmask) || umask;
*vCurr += (comp >= 0 && !umask) || vmask;
*workIndex += (comp == 0&& !umask && !vmask) + 1;
if((*vCurr == vLength) || (*uCurr == uLength)){
break;
}
}
*triangles -= ((comp == 0) && (*workIndex > *workPerThread) && (found));
}
}
// u_len < v_len
template <bool uMasked, bool vMasked, bool subtract, bool upd3rdV>
__device__ triangle_t count_triangles(vertexId_t u, vertexId_t const * const __restrict__ u_nodes, length_t u_len,
vertexId_t v, vertexId_t const * const __restrict__ v_nodes, length_t v_len, int threads_per_block,
volatile vertexId_t* __restrict__ firstFound, int tId, triangle_t * const __restrict__ outPutTriangles,
vertexId_t const * const __restrict__ uMask, vertexId_t const * const __restrict__ vMask)
{
// Partitioning the work to the multiple thread of a single GPU processor. The threads should get a near equal number of the elements to Tersect - this number will be off by 1.
int work_per_thread, diag_id;
workPerThread(u_len, v_len, threads_per_block, tId, &work_per_thread, &diag_id);
triangle_t triangles = 0;
int work_index = 0,found=0;
length_t u_min,u_max,v_min,v_max,u_curr,v_curr;
firstFound[tId]=0;
if(work_per_thread>0){
// For the binary search, we are figuring out the initial poT of search.
initialize(diag_id, u_len, v_len,&u_min, &u_max,&v_min, &v_max,&found);
u_curr = 0; v_curr = 0;
bSearch(found, diag_id, u_nodes, v_nodes, &u_len, &u_min, &u_max, &v_min,
&v_max, &u_curr, &v_curr);
int sum = fixStartPoint(u_len, v_len, &u_curr, &v_curr, u_nodes, v_nodes);
work_index += sum;
if(tId > 0)
firstFound[tId-1] = sum;
triangles += sum;
intersectCount<uMasked, vMasked, subtract, upd3rdV>(
u_len, v_len, u_nodes, v_nodes, &u_curr, &v_curr,
&work_index, &work_per_thread, &triangles, firstFound[tId], outPutTriangles,
uMask, vMask);
}
return triangles;
}
__device__ void workPerBlock(const length_t numVertices,
length_t * const __restrict__ outMpStart,
length_t * const __restrict__ outMpEnd, int blockSize)
{
length_t verticesPerMp = numVertices/gridDim.x;
length_t remainderBlocks = numVertices % gridDim.x;
length_t extraVertexBlocks = (blockIdx.x > remainderBlocks)? remainderBlocks:blockIdx.x;
length_t regularVertexBlocks = (blockIdx.x > remainderBlocks)? blockIdx.x - remainderBlocks:0;
length_t mpStart = ((verticesPerMp+1)*extraVertexBlocks) + (verticesPerMp*regularVertexBlocks);
*outMpStart = mpStart;
*outMpEnd = mpStart + verticesPerMp + (blockIdx.x < remainderBlocks);
}
__global__ void devicecuStingerNewTriangles(cuStinger* custing, BatchUpdateData *bud,
triangle_t * const __restrict__ outPutTriangles, const int threads_per_block,
const int number_blocks, const int shifter)
{
length_t batchSize = *(bud->getBatchSize());
// Partitioning the work to the multiple thread of a single GPU processor. The threads should get a near equal number of the elements to intersect - this number will be off by no more than one.
int tx = threadIdx.x;
length_t this_mp_start, this_mp_stop;
length_t *d_off = bud->getOffsets();
vertexId_t * d_ind = bud->getDst();
vertexId_t * d_seg = bud->getSrc();
const int blockSize = blockDim.x;
workPerBlock(batchSize, &this_mp_start, &this_mp_stop, blockSize);
__shared__ vertexId_t firstFound[1024];
length_t adj_offset=tx>>shifter;
length_t* firstFoundPos=firstFound + (adj_offset<<shifter);
for (length_t edge = this_mp_start+adj_offset; edge < this_mp_stop; edge+=number_blocks){
if (bud->getIndDuplicate()[edge]==1) // this means it's a duplicate edge
continue;
vertexId_t src = d_seg[edge];
vertexId_t dest= d_ind[edge];
length_t srcLen=custing->dVD->getUsed()[src];
length_t destLen=custing->dVD->getUsed()[dest];
bool avoidCalc = (src == dest) || (destLen < 2) || (srcLen < 2);
if(avoidCalc)
continue;
bool sourceSmaller = (srcLen<destLen);
vertexId_t small = sourceSmaller? src : dest;
vertexId_t large = sourceSmaller? dest : src;
length_t small_len = sourceSmaller? srcLen : destLen;
length_t large_len = sourceSmaller? destLen : srcLen;
const vertexId_t* small_ptr = custing->dVD->getAdj()[small]->dst;
const vertexId_t* large_ptr = custing->dVD->getAdj()[large]->dst;
triangle_t tCount = count_triangles<false, false, false, true>(
small, small_ptr, small_len,
large,large_ptr, large_len,
threads_per_block,firstFoundPos,
tx%threads_per_block, outPutTriangles,
NULL, NULL);
atomicAdd(outPutTriangles + src, tCount);
atomicAdd(outPutTriangles + dest, tCount);
__syncthreads();
}
}
__global__ void deviceBUThreeTriangles (BatchUpdateData *bud,
triangle_t * const __restrict__ outPutTriangles, const int threads_per_block,
const int number_blocks, const int shifter)
{
length_t batchsize = *(bud->getBatchSize());
// Partitioning the work to the multiple thread of a single GPU processor. The threads should get a near equal number of the elements to intersect - this number will be off by no more than one.
int tx = threadIdx.x;
length_t this_mp_start, this_mp_stop;
length_t *d_off = bud->getOffsets();
vertexId_t * d_ind = bud->getDst();
vertexId_t * d_seg = bud->getSrc();
const int blockSize = blockDim.x;
workPerBlock(batchsize, &this_mp_start, &this_mp_stop, blockSize);
__shared__ vertexId_t firstFound[1024];
length_t adj_offset=tx>>shifter;
length_t* firstFoundPos=firstFound + (adj_offset<<shifter);
for (length_t edge = this_mp_start+adj_offset; edge < this_mp_stop; edge+=number_blocks){
if (bud->getIndDuplicate()[edge]) // this means it's a duplicate edge
continue;
vertexId_t src = d_seg[edge];
vertexId_t dest= d_ind[edge];
length_t srcLen= d_off[src+1] - d_off[src];
length_t destLen=d_off[dest+1] - d_off[dest];
bool avoidCalc = (src == dest) || (destLen < 2) || (srcLen < 2);
if(avoidCalc)
continue;
bool sourceSmaller = (srcLen<destLen);
vertexId_t small = sourceSmaller? src : dest;
vertexId_t large = sourceSmaller? dest : src;
length_t small_len = sourceSmaller? srcLen : destLen;
length_t large_len = sourceSmaller? destLen : srcLen;
vertexId_t const * const small_ptr = d_ind + d_off[small];
vertexId_t const * const large_ptr = d_ind + d_off[large];
vertexId_t const * const small_mask_ptr = bud->getIndDuplicate() + d_off[small];
vertexId_t const * const large_mask_ptr = bud->getIndDuplicate() + d_off[large];
triangle_t tCount = count_triangles<true, true, false, false>(
small, small_ptr, small_len,
large,large_ptr, large_len,
threads_per_block,firstFoundPos,
tx%threads_per_block, outPutTriangles,
small_mask_ptr, large_mask_ptr);
atomicAdd(outPutTriangles + src, tCount);
__syncthreads();
}
}
__global__ void deviceBUTwoCUOneTriangles (BatchUpdateData *bud, cuStinger* custing,
triangle_t * const __restrict__ outPutTriangles, const int threads_per_block,
const int number_blocks, const int shifter)
{
length_t batchsize = *(bud->getBatchSize());
// Partitioning the work to the multiple thread of a single GPU processor. The threads should get a near equal number of the elements to intersect - this number will be off by no more than one.
int tx = threadIdx.x;
vertexId_t this_mp_start, this_mp_stop;
length_t *d_off = bud->getOffsets();
vertexId_t * d_ind = bud->getDst();
vertexId_t * d_seg = bud->getSrc();
const int blockSize = blockDim.x;
workPerBlock(batchsize, &this_mp_start, &this_mp_stop, blockSize);
__shared__ vertexId_t firstFound[1024];
length_t adj_offset=tx>>shifter;
length_t* firstFoundPos=firstFound + (adj_offset<<shifter);
for (length_t edge = this_mp_start+adj_offset; edge < this_mp_stop; edge+=number_blocks){
if (bud->getIndDuplicate()[edge]) // this means it's a duplicate edge
continue;
vertexId_t src = bud->getSrc()[edge];
vertexId_t dest= bud->getDst()[edge];
length_t srcLen= d_off[src+1] - d_off[src];
length_t destLen=custing->dVD->getUsed()[dest];
bool avoidCalc = (src == dest) || (destLen < 2) || (srcLen < 2);
if(avoidCalc)
continue;
vertexId_t const * const src_ptr = d_ind + d_off[src];
vertexId_t const * const src_mask_ptr = bud->getIndDuplicate() + d_off[src];
vertexId_t const * const dst_ptr = custing->dVD->getAdj()[dest]->dst;
bool sourceSmaller = (srcLen<destLen);
vertexId_t small = sourceSmaller? src : dest;
vertexId_t large = sourceSmaller? dest : src;
length_t small_len = sourceSmaller? srcLen : destLen;
length_t large_len = sourceSmaller? destLen : srcLen;
vertexId_t const * const small_ptr = sourceSmaller? src_ptr : dst_ptr;
vertexId_t const * const small_mask_ptr = sourceSmaller? src_mask_ptr : NULL;
vertexId_t const * const large_ptr = sourceSmaller? dst_ptr : src_ptr;
vertexId_t const * const large_mask_ptr = sourceSmaller? NULL : src_mask_ptr;
triangle_t tCount = (sourceSmaller)?
count_triangles<true, false, true, true>(
small, small_ptr, small_len,
large,large_ptr, large_len,
threads_per_block,firstFoundPos,
tx%threads_per_block, outPutTriangles,
small_mask_ptr, large_mask_ptr):
count_triangles<false, true, true, true>(
small, small_ptr, small_len,
large,large_ptr, large_len,
threads_per_block,firstFoundPos,
tx%threads_per_block, outPutTriangles,
small_mask_ptr, large_mask_ptr )
;
atomicSub(outPutTriangles + src, tCount);
atomicSub(outPutTriangles + dest, tCount);
__syncthreads();
}
}
void callDeviceNewTriangles(cuStinger& custing, BatchUpdate& bu,
triangle_t * const __restrict__ outPutTriangles, const int threads_per_block,
const int number_blocks, const int shifter, const int thread_blocks, const int blockdim,
triangle_t * const __restrict__ h_triangles, triangle_t * const __restrict__ h_triangles_t)
{
hipEvent_t ce_start,ce_stop;
dim3 numBlocks(1, 1);
length_t batchsize = *(bu.getHostBUD()->getBatchSize());
length_t nv = *(bu.getHostBUD()->getNumVertices());
numBlocks.x = ceil((float)(batchsize*threads_per_block)/(float)blockdim);
// Calculate all new traingles regardless of repetition
start_clock(ce_start, ce_stop);
hipLaunchKernelGGL(( devicecuStingerNewTriangles), dim3(numBlocks), dim3(blockdim), 0, 0, custing.devicePtr(), bu.getDeviceBUD()->devicePtr(), outPutTriangles, threads_per_block,number_blocks,shifter);
printf("\n%s <%d> %f\n", __FUNCTION__, __LINE__, end_clock(ce_start, ce_stop));
// Calculate triangles formed by only new edges
start_clock(ce_start, ce_stop);
hipLaunchKernelGGL(( deviceBUThreeTriangles), dim3(numBlocks),dim3(blockdim), 0, 0, bu.getDeviceBUD()->devicePtr(), outPutTriangles, threads_per_block,number_blocks,shifter);
printf("\n%s <%d> %f\n", __FUNCTION__, __LINE__, end_clock(ce_start, ce_stop));
// Calculate triangles formed by two new edges
start_clock(ce_start, ce_stop);
hipLaunchKernelGGL(( deviceBUTwoCUOneTriangles), dim3(numBlocks),dim3(blockdim), 0, 0, bu.getDeviceBUD()->devicePtr(),custing.devicePtr(), outPutTriangles, threads_per_block,number_blocks,shifter);
printf("\n%s <%d> %f\n", __FUNCTION__, __LINE__, end_clock(ce_start, ce_stop));
}
__global__ void comparecus(cuStinger* cus1, cuStinger* cus2)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
length_t nv = cus1->nv;
if (tid < nv)
{
vertexId_t * adj1 = cus1->dVD->adj[tid]->dst;
vertexId_t * adj2 = cus2->dVD->adj[tid]->dst;
length_t size1 = cus1->dVD->getUsed()[tid];
length_t size2 = cus2->dVD->getUsed()[tid];
if (size1 != size2)
{
printf("size mismatch %d %d\n", size1, size2);
}
for (int i = 0; i < size1; ++i)
{
if (adj1[i] != adj2[i])
{
printf("adj mismatch vertex %d, %d %d\n", tid, adj1[i], adj2[i]);
for (int j = 0; j < size1; ++j)
{
printf("%d adj1 %d\n", tid, adj1[j]);
}
printf("%d ==\n", tid);
for (int j = 0; j < size1; ++j)
{
printf("%d adj2 %d\n", tid, adj2[j]);
}
}
}
}
}
void compareCUS(cuStinger* cus1, cuStinger* cus2)
{
length_t nv = cus1->nv;
dim3 numBlocks(1, 1);
int32_t threads=32;
dim3 threadsPerBlock(threads, 1);
numBlocks.x = ceil((float)nv/(float)threads);
hipLaunchKernelGGL(( comparecus), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, cus1->devicePtr(),cus2->devicePtr());
}
| 121d733949a8ec7842440e421b8fa7124cec273b.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <inttypes.h>
#include "static_triangle_counting/cct.hpp"
#include "utils.hpp"
__device__ void conditionalWarpReduce(volatile triangle_t* sharedData,int blockSize,int dataLength){
if(blockSize >= dataLength){
if(threadIdx.x < (dataLength/2))
{sharedData[threadIdx.x] += sharedData[threadIdx.x+(dataLength/2)];}
__syncthreads();
}
}
__device__ void warpReduce(triangle_t* __restrict__ outDataPtr,
volatile triangle_t* __restrict__ sharedData,int blockSize){
conditionalWarpReduce(sharedData,blockSize,64);
conditionalWarpReduce(sharedData,blockSize,32);
conditionalWarpReduce(sharedData,blockSize,16);
conditionalWarpReduce(sharedData,blockSize,8);
conditionalWarpReduce(sharedData,blockSize,4);
if(threadIdx.x == 0)
{*outDataPtr= sharedData[0] + sharedData[1];}
__syncthreads();
}
__device__ void conditionalReduce(volatile triangle_t* __restrict__ sharedData,int blockSize,int dataLength){
if(blockSize >= dataLength){
if(threadIdx.x < (dataLength/2))
{sharedData[threadIdx.x] += sharedData[threadIdx.x+(dataLength/2)];}
__syncthreads();
}
if((blockSize < dataLength) && (blockSize > (dataLength/2))){
if(threadIdx.x+(dataLength/2) < blockSize){
sharedData[threadIdx.x] += sharedData[threadIdx.x+(dataLength/2)];
}
__syncthreads();
}
}
__device__ void blockReduce(triangle_t* __restrict__ outGlobalDataPtr,
volatile triangle_t* __restrict__ sharedData,int blockSize){
__syncthreads();
conditionalReduce(sharedData,blockSize,1024);
conditionalReduce(sharedData,blockSize,512);
conditionalReduce(sharedData,blockSize,256);
conditionalReduce(sharedData,blockSize,128);
warpReduce(outGlobalDataPtr, sharedData, blockSize);
__syncthreads();
}
__device__ void initialize(const vertexId_t diag_id, const length_t u_len, length_t v_len,
length_t* const __restrict__ u_min, length_t* const __restrict__ u_max,
length_t* const __restrict__ v_min, length_t* const __restrict__ v_max,
int* const __restrict__ found)
{
if (diag_id == 0){
*u_min=*u_max=*v_min=*v_max=0;
*found=1;
}
else if (diag_id < u_len){
*u_min=0; *u_max=diag_id;
*v_max=diag_id;*v_min=0;
}
else if (diag_id < v_len){
*u_min=0; *u_max=u_len;
*v_max=diag_id;*v_min=diag_id-u_len;
}
else{
*u_min=diag_id-v_len; *u_max=u_len;
*v_min=diag_id-u_len; *v_max=v_len;
}
}
__device__ void workPerThread(const length_t uLength, const length_t vLength,
const int threadsPerIntersection, const int threadId,
int * const __restrict__ outWorkPerThread, int * const __restrict__ outDiagonalId){
int totalWork = uLength + vLength;
int remainderWork = totalWork%threadsPerIntersection;
int workPerThread = totalWork/threadsPerIntersection;
int longDiagonals = (threadId > remainderWork) ? remainderWork:threadId;
int shortDiagonals = (threadId > remainderWork) ? (threadId - remainderWork):0;
*outDiagonalId = ((workPerThread+1)*longDiagonals) + (workPerThread*shortDiagonals);
*outWorkPerThread = workPerThread + (threadId < remainderWork);
}
__device__ void bSearch(unsigned int found, const vertexId_t diagonalId,
vertexId_t const * const __restrict__ uNodes, vertexId_t const * const __restrict__ vNodes,
length_t const * const __restrict__ uLength,
length_t * const __restrict__ outUMin, length_t * const __restrict__ outUMax,
length_t * const __restrict__ outVMin, length_t * const __restrict__ outVMax,
length_t * const __restrict__ outUCurr,
length_t * const __restrict__ outVCurr){
length_t length;
while(!found) {
*outUCurr = (*outUMin + *outUMax)>>1;
*outVCurr = diagonalId - *outUCurr;
if(*outVCurr >= *outVMax){
length = *outUMax - *outUMin;
if(length == 1){
found = 1;
continue;
}
}
unsigned int comp1 = uNodes[*outUCurr] > vNodes[*outVCurr-1];
unsigned int comp2 = uNodes[*outUCurr-1] > vNodes[*outVCurr];
if(comp1 && !comp2){
found = 1;
}
else if(comp1){
*outVMin = *outVCurr;
*outUMax = *outUCurr;
}
else{
*outVMax = *outVCurr;
*outUMin = *outUCurr;
}
}
if((*outVCurr >= *outVMax) && (length == 1) && (*outVCurr > 0) &&
(*outUCurr > 0) && (*outUCurr < (*uLength - 1))){
unsigned int comp1 = uNodes[*outUCurr] > vNodes[*outVCurr - 1];
unsigned int comp2 = uNodes[*outUCurr - 1] > vNodes[*outVCurr];
if(!comp1 && !comp2){(*outUCurr)++; (*outVCurr)--;}
}
}
__device__ int fixStartPoint(const length_t uLength, const length_t vLength,
length_t * const __restrict__ uCurr, length_t * const __restrict__ vCurr,
vertexId_t const * const __restrict__ uNodes, vertexId_t const * const __restrict__ vNodes){
unsigned int uBigger = (*uCurr > 0) && (*vCurr < vLength) && (uNodes[*uCurr-1] == vNodes[*vCurr]);
unsigned int vBigger = (*vCurr > 0) && (*uCurr < uLength) && (vNodes[*vCurr-1] == uNodes[*uCurr]);
*uCurr += vBigger;
*vCurr += uBigger;
return (uBigger + vBigger);
}
template <bool uMasked, bool vMasked, bool subtract, bool upd3rdV>
__device__ void intersectCount(const length_t uLength, const length_t vLength,
vertexId_t const * const __restrict__ uNodes, vertexId_t const * const __restrict__ vNodes,
length_t * const __restrict__ uCurr, length_t * const __restrict__ vCurr,
int * const __restrict__ workIndex, int * const __restrict__ workPerThread,
int * const __restrict__ triangles, int found, triangle_t * const __restrict__ outPutTriangles,
vertexId_t const * const __restrict__ uMask, vertexId_t const * const __restrict__ vMask)
{
if((*uCurr < uLength) && (*vCurr < vLength)){
int comp;
int vmask;
int umask;
while(*workIndex < *workPerThread){
vmask = (vMasked) ? vMask[*vCurr] : 0;
umask = (uMasked) ? uMask[*uCurr] : 0;
comp = uNodes[*uCurr] - vNodes[*vCurr];
*triangles += (comp == 0 && !umask && !vmask);
if (upd3rdV && comp == 0 && !umask && !vmask)
if (subtract) atomicSub(outPutTriangles + uNodes[*uCurr], 1);
else atomicAdd(outPutTriangles + uNodes[*uCurr], 1);
*uCurr += (comp <= 0 && !vmask) || umask;
*vCurr += (comp >= 0 && !umask) || vmask;
*workIndex += (comp == 0&& !umask && !vmask) + 1;
if((*vCurr == vLength) || (*uCurr == uLength)){
break;
}
}
*triangles -= ((comp == 0) && (*workIndex > *workPerThread) && (found));
}
}
// Intersect the adjacency list of u (length u_len) with that of v (length v_len)
// and return the number of common neighbours, i.e. triangles closed by edge (u,v).
// Caller passes the shorter list as u (u_len < v_len).
// Template flags:
//   uMasked/vMasked - skip entries whose mask word is non-zero (duplicate batch edges).
//   subtract        - when upd3rdV is set, atomically decrement (instead of
//                     increment) the third vertex's entry in outPutTriangles.
//   upd3rdV         - also update the per-vertex count of the common neighbour.
// threads_per_block threads cooperate on one intersection; each gets a near-equal
// slice of the merge path (off by at most one element).
template <bool uMasked, bool vMasked, bool subtract, bool upd3rdV>
__device__ triangle_t count_triangles(vertexId_t u, vertexId_t const * const __restrict__ u_nodes, length_t u_len,
    vertexId_t v, vertexId_t const * const __restrict__ v_nodes, length_t v_len, int threads_per_block,
    volatile vertexId_t* __restrict__ firstFound, int tId, triangle_t * const __restrict__ outPutTriangles,
    vertexId_t const * const __restrict__ uMask, vertexId_t const * const __restrict__ vMask)
{
    // Partitioning the work to the multiple threads of a single GPU processor.
    // Each thread's share of the merge path is off by at most 1.
    int work_per_thread, diag_id;
    workPerThread(u_len, v_len, threads_per_block, tId, &work_per_thread, &diag_id);
    triangle_t triangles = 0;
    int work_index = 0,found=0;
    length_t u_min,u_max,v_min,v_max,u_curr,v_curr;
    firstFound[tId]=0;
    if(work_per_thread>0){
        // Binary search along the merge-matrix diagonal to find this thread's
        // initial point of search (u_curr, v_curr).
        initialize(diag_id, u_len, v_len,&u_min, &u_max,&v_min, &v_max,&found);
        u_curr = 0; v_curr = 0;
        bSearch(found, diag_id, u_nodes, v_nodes, &u_len, &u_min, &u_max, &v_min,
            &v_max, &u_curr, &v_curr);
        // fixStartPoint may consume a matching element on the partition boundary;
        // sum is 1 if it did, 0 otherwise.
        int sum = fixStartPoint(u_len, v_len, &u_curr, &v_curr, u_nodes, v_nodes);
        work_index += sum;
        if(tId > 0)
            firstFound[tId-1] = sum;  // signal the previous thread its boundary match was taken
        triangles += sum;
        intersectCount<uMasked, vMasked, subtract, upd3rdV>(
            u_len, v_len, u_nodes, v_nodes, &u_curr, &v_curr,
            &work_index, &work_per_thread, &triangles, firstFound[tId], outPutTriangles,
            uMask, vMask);
    }
    return triangles;
}
// Statically partition [0, numVertices) across the grid's blocks:
// blocks [0, remainder) receive verticesPerMp+1 items, the rest verticesPerMp,
// so per-block shares differ by at most one.
// NOTE(review): the blockSize parameter is currently unused — confirm before removing.
__device__ void workPerBlock(const length_t numVertices,
    length_t * const __restrict__ outMpStart,
    length_t * const __restrict__ outMpEnd, int blockSize)
{
    length_t verticesPerMp = numVertices/gridDim.x;
    length_t remainderBlocks = numVertices % gridDim.x;
    // min(blockIdx.x, remainderBlocks): how many preceding blocks carry an extra vertex.
    length_t extraVertexBlocks = (blockIdx.x > remainderBlocks)? remainderBlocks:blockIdx.x;
    length_t regularVertexBlocks = (blockIdx.x > remainderBlocks)? blockIdx.x - remainderBlocks:0;
    length_t mpStart = ((verticesPerMp+1)*extraVertexBlocks) + (verticesPerMp*regularVertexBlocks);
    *outMpStart = mpStart;
    // This block's own share: verticesPerMp, plus one if it is a remainder block.
    *outMpEnd = mpStart + verticesPerMp + (blockIdx.x < remainderBlocks);
}
// For every non-duplicate batch edge (src,dest), intersect the two cuStinger
// adjacency lists and add the triangle count to both endpoints in
// outPutTriangles. Groups of threads_per_block threads cooperate on one edge;
// shifter is presumably log2(threads_per_block) — TODO confirm against caller.
__global__ void devicecuStingerNewTriangles(cuStinger* custing, BatchUpdateData *bud,
    triangle_t * const __restrict__ outPutTriangles, const int threads_per_block,
    const int number_blocks, const int shifter)
{
    length_t batchSize = *(bud->getBatchSize());
    // Partitioning the work to the multiple threads of a single GPU processor. Each
    // block gets a near-equal number of batch edges (off by no more than one).
    int tx = threadIdx.x;
    length_t this_mp_start, this_mp_stop;
    length_t *d_off = bud->getOffsets();  // NOTE(review): unused in this kernel
    vertexId_t * d_ind = bud->getDst();
    vertexId_t * d_seg = bud->getSrc();
    const int blockSize = blockDim.x;     // NOTE(review): unused by workPerBlock
    workPerBlock(batchSize, &this_mp_start, &this_mp_stop, blockSize);
    // Per-group scratch used by count_triangles to hand off boundary matches.
    __shared__ vertexId_t firstFound[1024];
    length_t adj_offset=tx>>shifter;      // index of this thread's cooperating group
    length_t* firstFoundPos=firstFound + (adj_offset<<shifter);
    for (length_t edge = this_mp_start+adj_offset; edge < this_mp_stop; edge+=number_blocks){
        // NOTE(review): the `continue`s below bypass the __syncthreads() at the end
        // of the loop for some threads — verify the barrier cannot be divergent here.
        if (bud->getIndDuplicate()[edge]==1) // this means it's a duplicate edge
            continue;
        vertexId_t src = d_seg[edge];
        vertexId_t dest= d_ind[edge];
        length_t srcLen=custing->dVD->getUsed()[src];
        length_t destLen=custing->dVD->getUsed()[dest];
        // Self-loops and degree-<2 endpoints cannot close a triangle.
        bool avoidCalc = (src == dest) || (destLen < 2) || (srcLen < 2);
        if(avoidCalc)
            continue;
        // Always intersect with the shorter list as `u` (count_triangles precondition).
        bool sourceSmaller = (srcLen<destLen);
        vertexId_t small = sourceSmaller? src : dest;
        vertexId_t large = sourceSmaller? dest : src;
        length_t small_len = sourceSmaller? srcLen : destLen;
        length_t large_len = sourceSmaller? destLen : srcLen;
        const vertexId_t* small_ptr = custing->dVD->getAdj()[small]->dst;
        const vertexId_t* large_ptr = custing->dVD->getAdj()[large]->dst;
        // No masks; upd3rdV=true also credits the third vertex of each triangle.
        triangle_t tCount = count_triangles<false, false, false, true>(
            small, small_ptr, small_len,
            large,large_ptr, large_len,
            threads_per_block,firstFoundPos,
            tx%threads_per_block, outPutTriangles,
            NULL, NULL);
        atomicAdd(outPutTriangles + src, tCount);
        atomicAdd(outPutTriangles + dest, tCount);
        __syncthreads();
    }
}
// Count triangles formed entirely by batch edges: both adjacency lists come from
// the batch CSR (d_off/d_ind), with duplicate entries masked out. Only `src` is
// credited (upd3rdV=false) — presumably every edge of such a triangle appears in
// the batch, so each triangle is still counted from all sides; TODO confirm.
__global__ void deviceBUThreeTriangles (BatchUpdateData *bud,
    triangle_t * const __restrict__ outPutTriangles, const int threads_per_block,
    const int number_blocks, const int shifter)
{
    length_t batchsize = *(bud->getBatchSize());
    // Partitioning the work to the multiple threads of a single GPU processor. Each
    // block gets a near-equal number of batch edges (off by no more than one).
    int tx = threadIdx.x;
    length_t this_mp_start, this_mp_stop;
    length_t *d_off = bud->getOffsets();
    vertexId_t * d_ind = bud->getDst();
    vertexId_t * d_seg = bud->getSrc();
    const int blockSize = blockDim.x;
    workPerBlock(batchsize, &this_mp_start, &this_mp_stop, blockSize);
    // Per-group scratch used by count_triangles to hand off boundary matches.
    __shared__ vertexId_t firstFound[1024];
    length_t adj_offset=tx>>shifter;
    length_t* firstFoundPos=firstFound + (adj_offset<<shifter);
    for (length_t edge = this_mp_start+adj_offset; edge < this_mp_stop; edge+=number_blocks){
        // NOTE(review): `continue` bypasses the trailing __syncthreads() for some
        // threads — verify the barrier cannot be divergent here.
        if (bud->getIndDuplicate()[edge]) // this means it's a duplicate edge
            continue;
        vertexId_t src = d_seg[edge];
        vertexId_t dest= d_ind[edge];
        // Degrees come from the batch CSR, not from cuStinger.
        length_t srcLen= d_off[src+1] - d_off[src];
        length_t destLen=d_off[dest+1] - d_off[dest];
        bool avoidCalc = (src == dest) || (destLen < 2) || (srcLen < 2);
        if(avoidCalc)
            continue;
        bool sourceSmaller = (srcLen<destLen);
        vertexId_t small = sourceSmaller? src : dest;
        vertexId_t large = sourceSmaller? dest : src;
        length_t small_len = sourceSmaller? srcLen : destLen;
        length_t large_len = sourceSmaller? destLen : srcLen;
        vertexId_t const * const small_ptr = d_ind + d_off[small];
        vertexId_t const * const large_ptr = d_ind + d_off[large];
        // Both lists are masked so duplicate batch entries are skipped.
        vertexId_t const * const small_mask_ptr = bud->getIndDuplicate() + d_off[small];
        vertexId_t const * const large_mask_ptr = bud->getIndDuplicate() + d_off[large];
        triangle_t tCount = count_triangles<true, true, false, false>(
            small, small_ptr, small_len,
            large,large_ptr, large_len,
            threads_per_block,firstFoundPos,
            tx%threads_per_block, outPutTriangles,
            small_mask_ptr, large_mask_ptr);
        atomicAdd(outPutTriangles + src, tCount);
        __syncthreads();
    }
}
// Count triangles formed by two batch edges and one existing cuStinger edge:
// one list comes from the batch CSR (masked for duplicates), the other from the
// cuStinger structure. subtract=true and the atomicSub calls below remove these
// counts — presumably compensating for double counting by the previous kernels;
// TODO confirm against the update algorithm.
__global__ void deviceBUTwoCUOneTriangles (BatchUpdateData *bud, cuStinger* custing,
    triangle_t * const __restrict__ outPutTriangles, const int threads_per_block,
    const int number_blocks, const int shifter)
{
    length_t batchsize = *(bud->getBatchSize());
    // Partitioning the work to the multiple threads of a single GPU processor. Each
    // block gets a near-equal number of batch edges (off by no more than one).
    int tx = threadIdx.x;
    // NOTE(review): declared vertexId_t here but length_t in the sibling kernels;
    // works only if both typedefs share the underlying type — confirm.
    vertexId_t this_mp_start, this_mp_stop;
    length_t *d_off = bud->getOffsets();
    vertexId_t * d_ind = bud->getDst();
    vertexId_t * d_seg = bud->getSrc();  // NOTE(review): unused; getSrc() is re-read below
    const int blockSize = blockDim.x;
    workPerBlock(batchsize, &this_mp_start, &this_mp_stop, blockSize);
    // Per-group scratch used by count_triangles to hand off boundary matches.
    __shared__ vertexId_t firstFound[1024];
    length_t adj_offset=tx>>shifter;
    length_t* firstFoundPos=firstFound + (adj_offset<<shifter);
    for (length_t edge = this_mp_start+adj_offset; edge < this_mp_stop; edge+=number_blocks){
        // NOTE(review): `continue` bypasses the trailing __syncthreads() for some
        // threads — verify the barrier cannot be divergent here.
        if (bud->getIndDuplicate()[edge]) // this means it's a duplicate edge
            continue;
        vertexId_t src = bud->getSrc()[edge];
        vertexId_t dest= bud->getDst()[edge];
        length_t srcLen= d_off[src+1] - d_off[src];           // batch-side degree
        length_t destLen=custing->dVD->getUsed()[dest];       // graph-side degree
        bool avoidCalc = (src == dest) || (destLen < 2) || (srcLen < 2);
        if(avoidCalc)
            continue;
        vertexId_t const * const src_ptr = d_ind + d_off[src];
        vertexId_t const * const src_mask_ptr = bud->getIndDuplicate() + d_off[src];
        vertexId_t const * const dst_ptr = custing->dVD->getAdj()[dest]->dst;
        bool sourceSmaller = (srcLen<destLen);
        vertexId_t small = sourceSmaller? src : dest;
        vertexId_t large = sourceSmaller? dest : src;
        length_t small_len = sourceSmaller? srcLen : destLen;
        length_t large_len = sourceSmaller? destLen : srcLen;
        vertexId_t const * const small_ptr = sourceSmaller? src_ptr : dst_ptr;
        vertexId_t const * const small_mask_ptr = sourceSmaller? src_mask_ptr : NULL;
        vertexId_t const * const large_ptr = sourceSmaller? dst_ptr : src_ptr;
        vertexId_t const * const large_mask_ptr = sourceSmaller? NULL : src_mask_ptr;
        // Only the batch-side list carries a mask; pick the template instantiation
        // (uMasked vs vMasked) according to which side is the batch list.
        triangle_t tCount = (sourceSmaller)?
            count_triangles<true, false, true, true>(
                small, small_ptr, small_len,
                large,large_ptr, large_len,
                threads_per_block,firstFoundPos,
                tx%threads_per_block, outPutTriangles,
                small_mask_ptr, large_mask_ptr):
            count_triangles<false, true, true, true>(
                small, small_ptr, small_len,
                large,large_ptr, large_len,
                threads_per_block,firstFoundPos,
                tx%threads_per_block, outPutTriangles,
                small_mask_ptr, large_mask_ptr )
            ;
        atomicSub(outPutTriangles + src, tCount);
        atomicSub(outPutTriangles + dest, tCount);
        __syncthreads();
    }
}
// Host driver: updates per-vertex triangle counts after a batch edge insertion.
// Three phases: (1) triangles closed by a new edge against the existing graph,
// (2) triangles made of three new edges, (3) triangles made of two new edges
// and one existing edge (subtracted — see deviceBUTwoCUOneTriangles).
// thread_blocks, h_triangles and h_triangles_t are currently unused; kept for
// interface compatibility (NOTE(review): confirm before removing).
void callDeviceNewTriangles(cuStinger& custing, BatchUpdate& bu,
    triangle_t * const __restrict__ outPutTriangles, const int threads_per_block,
    const int number_blocks, const int shifter, const int thread_blocks, const int blockdim,
    triangle_t * const __restrict__ h_triangles, triangle_t * const __restrict__ h_triangles_t)
{
    // FIX: was cudaEvent_t — a hipify leftover that is undefined in a HIP build;
    // the HIP event type is hipEvent_t (assumes start_clock/end_clock take
    // hipEvent_t in the HIP port — verify their declarations).
    hipEvent_t ce_start,ce_stop;
    dim3 numBlocks(1, 1);
    length_t batchsize = *(bu.getHostBUD()->getBatchSize());
    // One group of threads_per_block threads per batch edge.
    numBlocks.x = ceil((float)(batchsize*threads_per_block)/(float)blockdim);
    // Calculate all new triangles regardless of repetition
    start_clock(ce_start, ce_stop);
    devicecuStingerNewTriangles<<<numBlocks, blockdim>>>(custing.devicePtr(), bu.getDeviceBUD()->devicePtr(), outPutTriangles, threads_per_block,number_blocks,shifter);
    gpuErrchk(hipGetLastError());  // surface launch-configuration errors immediately
    printf("\n%s <%d> %f\n", __FUNCTION__, __LINE__, end_clock(ce_start, ce_stop));
    // Calculate triangles formed by only new edges
    start_clock(ce_start, ce_stop);
    deviceBUThreeTriangles<<<numBlocks,blockdim>>>(bu.getDeviceBUD()->devicePtr(), outPutTriangles, threads_per_block,number_blocks,shifter);
    gpuErrchk(hipGetLastError());
    printf("\n%s <%d> %f\n", __FUNCTION__, __LINE__, end_clock(ce_start, ce_stop));
    // Calculate triangles formed by two new edges and one existing edge
    start_clock(ce_start, ce_stop);
    deviceBUTwoCUOneTriangles<<<numBlocks,blockdim>>>(bu.getDeviceBUD()->devicePtr(),custing.devicePtr(), outPutTriangles, threads_per_block,number_blocks,shifter);
    gpuErrchk(hipGetLastError());
    printf("\n%s <%d> %f\n", __FUNCTION__, __LINE__, end_clock(ce_start, ce_stop));
}
// Debug kernel: one thread per vertex, compares the adjacency lists of two
// cuStinger instances and prints any size or element mismatch via device printf.
__global__ void comparecus(cuStinger* cus1, cuStinger* cus2)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    length_t nv = cus1->nv;
    if (tid < nv)
    {
        vertexId_t * adj1 = cus1->dVD->adj[tid]->dst;
        vertexId_t * adj2 = cus2->dVD->adj[tid]->dst;
        length_t size1 = cus1->dVD->getUsed()[tid];
        length_t size2 = cus2->dVD->getUsed()[tid];
        if (size1 != size2)
        {
            printf("size mismatch %d %d\n", size1, size2);
        }
        // FIX: only walk the overlap of the two lists. The original iterated to
        // size1 on both lists, reading past the end of adj2 when size2 < size1.
        length_t minSize = (size1 < size2) ? size1 : size2;
        for (int i = 0; i < minSize; ++i)
        {
            if (adj1[i] != adj2[i])
            {
                printf("adj mismatch vertex %d, %d %d\n", tid, adj1[i], adj2[i]);
                for (int j = 0; j < size1; ++j)
                {
                    printf("%d adj1 %d\n", tid, adj1[j]);
                }
                printf("%d ==\n", tid);
                // FIX: dump adj2 over its own length (was size1 — wrong bound).
                for (int j = 0; j < size2; ++j)
                {
                    printf("%d adj2 %d\n", tid, adj2[j]);
                }
            }
        }
    }
}
// Host wrapper: launches comparecus with one thread per vertex (32 threads per
// block) to diff two cuStinger structures. Debug utility; output goes to the
// device-side printf buffer.
void compareCUS(cuStinger* cus1, cuStinger* cus2)
{
    length_t nv = cus1->nv;
    dim3 numBlocks(1, 1);
    int32_t threads=32;
    dim3 threadsPerBlock(threads, 1);
    numBlocks.x = ceil((float)nv/(float)threads);
    comparecus<<<numBlocks, threadsPerBlock>>>(cus1->devicePtr(),cus2->devicePtr());
    gpuErrchk(hipGetLastError());       // surface launch-configuration errors
    gpuErrchk(hipDeviceSynchronize());  // flush device printf before returning
}
|
260b1b3ce9623ed68ffaf4f9263c059a153f6b2e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
Type de kernel :
__global__ : kernel executé sur le GPU, mais appelé par le CPU
__device__ : kernel executé et appelé par le GPU
__host__ : mode par défaut : executé et appelé par le CPU
Appel de kernel :
hipLaunchKernelGGL(( kernel) , dim3(nBlocks), dim3(threadsParBloc) , 0, 0, arguments);
nBlocks : nombre de subdivisions appliquées à la grille à calculer, type dim3
threadsParBLoc : nombre de threads à executer simultanement sur chaque
bloc, de type dim3.
Chaque kernel dispose de variables implicites, en lecture seule
blockIdx : index du bloc dans la grille
threadIdx : index du thread dans le bloc
blockDim : nombre de threads par bloc
int* A;
int size = n*n* sizeof(int);
hipMalloc( (void**) &A, size);
hipFree(A);
hipMemcpy(A_GPU, A_CPU, size, hipMemcpyHostToDevice);
| 260b1b3ce9623ed68ffaf4f9263c059a153f6b2e.cu | #include <cuda.h>
#include <cuda_runtime.h>
Type de kernel :
__global__ : kernel executé sur le GPU, mais appelé par le CPU
__device__ : kernel executé et appelé par le GPU
__host__ : mode par défaut : executé et appelé par le CPU
Appel de kernel :
kernel <<< nBlocks, threadsParBloc >>> (arguments);
nBlocks : nombre de subdivisions appliquées à la grille à calculer, type dim3
threadsParBLoc : nombre de threads à executer simultanement sur chaque
bloc, de type dim3.
Chaque kernel dispose de variables implicites, en lecture seule
blockIdx : index du bloc dans la grille
threadIdx : index du thread dans le bloc
blockDim : nombre de threads par bloc
int* A;
int size = n*n* sizeof(int);
cudaMalloc( (void**) &A, size);
cudaFree(A);
cudaMemcpy(A_GPU, A_CPU, size, cudaMemcpyHostToDevice);
|
bbf4298044cefc41e39116f28475fafa5c26f530.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/filter_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Copy channel `from` of every batch item in from_data (channel stride chw)
// to channel `to` of to_data (channel stride cphw). hw = H*W elements per
// channel; nthreads = batches * hw, one element per loop iteration.
template <typename Dtype>
__global__ void filter_gpu(const int nthreads, const Dtype* from_data,
    Dtype* to_data, const int from, const int to, const int hw, const int chw, const int cphw) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // index / hw selects the batch item, index % hw the offset inside the channel.
    int from_idx = (index / hw ) * chw + from * hw + index % hw;
    int to_idx = (index / hw ) * cphw + to * hw + index % hw;
    *(to_data + to_idx) = *(from_data + from_idx);
  }
}
// Zero channel `to` of every batch item in to_data (channel stride chw).
// hw = H*W elements per channel; nthreads = batches * hw.
template <typename Dtype>
__global__ void filter_zero_gpu(const int nthreads,
    Dtype* to_data, const int to, const int hw, const int chw) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int to_idx = (index / hw ) * chw + to * hw + index % hw;
    // Typed store instead of the previous per-element memset(): clearer, and
    // correct for any arithmetic Dtype rather than relying on the value having
    // an all-zero-bytes representation.
    *(to_data + to_idx) = Dtype(0);
  }
}
// GPU forward pass: copies the selected channels (indices_to_forward_) from each
// bottom blob into the corresponding top blob. Only axis 1 (channels) is
// implemented on the GPU; the CPU fallback path below handles axis 0 copies.
template <typename Dtype>
void FilterLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  if (!axis_){
    NOT_IMPLEMENTED;
    return;
  }
  // forward all filtered items for all bottoms but the Selector (bottom[last])
  for (int t = 0; t < top.size(); ++t) {
    int new_tops_num = top[t]->shape(axis_);
    CHECK_EQ(indices_to_forward_.size(), new_tops_num);
    const Dtype* bottom_data = bottom[t]->gpu_data();
    Dtype* top_data = top[t]->mutable_gpu_data();
    int dim = bottom[t]->count() / bottom[t]->shape(axis_);
    const int hw = bottom[t]->shape(2) * bottom[t]->shape(3);   // per-channel size
    const int chw = bottom[t]->shape(1) * bottom[t]->shape(2) * bottom[t]->shape(3);
    const int cphw = new_tops_num * bottom[t]->shape(2) * bottom[t]->shape(3);
    for (int n = 0; n < new_tops_num; ++n) {
      if (axis_) {
        // FIX: hipify had folded "hipLaunchKernelGGL((" into the NOLINT comment,
        // leaving a dangling argument list that cannot compile. Restore a proper
        // templated-kernel launch (HIP_KERNEL_NAME is required for template args).
        // NOLINT_NEXT_LINE(whitespace/operators)
        hipLaunchKernelGGL(HIP_KERNEL_NAME(filter_gpu<Dtype>),
            dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
            dim, bottom_data, top_data, indices_to_forward_[n], n, hw, chw, cphw);
      }
      else {
        int data_offset_top = n * dim;
        int data_offset_bottom = indices_to_forward_[n] * dim;
        caffe_copy(dim, bottom_data + data_offset_bottom,
            top_data + data_offset_top);
      }
    }
  }
}
// GPU backward pass: scatters top diffs back into the bottom diff at the
// forwarded channel positions; channels that were filtered out get zero diff.
// Only axis 1 (channels) is implemented on the GPU.
template <typename Dtype>
void FilterLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!axis_){
    NOT_IMPLEMENTED;
    return;
  }
  for (int i = 0; i < top.size(); ++i) {
    // bottom[last] is the selector and never needs backpropagation
    // so we can iterate over top vector because top.size() == bottom.size() -1
    int new_tops_num = top[i]->shape(axis_);
    CHECK_EQ(indices_to_forward_.size(), new_tops_num);
    int c = bottom[i]->shape(axis_);
    const int dim = top[i]->count() / new_tops_num;
    const int hw = bottom[i]->shape(2) * bottom[i]->shape(3);  // per-channel size
    const int chw = c * hw;
    const int cphw = new_tops_num * hw;
    int next_to_backward_offset = 0;
    int batch_offset;
    int data_offset_bottom;
    int data_offset_top;
    int zeroout;
    for (int n = 0; n < c; ++n) {
      data_offset_bottom = n * dim;
      zeroout = 0;
      if (next_to_backward_offset >= new_tops_num) {
        // we already visited all items that were been forwarded, so
        // just set to zero remaining ones
        zeroout = 1;
      } else {
        batch_offset = indices_to_forward_[next_to_backward_offset];
        if (n != batch_offset) { // this data was not been forwarded
          zeroout = 1;
        } else { // this data was been forwarded
          if (axis_) {
            // FIX: hipify had folded "hipLaunchKernelGGL((" into the NOLINT
            // comment, leaving non-compiling code. Restore the templated launch.
            // NOLINT_NEXT_LINE(whitespace/operators)
            hipLaunchKernelGGL(HIP_KERNEL_NAME(filter_gpu<Dtype>),
                dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
                dim, top[i]->mutable_gpu_diff(), bottom[i]->mutable_gpu_diff(), next_to_backward_offset, n, hw, cphw, chw);
          }
          else {
            data_offset_top = next_to_backward_offset * dim;
            caffe_copy(dim, top[i]->mutable_gpu_diff() + data_offset_top,
                bottom[i]->mutable_gpu_diff() + data_offset_bottom);
          }
          ++next_to_backward_offset; // point to next forwarded item index
        }
      }
      if (zeroout){
        if (axis_){
          // FIX: same hipify launch-macro corruption as above.
          // NOLINT_NEXT_LINE(whitespace/operators)
          hipLaunchKernelGGL(HIP_KERNEL_NAME(filter_zero_gpu<Dtype>),
              dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
              dim, bottom[i]->mutable_gpu_diff(), n, hw, chw);
        } else{
          caffe_gpu_set(dim, Dtype(0),
              bottom[i]->mutable_gpu_diff() + data_offset_bottom);
        }
      }
    }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(FilterLayer);
} // namespace caffe
| bbf4298044cefc41e39116f28475fafa5c26f530.cu | #include <vector>
#include "caffe/layers/filter_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Copy channel `from` of every batch item in from_data (channel stride chw)
// to channel `to` of to_data (channel stride cphw). hw = H*W elements per
// channel; nthreads = batches * hw, one element per loop iteration.
template <typename Dtype>
__global__ void filter_gpu(const int nthreads, const Dtype* from_data,
    Dtype* to_data, const int from, const int to, const int hw, const int chw, const int cphw) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // index / hw selects the batch item, index % hw the offset inside the channel.
    int from_idx = (index / hw ) * chw + from * hw + index % hw;
    int to_idx = (index / hw ) * cphw + to * hw + index % hw;
    *(to_data + to_idx) = *(from_data + from_idx);
  }
}
// Zero channel `to` of every batch item in to_data (channel stride chw).
// hw = H*W elements per channel; nthreads = batches * hw.
template <typename Dtype>
__global__ void filter_zero_gpu(const int nthreads,
    Dtype* to_data, const int to, const int hw, const int chw) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int to_idx = (index / hw ) * chw + to * hw + index % hw;
    // *(to_data + to_idx) = Dtype(0);
    // Byte-wise zeroing; equivalent to Dtype(0) only because the zero value of
    // the float types used here is all-zero bytes.
    memset(to_data + to_idx, 0, sizeof(Dtype));
  }
}
template <typename Dtype>
void FilterLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if (!axis_){
NOT_IMPLEMENTED;
return;
}
// forward all filtered items for all bottoms but the Selector (bottom[last])
for (int t = 0; t < top.size(); ++t) {
int new_tops_num = top[t]->shape(axis_);
CHECK_EQ(indices_to_forward_.size(), new_tops_num);
const Dtype* bottom_data = bottom[t]->gpu_data();
Dtype* top_data = top[t]->mutable_gpu_data();
int dim = bottom[t]->count() / bottom[t]->shape(axis_);
const int hw = bottom[t]->shape(2) * bottom[t]->shape(3);
const int chw = bottom[t]->shape(1) * bottom[t]->shape(2) * bottom[t]->shape(3);
const int cphw = new_tops_num * bottom[t]->shape(2) * bottom[t]->shape(3);
for (int n = 0; n < new_tops_num; ++n) {
if (axis_) {
filter_gpu<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>(
dim, bottom_data, top_data, indices_to_forward_[n], n, hw, chw, cphw);
}
else {
int data_offset_top = n * dim;
int data_offset_bottom = indices_to_forward_[n] * dim;
caffe_copy(dim, bottom_data + data_offset_bottom,
top_data + data_offset_top);
}
}
}
}
template <typename Dtype>
void FilterLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!axis_){
NOT_IMPLEMENTED;
return;
}
for (int i = 0; i < top.size(); ++i) {
// bottom[last] is the selector and never needs backpropagation
// so we can iterate over top vector because top.size() == bottom.size() -1
int new_tops_num = top[i]->shape(axis_);
CHECK_EQ(indices_to_forward_.size(), new_tops_num);
int c = bottom[i]->shape(axis_);
const int dim = top[i]->count() / new_tops_num;
const int hw = bottom[i]->shape(2) * bottom[i]->shape(3);
const int chw = c * hw;
const int cphw = new_tops_num * hw;
int next_to_backward_offset = 0;
int batch_offset;
int data_offset_bottom;
int data_offset_top;
int zeroout;
for (int n = 0; n < c; ++n) {
data_offset_bottom = n * dim;
zeroout = 0;
if (next_to_backward_offset >= new_tops_num) {
// we already visited all items that were been forwarded, so
// just set to zero remaining ones
zeroout = 1;
} else {
batch_offset = indices_to_forward_[next_to_backward_offset];
if (n != batch_offset) { // this data was not been forwarded
zeroout = 1;
} else { // this data was been forwarded
if (axis_) {
filter_gpu<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>(
dim, top[i]->mutable_gpu_diff(), bottom[i]->mutable_gpu_diff(), next_to_backward_offset, n, hw, cphw, chw);
}
else {
data_offset_top = next_to_backward_offset * dim;
caffe_copy(dim, top[i]->mutable_gpu_diff() + data_offset_top,
bottom[i]->mutable_gpu_diff() + data_offset_bottom);
}
++next_to_backward_offset; // point to next forwarded item index
}
}
if (zeroout){
if (axis_){
filter_zero_gpu<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>(
dim, bottom[i]->mutable_gpu_diff(), n, hw, chw);
} else{
caffe_gpu_set(dim, Dtype(0),
bottom[i]->mutable_gpu_diff() + data_offset_bottom);
}
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(FilterLayer);
} // namespace caffe
|
305943296a476a37209354ecfc81a4c407c1ba97.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2011-2013 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* CUDA kernel entry points */
#ifdef __CUDA_ARCH__
#include "kernel/kernel_compat_cuda.h"
#include "kernel_config.h"
#include "kernel/kernel_math.h"
#include "kernel/kernel_types.h"
#include "kernel/kernel_globals.h"
#include "kernel/kernel_film.h"
#include "kernel/kernel_path.h"
#include "kernel/kernel_path_branched.h"
#include "kernel/kernel_bake.h"
/* kernels */
/* Path-trace `sample` for one pixel per thread inside the tile
 * [sx, sx+sw) x [sy, sy+sh); results accumulate into `buffer`.
 * NOTE(review): this whole file is guarded by #ifdef __CUDA_ARCH__, which HIP
 * device compilation does not define — verify the guard for the HIP port. */
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_path_trace(float *buffer, uint *rng_state, int sample, int sx, int sy, int sw, int sh, int offset, int stride)
{
	int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
	int y = sy + blockDim.y*blockIdx.y + threadIdx.y;
	/* Bounds guard: the launch grid may overhang the tile. */
	if(x < sx + sw && y < sy + sh) {
		KernelGlobals kg;
		kernel_path_trace(&kg, buffer, rng_state, sample, x, y, offset, stride);
	}
}
#ifdef __BRANCHED_PATH__
/* Branched-path variant of the path-trace kernel: one pixel per thread in the
 * tile; uses its own (larger) register budget via the launch bounds macro. */
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_BRANCHED_MAX_REGISTERS)
kernel_cuda_branched_path_trace(float *buffer, uint *rng_state, int sample, int sx, int sy, int sw, int sh, int offset, int stride)
{
	int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
	int y = sy + blockDim.y*blockIdx.y + threadIdx.y;
	/* Bounds guard: the launch grid may overhang the tile. */
	if(x < sx + sw && y < sy + sh) {
		KernelGlobals kg;
		kernel_branched_path_trace(&kg, buffer, rng_state, sample, x, y, offset, stride);
	}
}
#endif
/* Convert the accumulated float render buffer to 8-bit RGBA for display;
 * one pixel per thread inside the tile. */
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_convert_to_byte(uchar4 *rgba, float *buffer, float sample_scale, int sx, int sy, int sw, int sh, int offset, int stride, int skip_linear_to_srgb_conversion)
{
	int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
	int y = sy + blockDim.y*blockIdx.y + threadIdx.y;
	if(x < sx + sw && y < sy + sh) {
		kernel_film_convert_to_byte(NULL, rgba, buffer, sample_scale, x, y, offset, stride, skip_linear_to_srgb_conversion);
	}
}
/* Convert the accumulated float render buffer to half-float RGBA;
 * one pixel per thread inside the tile. */
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_convert_to_half_float(uchar4 *rgba, float *buffer, float sample_scale, int sx, int sy, int sw, int sh, int offset, int stride, int skip_linear_to_srgb_conversion)
{
	int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
	int y = sy + blockDim.y*blockIdx.y + threadIdx.y;
	if(x < sx + sw && y < sy + sh)
	{
		kernel_film_convert_to_half_float(NULL, rgba, buffer, sample_scale, x, y, offset, stride, skip_linear_to_srgb_conversion);
	}
}
/* Evaluate a shader of the given ShaderEvalType for a 1D range of inputs;
 * one input element per thread in [sx, sx+sw). */
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_shader(uint4 *input,
                   float4 *output,
                   float *output_luma,
                   int type,
                   int sx,
                   int sw,
                   int offset,
                   int sample)
{
	int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
	if(x < sx + sw) {
		KernelGlobals kg;
		kernel_shader_evaluate(&kg,
		                       input,
		                       output,
		                       output_luma,
		                       (ShaderEvalType)type,
		                       x,
		                       sample);
	}
}
#ifdef __BAKING__
/* Bake-evaluate a 1D range of texels; one input element per thread in
 * [sx, sx+sw). Compiled only when __BAKING__ is enabled. */
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_bake(uint4 *input, float4 *output, int type, int filter, int sx, int sw, int offset, int sample)
{
	int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
	if(x < sx + sw) {
		KernelGlobals kg;
		kernel_bake_evaluate(&kg, input, output, (ShaderEvalType)type, filter, x, offset, sample);
	}
}
#endif
#endif
| 305943296a476a37209354ecfc81a4c407c1ba97.cu | /*
* Copyright 2011-2013 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* CUDA kernel entry points */
#ifdef __CUDA_ARCH__
#include "kernel/kernel_compat_cuda.h"
#include "kernel_config.h"
#include "kernel/kernel_math.h"
#include "kernel/kernel_types.h"
#include "kernel/kernel_globals.h"
#include "kernel/kernel_film.h"
#include "kernel/kernel_path.h"
#include "kernel/kernel_path_branched.h"
#include "kernel/kernel_bake.h"
/* kernels */
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_path_trace(float *buffer, uint *rng_state, int sample, int sx, int sy, int sw, int sh, int offset, int stride)
{
int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
int y = sy + blockDim.y*blockIdx.y + threadIdx.y;
if(x < sx + sw && y < sy + sh) {
KernelGlobals kg;
kernel_path_trace(&kg, buffer, rng_state, sample, x, y, offset, stride);
}
}
#ifdef __BRANCHED_PATH__
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_BRANCHED_MAX_REGISTERS)
kernel_cuda_branched_path_trace(float *buffer, uint *rng_state, int sample, int sx, int sy, int sw, int sh, int offset, int stride)
{
int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
int y = sy + blockDim.y*blockIdx.y + threadIdx.y;
if(x < sx + sw && y < sy + sh) {
KernelGlobals kg;
kernel_branched_path_trace(&kg, buffer, rng_state, sample, x, y, offset, stride);
}
}
#endif
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_convert_to_byte(uchar4 *rgba, float *buffer, float sample_scale, int sx, int sy, int sw, int sh, int offset, int stride, int skip_linear_to_srgb_conversion)
{
int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
int y = sy + blockDim.y*blockIdx.y + threadIdx.y;
if(x < sx + sw && y < sy + sh) {
kernel_film_convert_to_byte(NULL, rgba, buffer, sample_scale, x, y, offset, stride, skip_linear_to_srgb_conversion);
}
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_convert_to_half_float(uchar4 *rgba, float *buffer, float sample_scale, int sx, int sy, int sw, int sh, int offset, int stride, int skip_linear_to_srgb_conversion)
{
int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
int y = sy + blockDim.y*blockIdx.y + threadIdx.y;
if(x < sx + sw && y < sy + sh)
{
kernel_film_convert_to_half_float(NULL, rgba, buffer, sample_scale, x, y, offset, stride, skip_linear_to_srgb_conversion);
}
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_shader(uint4 *input,
float4 *output,
float *output_luma,
int type,
int sx,
int sw,
int offset,
int sample)
{
int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
if(x < sx + sw) {
KernelGlobals kg;
kernel_shader_evaluate(&kg,
input,
output,
output_luma,
(ShaderEvalType)type,
x,
sample);
}
}
#ifdef __BAKING__
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_bake(uint4 *input, float4 *output, int type, int filter, int sx, int sw, int offset, int sample)
{
int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
if(x < sx + sw) {
KernelGlobals kg;
kernel_bake_evaluate(&kg, input, output, (ShaderEvalType)type, filter, x, offset, sample);
}
}
#endif
#endif
|
520739bc9f452ab8796951b98c84d43bf978b213.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "calc_avg_activation_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver: for each of the first argv[1] matrix sizes
// and each of the 20 block shapes, times 1000 launches of
// calc_avg_activation_kernel and prints [usecs, (BLOCKX,BLOCKY), (XSIZE,YSIZE)].
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            // FIX: the original passed XSIZE*YSIZE (the element count) as the byte
            // count, under-allocating the float buffers 4x and letting the kernel
            // read/write out of bounds.
            float *src = NULL;
            hipMalloc(&src, XSIZE*YSIZE*sizeof(float));
            float *dst = NULL;
            hipMalloc(&dst, XSIZE*YSIZE*sizeof(float));
            int size = XSIZE*YSIZE;
            int channels = 1;
            int batches = 2;
            // Round the grid dimensions up so the grid covers the whole matrix.
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0)
            {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // no-op free: forces lazy runtime/context initialization
            // Warm-up launch, then 10 more warm-up iterations.
            hipLaunchKernelGGL(calc_avg_activation_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, src, dst, size, channels, batches);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(calc_avg_activation_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, src, dst, size, channels, batches);
            }
            // NOTE(review): no synchronize before `end` — this times kernel
            // enqueue, not execution; kept as-is to match the generated harness.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(calc_avg_activation_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, src, dst, size, channels, batches);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // FIX: buffers were leaked on every one of the 20*matrix_len iterations.
            hipFree(src);
            hipFree(dst);
        }
    }
    return 0;
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "calc_avg_activation_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver: for each of the first argv[1] matrix sizes
// and each of the 20 block shapes, times 1000 launches of
// calc_avg_activation_kernel and prints [usecs, (BLOCKX,BLOCKY), (XSIZE,YSIZE)].
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            // FIX: the original passed XSIZE*YSIZE (the element count) as the byte
            // count, under-allocating the float buffers 4x and letting the kernel
            // read/write out of bounds.
            float *src = NULL;
            cudaMalloc(&src, XSIZE*YSIZE*sizeof(float));
            float *dst = NULL;
            cudaMalloc(&dst, XSIZE*YSIZE*sizeof(float));
            int size = XSIZE*YSIZE;
            int channels = 1;
            int batches = 2;
            // Round the grid dimensions up so the grid covers the whole matrix.
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0)
            {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // no-op free: forces lazy runtime/context initialization
            // Warm-up launch, then 10 more warm-up iterations.
            calc_avg_activation_kernel<<<gridBlock,threadBlock>>>(src,dst,size,channels,batches);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                calc_avg_activation_kernel<<<gridBlock,threadBlock>>>(src,dst,size,channels,batches);
            }
            // NOTE(review): no synchronize before `end` — this times kernel
            // enqueue, not execution; kept as-is to match the generated harness.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                calc_avg_activation_kernel<<<gridBlock,threadBlock>>>(src,dst,size,channels,batches);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // FIX: buffers were leaked on every one of the 20*matrix_len iterations.
            cudaFree(src);
            cudaFree(dst);
        }
    }
    return 0;
}
245583ecbc4ed49d8c66df4fb947eeb3175deb5d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <algorithm>
#include <cerrno>
#include <sys/time.h>
#define CHECK_ERROR() \
{ \
hipError_t err = hipGetLastError(); \
if (err != hipSuccess) \
{ \
fprintf(stderr, "%s.%s.%d: %s.\n", __FILE__, __FUNCTION__, __LINE__, hipGetErrorString(err)); \
fflush(stderr); \
exit(1); \
} \
} \
// Render parameters parsed from the input file: image dimensions, the number
// of rows processed per GPU batch (maxRows), the escape-iteration cap, and
// the complex-plane window [xMin,xMax] x [yMin,yMax].
typedef struct _MandelbrotInfo
{
int width, height, maxRows, maxIters;
float xMin, xMax, yMin, yMax;
} MandelbrotInfo;
double wallTime()
{
  // Current wall-clock time in seconds (microsecond resolution), used for
  // coarse host-side timing of kernel and memcpy phases.
  struct timeval now;
  gettimeofday(&now, 0);
  double seconds = static_cast<double>(now.tv_sec);
  double micros  = static_cast<double>(now.tv_usec);
  return seconds + micros / 1000000.0;
}
// Parse a "key = value" configuration file into minfo. Lines whose first
// non-whitespace character is '#' are comments; unknown keys emit a warning.
// Exits the process if the file cannot be opened.
void readInputFile(const char * const input, MandelbrotInfo & minfo)
{
FILE * fp = fopen(input, "r");
char line[2048];
if (!fp)
{
fprintf(stderr, "Error, couldn't open file '%s' for reading.\n", input);
fflush(stderr);
exit(1);
}
while (fgets(line, 2047, fp))
{
// Trim leading whitespace/control characters.
char * ptr = line;
while (*ptr && *ptr <= ' ') ++ptr;
if (*ptr == '#') continue;
// Trim trailing whitespace in place.
char * end = ptr + strlen(ptr) - 1;
while (end >= ptr && *end <= ' ') --end;
*(end + 1) = 0;
char var[1024];
int ival;
float fval;
// Scan the line both as int and as float; the key comparison below picks
// the value that matters. NOTE(review): if neither sscanf matches, `var`
// is read uninitialized -- confirm inputs are always "key = value" lines.
sscanf(ptr, "%s = %d", var, &ival);
sscanf(ptr, "%s = %f", var, &fval);
if (strcmp(var, "width") == 0) minfo.width = ival;
else if (strcmp(var, "height") == 0) minfo.height = ival;
else if (strcmp(var, "maxRows") == 0) minfo.maxRows = ival;
else if (strcmp(var, "maxIters") == 0) minfo.maxIters = ival;
else if (strcmp(var, "xmin") == 0) minfo.xMin = fval;
else if (strcmp(var, "xmax") == 0) minfo.xMax = fval;
else if (strcmp(var, "ymin") == 0) minfo.yMin = fval;
else if (strcmp(var, "ymax") == 0) minfo.yMax = fval;
else
{
fprintf(stderr, "Warning, skipping invalid variable in input file (%s).\n", var);
fflush(stderr);
}
}
fclose(fp);
}
// Kernel (full-image variant): one block per image row (blockIdx.x); threads
// of the block stride across the columns by blockDim.x. Writes the Mandelbrot
// escape-time iteration count of each pixel into pPixels.
__global__ void scanRow(const MandelbrotInfo minfo, int * pPixels)
{
const int row = blockIdx.x;
const float dx = minfo.xMax - minfo.xMin;
const float dy = minfo.yMax - minfo.yMin;
const float yVal = static_cast<float>(row) / static_cast<float>(minfo.height - 1);
int * pixels = pPixels + row * minfo.width;
for (int p = threadIdx.x; p < minfo.width; p += blockDim.x)
{
int iter;
float z, zi, mag;
const float xVal = static_cast<float>(p) / static_cast<float>(minfo.width - 1);
z = zi = mag = 0.0f;
// Map pixel coordinates into the configured complex-plane window.
const float x = minfo.xMin + dx * xVal;
const float y = minfo.yMin + dy * yVal;
// Iterate z <- z^2 + c until |z|^2 >= 4 or the iteration cap is exceeded.
for (iter = 0; mag < 4.0f && iter <= minfo.maxIters; ++iter)
{
const float t = z * z - zi * zi + x;
zi = 2.0f * z * zi + y;
z = t;
mag = z * z + zi * zi;
}
pixels[p] = --iter;
}
}
// Convert a band of iteration counts (absolute image rows [startRow,endRow))
// to RGB and write them at the correct offset inside the PPM raster.
// `pixels` is band-local (row startRow maps to index 0); `rgb` is scratch.
void storeRows(FILE * outfp, const int startOfImage,
const int startRow, const int endRow, const MandelbrotInfo & minfo,
const int * const pixels, unsigned char * const rgb)
{
unsigned char * pixel = rgb;
for (int row = startRow; row < endRow; ++row)
{
const int * rowp = pixels + (row - startRow) * minfo.width;
for (int i = 0; i < minfo.width; ++i)
{
// Map the iteration count to a red intensity: 0 stays black, counts
// in [1,16) ramp over [0,0.75), higher counts ramp over [0.75,1].
float t = 0.0f;
if (rowp[i] == 0)
{
t = 0.0f;
}
else if (rowp[i] < 16)
{
t = 0.75f * (static_cast<float>(rowp[i]) - 1.0f) / 14.0f;
}
else
{
t = 0.75f + 0.25f * (static_cast<float>(rowp[i]) - 16.0f) / static_cast<float>(minfo.maxIters - 16);
}
*(pixel++) = static_cast<unsigned char>(t * 255.0f);
*(pixel++) = 0;
*(pixel++) = 0;
}
}
// Seek to this band's position in the raster and write it in one call.
fseek(outfp, startOfImage + sizeof(unsigned char) * minfo.width * startRow * 3, SEEK_SET);
fwrite(rgb, sizeof(unsigned char) * minfo.width * (endRow - startRow) * 3, 1, outfp);
}
// Kernel (banded variant): renders absolute rows [startRow, startRow+gridDim.x).
// blockIdx.x is the band-local row, so pPixels holds only this band's rows.
__global__ void scanRow(const MandelbrotInfo minfo, const int startRow, int * pPixels)
{
const int row = blockIdx.x + startRow;
const float dx = minfo.xMax - minfo.xMin;
const float dy = minfo.yMax - minfo.yMin;
const float yVal = static_cast<float>(row) / static_cast<float>(minfo.height - 1);
// Output indexed by the band-local row, not the absolute row.
int * pixels = pPixels + blockIdx.x * minfo.width;
for (int p = threadIdx.x; p < minfo.width; p += blockDim.x)
{
int iter = 0;
float z, zi, mag;
const float xVal = static_cast<float>(p) / static_cast<float>(minfo.width - 1);
z = zi = mag = 0.0f;
const float x = minfo.xMin + dx * xVal;
const float y = minfo.yMin + dy * yVal;
// Standard escape-time iteration: z <- z^2 + c until |z|^2 >= 4 or cap.
for (iter = 0; mag < 4.0f && iter <= minfo.maxIters; ++iter)
{
const float t = z * z - zi * zi + x;
zi = 2.0f * z * zi + y;
z = t;
mag = z * z + zi * zi;
}
pixels[p] = --iter;
}
}
/*
 * Render the Mandelbrot set described by the input file to a binary PPM,
 * processing the image in bands of at most maxRows rows on the GPU.
 */
__host__ int main(int argc, char ** argv)
{
  if (argc != 3)
  {
    fprintf(stderr, "Usage: %s <input_file> <output_file>\n", argv[0]);
    fflush(stderr);
    return 1;
  }
  int startOfImage;          // byte offset of the PPM raster, filled via %n
  MandelbrotInfo minfo;
  readInputFile(argv[1], minfo);
  FILE * outfp = fopen(argv[2], "wb");
  if (!outfp)
  {
    fprintf(stderr, "Error, couldn't open %s for writing.\n", argv[2]);
    fflush(stderr);
    return 1;
  }
  // Host staging buffers sized for one band of maxRows image rows.
  unsigned char * rgb = new unsigned char[3 * minfo.width * minfo.maxRows];
  int * pixels, * gpuPixels;
  double t = 0.0, t0 = 0.0, t1 = 0.0, t2;
  hipMalloc (reinterpret_cast<void ** >(&gpuPixels), sizeof(int) * minfo.width * minfo.maxRows); CHECK_ERROR();
  hipHostMalloc(reinterpret_cast<void ** >(&pixels), sizeof(int) * minfo.width * minfo.maxRows); CHECK_ERROR();
  // Binary PPM header; %n records where the raster starts for later fseek()s.
  fprintf(outfp, "P6\n%d %d\n255\n%n", minfo.width, minfo.height, &startOfImage);
  t = wallTime();
  int row = 0;
  while (row < minfo.height)
  {
    // One block per row of this band; gs.x is the number of rows in the
    // band (smaller than maxRows only for the final band).
    uint3 gs = { ::min(row + minfo.maxRows, minfo.height) - row, 1, 1 };
    uint3 bs = { 256, 1, 1 };
    t2 = wallTime();
    hipLaunchKernelGGL(( scanRow), dim3(gs), dim3(bs), 0, 0, minfo, row, gpuPixels); CHECK_ERROR();
    hipDeviceSynchronize(); CHECK_ERROR();
    t2 = wallTime() - t2;
    t0 += t2;
    t2 = wallTime();
    // Copy back only the rows this band actually computed (the original
    // always copied maxRows rows, reading uninitialized data on the last band).
    hipMemcpy(pixels, gpuPixels, sizeof(int) * minfo.width * gs.x, hipMemcpyDeviceToHost); CHECK_ERROR();
    t2 = wallTime() - t2;
    t1 += t2;
    // BUG FIX: storeRows() expects an absolute end row. The original passed
    // the band height gs.x, so every band after the first stored nothing.
    storeRows(outfp, startOfImage, row, row + gs.x, minfo, pixels, rgb);
    row += minfo.maxRows;
  }
  t = wallTime() - t;
  fclose(outfp);
  hipFree(gpuPixels);
  hipHostFree(pixels);
  delete [] rgb;
  // printf("took %f seconds in kernel, %f seconds in memcpy.\n", t0, t1);
  printf("done, took %f seconds.\n", t);
  return 0;
}
| 245583ecbc4ed49d8c66df4fb947eeb3175deb5d.cu | #include <cuda.h>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <algorithm>
#include <cerrno>
#include <sys/time.h>
#define CHECK_ERROR() \
{ \
cudaError_t err = cudaGetLastError(); \
if (err != cudaSuccess) \
{ \
fprintf(stderr, "%s.%s.%d: %s.\n", __FILE__, __FUNCTION__, __LINE__, cudaGetErrorString(err)); \
fflush(stderr); \
exit(1); \
} \
} \
// Render parameters parsed from the input file: image dimensions, rows per
// GPU batch (maxRows), iteration cap, and the complex-plane window.
typedef struct _MandelbrotInfo
{
int width, height, maxRows, maxIters;
float xMin, xMax, yMin, yMax;
} MandelbrotInfo;
double wallTime()
{
  // Seconds since the Unix epoch as a double, with microsecond precision.
  struct timeval tval;
  gettimeofday(&tval, 0);
  const double sec  = static_cast<double>(tval.tv_sec);
  const double usec = static_cast<double>(tval.tv_usec);
  return sec + usec / 1000000.0;
}
// Parse a "key = value" configuration file into minfo. '#'-prefixed lines are
// comments; unknown keys emit a warning; exits if the file cannot be opened.
void readInputFile(const char * const input, MandelbrotInfo & minfo)
{
FILE * fp = fopen(input, "r");
char line[2048];
if (!fp)
{
fprintf(stderr, "Error, couldn't open file '%s' for reading.\n", input);
fflush(stderr);
exit(1);
}
while (fgets(line, 2047, fp))
{
// Strip leading and trailing whitespace in place.
char * ptr = line;
while (*ptr && *ptr <= ' ') ++ptr;
if (*ptr == '#') continue;
char * end = ptr + strlen(ptr) - 1;
while (end >= ptr && *end <= ' ') --end;
*(end + 1) = 0;
char var[1024];
int ival;
float fval;
// Scan as both int and float; the key match below selects which is used.
// NOTE(review): `var` is uninitialized if neither sscanf matches.
sscanf(ptr, "%s = %d", var, &ival);
sscanf(ptr, "%s = %f", var, &fval);
if (strcmp(var, "width") == 0) minfo.width = ival;
else if (strcmp(var, "height") == 0) minfo.height = ival;
else if (strcmp(var, "maxRows") == 0) minfo.maxRows = ival;
else if (strcmp(var, "maxIters") == 0) minfo.maxIters = ival;
else if (strcmp(var, "xmin") == 0) minfo.xMin = fval;
else if (strcmp(var, "xmax") == 0) minfo.xMax = fval;
else if (strcmp(var, "ymin") == 0) minfo.yMin = fval;
else if (strcmp(var, "ymax") == 0) minfo.yMax = fval;
else
{
fprintf(stderr, "Warning, skipping invalid variable in input file (%s).\n", var);
fflush(stderr);
}
}
fclose(fp);
}
// Kernel (full-image variant): one block per row (blockIdx.x); threads stride
// across columns by blockDim.x, writing escape-time counts into pPixels.
__global__ void scanRow(const MandelbrotInfo minfo, int * pPixels)
{
const int row = blockIdx.x;
const float dx = minfo.xMax - minfo.xMin;
const float dy = minfo.yMax - minfo.yMin;
const float yVal = static_cast<float>(row) / static_cast<float>(minfo.height - 1);
int * pixels = pPixels + row * minfo.width;
for (int p = threadIdx.x; p < minfo.width; p += blockDim.x)
{
int iter;
float z, zi, mag;
const float xVal = static_cast<float>(p) / static_cast<float>(minfo.width - 1);
z = zi = mag = 0.0f;
const float x = minfo.xMin + dx * xVal;
const float y = minfo.yMin + dy * yVal;
// Iterate z <- z^2 + c until |z|^2 >= 4 or the iteration cap is exceeded.
for (iter = 0; mag < 4.0f && iter <= minfo.maxIters; ++iter)
{
const float t = z * z - zi * zi + x;
zi = 2.0f * z * zi + y;
z = t;
mag = z * z + zi * zi;
}
pixels[p] = --iter;
}
}
// Convert a band of iteration counts (absolute rows [startRow,endRow)) to RGB
// and write them at the correct offset of the PPM raster. `pixels` is
// band-local (row startRow maps to index 0); `rgb` is caller-provided scratch.
void storeRows(FILE * outfp, const int startOfImage,
const int startRow, const int endRow, const MandelbrotInfo & minfo,
const int * const pixels, unsigned char * const rgb)
{
unsigned char * pixel = rgb;
for (int row = startRow; row < endRow; ++row)
{
const int * rowp = pixels + (row - startRow) * minfo.width;
for (int i = 0; i < minfo.width; ++i)
{
// Iteration count -> red intensity: 0 black, [1,16) ramps over
// [0,0.75), >=16 ramps over [0.75,1].
float t = 0.0f;
if (rowp[i] == 0)
{
t = 0.0f;
}
else if (rowp[i] < 16)
{
t = 0.75f * (static_cast<float>(rowp[i]) - 1.0f) / 14.0f;
}
else
{
t = 0.75f + 0.25f * (static_cast<float>(rowp[i]) - 16.0f) / static_cast<float>(minfo.maxIters - 16);
}
*(pixel++) = static_cast<unsigned char>(t * 255.0f);
*(pixel++) = 0;
*(pixel++) = 0;
}
}
// Seek to this band's raster position and write it in a single call.
fseek(outfp, startOfImage + sizeof(unsigned char) * minfo.width * startRow * 3, SEEK_SET);
fwrite(rgb, sizeof(unsigned char) * minfo.width * (endRow - startRow) * 3, 1, outfp);
}
// Kernel (banded variant): renders absolute rows [startRow, startRow+gridDim.x);
// blockIdx.x is the band-local row, so pPixels holds only this band's rows.
__global__ void scanRow(const MandelbrotInfo minfo, const int startRow, int * pPixels)
{
const int row = blockIdx.x + startRow;
const float dx = minfo.xMax - minfo.xMin;
const float dy = minfo.yMax - minfo.yMin;
const float yVal = static_cast<float>(row) / static_cast<float>(minfo.height - 1);
// Output indexed by the band-local row, not the absolute row.
int * pixels = pPixels + blockIdx.x * minfo.width;
for (int p = threadIdx.x; p < minfo.width; p += blockDim.x)
{
int iter = 0;
float z, zi, mag;
const float xVal = static_cast<float>(p) / static_cast<float>(minfo.width - 1);
z = zi = mag = 0.0f;
const float x = minfo.xMin + dx * xVal;
const float y = minfo.yMin + dy * yVal;
// Standard escape-time iteration: z <- z^2 + c until |z|^2 >= 4 or cap.
for (iter = 0; mag < 4.0f && iter <= minfo.maxIters; ++iter)
{
const float t = z * z - zi * zi + x;
zi = 2.0f * z * zi + y;
z = t;
mag = z * z + zi * zi;
}
pixels[p] = --iter;
}
}
/*
 * Render the Mandelbrot set described by the input file to a binary PPM,
 * processing the image in bands of at most maxRows rows on the GPU.
 */
__host__ int main(int argc, char ** argv)
{
  if (argc != 3)
  {
    fprintf(stderr, "Usage: %s <input_file> <output_file>\n", argv[0]);
    fflush(stderr);
    return 1;
  }
  int startOfImage;          // byte offset of the PPM raster, filled via %n
  MandelbrotInfo minfo;
  readInputFile(argv[1], minfo);
  FILE * outfp = fopen(argv[2], "wb");
  if (!outfp)
  {
    fprintf(stderr, "Error, couldn't open %s for writing.\n", argv[2]);
    fflush(stderr);
    return 1;
  }
  // Host staging buffers sized for one band of maxRows image rows.
  unsigned char * rgb = new unsigned char[3 * minfo.width * minfo.maxRows];
  int * pixels, * gpuPixels;
  double t = 0.0, t0 = 0.0, t1 = 0.0, t2;
  cudaMalloc (reinterpret_cast<void ** >(&gpuPixels), sizeof(int) * minfo.width * minfo.maxRows); CHECK_ERROR();
  cudaMallocHost(reinterpret_cast<void ** >(&pixels), sizeof(int) * minfo.width * minfo.maxRows); CHECK_ERROR();
  // Binary PPM header; %n records where the raster starts for later fseek()s.
  fprintf(outfp, "P6\n%d %d\n255\n%n", minfo.width, minfo.height, &startOfImage);
  t = wallTime();
  int row = 0;
  while (row < minfo.height)
  {
    // One block per row of this band; gs.x is the number of rows in the
    // band (smaller than maxRows only for the final band).
    uint3 gs = { std::min(row + minfo.maxRows, minfo.height) - row, 1, 1 };
    uint3 bs = { 256, 1, 1 };
    t2 = wallTime();
    scanRow<<<gs, bs>>>(minfo, row, gpuPixels); CHECK_ERROR();
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // drop-in modern equivalent.
    cudaDeviceSynchronize(); CHECK_ERROR();
    t2 = wallTime() - t2;
    t0 += t2;
    t2 = wallTime();
    // Copy back only the rows this band actually computed (the original
    // always copied maxRows rows, reading uninitialized data on the last band).
    cudaMemcpy(pixels, gpuPixels, sizeof(int) * minfo.width * gs.x, cudaMemcpyDeviceToHost); CHECK_ERROR();
    t2 = wallTime() - t2;
    t1 += t2;
    // BUG FIX: storeRows() expects an absolute end row. The original passed
    // the band height gs.x, so every band after the first stored nothing.
    storeRows(outfp, startOfImage, row, row + gs.x, minfo, pixels, rgb);
    row += minfo.maxRows;
  }
  t = wallTime() - t;
  fclose(outfp);
  cudaFree(gpuPixels);
  cudaFreeHost(pixels);
  delete [] rgb;
  // printf("took %f seconds in kernel, %f seconds in memcpy.\n", t0, t1);
  printf("done, took %f seconds.\n", t);
  return 0;
}
|
ab4388140aac69653f8e1d2b1298ea3fd255c6dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<iostream>
#include<conio.h>
using namespace std;
// Trivial kernel: every launched thread prints one "Hello Word" line via
// device-side printf (output order across threads is unspecified).
__global__ void Helloword(void)
{
printf("Hello Word\n");
}
int main()
{
// Launch 1 block of 1000 threads; each prints once (1000 lines total).
hipLaunchKernelGGL(( Helloword) , dim3(1), dim3(1000) , 0, 0, );
// NOTE(review): no explicit synchronize before the reset -- relies on
// hipDeviceReset() to drain the pending device printf output.
hipDeviceReset();
getch(); // conio.h, Windows-only: wait for a keypress before exiting
return 0;
}
| ab4388140aac69653f8e1d2b1298ea3fd255c6dd.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<iostream>
#include<conio.h>
using namespace std;
// Trivial kernel: every launched thread prints one "Hello Word" line via
// device-side printf (output order across threads is unspecified).
__global__ void Helloword(void)
{
printf("Hello Word\n");
}
int main()
{
// Launch 1 block of 1000 threads; each prints once (1000 lines total).
Helloword <<< 1, 1000 >>>();
// NOTE(review): no explicit synchronize before the reset -- relies on
// cudaDeviceReset() to drain the pending device printf output.
cudaDeviceReset();
getch(); // conio.h, Windows-only: wait for a keypress before exiting
return 0;
}
|
14d1623f369f3df43ef2ec8d5f7b6cfe5355c3de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Based on CUDA SDK template from NVIDIA
// sgm algorithm adapted from http://lunokhod.org/?p=1403
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <assert.h>
#include <float.h>
#include <stdlib.h>
#include <limits>
#include <algorithm>
// includes, project
#include <cutil_inline.h>
#define MMAX_BRIGHTNESS 255
#define PENALTY1 15
#define PENALTY2 100
#define COSTS(i,j,d) costs[(i)*disp_range+(j)*nx*disp_range+(d)]
#define ACCUMULATED_COSTS(i,j,d) accumulated_costs[(i)*disp_range+(j)*nx*disp_range+(d)]
#define LEFT_IMAGE(i,j) left_image[(i)+(j)*nx]
#define RIGHT_IMAGE(i,j) right_image[(i)+(j)*nx]
#define DISP_IMAGE(i,j) disp_image[(i)+(j)*nx]
#define MMAX(a,b) (((a)>(b))?(a):(b))
#define MMIN(a,b) (((a)<(b))?(a):(b))
#define MYASSERT(condition) if (!(condition)) return;
/* function headers */
void determine_costs(const int *left_image, const int *right_image, int *costs, const int nx, const int ny, const int disp_range);
void evaluate_path( const int *prior, const int* local,
int path_intensity_gradient, int *curr_cost,
const int nx, const int ny, const int disp_range );
__device__ void devEvaluate_path(const int *prior, const int *local, int path_intensity_gradient, int *curr_cost , const int nx, const int ny, const int disp_range);
void iterate_direction_dirxpos(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirypos(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirxneg(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_diryneg(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range ) ;
void inplace_sum_views( int * im1, const int * im2,
const int nx, const int ny, const int disp_range ) ;
int find_min_index( const int *v, const int dist_range ) ;
void create_disparity_view( const int *accumulated_costs , int * disp_image, int nx, int ny) ;
void sgmHost( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range );
void sgmDevice( const int *h_leftIm, const int *h_rightIm,
int *h_dispImD,
const int w, const int h, const int disp_range );
void usage(char *command);
/* functions code */
// Build the per-pixel SGM matching-cost volume: for every pixel (i,j) and
// candidate disparity d, cost = |left(i,j) - right(i-d,j)|. Entries with
// i < d (no valid right-image pixel) keep the sentinel value 255.
void determine_costs(const int *left_image, const int *right_image, int *costs, const int nx, const int ny, const int disp_range){
std::fill(costs, costs+nx*ny*disp_range, 255u);
for ( int j = 0; j < ny; j++ ) {
for ( int d = 0; d < disp_range; d++ ) {
for ( int i = d; i < nx; i++ ) {
COSTS(i,j,d) = abs( LEFT_IMAGE(i,j) - RIGHT_IMAGE(i-d,j) );
}
}
}
}
// Device version of determine_costs: one thread per (i,j,d) cost-volume cell.
// NOTE(review): d is not range-checked; assumes the launch uses exactly
// disp_range threads in z -- confirm against the caller (sgmDevice).
__global__ void devDetermine_costs(const int *left_image, const int *right_image, int *costs, const int nx, const int ny, const int disp_range) {
int j = threadIdx.y + blockIdx.y * blockDim.y; // check that j < ny
int i = threadIdx.x + blockIdx.x * blockDim.x; // check that i < nx
int d = threadIdx.z + blockIdx.z * blockDim.z; // check that d < disp_range
//int id = i + j * nx;
if(i<nx && j< ny){
COSTS(i,j,d)=255;
if (i >=d){
COSTS(i,j,d) = abs( LEFT_IMAGE(i,j) - RIGHT_IMAGE(i-d,j) );
}
}
}
//////////////////////////////////////////////////////////////////////////////////
// Host path aggregation, left-to-right (dirx = +1): sweep each row from the
// left edge, seeding column 0 with the raw costs and propagating the SGM
// recurrence (evaluate_path) along +x.
void iterate_direction_dirxpos(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range ) {
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = 0; i < WIDTH; i++ ) {
if(i==0) {
// Edge pixel: no predecessor along this path; copy the raw costs.
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(0,j,d) += COSTS(0,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)) ,
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range);
}
}
}
}
// Device left-to-right sweep: one block per row (blockIdx.y), one thread per
// disparity (threadIdx.x). Columns are walked sequentially with a barrier
// between columns because column i depends on column i-dirx.
// NOTE(review): __syncthreads() sits inside the `if(j < ny && ...)` guard --
// safe only if all threads of a block agree on the condition; confirm the
// launch shape makes the guard block-uniform.
__global__ void devIterate_direction_dirxpos(const int dirx, const int *left_image, const int* costs, int *accumulated_costs, const int nx, const int ny, const int disp_range ) {
const int WIDTH = nx;
//const int HEIGHT = ny;
int j = blockIdx.y;
int d = threadIdx.x;
if(j < ny && d < disp_range){
for ( int i = 0; i < WIDTH; i++ ) {
//if(i<WIDTH /*&& j< HEIGHT*/) {
if(i==0) {
//for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(0,j,d) += COSTS(0,j,d);
//}
}
else {
devEvaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0), &COSTS(i,j,0), abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)), &ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range);
}
__syncthreads();
//}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////7
// Host path aggregation, top-to-bottom (diry = +1): sweep each column from the
// top edge, seeding row 0 with the raw costs and propagating evaluate_path
// along +y.
void iterate_direction_dirypos(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = 0; j < HEIGHT; j++ ) {
if(j==0) {
// Edge pixel: no predecessor along this path; copy the raw costs.
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,0,d) += COSTS(i,0,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}
}
// Device top-to-bottom sweep: one block per column (blockIdx.y), one thread
// per disparity (threadIdx.x); rows walked sequentially with a barrier
// between rows because row j depends on row j-diry.
// NOTE(review): __syncthreads() is under the `if(i < nx && ...)` guard --
// confirm the condition is uniform across each block's threads.
__global__ void devIterate_direction_dirypos(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
//const int WIDTH = nx;
const int HEIGHT = ny;
int i = blockIdx.y;
int d = threadIdx.x;
if(i < nx && d < disp_range)
for ( int j = 0; j < HEIGHT; j++ ) {
if(j==0) {
//for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,0,d) += COSTS(i,0,d);
//}
}
else {
devEvaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
__syncthreads();
}
}
////////////////////////////////////////////////////////////////////////////////////////7
// Host path aggregation, right-to-left (dirx = -1): sweep each row from the
// right edge, seeding the last column with the raw costs and propagating
// evaluate_path along -x (note i-dirx == i+1 here).
void iterate_direction_dirxneg(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = WIDTH-1; i >= 0; i-- ) {
if(i==WIDTH-1) {
// Edge pixel: no predecessor along this path; copy the raw costs.
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(WIDTH-1,j,d) += COSTS(WIDTH-1,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}
}
// Device right-to-left sweep: one block per row (blockIdx.y), one thread per
// disparity (threadIdx.x); columns walked sequentially from the right edge
// with a barrier between columns (column i depends on column i-dirx == i+1).
// NOTE(review): __syncthreads() is under a thread-dependent guard -- confirm
// the condition is uniform per block at the chosen launch shape.
__global__ void devIterate_direction_dirxneg(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
int j = blockIdx.y;
int d = threadIdx.x;
if (j<ny && d < disp_range){
//if(i >=0 && i<=WIDTH-1 /*&& j< HEIGHT*/) {
for ( int i = WIDTH-1; i >= 0; i-- ) {
if(i==WIDTH-1) {
//for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(WIDTH-1,j,d) += COSTS(WIDTH-1,j,d);
//}
}
else {
devEvaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
__syncthreads();
}
}
}
////////////////////////////////////////////////////////////////////////////////////////
// Host path aggregation, bottom-to-top (diry = -1): sweep each column from
// the bottom edge, seeding the last row with the raw costs and propagating
// evaluate_path along -y (note j-diry == j+1 here).
void iterate_direction_diryneg(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = HEIGHT-1; j >= 0; j-- ) {
if(j==HEIGHT-1) {
// Edge pixel: no predecessor along this path; copy the raw costs.
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,HEIGHT-1,d) += COSTS(i,HEIGHT-1,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0) , nx, ny, disp_range);
}
}
}
}
// Device bottom-to-top sweep: one block per column (blockIdx.y), one thread
// per disparity (threadIdx.x); rows walked sequentially from the bottom edge
// with a barrier between rows (row j depends on row j-diry == j+1).
// NOTE(review): __syncthreads() is under a thread-dependent guard -- confirm
// the condition is uniform per block at the chosen launch shape.
__global__ void devIterate_direction_diryneg(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int HEIGHT = ny;
int i = blockIdx.y;
int d = threadIdx.x;
//int j = blockIdx.y*blockDim.y + threadIdx.y;
//for ( int i = 0; i < WIDTH; i++ ) {
if(i < nx && d < disp_range){
for ( int j = HEIGHT-1; j >= 0; j-- ) {
if(j==HEIGHT-1) {
//for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,HEIGHT-1,d) += COSTS(i,HEIGHT-1,d);
//}
}
else {
devEvaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0) , nx, ny, disp_range);
}
__syncthreads();
}
}
}
////////////////////////////////////////////////////////////////////////////////////
// Dispatch one host-side aggregation sweep: picks the specialized edge-walk
// routine for the requested (dirx, diry) direction. Exactly one of
// dirx / diry is expected to be non-zero.
void iterate_direction( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
// Walk along the edges in a clockwise fashion
if ( dirx > 0 ) {
// LEFT MOST EDGE
// Process every pixel along this edge
iterate_direction_dirxpos(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry > 0 ) {
// TOP MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the top left most pixel
iterate_direction_dirypos(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( dirx < 0 ) {
// RIGHT MOST EDGE
// Process every pixel along this edge only if diry ==
// 0. Otherwise skip the top right most pixel
iterate_direction_dirxneg(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry < 0 ) {
// BOTTOM MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the bottom left and bottom right pixel
iterate_direction_diryneg(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
}
// Host-side dispatcher for the GPU sweeps: launches one block per row
// (x sweeps, dimGridx) or per column (y sweeps, dimGridy), with disp_range
// threads per block (one per disparity). All pointers are device pointers.
void devIterate_direction( const int dirx, const int diry, const int *devLeft_image,
const int* devCosts, int *devAccumulated_costs,
const int nx, const int ny, const int disp_range )
{
// Walk along the edges in a clockwise fashion
int block_x = disp_range;
int block_y = 1;
dim3 dimBlock(block_x,block_y);
dim3 dimGridy(1,nx);
dim3 dimGridx(1,ny);
//int *devLeft_image, *devCosts, *devAccumulated_costs;
//hipMalloc((void **)&devLeft_image, nx*ny*sizeof(int));
//hipMalloc((void **)&devCosts, nx*ny*sizeof(int)*disp_range);
//hipMalloc((void **)&devAccumulated_costs, nx*ny*sizeof(int)*disp_range);
//hipMemcpy(devLeft_image, left_image, nx*ny*sizeof(int), hipMemcpyHostToDevice);
//hipMemcpy(devCosts, costs, nx*ny*sizeof(int)*disp_range, hipMemcpyHostToDevice);
//hipMemcpy(devAccumulated_costs, accumulated_costs, nx*ny*sizeof(int)*disp_range, hipMemcpyHostToDevice);
if ( dirx > 0 ) {
hipLaunchKernelGGL(( devIterate_direction_dirxpos), dim3(dimGridx), dim3(dimBlock), 0, 0, dirx,devLeft_image,devCosts,devAccumulated_costs, nx, ny, disp_range);
}
else if ( diry > 0 ) {
hipLaunchKernelGGL(( devIterate_direction_dirypos), dim3(dimGridy), dim3(dimBlock), 0, 0, diry,devLeft_image,devCosts,devAccumulated_costs, nx, ny, disp_range);
}
else if ( dirx < 0 ) {
hipLaunchKernelGGL(( devIterate_direction_dirxneg), dim3(dimGridx), dim3(dimBlock), 0, 0, dirx,devLeft_image,devCosts,devAccumulated_costs, nx, ny, disp_range);
}
else if ( diry < 0 ) {
hipLaunchKernelGGL(( devIterate_direction_diryneg), dim3(dimGridy), dim3(dimBlock), 0, 0, diry,devLeft_image,devCosts,devAccumulated_costs, nx, ny, disp_range);
}
//hipMemcpy(accumulated_costs, devAccumulated_costs, nx*ny*sizeof(int)*disp_range, hipMemcpyDeviceToHost);
/* free device memory that will no longer be used */
//hipFree(devCosts);
//hipFree(devLeft_image);
//hipFree(devAccumulated_costs);
}
// ADD two cost images
// Element-wise accumulate one direction's cost volume into the running total:
// im1[k] += im2[k] for every k in the nx*ny*disp_range volume.
void inplace_sum_views( int * im1, const int * im2,
const int nx, const int ny, const int disp_range )
{
  const int total = nx * ny * disp_range;
  for (int k = 0; k < total; ++k) {
    im1[k] += im2[k];
  }
}
// Device version of inplace_sum_views: one thread per (x,y,z) cell of the
// cost volume; MYASSERT is an early-return guard for out-of-range threads.
__global__ void devInplace_sum_views( int * im1, const int * im2, const int nx, const int ny, const int disp_range ) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
MYASSERT(x < nx);
int y = threadIdx.y + blockIdx.y * blockDim.y;
MYASSERT(y < ny);
int z = threadIdx.z + blockIdx.z * blockDim.z;
MYASSERT(z < disp_range);
// Flat index: x fastest, then y, then the disparity plane z.
int id = x + y * nx + z * nx * ny;
im1[id] += im2[id];
}
// Return the index of the smallest element in v[0 .. disp_range), or -1 when
// the range is empty (or every element equals INT_MAX). Ties keep the first.
int find_min_index( const int *v, const int disp_range )
{
  int best_idx = -1;
  int best_val = std::numeric_limits<int>::max();
  for (int idx = 0; idx < disp_range; ++idx) {
    if (v[idx] < best_val) {
      best_val = v[idx];
      best_idx = idx;
    }
  }
  return best_idx;
}
// Device-side argmin over v[0 .. disp_range); returns -1 for an empty range.
// 2147483647 is INT_MAX spelled out (std::numeric_limits is host-only here).
__device__ int devFind_min_index( const int *v, const int disp_range )
{
int min =2147483647;
int minind = -1;
for (int d=0; d < disp_range; d++) {
if(v[d]<min) {
min = v[d];
minind = d;
}
}
return minind;
}
// SGM path-cost update for one pixel along a scan direction:
//   curr_cost[d] = local[d] + min_{d'}( prior[d'] + P(d,d') ) - min_{d'} prior[d']
// where P is 0 for d'==d, PENALTY1 for |d'-d|==1, and an intensity-adaptive
// large penalty otherwise (weaker smoothing across strong image gradients).
// Subtracting the prior minimum keeps accumulated costs from growing unbounded.
void evaluate_path(const int *prior, const int *local,
int path_intensity_gradient, int *curr_cost ,
const int nx, const int ny, const int disp_range)
{
memcpy(curr_cost, local, sizeof(int)*disp_range);
for ( int d = 0; d < disp_range; d++ ) {
int e_smooth = std::numeric_limits<int>::max();
for ( int d_p = 0; d_p < disp_range; d_p++ ) {
if ( d_p - d == 0 ) {
// No penalty
e_smooth = MMIN(e_smooth,prior[d_p]);
} else if ( abs(d_p - d) == 1 ) {
// Small penalty
e_smooth = MMIN(e_smooth,prior[d_p]+PENALTY1);
} else {
// Large penalty
e_smooth =
MMIN(e_smooth,prior[d_p] +
MMAX(PENALTY1,
path_intensity_gradient ? PENALTY2/path_intensity_gradient : PENALTY2));
}
}
curr_cost[d] += e_smooth;
}
// Normalize by the minimum of the prior pixel's costs.
int min = std::numeric_limits<int>::max();
for ( int d = 0; d < disp_range; d++ ) {
if (prior[d]<min) min=prior[d];
}
for ( int d = 0; d < disp_range; d++ ) {
curr_cost[d]-=min;
}
}
// Device version of evaluate_path: each thread handles the single disparity
// d = threadIdx.x and updates curr_cost[d] only; 2147483647 is INT_MAX.
// NOTE(review): contains a __syncthreads() and is invoked from loops in the
// sweep kernels -- assumes all disp_range threads of the block call it
// together; confirm against the launch shape in devIterate_direction.
__device__ void devEvaluate_path(const int *prior, const int *local, int path_intensity_gradient, int *curr_cost , const int nx, const int ny, const int disp_range)
{
int d = threadIdx.x;
curr_cost[d] = local[d];
//__syncthreads();
//for ( int d = 0; d < disp_range; d++ ) {
int e_smooth = 2147483647;
for ( int d_p = 0; d_p < disp_range; d_p++ ) {
if ( d_p - d == 0 ) {
// No penalty
e_smooth = MMIN(e_smooth,prior[d_p]);
} else if ( abs(d_p - d) == 1 ) {
// Small penalty
e_smooth = MMIN(e_smooth,prior[d_p]+PENALTY1);
} else {
// Large penalty
e_smooth =
MMIN(e_smooth,prior[d_p] +
MMAX(PENALTY1,
path_intensity_gradient ? PENALTY2/path_intensity_gradient : PENALTY2));
}
}
curr_cost[d] += e_smooth;
__syncthreads();
//}
// Normalize by the minimum of the prior pixel's costs.
int min = 2147483647;
for ( int k = 0; k < disp_range; k++ ) {
if (prior[k]<min) min=prior[k];
}
//for ( int d = 0; d < disp_range; d++ ) {
//__syncthreads();
curr_cost[d]-=min;
//}
}
// Winner-take-all: for every pixel pick the disparity with the lowest
// accumulated cost, scaled by 4 to spread values over the 8-bit display range.
void create_disparity_view( const int *accumulated_costs , int * disp_image,
const int nx, const int ny, const int disp_range)
{
for ( int j = 0; j < ny; j++ ) {
for ( int i = 0; i < nx; i++ ) {
DISP_IMAGE(i,j) = 4 * find_min_index( &ACCUMULATED_COSTS(i,j,0), disp_range );
}
}
}
// Device winner-take-all: one thread per pixel; writes 4 * argmin disparity.
__global__ void devCreate_disparity_view( const int *accumulated_costs , int * disp_image,
const int nx, const int ny, const int disp_range)
{
int j = threadIdx.y + blockIdx.y * blockDim.y; // check that j < ny
int i = threadIdx.x + blockIdx.x * blockDim.x; // check that i < nx
if(i<nx && j< ny)
{
DISP_IMAGE(i,j) = 4 * devFind_min_index( &ACCUMULATED_COSTS(i,j,0), disp_range );
}
}
/*
* Links:
* http://www.dlr.de/rmc/rm/en/desktopdefault.aspx/tabid-9389/16104_read-39811/
* http://lunokhod.org/?p=1356
*/
// sgm code to run on the host
// CPU reference implementation of semi-global matching: build the cost
// volume, aggregate it along 4 scan paths ((+-1,0) and (0,+-1)), then do a
// winner-take-all pass to produce the disparity image.
void sgmHost( const int *h_leftIm, const int *h_rightIm, int *h_dispIm, const int w, const int h, const int disp_range) {
const int nx = w;
const int ny = h;
// Processing all costs. W*H*D. D= disp_range
int *costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
determine_costs(h_leftIm, h_rightIm, costs, nx, ny, disp_range);
int *accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
int *dir_accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (accumulated_costs == NULL || dir_accumulated_costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
int dirx=0,diry=0;
// Horizontal sweeps: (-1,0) and (+1,0); (0,0) is skipped.
for(dirx=-1; dirx<2; dirx++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
dirx=0;
// Vertical sweeps: (0,-1) and (0,+1).
for(diry=-1; diry<2; diry++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
free(costs);
free(dir_accumulated_costs);
create_disparity_view( accumulated_costs, h_dispIm, nx, ny, disp_range );
free(accumulated_costs);
}
// sgm code to run on the GPU
// GPU SGM pipeline: mirrors sgmHost but keeps the cost volume and the
// aggregation buffers on the device; only the input images and the final
// disparity map cross the host/device boundary. Aggregates the same 4 scan
// paths as the host version: (+-1,0) and (0,+-1).
void sgmDevice( const int *h_leftIm, const int *h_rightIm, int *h_dispIm, const int w, const int h, const int disp_range ){
const int nx = w;
const int ny = h;
const int size = nx * ny;
const int memsize = size * sizeof(int);
/* device cost-volume buffers */
int *devCosts, *devAccumulated_costs;
hipMalloc((void **)&devCosts, memsize*disp_range);
/* device image buffers */
int *devH_leftIm, *devH_rightIm, *devH_dispIm;
hipMalloc((void **)&devH_leftIm, memsize);
hipMalloc((void **)&devH_rightIm, memsize);
hipMalloc((void **)&devH_dispIm, memsize);
/* launch geometry for the 3-D cost-volume kernels: one thread per (i,j,d) */
int block3_x = 4;
int block3_y = 4;
int block3_z = disp_range;
int grid3_x = ceil((float)nx/block3_x);
int grid3_y = ceil((float)ny/block3_y);
dim3 dimBlock3(block3_x,block3_y, block3_z);
dim3 dimGrid3(grid3_x,grid3_y,1);
/* launch geometry for the 2-D per-pixel kernels */
int block4_x = 16;
int block4_y = 32;
int grid4_x = ceil((float)nx/block4_x);
int grid4_y = ceil((float)ny/block4_y);
dim3 dimBlock4(block4_x,block4_y);
dim3 dimGrid4(grid4_x,grid4_y);
/* copy the input images to device memory */
hipMemcpy(devH_leftIm, h_leftIm, memsize, hipMemcpyHostToDevice);
hipMemcpy(devH_rightIm, h_rightIm, memsize, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( devDetermine_costs), dim3(dimGrid3), dim3(dimBlock3), 0, 0, devH_leftIm, devH_rightIm, devCosts, nx, ny, disp_range);
/* zero-initialized host buffers, used only to clear the device accumulators */
int *accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
int *dir_accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
hipMalloc((void **)&devAccumulated_costs, memsize*disp_range);
int *devDir_accumulated_costs;
hipMalloc((void **)&devDir_accumulated_costs, memsize*disp_range);
int dirx=0,diry=0;
hipMemcpy(devAccumulated_costs, accumulated_costs, memsize*disp_range, hipMemcpyHostToDevice);
hipMemcpy(devDir_accumulated_costs, dir_accumulated_costs, memsize*disp_range, hipMemcpyHostToDevice);
/* horizontal sweeps: (-1,0) and (+1,0) */
for(dirx=-1; dirx<2; dirx++) {
if(dirx==0 && diry==0) continue;
hipMemset(devDir_accumulated_costs, 0, nx*ny*disp_range*sizeof(int));
devIterate_direction( dirx,diry, devH_leftIm, devCosts, devDir_accumulated_costs, nx, ny, disp_range);
hipLaunchKernelGGL(( devInplace_sum_views), dim3(dimGrid3), dim3(dimBlock3), 0, 0, devAccumulated_costs, devDir_accumulated_costs, nx, ny, disp_range);
}
dirx=0;
/* vertical sweeps: (0,-1) and (0,+1) */
for(diry=-1; diry<2; diry++) {
if(dirx==0 && diry==0) continue;
hipMemset(devDir_accumulated_costs, 0, nx*ny*disp_range*sizeof(int));
devIterate_direction( dirx,diry, devH_leftIm, devCosts, devDir_accumulated_costs, nx, ny, disp_range);
hipLaunchKernelGGL(( devInplace_sum_views), dim3(dimGrid3), dim3(dimBlock3), 0, 0, devAccumulated_costs, devDir_accumulated_costs, nx, ny, disp_range);
}
/* winner-take-all on the device, then copy the disparity image back */
hipLaunchKernelGGL(( devCreate_disparity_view), dim3(dimGrid4), dim3(dimBlock4), 0, 0, devAccumulated_costs, devH_dispIm, nx, ny, disp_range );
hipMemcpy(h_dispIm , devH_dispIm, memsize, hipMemcpyDeviceToHost);
/* BUG FIX: the two calloc'd host buffers were leaked in the original
 * (their free() calls were commented out). */
free(accumulated_costs);
free(dir_accumulated_costs);
/* free device memory that is no longer needed */
hipFree(devAccumulated_costs);
hipFree(devH_dispIm);
hipFree(devDir_accumulated_costs);
hipFree(devCosts);
hipFree(devH_leftIm);
hipFree(devH_rightIm);
}
//-----------------------------------------------------------
// print command line format
// Print the command-line synopsis for this program to stdout.
void usage(char *command)
{
    printf("Usage: %s [-h] [-d device] [-l leftimage] [-r rightimage] [-o dev_dispimage] [-t host_dispimage] [-p disprange] \n",command);
}
// main
// Entry point: parse options, run SGM on CPU and GPU, time both with
// HIP events, and write the two disparity images to disk.
int main( int argc, char** argv)
{
// default command line options
int deviceId = 0;
int disp_range = 32;
char *leftIn =(char *)"lbull.pgm",
*rightIn =(char *)"rbull.pgm",
*fileOut =(char *)"d_dbull.pgm",
*referenceOut=(char *)"h_dbull.pgm";
// parse command line arguments
int opt;
while( (opt = getopt(argc,argv,"d:l:o:r:t:p:h")) !=-1)
{
switch(opt)
{
case 'd': // device
if(sscanf(optarg,"%d",&deviceId)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'l': // left image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
leftIn = strdup(optarg);
break;
case 'r': // right image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
rightIn = strdup(optarg);
break;
case 'o': // output image (from device) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
fileOut = strdup(optarg);
break;
case 't': // output image (from host) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
referenceOut = strdup(optarg);
break;
case 'p': // disp_range
if(sscanf(optarg,"%d",&disp_range)==0)
{
usage(argv[0]);
exit(1);
}
break;
case 'h': // help
usage(argv[0]);
exit(0);
break;
}
}
if(optind < argc) {
fprintf(stderr,"Error in arguments\n");
usage(argv[0]);
exit(1);
}
// select cuda device
cutilSafeCall( hipSetDevice( deviceId ) );
// create events to measure host sgm time and device sgm time
hipEvent_t startH, stopH, startD, stopD;
hipEventCreate(&startH);
hipEventCreate(&stopH);
hipEventCreate(&startD);
hipEventCreate(&stopD);
// allocate host memory
int* h_ldata=NULL;
int* h_rdata=NULL;
unsigned int h,w;
//load left pgm
if (cutLoadPGMi(leftIn, (unsigned int **)&h_ldata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", leftIn);
exit(1);
}
//load right pgm
// NOTE(review): this load writes into the same w/h as the left image, so
// both images are assumed to have identical dimensions -- confirm.
if (cutLoadPGMi(rightIn, (unsigned int **)&h_rdata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", rightIn);
exit(1);
}
// allocate mem for the result on host side
// NOTE(review): malloc results are not checked for NULL.
int* h_odata = (int*) malloc( h*w*sizeof(int));
int* reference = (int*) malloc( h*w*sizeof(int));
// sgm at host
hipEventRecord( startH, 0 );
sgmHost(h_ldata, h_rdata, reference, w, h, disp_range);
hipEventRecord( stopH, 0 );
hipEventSynchronize( stopH );
// sgm at GPU
hipEventRecord( startD, 0 );
sgmDevice(h_ldata, h_rdata, h_odata, w, h, disp_range);
hipEventRecord( stopD, 0 );
hipEventSynchronize( stopD );
// check if kernel execution generated and error
cutilCheckMsg("Kernel execution failed");
float timeH, timeD;
hipEventElapsedTime( &timeH, startH, stopH );
printf( "Host processing time: %f (ms)\n", timeH);
hipEventElapsedTime( &timeD, startD, stopD );
printf( "Device processing time: %f (ms)\n", timeD);
// save output images
if (cutSavePGMi(referenceOut, (unsigned int *)reference, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", referenceOut);
exit(1);
}
if (cutSavePGMi(fileOut,(unsigned int *) h_odata, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", fileOut);
exit(1);
}
// cleanup memory
cutFree( h_ldata);
cutFree( h_rdata);
free( h_odata);
free( reference);
cutilDeviceReset();
}
| 14d1623f369f3df43ef2ec8d5f7b6cfe5355c3de.cu |
// Based on CUDA SDK template from NVIDIA
// sgm algorithm adapted from http://lunokhod.org/?p=1403
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <assert.h>
#include <float.h>
#include <stdlib.h>
#include <limits>
#include <algorithm>
// includes, project
#include <cutil_inline.h>
#define MMAX_BRIGHTNESS 255
#define PENALTY1 15
#define PENALTY2 100
#define COSTS(i,j,d) costs[(i)*disp_range+(j)*nx*disp_range+(d)]
#define ACCUMULATED_COSTS(i,j,d) accumulated_costs[(i)*disp_range+(j)*nx*disp_range+(d)]
#define LEFT_IMAGE(i,j) left_image[(i)+(j)*nx]
#define RIGHT_IMAGE(i,j) right_image[(i)+(j)*nx]
#define DISP_IMAGE(i,j) disp_image[(i)+(j)*nx]
#define MMAX(a,b) (((a)>(b))?(a):(b))
#define MMIN(a,b) (((a)<(b))?(a):(b))
#define MYASSERT(condition) if (!(condition)) return;
/* function headers */
// Build the raw per-pixel matching-cost volume (host reference version).
void determine_costs(const int *left_image, const int *right_image, int *costs, const int nx, const int ny, const int disp_range);
// SGM path-cost recurrence for one pixel (host and device versions).
void evaluate_path( const int *prior, const int* local,
int path_intensity_gradient, int *curr_cost,
const int nx, const int ny, const int disp_range );
__device__ void devEvaluate_path(const int *prior, const int *local, int path_intensity_gradient, int *curr_cost , const int nx, const int ny, const int disp_range);
// Directional cost-aggregation sweeps (host reference versions).
void iterate_direction_dirxpos(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirypos(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirxneg(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_diryneg(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range ) ;
void inplace_sum_views( int * im1, const int * im2,
const int nx, const int ny, const int disp_range ) ;
int find_min_index( const int *v, const int dist_range ) ;
// Fixed: this prototype previously omitted disp_range, declaring a second
// overload that is never defined (the definition below takes five args).
void create_disparity_view( const int *accumulated_costs , int * disp_image,
const int nx, const int ny, const int disp_range ) ;
void sgmHost( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range );
void sgmDevice( const int *h_leftIm, const int *h_rightIm,
int *h_dispImD,
const int w, const int h, const int disp_range );
void usage(char *command);
/* functions code */
// Build the raw matching-cost volume on the host: every cell starts at the
// maximum brightness (255), and each valid (pixel, disparity) pair is set to
// the absolute intensity difference between the left pixel and the right
// pixel shifted by the disparity.
void determine_costs(const int *left_image, const int *right_image, int *costs, const int nx, const int ny, const int disp_range){
    std::fill(costs, costs + nx * ny * disp_range, 255u);
    for (int j = 0; j < ny; ++j) {
        for (int d = 0; d < disp_range; ++d) {
            // columns i < d have no correspondent in the right image
            for (int i = d; i < nx; ++i) {
                COSTS(i,j,d) = abs(LEFT_IMAGE(i,j) - RIGHT_IMAGE(i-d,j));
            }
        }
    }
}
// Kernel: one thread per (i, j, d) cell of the cost volume.  Launched with a
// 3-D grid that covers at least nx * ny * disp_range threads (see sgmDevice).
__global__ void devDetermine_costs(const int *left_image, const int *right_image, int *costs, const int nx, const int ny, const int disp_range) {
int j = threadIdx.y + blockIdx.y * blockDim.y;
int i = threadIdx.x + blockIdx.x * blockDim.x;
int d = threadIdx.z + blockIdx.z * blockDim.z;
// Bounds guard: the grid is rounded up with ceil(), so threads past the
// image border must do nothing.  The d guard is defensive -- the current
// launch uses blockDim.z == disp_range exactly, but a different launch
// configuration would otherwise write out of bounds.
if(i<nx && j< ny && d<disp_range){
COSTS(i,j,d)=255;
// a pixel at column i can only match right-image columns >= 0, i.e. d <= i
if (i >=d){
COSTS(i,j,d) = abs( LEFT_IMAGE(i,j) - RIGHT_IMAGE(i-d,j) );
}
}
}
//////////////////////////////////////////////////////////////////////////////////
// Host aggregation sweep along +x: for each row, seed column 0 with the raw
// costs, then propagate accumulated costs column-by-column through
// evaluate_path() using the previous pixel (i-dirx, j) as the prior.
void iterate_direction_dirxpos(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range ) {
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = 0; i < WIDTH; i++ ) {
if(i==0) {
// first column: no predecessor, start from the raw matching costs
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(0,j,d) += COSTS(0,j,d);
}
}
else {
// the intensity gradient along the path modulates the large penalty
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)) ,
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range);
}
}
}
}
// Device aggregation sweep along +x.  Launched with grid (1, ny) and
// disp_range threads per block (see devIterate_direction): one block per row,
// one thread per disparity.  The sweep over columns is sequential; the
// barrier after each column ensures all disparities of column i are written
// before column i+1 reads them as the prior.
// NOTE(review): for this launch config the j/d guard is uniform across the
// block, so the __syncthreads() inside the if is not divergent -- confirm if
// the launch configuration ever changes.
__global__ void devIterate_direction_dirxpos(const int dirx, const int *left_image, const int* costs, int *accumulated_costs, const int nx, const int ny, const int disp_range ) {
const int WIDTH = nx;
int j = blockIdx.y;   // row handled by this block
int d = threadIdx.x;  // disparity handled by this thread
if(j < ny && d < disp_range){
for ( int i = 0; i < WIDTH; i++ ) {
if(i==0) {
// first column: seed with the raw matching costs
ACCUMULATED_COSTS(0,j,d) += COSTS(0,j,d);
}
else {
devEvaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0), &COSTS(i,j,0), abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)), &ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range);
}
__syncthreads();
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////7
// Host aggregation sweep along +y: for each column, seed row 0 with the raw
// costs, then propagate row-by-row using pixel (i, j-diry) as the prior.
void iterate_direction_dirypos(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = 0; j < HEIGHT; j++ ) {
if(j==0) {
// first row: no predecessor, start from the raw matching costs
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,0,d) += COSTS(i,0,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}
}
// Device aggregation sweep along +y.  Launched with grid (1, nx) and
// disp_range threads per block: one block per column, one thread per
// disparity.  Rows are swept sequentially with a barrier after each so that
// row j is complete before row j+1 reads it as the prior.
// NOTE(review): the i/d guard is uniform per block for this launch config,
// so the barrier inside the guarded loop is not divergent -- confirm if the
// launch configuration ever changes.
__global__ void devIterate_direction_dirypos(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int HEIGHT = ny;
int i = blockIdx.y;   // column handled by this block
int d = threadIdx.x;  // disparity handled by this thread
if(i < nx && d < disp_range)
for ( int j = 0; j < HEIGHT; j++ ) {
if(j==0) {
// first row: seed with the raw matching costs
ACCUMULATED_COSTS(i,0,d) += COSTS(i,0,d);
}
else {
devEvaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
__syncthreads();
}
}
////////////////////////////////////////////////////////////////////////////////////////7
// Host aggregation sweep along -x: for each row, seed the last column with
// the raw costs, then propagate right-to-left using pixel (i-dirx, j)
// (i.e. i+1 for dirx == -1) as the prior.
void iterate_direction_dirxneg(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = WIDTH-1; i >= 0; i-- ) {
if(i==WIDTH-1) {
// last column: no predecessor, start from the raw matching costs
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(WIDTH-1,j,d) += COSTS(WIDTH-1,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}
}
// Device aggregation sweep along -x.  Launched with grid (1, ny) and
// disp_range threads per block: one block per row, one thread per disparity.
// Columns are swept right-to-left with a barrier after each column.
// NOTE(review): the j/d guard is uniform per block for this launch config,
// so the barrier inside the guarded loop is not divergent -- confirm if the
// launch configuration ever changes.
__global__ void devIterate_direction_dirxneg(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
int j = blockIdx.y;   // row handled by this block
int d = threadIdx.x;  // disparity handled by this thread
if (j<ny && d < disp_range){
for ( int i = WIDTH-1; i >= 0; i-- ) {
if(i==WIDTH-1) {
// last column: seed with the raw matching costs
ACCUMULATED_COSTS(WIDTH-1,j,d) += COSTS(WIDTH-1,j,d);
}
else {
devEvaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
__syncthreads();
}
}
}
////////////////////////////////////////////////////////////////////////////////////////
// Host aggregation sweep along -y: for each column, seed the last row with
// the raw costs, then propagate bottom-to-top using pixel (i, j-diry)
// (i.e. j+1 for diry == -1) as the prior.
void iterate_direction_diryneg(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = HEIGHT-1; j >= 0; j-- ) {
if(j==HEIGHT-1) {
// last row: no predecessor, start from the raw matching costs
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,HEIGHT-1,d) += COSTS(i,HEIGHT-1,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0) , nx, ny, disp_range);
}
}
}
}
// Device aggregation sweep along -y.  Launched with grid (1, nx) and
// disp_range threads per block: one block per column, one thread per
// disparity.  Rows are swept bottom-to-top with a barrier after each.
// NOTE(review): the i/d guard is uniform per block for this launch config,
// so the barrier inside the guarded loop is not divergent -- confirm if the
// launch configuration ever changes.
__global__ void devIterate_direction_diryneg(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int HEIGHT = ny;
int i = blockIdx.y;   // column handled by this block
int d = threadIdx.x;  // disparity handled by this thread
if(i < nx && d < disp_range){
for ( int j = HEIGHT-1; j >= 0; j-- ) {
if(j==HEIGHT-1) {
// last row: seed with the raw matching costs
ACCUMULATED_COSTS(i,HEIGHT-1,d) += COSTS(i,HEIGHT-1,d);
}
else {
devEvaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0) , nx, ny, disp_range);
}
__syncthreads();
}
}
}
////////////////////////////////////////////////////////////////////////////////////
// Dispatch one host-side aggregation sweep according to the direction
// vector (dirx, diry).  Exactly one of the four axis-aligned sweeps runs:
//   dirx > 0 -> left-to-right, diry > 0 -> top-to-bottom,
//   dirx < 0 -> right-to-left, diry < 0 -> bottom-to-top.
void iterate_direction( const int dirx, const int diry, const int *left_image,
                        const int* costs, int *accumulated_costs,
                        const int nx, const int ny, const int disp_range )
{
    // Walk along the edges in a clockwise fashion.
    if ( dirx > 0 ) {
        // start from the left-most edge
        iterate_direction_dirxpos(dirx, left_image, costs, accumulated_costs, nx, ny, disp_range);
        return;
    }
    if ( diry > 0 ) {
        // start from the top-most edge
        iterate_direction_dirypos(diry, left_image, costs, accumulated_costs, nx, ny, disp_range);
        return;
    }
    if ( dirx < 0 ) {
        // start from the right-most edge
        iterate_direction_dirxneg(dirx, left_image, costs, accumulated_costs, nx, ny, disp_range);
        return;
    }
    if ( diry < 0 ) {
        // start from the bottom-most edge
        iterate_direction_diryneg(diry, left_image, costs, accumulated_costs, nx, ny, disp_range);
    }
}
// Host-side dispatcher that launches the device aggregation kernel matching
// the sweep direction.  All pointers are device pointers.  Each kernel uses
// one block per row (x sweeps, grid (1, ny)) or per column (y sweeps,
// grid (1, nx)) and disp_range threads per block (one per disparity).
void devIterate_direction( const int dirx, const int diry, const int *devLeft_image,
const int* devCosts, int *devAccumulated_costs,
const int nx, const int ny, const int disp_range )
{
// Walk along the edges in a clockwise fashion
int block_x = disp_range;
int block_y = 1;
dim3 dimBlock(block_x,block_y);
dim3 dimGridy(1,nx);  // y sweeps: one block per image column
dim3 dimGridx(1,ny);  // x sweeps: one block per image row
if ( dirx > 0 ) {
devIterate_direction_dirxpos<<<dimGridx, dimBlock>>> (dirx,devLeft_image,devCosts,devAccumulated_costs, nx, ny, disp_range);
}
else if ( diry > 0 ) {
devIterate_direction_dirypos<<<dimGridy, dimBlock>>> (diry,devLeft_image,devCosts,devAccumulated_costs, nx, ny, disp_range);
}
else if ( dirx < 0 ) {
devIterate_direction_dirxneg<<<dimGridx, dimBlock>>> (dirx,devLeft_image,devCosts,devAccumulated_costs, nx, ny, disp_range);
}
else if ( diry < 0 ) {
devIterate_direction_diryneg<<<dimGridy, dimBlock>>> (diry,devLeft_image,devCosts,devAccumulated_costs, nx, ny, disp_range);
}
}
// ADD two cost images
// Element-wise accumulate over the whole cost volume:
// im1[k] += im2[k] for k in [0, nx*ny*disp_range).
void inplace_sum_views( int * im1, const int * im2,
                        const int nx, const int ny, const int disp_range )
{
    const int total = nx * ny * disp_range;
    for (int k = 0; k < total; ++k)
        im1[k] += im2[k];
}
// Kernel: element-wise im1 += im2 over the 3-D cost volume, one thread per
// element; MYASSERT makes out-of-range threads return early.
// The (x, y, z) -> id mapping is a bijection over nx*ny*disp_range elements,
// which is all a pure element-wise sum needs (the stride order does not have
// to match the COSTS/ACCUMULATED_COSTS macro layout).
__global__ void devInplace_sum_views( int * im1, const int * im2, const int nx, const int ny, const int disp_range ) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
MYASSERT(x < nx);
int y = threadIdx.y + blockIdx.y * blockDim.y;
MYASSERT(y < ny);
int z = threadIdx.z + blockIdx.z * blockDim.z;
MYASSERT(z < disp_range);
int id = x + y * nx + z * nx * ny;
im1[id] += im2[id];
}
// Return the index of the smallest element in v[0..disp_range-1].
// Ties resolve to the first occurrence; an empty range (or a range whose
// every element equals INT_MAX) yields -1, matching the original behavior.
int find_min_index( const int *v, const int disp_range )
{
    int best_index = -1;
    int best_value = std::numeric_limits<int>::max();
    for (int d = 0; d < disp_range; ++d) {
        if (v[d] < best_value) {
            best_value = v[d];
            best_index = d;
        }
    }
    return best_index;
}
// Device twin of find_min_index: index of the smallest element in
// v[0..disp_range-1], first occurrence on ties, -1 for an empty range.
// 2147483647 is INT_MAX written as a literal (std::numeric_limits is not
// used in this device function).
__device__ int devFind_min_index( const int *v, const int disp_range )
{
int min =2147483647;
int minind = -1;
for (int d=0; d < disp_range; d++) {
if(v[d]<min) {
min = v[d];
minind = d;
}
}
return minind;
}
// SGM path-cost recurrence for one pixel (host version).
// prior:  accumulated costs of the previous pixel on the path (disp_range ints)
// local:  raw matching costs of the current pixel (disp_range ints)
// path_intensity_gradient: |I(p) - I(p_prev)| along the path; a large
//   gradient shrinks the big jump penalty (PENALTY2 / gradient).
// curr_cost (out): accumulated costs of the current pixel.
void evaluate_path(const int *prior, const int *local,
int path_intensity_gradient, int *curr_cost ,
const int nx, const int ny, const int disp_range)
{
memcpy(curr_cost, local, sizeof(int)*disp_range);
for ( int d = 0; d < disp_range; d++ ) {
// smoothness term: cheapest transition from any prior disparity d_p to d
int e_smooth = std::numeric_limits<int>::max();
for ( int d_p = 0; d_p < disp_range; d_p++ ) {
if ( d_p - d == 0 ) {
// No penality
e_smooth = MMIN(e_smooth,prior[d_p]);
} else if ( abs(d_p - d) == 1 ) {
// Small penality
e_smooth = MMIN(e_smooth,prior[d_p]+PENALTY1);
} else {
// Large penality
e_smooth =
MMIN(e_smooth,prior[d_p] +
MMAX(PENALTY1,
path_intensity_gradient ? PENALTY2/path_intensity_gradient : PENALTY2));
}
}
curr_cost[d] += e_smooth;
}
// normalize by subtracting min(prior) to keep accumulated costs bounded
int min = std::numeric_limits<int>::max();
for ( int d = 0; d < disp_range; d++ ) {
if (prior[d]<min) min=prior[d];
}
for ( int d = 0; d < disp_range; d++ ) {
curr_cost[d]-=min;
}
}
// Device twin of evaluate_path.  Unlike the host version, each thread handles
// a single disparity d == threadIdx.x, so the caller must launch the block
// with blockDim.x == disp_range (devIterate_direction does).
// The barrier after the curr_cost[d] update keeps the block in lock-step
// between pixels of the sequential sweep in the calling kernels.
__device__ void devEvaluate_path(const int *prior, const int *local, int path_intensity_gradient, int *curr_cost , const int nx, const int ny, const int disp_range)
{
int d = threadIdx.x;
curr_cost[d] = local[d];
// smoothness term: cheapest transition from any prior disparity d_p to d
// (2147483647 is INT_MAX written as a literal)
int e_smooth = 2147483647;
for ( int d_p = 0; d_p < disp_range; d_p++ ) {
if ( d_p - d == 0 ) {
// No penality
e_smooth = MMIN(e_smooth,prior[d_p]);
} else if ( abs(d_p - d) == 1 ) {
// Small penality
e_smooth = MMIN(e_smooth,prior[d_p]+PENALTY1);
} else {
// Large penality
e_smooth =
MMIN(e_smooth,prior[d_p] +
MMAX(PENALTY1,
path_intensity_gradient ? PENALTY2/path_intensity_gradient : PENALTY2));
}
}
curr_cost[d] += e_smooth;
__syncthreads();
// normalize by subtracting min(prior); every thread recomputes the same min
int min = 2147483647;
for ( int k = 0; k < disp_range; k++ ) {
if (prior[k]<min) min=prior[k];
}
curr_cost[d]-=min;
}
// Collapse the aggregated cost volume into a disparity image: every pixel
// receives 4x the disparity index with the lowest accumulated cost (the
// factor of 4 scales the value for display in the output PGM).
void create_disparity_view( const int *accumulated_costs , int * disp_image,
                            const int nx, const int ny, const int disp_range)
{
    for ( int j = 0; j < ny; ++j )
        for ( int i = 0; i < nx; ++i )
            DISP_IMAGE(i,j) = 4 * find_min_index( &ACCUMULATED_COSTS(i,j,0), disp_range );
}
// Kernel twin of create_disparity_view: one thread per pixel, each writing
// 4x the minimum-cost disparity index.  The guard handles the ceil()-rounded
// 2-D grid (see sgmDevice's dimGrid4/dimBlock4).
__global__ void devCreate_disparity_view( const int *accumulated_costs , int * disp_image,
const int nx, const int ny, const int disp_range)
{
int j = threadIdx.y + blockIdx.y * blockDim.y;
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i<nx && j< ny)
{
DISP_IMAGE(i,j) = 4 * devFind_min_index( &ACCUMULATED_COSTS(i,j,0), disp_range );
}
}
/*
* Links:
* http://www.dlr.de/rmc/rm/en/desktopdefault.aspx/tabid-9389/16104_read-39811/
* http://lunokhod.org/?p=1356
*/
// sgm code to run on the host
// Reference SGM on the CPU: build the cost volume, aggregate along the four
// axis-aligned directions (+x, -x, +y, -y), and select the best disparity.
// h_leftIm/h_rightIm: input images (w*h ints); h_dispIm: output disparities.
void sgmHost( const int *h_leftIm, const int *h_rightIm, int *h_dispIm, const int w, const int h, const int disp_range) {
const int nx = w;
const int ny = h;
// Processing all costs. W*H*D. D= disp_range
int *costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
determine_costs(h_leftIm, h_rightIm, costs, nx, ny, disp_range);
int *accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
int *dir_accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (accumulated_costs == NULL || dir_accumulated_costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
int dirx=0,diry=0;
// horizontal passes: dirx = -1 and +1 with diry = 0
for(dirx=-1; dirx<2; dirx++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
// vertical passes: diry = -1 and +1 with dirx = 0
dirx=0;
for(diry=-1; diry<2; diry++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
free(costs);
free(dir_accumulated_costs);
create_disparity_view( accumulated_costs, h_dispIm, nx, ny, disp_range );
free(accumulated_costs);
}
// sgm code to run on the GPU
// sgm on the GPU: builds the cost volume, aggregates it along the four
// axis-aligned directions, and selects the best disparity per pixel.
// h_leftIm/h_rightIm: host input images (w*h ints); h_dispIm: host output.
void sgmDevice( const int *h_leftIm, const int *h_rightIm, int *h_dispIm, const int w, const int h, const int disp_range ){
const int nx = w;
const int ny = h;
const int size = nx * ny;
const int memsize = size * sizeof(int);
// device buffers: raw cost volume, accumulators, and the three images
int *devCosts, *devAccumulated_costs;
cudaMalloc((void **)&devCosts, memsize*disp_range);
int *devH_leftIm, *devH_rightIm, *devH_dispIm;
cudaMalloc((void **)&devH_leftIm, memsize);
cudaMalloc((void **)&devH_rightIm, memsize);
cudaMalloc((void **)&devH_dispIm, memsize);
// 3-D launch: one thread per (x, y, d) cell of the cost volume
int block3_x = 4;
int block3_y = 4;
int block3_z = disp_range;
int grid3_x = ceil((float)nx/block3_x);
int grid3_y = ceil((float)ny/block3_y);
dim3 dimBlock3(block3_x,block3_y, block3_z);
dim3 dimGrid3(grid3_x,grid3_y,1);
// 2-D launch: one thread per pixel for the final disparity selection
int block4_x = 16;
int block4_y = 32;
int grid4_x = ceil((float)nx/block4_x);
int grid4_y = ceil((float)ny/block4_y);
dim3 dimBlock4(block4_x,block4_y);
dim3 dimGrid4(grid4_x,grid4_y);
// upload the input images and build the raw matching costs
cudaMemcpy(devH_leftIm, h_leftIm, memsize, cudaMemcpyHostToDevice);
cudaMemcpy(devH_rightIm, h_rightIm, memsize, cudaMemcpyHostToDevice);
devDetermine_costs<<<dimGrid3, dimBlock3>>>(devH_leftIm, devH_rightIm, devCosts, nx, ny, disp_range);
int *devDir_accumulated_costs;
cudaMalloc((void **)&devAccumulated_costs, memsize*disp_range);
cudaMalloc((void **)&devDir_accumulated_costs, memsize*disp_range);
// Zero the accumulator directly on the device.  (The previous version
// calloc'ed zeroed host buffers, copied them over, and never freed them,
// leaking 2 * nx*ny*disp_range ints per call.)
cudaMemset(devAccumulated_costs, 0, memsize*disp_range);
int dirx=0,diry=0;
// horizontal aggregation passes: dirx = -1 and +1 with diry = 0
for(dirx=-1; dirx<2; dirx++) {
if(dirx==0 && diry==0) continue;
cudaMemset(devDir_accumulated_costs, 0, memsize*disp_range);
devIterate_direction( dirx,diry, devH_leftIm, devCosts, devDir_accumulated_costs, nx, ny, disp_range);
devInplace_sum_views<<<dimGrid3, dimBlock3>>>(devAccumulated_costs, devDir_accumulated_costs, nx, ny, disp_range);
}
// vertical aggregation passes: diry = -1 and +1 with dirx = 0
dirx=0;
for(diry=-1; diry<2; diry++) {
if(dirx==0 && diry==0) continue;
cudaMemset(devDir_accumulated_costs, 0, memsize*disp_range);
devIterate_direction( dirx,diry, devH_leftIm, devCosts, devDir_accumulated_costs, nx, ny, disp_range);
devInplace_sum_views<<<dimGrid3, dimBlock3>>>(devAccumulated_costs, devDir_accumulated_costs, nx, ny, disp_range);
}
// pick the minimum-cost disparity per pixel and download the result
devCreate_disparity_view<<<dimGrid4, dimBlock4>>>( devAccumulated_costs, devH_dispIm, nx, ny, disp_range );
cudaMemcpy(h_dispIm , devH_dispIm, memsize, cudaMemcpyDeviceToHost);
// release all device memory
cudaFree(devAccumulated_costs);
cudaFree(devH_dispIm);
cudaFree(devDir_accumulated_costs);
cudaFree(devCosts);
cudaFree(devH_leftIm);
cudaFree(devH_rightIm);
}
//-----------------------------------------------------------
// print command line format
// Print the command-line synopsis for this program to stdout.
void usage(char *command)
{
    printf("Usage: %s [-h] [-d device] [-l leftimage] [-r rightimage] [-o dev_dispimage] [-t host_dispimage] [-p disprange] \n",command);
}
// main
// Entry point: parse options, run SGM on CPU and GPU, time both with
// CUDA events, and write the two disparity images to disk.
int main( int argc, char** argv)
{
// default command line options
int deviceId = 0;
int disp_range = 32;
char *leftIn =(char *)"lbull.pgm",
*rightIn =(char *)"rbull.pgm",
*fileOut =(char *)"d_dbull.pgm",
*referenceOut=(char *)"h_dbull.pgm";
// parse command line arguments
int opt;
while( (opt = getopt(argc,argv,"d:l:o:r:t:p:h")) !=-1)
{
switch(opt)
{
case 'd': // device
if(sscanf(optarg,"%d",&deviceId)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'l': // left image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
leftIn = strdup(optarg);
break;
case 'r': // right image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
rightIn = strdup(optarg);
break;
case 'o': // output image (from device) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
fileOut = strdup(optarg);
break;
case 't': // output image (from host) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
referenceOut = strdup(optarg);
break;
case 'p': // disp_range
if(sscanf(optarg,"%d",&disp_range)==0)
{
usage(argv[0]);
exit(1);
}
break;
case 'h': // help
usage(argv[0]);
exit(0);
break;
}
}
if(optind < argc) {
fprintf(stderr,"Error in arguments\n");
usage(argv[0]);
exit(1);
}
// select cuda device
cutilSafeCall( cudaSetDevice( deviceId ) );
// create events to measure host sgm time and device sgm time
cudaEvent_t startH, stopH, startD, stopD;
cudaEventCreate(&startH);
cudaEventCreate(&stopH);
cudaEventCreate(&startD);
cudaEventCreate(&stopD);
// allocate host memory
int* h_ldata=NULL;
int* h_rdata=NULL;
unsigned int h,w;
//load left pgm
if (cutLoadPGMi(leftIn, (unsigned int **)&h_ldata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", leftIn);
exit(1);
}
//load right pgm
// NOTE(review): this load writes into the same w/h as the left image, so
// both images are assumed to have identical dimensions -- confirm.
if (cutLoadPGMi(rightIn, (unsigned int **)&h_rdata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", rightIn);
exit(1);
}
// allocate mem for the result on host side
// NOTE(review): malloc results are not checked for NULL.
int* h_odata = (int*) malloc( h*w*sizeof(int));
int* reference = (int*) malloc( h*w*sizeof(int));
// sgm at host
cudaEventRecord( startH, 0 );
sgmHost(h_ldata, h_rdata, reference, w, h, disp_range);
cudaEventRecord( stopH, 0 );
cudaEventSynchronize( stopH );
// sgm at GPU
cudaEventRecord( startD, 0 );
sgmDevice(h_ldata, h_rdata, h_odata, w, h, disp_range);
cudaEventRecord( stopD, 0 );
cudaEventSynchronize( stopD );
// check if kernel execution generated and error
cutilCheckMsg("Kernel execution failed");
float timeH, timeD;
cudaEventElapsedTime( &timeH, startH, stopH );
printf( "Host processing time: %f (ms)\n", timeH);
cudaEventElapsedTime( &timeD, startD, stopD );
printf( "Device processing time: %f (ms)\n", timeD);
// save output images
if (cutSavePGMi(referenceOut, (unsigned int *)reference, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", referenceOut);
exit(1);
}
if (cutSavePGMi(fileOut,(unsigned int *) h_odata, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", fileOut);
exit(1);
}
// cleanup memory
cutFree( h_ldata);
cutFree( h_rdata);
free( h_odata);
free( reference);
cutilDeviceReset();
}
|
b1e4105e267c4fd8810d65534263e25a6c3d07c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <vector>
#include "timer.hpp"
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
// Convenience wrapper for checking HIP/CUDA runtime API results.
// No-op in release builds; in debug builds it prints the error and asserts.
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
  if (result != hipSuccess) {
    // Fixed: the format string was "%sn" -- the newline escape was missing,
    // so error messages ran together with a stray 'n'.
    fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
    // NOTE(review): assert() requires <cassert>/<assert.h>, which this file
    // does not include -- debug builds would not compile.  Confirm and add it.
    assert(result == hipSuccess);
  }
#endif
  return result;
}
// Element-wise vector add: a[i] = b[i] + c[i], one thread per element.
// NOTE(review): there is no bounds guard and no length parameter, so the
// launch configuration must tile the arrays exactly.  runTest launches
// blocknum blocks of 256 threads over N = blocknum*256 elements, which
// satisfies this -- confirm before reusing elsewhere.
__global__ void add(double *a, double *b, double *c)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
a[i] = b[i] + c[i];
}
// Allocate three device vectors of blocknum*256 elements, time 10 launches
// of the element-wise add kernel, and print N, the effective memory
// bandwidth (GB/s) and the elapsed time.  (Only instantiated with T=double;
// the add kernel takes double* operands.)
template <typename T>
void runTest(int deviceId, int blocknum)
{
  T *d_a;
  T *d_b;
  T *d_c;
  int N = blocknum*256;
  std::vector<T> vec_init(N);
  checkCuda( hipMalloc(&d_a, N * sizeof(T)) );
  checkCuda( hipMalloc(&d_b, N * sizeof(T)) );
  checkCuda( hipMalloc(&d_c, N * sizeof(T)) );
  // Fixed: these copies used to pass &d_a/&d_b/&d_c -- the address of the
  // device pointer on the host stack -- as the destination instead of the
  // device pointer itself, which is undefined behaviour.
  checkCuda( hipMemcpy(d_a, &(vec_init[0]), N * sizeof(T), hipMemcpyHostToDevice) );
  checkCuda( hipMemcpy(d_b, &(vec_init[0]), N * sizeof(T), hipMemcpyHostToDevice) );
  checkCuda( hipMemcpy(d_c, &(vec_init[0]), N * sizeof(T), hipMemcpyHostToDevice) );
  hipLaunchKernelGGL(( add), dim3(blocknum), dim3(256), 0, 0, d_a, d_b, d_c); // warm up
  hipDeviceSynchronize();
  viennacl::tools::timer timer;
  timer.start();
  for (int n = 0; n < 10; ++n)
  {
    hipLaunchKernelGGL(( add), dim3(blocknum), dim3(256), 0, 0, d_a, d_b, d_c);
  }
  hipDeviceSynchronize();
  // 10 launches, each touching 3 vectors (2 reads + 1 write) of N elements.
  // Fixed: use sizeof(T) instead of hard-coded sizeof(double), and read the
  // timer once so the reported bandwidth and time are consistent.
  double elapsed = timer.get();
  double bandwidth = 30.0 * N * sizeof(T) / elapsed / 1e9;
  std::cout << N << " " << bandwidth << " " << elapsed << std::endl;
  hipFree(d_a);
  hipFree(d_b);
  hipFree(d_c);
}
// Entry point: report the device name, then sweep power-of-two block counts
// (N = blocks * 256 elements per run) and print a bandwidth table.
int main(int argc, char **argv)
{
int deviceId = 0;
hipDeviceProp_t prop;
checkCuda( hipGetDeviceProperties(&prop, deviceId) );
std::cout << "Device: " << prop.name << std::endl;
std::cout << "#Size Bandwidth (GB/s) Time (sec)" << std::endl;
for (int blocks=1; blocks<20000; blocks *= 2)
runTest<double>(deviceId, blocks);
return EXIT_SUCCESS;
}
| b1e4105e267c4fd8810d65534263e25a6c3d07c8.cu | #include <iostream>
#include <cstdlib>
#include <vector>
#include "timer.hpp"
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %sn", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
// Element-wise vector add: a[i] = b[i] + c[i], one thread per element.
// NOTE(review): there is no bounds guard and no length parameter, so the
// launch configuration must tile the arrays exactly.  runTest launches
// blocknum blocks of 256 threads over N = blocknum*256 elements, which
// satisfies this -- confirm before reusing elsewhere.
__global__ void add(double *a, double *b, double *c)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
a[i] = b[i] + c[i];
}
// Allocate three device vectors of blocknum*256 elements, time 10 launches
// of the element-wise add kernel, and print N, the effective memory
// bandwidth (GB/s) and the elapsed time.  (Only instantiated with T=double;
// the add kernel takes double* operands.)
template <typename T>
void runTest(int deviceId, int blocknum)
{
  T *d_a;
  T *d_b;
  T *d_c;
  int N = blocknum*256;
  std::vector<T> vec_init(N);
  checkCuda( cudaMalloc(&d_a, N * sizeof(T)) );
  checkCuda( cudaMalloc(&d_b, N * sizeof(T)) );
  checkCuda( cudaMalloc(&d_c, N * sizeof(T)) );
  // Fixed: these copies used to pass &d_a/&d_b/&d_c -- the address of the
  // device pointer on the host stack -- as the destination instead of the
  // device pointer itself, which is undefined behaviour.
  checkCuda( cudaMemcpy(d_a, &(vec_init[0]), N * sizeof(T), cudaMemcpyHostToDevice) );
  checkCuda( cudaMemcpy(d_b, &(vec_init[0]), N * sizeof(T), cudaMemcpyHostToDevice) );
  checkCuda( cudaMemcpy(d_c, &(vec_init[0]), N * sizeof(T), cudaMemcpyHostToDevice) );
  add<<<blocknum, 256>>>(d_a, d_b, d_c); // warm up
  cudaDeviceSynchronize();
  viennacl::tools::timer timer;
  timer.start();
  for (int n = 0; n < 10; ++n)
  {
    add<<<blocknum, 256>>>(d_a, d_b, d_c);
  }
  cudaDeviceSynchronize();
  // 10 launches, each touching 3 vectors (2 reads + 1 write) of N elements.
  // Fixed: use sizeof(T) instead of hard-coded sizeof(double), and read the
  // timer once so the reported bandwidth and time are consistent.
  double elapsed = timer.get();
  double bandwidth = 30.0 * N * sizeof(T) / elapsed / 1e9;
  std::cout << N << " " << bandwidth << " " << elapsed << std::endl;
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);
}
// Entry point: report the device being benchmarked, then sweep power-of-two
// block counts up to 20000, timing the add kernel for each size.
int main(int argc, char **argv)
{
  const int deviceId = 0;  // always use the first CUDA device
  cudaDeviceProp prop;
  checkCuda( cudaGetDeviceProperties(&prop, deviceId) );
  std::cout << "Device: " << prop.name << std::endl;
  std::cout << "#Size Bandwidth (GB/s) Time (sec)" << std::endl;
  int blocks = 1;
  while (blocks < 20000)
  {
    runTest<double>(deviceId, blocks);
    blocks *= 2;
  }
  return EXIT_SUCCESS;
}
|
b52842fc1a48f9f47bf6d94c4e4e6eb3faf0449d.hip | // !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_gemv_batched_strided_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/matrix_mul/fp32_simt_gemv/matrix_mul_float_simt_gemv_batched_strided_cutlass_wrapper.cuinl"
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 32, 8>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>;
using GemvKernel = cutlass::gemm::kernel::DefaultGemv<
ThreadBlockShape,
ThreadShape,
float, cutlass::layout::RowMajor,
float, cutlass::layout::RowMajor,
float, cutlass::layout::RowMajor>;
template void megdnn::cuda::cutlass_wrapper::
cutlass_vector_matrix_mul_batched_strided_wrapper<GemvKernel>(
BatchedGemmCoord const& problem_size,
const typename GemvKernel::ElementA* d_A, size_t lda, size_t batch_stride_a,
const typename GemvKernel::ElementB* d_B, size_t ldb, size_t batch_stride_b,
typename GemvKernel::ElementCD* d_C, size_t ldc, size_t batch_stride_c,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| b52842fc1a48f9f47bf6d94c4e4e6eb3faf0449d.cu | #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_gemv_batched_strided_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/matrix_mul/fp32_simt_gemv/matrix_mul_float_simt_gemv_batched_strided_cutlass_wrapper.cuinl"
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 32, 8>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>;
using GemvKernel = cutlass::gemm::kernel::DefaultGemv<
ThreadBlockShape,
ThreadShape,
float, cutlass::layout::RowMajor,
float, cutlass::layout::RowMajor,
float, cutlass::layout::RowMajor>;
template void megdnn::cuda::cutlass_wrapper::
cutlass_vector_matrix_mul_batched_strided_wrapper<GemvKernel>(
BatchedGemmCoord const& problem_size,
const typename GemvKernel::ElementA* d_A, size_t lda, size_t batch_stride_a,
const typename GemvKernel::ElementB* d_B, size_t ldb, size_t batch_stride_b,
typename GemvKernel::ElementCD* d_C, size_t ldc, size_t batch_stride_c,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
c8d5c9e027e17aaffe73e25d4470ede48933399f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <iostream>
#include <random_quda.h>
#include <hip/hip_runtime.h>
#include <quda_internal.h>
#include <comm_quda.h>
#include <index_helper.cuh>
namespace quda {
#define BLOCKSDIVUP(a, b) (((a)+(b)-1)/(b))
dim3 GetBlockDim(size_t threads, size_t size){
int blockx = BLOCKSDIVUP(size, threads);
dim3 blocks(blockx,1,1);
return blocks;
}
# define CUDA_SAFE_CALL_NO_SYNC( call) { \
hipError_t err = call; \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} }
# define CUDA_SAFE_CALL( call) CUDA_SAFE_CALL_NO_SYNC(call);
/**
@brief CUDA kernel to initialize CURAND RNG states
@param state CURAND RNG state array
@param seed initial seed for RNG
@param rng_size size of the CURAND RNG state array
@param node_offset this parameter is used to skip ahead the index in the sequence, usefull for multigpu.
*/
__global__ void
kernel_random(cuRNGState *state, int seed, int rng_size, int node_offset ){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < rng_size){
/* Each thread gets same seed, a different sequence number, no offset */
hiprand_init(seed, id + node_offset, 0, &state[id]);
}
}
struct rngArg{
int comm_dim[4];
int comm_coord[4];
int X[4];
};
__global__ void
kernel_random(cuRNGState *state, int seed, int rng_size, int node_offset, rngArg arg ){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < rng_size){
/* Each thread gets same seed, a different sequence number, no offset */
#ifndef MULTI_GPU
hiprand_init(seed, id + node_offset, 0, &state[id]);
#else
int x[4];
getCoords(x, id, arg.X, 0);
for(int i=0; i<4;i++) x[i] += arg.comm_coord[i] * arg.X[i];
int idd = ((((x[3] * arg.comm_dim[2] * arg.X[2] + x[2]) * arg.comm_dim[1] * arg.X[1]) + x[1] ) * arg.comm_dim[0] * arg.X[0] + x[0]) >> 1 ;
hiprand_init(seed, idd, 0, &state[id]);
#endif
}
}
/**
@brief Call CUDA kernel to initialize CURAND RNG states (one state per thread,
 128 threads per block). Blocks until initialization finishes.
@param state CURAND RNG state array
@param seed initial seed for RNG
@param rng_size size of the CURAND RNG state array
@param node_offset this parameter is used to skip ahead the index in the sequence, usefull for multigpu.
 NOTE(review): in MULTI_GPU builds this argument is ignored (0 is passed to the
 kernel below); the kernel derives a unique sequence id from the global lattice
 coordinates in `arg` instead -- confirm this is intended.
@param X local lattice dimensions; only read in MULTI_GPU builds.
*/
void launch_kernel_random(cuRNGState *state, int seed, int rng_size, int node_offset, int X[4]){
dim3 nthreads(128,1,1);
dim3 nblocks = GetBlockDim(nthreads.x, rng_size);
//CUDA_SAFE_CALL(hipFuncSetCacheConfig( kernel_random, hipFuncCachePreferL1));
#ifndef MULTI_GPU
hipLaunchKernelGGL(( kernel_random), dim3(nblocks),dim3(nthreads), 0, 0, state, seed, rng_size, node_offset);
#else
// gather the per-dimension communicator geometry so the kernel can compute a
// globally unique curand sequence index for every lattice site
rngArg arg;
for(int i=0; i < 4; i++){
arg.comm_dim[i] = comm_dim(i);
arg.comm_coord[i] = comm_coord(i);
arg.X[i] = X[i];
}
// node_offset is deliberately replaced by 0 here (see @param note above)
hipLaunchKernelGGL(( kernel_random), dim3(nblocks),dim3(nthreads), 0, 0, state, seed, rng_size, 0, arg);
#endif
qudaDeviceSynchronize();
}
RNG::RNG(int rng_sizes, int seedin){
rng_size = rng_sizes;
seed = seedin;
state = NULL;
node_offset = 0;
#ifdef MULTI_GPU
for(int i=0; i<4;i++) X[i]=0;
node_offset = comm_rank() * rng_sizes;
#endif
#if defined(XORWOW)
printfQuda("Using curandStateXORWOW\n");
#elif defined(RG32k3a)
printfQuda("Using curandStateMRG32k3a\n");
#else
printfQuda("Using curandStateMRG32k3a\n");
#endif
}
RNG::RNG(int rng_sizes, int seedin, const int XX[4]){
rng_size = rng_sizes;
seed = seedin;
state = NULL;
node_offset = 0;
#ifdef MULTI_GPU
for(int i=0; i<4;i++) X[i]=XX[i];
node_offset = comm_rank() * rng_sizes;
#endif
#if defined(XORWOW)
printfQuda("Using curandStateXORWOW\n");
#elif defined(RG32k3a)
printfQuda("Using curandStateMRG32k3a\n");
#else
printfQuda("Using curandStateMRG32k3a\n");
#endif
}
/**
@brief Initialize CURAND RNG states
*/
void RNG::Init(){
AllocateRNG();
launch_kernel_random(state, seed, rng_size, node_offset, X);
}
/**
@brief Allocate Device memory for CURAND RNG states
 and zero-initialize it. Expects rng_size > 0 and state not yet allocated.
*/
void RNG::AllocateRNG(){
if(rng_size>0 && state == NULL){
state = (cuRNGState*)device_malloc(rng_size * sizeof(cuRNGState));
CUDA_SAFE_CALL(hipMemset( state , 0 , rng_size * sizeof(cuRNGState) ));
printfQuda("Allocated array of random numbers with rng_size: %.2f MB\n", rng_size * sizeof(cuRNGState)/(float)(1048576));
}
else{
// NOTE(review): this branch is reached both when rng_size <= 0 and when state
// was already allocated (e.g. Init() called twice), but the message only
// describes the former -- confirm whether a repeated Init() should error.
errorQuda("Array of random numbers not allocated, array size: %d !\nExiting...\n",rng_size);
}
}
/**
@brief Release Device memory for CURAND RNG states
*/
void RNG::Release(){
if(rng_size>0 && state != NULL){
device_free(state);
printfQuda("Free array of random numbers with rng_size: %.2f MB\n", rng_size * sizeof(cuRNGState)/(float)(1048576));
rng_size = 0;
state = NULL;
}
}
/*! @brief Restore CURAND array states initialization
 Copies the host-side snapshot taken by backup() back to the device and
 releases the snapshot; backup() must have been called first. */
void RNG::restore(){
hipError_t err = hipMemcpy(state, backup_state, rng_size * sizeof(cuRNGState), hipMemcpyHostToDevice);
if (err != hipSuccess) {
host_free(backup_state);
printfQuda("ERROR: Failed to restore hiprand rng states array\n");
// errorQuda is expected not to return here; if it did, backup_state would be
// freed a second time below -- NOTE(review): confirm errorQuda aborts.
errorQuda("Aborting");
}
host_free(backup_state);
}
/*! @brief Backup CURAND array states initialization
 Snapshots the device RNG state array into newly allocated host memory
 (backup_state); restore() consumes and frees the snapshot. */
void RNG::backup(){
backup_state = (cuRNGState*) safe_malloc(rng_size * sizeof(cuRNGState));
hipError_t err = hipMemcpy(backup_state, state, rng_size * sizeof(cuRNGState), hipMemcpyDeviceToHost);
if (err != hipSuccess) {
// device-to-host copy failed: release the snapshot before aborting
host_free(backup_state);
printfQuda("ERROR: Failed to backup hiprand rng states array\n");
errorQuda("Aborting");
}
}
}
| c8d5c9e027e17aaffe73e25d4470ede48933399f.cu |
#include <stdio.h>
#include <string.h>
#include <iostream>
#include <random_quda.h>
#include <cuda.h>
#include <quda_internal.h>
#include <comm_quda.h>
#include <index_helper.cuh>
namespace quda {
#define BLOCKSDIVUP(a, b) (((a)+(b)-1)/(b))
dim3 GetBlockDim(size_t threads, size_t size){
int blockx = BLOCKSDIVUP(size, threads);
dim3 blocks(blockx,1,1);
return blocks;
}
# define CUDA_SAFE_CALL_NO_SYNC( call) { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} }
# define CUDA_SAFE_CALL( call) CUDA_SAFE_CALL_NO_SYNC(call);
/**
@brief CUDA kernel to initialize CURAND RNG states
@param state CURAND RNG state array
@param seed initial seed for RNG
@param rng_size size of the CURAND RNG state array
@param node_offset this parameter is used to skip ahead the index in the sequence, usefull for multigpu.
*/
__global__ void
kernel_random(cuRNGState *state, int seed, int rng_size, int node_offset ){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < rng_size){
/* Each thread gets same seed, a different sequence number, no offset */
curand_init(seed, id + node_offset, 0, &state[id]);
}
}
struct rngArg{
int comm_dim[4];
int comm_coord[4];
int X[4];
};
__global__ void
kernel_random(cuRNGState *state, int seed, int rng_size, int node_offset, rngArg arg ){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < rng_size){
/* Each thread gets same seed, a different sequence number, no offset */
#ifndef MULTI_GPU
curand_init(seed, id + node_offset, 0, &state[id]);
#else
int x[4];
getCoords(x, id, arg.X, 0);
for(int i=0; i<4;i++) x[i] += arg.comm_coord[i] * arg.X[i];
int idd = ((((x[3] * arg.comm_dim[2] * arg.X[2] + x[2]) * arg.comm_dim[1] * arg.X[1]) + x[1] ) * arg.comm_dim[0] * arg.X[0] + x[0]) >> 1 ;
curand_init(seed, idd, 0, &state[id]);
#endif
}
}
/**
@brief Call CUDA kernel to initialize CURAND RNG states
@param state CURAND RNG state array
@param seed initial seed for RNG
@param rng_size size of the CURAND RNG state array
@param node_offset this parameter is used to skip ahead the index in the sequence, usefull for multigpu.
*/
void launch_kernel_random(cuRNGState *state, int seed, int rng_size, int node_offset, int X[4]){
dim3 nthreads(128,1,1);
dim3 nblocks = GetBlockDim(nthreads.x, rng_size);
//CUDA_SAFE_CALL(cudaFuncSetCacheConfig( kernel_random, cudaFuncCachePreferL1));
#ifndef MULTI_GPU
kernel_random<<<nblocks,nthreads>>>(state, seed, rng_size, node_offset);
#else
rngArg arg;
for(int i=0; i < 4; i++){
arg.comm_dim[i] = comm_dim(i);
arg.comm_coord[i] = comm_coord(i);
arg.X[i] = X[i];
}
kernel_random<<<nblocks,nthreads>>>(state, seed, rng_size, 0, arg);
#endif
qudaDeviceSynchronize();
}
RNG::RNG(int rng_sizes, int seedin){
rng_size = rng_sizes;
seed = seedin;
state = NULL;
node_offset = 0;
#ifdef MULTI_GPU
for(int i=0; i<4;i++) X[i]=0;
node_offset = comm_rank() * rng_sizes;
#endif
#if defined(XORWOW)
printfQuda("Using curandStateXORWOW\n");
#elif defined(RG32k3a)
printfQuda("Using curandStateMRG32k3a\n");
#else
printfQuda("Using curandStateMRG32k3a\n");
#endif
}
RNG::RNG(int rng_sizes, int seedin, const int XX[4]){
rng_size = rng_sizes;
seed = seedin;
state = NULL;
node_offset = 0;
#ifdef MULTI_GPU
for(int i=0; i<4;i++) X[i]=XX[i];
node_offset = comm_rank() * rng_sizes;
#endif
#if defined(XORWOW)
printfQuda("Using curandStateXORWOW\n");
#elif defined(RG32k3a)
printfQuda("Using curandStateMRG32k3a\n");
#else
printfQuda("Using curandStateMRG32k3a\n");
#endif
}
/**
@brief Initialize CURAND RNG states
*/
void RNG::Init(){
AllocateRNG();
launch_kernel_random(state, seed, rng_size, node_offset, X);
}
/**
@brief Allocate Device memory for CURAND RNG states
 and zero-initialize it. Expects rng_size > 0 and state not yet allocated.
*/
void RNG::AllocateRNG(){
if(rng_size>0 && state == NULL){
state = (cuRNGState*)device_malloc(rng_size * sizeof(cuRNGState));
CUDA_SAFE_CALL(cudaMemset( state , 0 , rng_size * sizeof(cuRNGState) ));
printfQuda("Allocated array of random numbers with rng_size: %.2f MB\n", rng_size * sizeof(cuRNGState)/(float)(1048576));
}
else{
// NOTE(review): this branch is reached both when rng_size <= 0 and when state
// was already allocated (e.g. Init() called twice), but the message only
// describes the former -- confirm whether a repeated Init() should error.
errorQuda("Array of random numbers not allocated, array size: %d !\nExiting...\n",rng_size);
}
}
/**
@brief Release Device memory for CURAND RNG states
*/
void RNG::Release(){
if(rng_size>0 && state != NULL){
device_free(state);
printfQuda("Free array of random numbers with rng_size: %.2f MB\n", rng_size * sizeof(cuRNGState)/(float)(1048576));
rng_size = 0;
state = NULL;
}
}
/*! @brief Restore CURAND array states initialization */
void RNG::restore(){
cudaError_t err = cudaMemcpy(state, backup_state, rng_size * sizeof(cuRNGState), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
host_free(backup_state);
printfQuda("ERROR: Failed to restore curand rng states array\n");
errorQuda("Aborting");
}
host_free(backup_state);
}
/*! @brief Backup CURAND array states initialization */
void RNG::backup(){
backup_state = (cuRNGState*) safe_malloc(rng_size * sizeof(cuRNGState));
cudaError_t err = cudaMemcpy(backup_state, state, rng_size * sizeof(cuRNGState), cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
host_free(backup_state);
printfQuda("ERROR: Failed to backup curand rng states array\n");
errorQuda("Aborting");
}
}
}
|
d314dacc5c6f99707de4effb59b126b2a58e7108.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// High level notes
// Frankly speaking, simulation of a language is not the best suited to gpu execution and comes with some serious considerations:
// The completely generic control flow is fundamentally a loop in which each iteration is dependent on the last
// This alone precludes many optimizations which are typically desirable; with how generic things are the loop cannot be parallelized (save _maybe_ certain explicitly defined special cases)
// Since we do want to have a maximum execution time per program, the loop length does indeed have an upper bound, which, if structured appropriately, could afford some optimization in the way of loop unrolling
// However, on the surface of it this may not be compatible with conditional short-circuiting of the loop which itself is desirable enough to probably not be worth giving up (many programs will end very quickly)
// That said, highly controlled partial loop unrollment (essentially implemented as a nested loop which runs a constant number of times) is also an option which is worth exploring,
// the inner loop would essentially no-op on each loop execution after it has short-circuted.
// I suppose it is possible that cuda would try something like this anyway, and while I find it unlikely that it is smart enough to figure it out in this case it should be investigated before attempting alternatives
//
// An interesting (crazy) thought is to attempt to directly do speculative execution and/or branch prediction with the bf program's control flow.
// This may not be as intractable as it first seems due to the fact that the control flow is both quite simple and represented directly (all cases are a zero/non-zero jump),
// as well as the fact that the theoretical pipelining _should_ be somewhat straightforward
//
// Another thought is to actually directly compile the bf programs to gpu assembly before execution. My intuition is that this per-program compilation overhead would quickly overwhelm the increased "runtime" optimization
// if this were to be done the compilation would have to be extremely simple, probably close to 1-to-1
// That said, bf is simple enough that a somewhat 1-to-1 compilation would theoretically be done quite quickly, so depending on how fast the gpu assembly can emit this _maybe_ could work
// Another huge confounder is that this would require seperate executable memory to be uploaded per thread/block, I have no idea if that is even possible without multiple host calls
// I suppose to the last point, this could also be done with regular cpu assembly or an intermediate like llvm, which might be an interesting avenue
//
// Having a good profiling baseline will be very important if we actually get serious about attempting to optimize this down
// Input format includes the following
// chunk_count_per_program:
// represents the number of program chunks per program
// 1d array of uint8, dimension [PROGRAM_COUNT_PER_BLOCK]
// data_ops:
// represents a sequence of +-<> ops
// 3d array of uint8, dimension [DATA_CELL_COUNT+1][MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK]
// each program member array at index [program_index] of dimension [DATA_CELL_COUNT+1][MAX_CHUNK_COUNT_PER_PROGRAM] only has a meaningful size of chunk_count_per_program[program_index]
// each program chunk member array at index [program_index][program_chunk_index] of dimension [DATA_CELL_COUNT+1] has the following format:
// [ data_ptr_diff, data_cell_0_diff, data_cell_1_diff, ..., data_cell_{DATA_CELL_COUNT-1}_diff ]
// the first element is data_ptr_diff, this could be moved into its own array, however my thought is that locality of data is relevent here, especially since modification is a simple add similar to data modification
// that said there may not be caching concerns as this should probably be only relevent in a single thread, and the volume of data per program is relatively small, worth testing a version which separates this
// io_ops:
// represents a . or , op
// 2d array of IO_OP_t, dimension [MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK]
// each program member array at index [program_index] of dimension [MAX_CHUNK_COUNT_PER_PROGRAM] only has a meaningful size of chunk_count_per_program[program_index]
// the members of this array each associated with a single program chunk represent a read from input (,) to the current data cell or a write to output (.) from the current data cell
// in both cases, the respective io pointer will be incremented and wrap
// control_ops:
// represents a [ or ] op
// 3d array of uint8, dimension [2][MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK]
// each program member array at index [program_index] of dimension [2][MAX_CHUNK_COUNT_PER_PROGRAM] only has a meaningful size of chunk_count_per_program[program_index]
// each program chunk member array at index [program_index][program_chunk_index] of dimension [2] represents the next chunk if the current data cell is 0, and the next chunk if the current data cell is nonzero
// these members are chunk indexes within the current program and are guaranteed to be less than or equal to chunk_count_per_program[program_index]
// if the next index is equla to chunk_count_per_program[program_index] the program terminates
// NOTE: the following could be made significantly more memory efficient if we wish to have the same input data for each block, or even for the entire block grid, though there may be data access speed implications, especially in the latter case
// input_data_count_per_program
// represents the length of input_data per program
// 1d array of uint8, dimension [PROGRAM_COUNT_PER_BLOCK]
// input_data
// represents the sequence to be read using ,
// 2d array of uint8, dimension [MAX_INPUT_DATA_COUNT][PROGRAM_COUNT_PER_BLOCK]
// each program member array at index [program_index] of dimension [MAX_INPUT_DATA_COUNT] is 0 terminated and only has a meaningful size up to the first 0
// the input_data_ptr will wrap to the beginning whenever it encounters a 0
// Execution notes
// The execution flow is as follows:
// data_ops[current_chunk_index] is applied to the working data_cell_array
// io_ops[current_chunk_index] is applied if present, copying between the current data cell, input stream, or output stream as necessary
// control_ops[current_chunk_index] is used to find the next chunk to execute, if the next chunk is out of bounds or if the chunks_executed_counter reaches MAX_CHUNKS_EXECUTED_COUNTER the program is terminated
// it might be worth experimenting with uint16 or uint32 rather than uint8 for potential speedups regarding data alignment,
// I do not know how important data alignment for integral operations is on the gpu as opposed to the cpu, however my intuition is that the gpu should support per byte operations decently well, and that the memory overhead would not justify these changes
// in regards to this remember that memory overhead can translate to performance overhead very easily when factoring in per thread or per block processor caches as well as actual coprocessor data transfer
#include <hip/hip_cooperative_groups.h>
using namespace cooperative_groups;
// NOTE: We use preprocessing macros purely for performance reasons
#define PROGRAM_COUNT_PER_BLOCK 256
#define DATA_CELL_COUNT 31 /* NOTE: this plus 1 should be 4 byte aligned; might be particularly worth attempting 15, as cuda can handle up to 16 bytes in a single instruction */
#define MAX_CHUNK_COUNT_PER_PROGRAM 16 /* NOTE: we can increase this if necessary, but every programs' memory scales linearly with this no matter how much of it they use */
#define MAX_INPUT_DATA_COUNT 16
#define MAX_OUTPUT_DATA_COUNT 16
#define MAX_CHUNKS_EXECUTED_COUNTER 1024 /* NOTE: we can experiment a good deal with this value */
// TODO: enum is probably the same as this after compile and definitely better practice
#define IO_OP_t uint8_t
#define IO_OP_NONE 0
#define IO_OP_INPUT 1
#define IO_OP_OUTPUT 2
#define WORKING_DATA_PTR_OFFSET DATA_CELL_COUNT
#define DATA_CELL_MIN (DATA_CELL_COUNT / 2)
#define DATA_CELL_MAX (3 * DATA_CELL_COUNT / 2)
#define DATA_OP_OFFEST (DATA_CELL_COUNT / 2)
// TODO: current version has a lot of problems as discussed in various comments and the wall of text above, however I want to get a simple running version before prematurely addressing them
// Interprets one pre-chunked bf program per thread: thread x of the block runs
// program x. Per the format notes above, every array keeps the program index
// as the innermost dimension, so accesses are [field][chunk][program_index].
//
// Fixes relative to the original draft (which did not compile):
//  - removed the trailing comma in the parameter list
//  - the data-op loop iterated on `i` instead of `working_data_index`, and its
//    bound skipped the last cell slot; it now covers slots 1..DATA_CELL_COUNT
//    (TODO confirm intended slot range)
//  - array accesses used [program][chunk][field], contradicting the declared
//    [field][chunk][program] layout; they now follow the declarations
//    (TODO confirm the intended layout)
//  - output was written through an undeclared `current_output_data`; it now
//    goes to the `output_data` parameter
//  - the control-flow lookup used an undefined `program_chunk_index` and was
//    missing its semicolon
//  - the working tape is explicitly zero-initialized (local arrays are not)
__global__ void ExecuteBfKernal(
    uint8_t chunk_count_per_program[PROGRAM_COUNT_PER_BLOCK],
    uint8_t data_ops[DATA_CELL_COUNT+1][MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK],
    IO_OP_t io_ops[MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK],
    uint8_t control_ops[2][MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK],
    uint8_t input_data_count_per_program[PROGRAM_COUNT_PER_BLOCK],
    uint8_t input_data[MAX_INPUT_DATA_COUNT][PROGRAM_COUNT_PER_BLOCK],
    uint8_t output_data[MAX_OUTPUT_DATA_COUNT][PROGRAM_COUNT_PER_BLOCK])
{
    uint8_t program_index = threadIdx.x;
    uint8_t current_chunk_ptr = 0;
    uint8_t current_working_data_ptr = WORKING_DATA_PTR_OFFSET;
    uint8_t current_working_data[DATA_CELL_COUNT * 2] = {0};
    uint8_t current_input_data_ptr = 0;
    uint8_t current_output_data_ptr = 0;
    // NOTE: see high level notes for thoughts on optimizing this loop
    for (uint32_t i = MAX_CHUNKS_EXECUTED_COUNTER; i > 0; i--)
    {
        // execute data ops: slots 1..DATA_CELL_COUNT are cell diffs centered on
        // the current data pointer, slot 0 is the data pointer diff
        for (uint32_t working_data_index = 1; working_data_index <= DATA_CELL_COUNT; working_data_index++)
        {
            current_working_data[current_working_data_ptr + working_data_index - DATA_OP_OFFEST - 1] += data_ops[working_data_index][current_chunk_ptr][program_index];
        }
        current_working_data_ptr += data_ops[0][current_chunk_ptr][program_index];
        // verify data pointer is still in bounds
        // NOTE: this worries me perf-wise
        if (current_working_data_ptr < DATA_CELL_MIN || current_working_data_ptr >= DATA_CELL_MAX)
        {
            break;
        }
        // execute io ops
        IO_OP_t io_op = io_ops[current_chunk_ptr][program_index];
        if (io_op == IO_OP_INPUT)
        {
            current_working_data[current_working_data_ptr] = input_data[current_input_data_ptr][program_index];
            // assumes input_data_count_per_program[program_index] > 0 -- TODO confirm
            current_input_data_ptr = (current_input_data_ptr + 1) % input_data_count_per_program[program_index];
        }
        if (io_op == IO_OP_OUTPUT)
        {
            output_data[current_output_data_ptr][program_index] = current_working_data[current_working_data_ptr];
            current_output_data_ptr = (current_output_data_ptr + 1) % MAX_OUTPUT_DATA_COUNT;
        }
        // execute control flow ops: index 0 = next chunk when the current cell
        // is zero, index 1 = next chunk when nonzero
        current_chunk_ptr = control_ops[current_working_data[current_working_data_ptr] != 0][current_chunk_ptr][program_index];
        // regular termination: next chunk index out of range
        // NOTE: this worries me perf-wise
        if (current_chunk_ptr >= chunk_count_per_program[program_index])
        {
            break;
        }
    }
}
// Host-side aggregate mirroring ExecuteBfKernal's parameters; see the format
// notes at the top of the file for the meaning of each field. output_data is
// the only output field; the rest are inputs.
// (The original draft separated members with commas and omitted the trailing
// semicolon; struct members require ';'.)
struct ExecuteBfParams
{
    uint8_t chunk_count_per_program[PROGRAM_COUNT_PER_BLOCK];
    uint8_t data_ops[DATA_CELL_COUNT+1][MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK];
    IO_OP_t io_ops[MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK];
    uint8_t control_ops[2][MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK];
    uint8_t input_data_count_per_program[PROGRAM_COUNT_PER_BLOCK];
    uint8_t input_data[MAX_INPUT_DATA_COUNT][PROGRAM_COUNT_PER_BLOCK];
    uint8_t output_data[MAX_OUTPUT_DATA_COUNT][PROGRAM_COUNT_PER_BLOCK];
};
// Host wrapper: uploads one block's worth of programs, launches ExecuteBfKernal
// with one thread per program in a single block, and copies the produced
// output_data back into `params`.
//
// Fixes relative to the original draft (which did not compile): the parameter
// type is ExecuteBfParams (the struct's actual name), the launch names the
// actual kernel ExecuteBfKernal, the trailing comma in the launch argument
// list is gone, and the multi-dimensional device buffers are typed to match
// the kernel's array parameters.
void ExecuteBfCuda(ExecuteBfParams & params)
{
    // device-side mirrors of the params fields
    uint8_t* DEVICE_chunk_count_per_program;
    uint8_t (*DEVICE_data_ops)[MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK];
    IO_OP_t (*DEVICE_io_ops)[PROGRAM_COUNT_PER_BLOCK];
    uint8_t (*DEVICE_control_ops)[MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK];
    uint8_t* DEVICE_input_data_count_per_program;
    uint8_t (*DEVICE_input_data)[PROGRAM_COUNT_PER_BLOCK];
    uint8_t (*DEVICE_output_data)[PROGRAM_COUNT_PER_BLOCK];
    hipMalloc(&DEVICE_chunk_count_per_program, PROGRAM_COUNT_PER_BLOCK);
    hipMalloc(&DEVICE_data_ops, (DATA_CELL_COUNT+1) * MAX_CHUNK_COUNT_PER_PROGRAM * PROGRAM_COUNT_PER_BLOCK);
    hipMalloc(&DEVICE_io_ops, MAX_CHUNK_COUNT_PER_PROGRAM * PROGRAM_COUNT_PER_BLOCK * sizeof(IO_OP_t));
    hipMalloc(&DEVICE_control_ops, 2 * MAX_CHUNK_COUNT_PER_PROGRAM * PROGRAM_COUNT_PER_BLOCK);
    hipMalloc(&DEVICE_input_data_count_per_program, PROGRAM_COUNT_PER_BLOCK);
    hipMalloc(&DEVICE_input_data, MAX_INPUT_DATA_COUNT * PROGRAM_COUNT_PER_BLOCK);
    hipMalloc(&DEVICE_output_data, MAX_OUTPUT_DATA_COUNT * PROGRAM_COUNT_PER_BLOCK);
    hipMemcpy(DEVICE_chunk_count_per_program, params.chunk_count_per_program, PROGRAM_COUNT_PER_BLOCK, hipMemcpyHostToDevice);
    hipMemcpy(DEVICE_data_ops, params.data_ops, (DATA_CELL_COUNT+1) * MAX_CHUNK_COUNT_PER_PROGRAM * PROGRAM_COUNT_PER_BLOCK, hipMemcpyHostToDevice);
    hipMemcpy(DEVICE_io_ops, params.io_ops, MAX_CHUNK_COUNT_PER_PROGRAM * PROGRAM_COUNT_PER_BLOCK * sizeof(IO_OP_t), hipMemcpyHostToDevice);
    hipMemcpy(DEVICE_control_ops, params.control_ops, 2 * MAX_CHUNK_COUNT_PER_PROGRAM * PROGRAM_COUNT_PER_BLOCK, hipMemcpyHostToDevice);
    hipMemcpy(DEVICE_input_data_count_per_program, params.input_data_count_per_program, PROGRAM_COUNT_PER_BLOCK, hipMemcpyHostToDevice);
    hipMemcpy(DEVICE_input_data, params.input_data, MAX_INPUT_DATA_COUNT * PROGRAM_COUNT_PER_BLOCK, hipMemcpyHostToDevice);
    // output starts zeroed; the kernel only writes cells a program emits
    hipMemset(DEVICE_output_data, 0, MAX_OUTPUT_DATA_COUNT * PROGRAM_COUNT_PER_BLOCK);
    hipLaunchKernelGGL(ExecuteBfKernal, dim3(1), dim3(PROGRAM_COUNT_PER_BLOCK), 0, 0,
        DEVICE_chunk_count_per_program,
        DEVICE_data_ops,
        DEVICE_io_ops,
        DEVICE_control_ops,
        DEVICE_input_data_count_per_program,
        DEVICE_input_data,
        DEVICE_output_data);
    hipMemcpy(params.output_data, DEVICE_output_data, MAX_OUTPUT_DATA_COUNT * PROGRAM_COUNT_PER_BLOCK, hipMemcpyDeviceToHost);
    hipFree(DEVICE_chunk_count_per_program);
    hipFree(DEVICE_data_ops);
    hipFree(DEVICE_io_ops);
    hipFree(DEVICE_control_ops);
    hipFree(DEVICE_input_data_count_per_program);
    hipFree(DEVICE_input_data);
    hipFree(DEVICE_output_data);
}
// Compiles bf source for a single program slot into `params` at program_index.
// Only + - < > and '.' are handled so far; ',' '[' ']' and the bookkeeping
// fields (chunk_count_per_program, io_ops, control_ops) are still TODO, as is
// advancing current_chunk_index per committed chunk.
//
// Fixes relative to the original draft (which did not compile): missing
// semicolon, switch-case declarations now braced, the commit copied in the
// wrong direction (params -> local), and the commit now follows the declared
// [cell][chunk][program] layout and stores the data-pointer *diff* (slot 0)
// as the format notes specify.
void BfSourceToExecuteParams(char const * const source, ExecuteBfParams& params, uint32_t program_index)
{
    char source_char;
    uint32_t source_index = 0;
    uint8_t current_chunk_index = 0;
    uint8_t current_chunk_data_op[DATA_CELL_COUNT];
    memset(current_chunk_data_op, 0, DATA_CELL_COUNT);
    uint8_t current_chunk_data_ptr = DATA_OP_OFFEST;
    while ((source_char = source[source_index++]))
    {
        switch (source_char)
        {
        case '+':
            current_chunk_data_op[current_chunk_data_ptr] += 1;
            break;
        case '-':
            current_chunk_data_op[current_chunk_data_ptr] -= 1;
            break;
        case '>':
            current_chunk_data_ptr += 1;
            // NOTE(review): this forbids index DATA_CELL_COUNT-1, the last
            // valid cell -- confirm whether the bound should be DATA_CELL_COUNT
            if (current_chunk_data_ptr == DATA_CELL_COUNT - 1)
            {
                // TODO: fail gracefully
                throw 1;
            }
            break;
        case '<':
            if (current_chunk_data_ptr == 0)
            {
                // TODO: fail gracefully
                throw 1;
            }
            current_chunk_data_ptr -= 1;
            break;
        case '.':
        {
            // commit the accumulated data op for this chunk, cell by cell
            // (the [cell][chunk][program] layout strides cells, so a flat
            // memcpy cannot be used)
            for (uint32_t cell = 0; cell < DATA_CELL_COUNT; ++cell)
            {
                params.data_ops[cell + 1][current_chunk_index][program_index] = current_chunk_data_op[cell];
            }
            // slot 0 holds the data-pointer diff relative to the chunk start
            params.data_ops[0][current_chunk_index][program_index] = (uint8_t)(current_chunk_data_ptr - DATA_OP_OFFEST);
            // TODO: record the IO op for this chunk, advance
            // current_chunk_index, and populate control_ops /
            // chunk_count_per_program
            // reset accumulation for the next chunk
            memset(current_chunk_data_op, 0, DATA_CELL_COUNT);
            current_chunk_data_ptr = DATA_OP_OFFEST;
            break;
        }
        }
    }
}
// This should not be used for actual execution as it loads only a single program onto the gpu, it's only purpose is a quick test functionality
void ExecuteBfSingle(char const * const program)
{
// TODO: unimplemented stub -- presumably meant to build an ExecuteBfParams
// from `program` via BfSourceToExecuteParams and run it through ExecuteBfCuda
}
// TODO: empty entry point -- no driver code yet
int main()
{
}
| d314dacc5c6f99707de4effb59b126b2a58e7108.cu | // High level notes
// Frankly speaking, simulation of a language is not the best suited to gpu execution and comes with some serious considerations:
// The completely generic control flow is fundamentally a loop in which each iteration is dependent on the last
// This alone precludes many optimizations which are typically desirable; with how generic things are the loop cannot be parallelized (save _maybe_ certain explicitly defined special cases)
// Since we do want to have a maximum execution time per program, the loop length does indeed have an upper bound, which, if structured appropriately, could afford some optimization in the way of loop unrolling
// However, on the surface of it this may not be compatible with conditional short-circuiting of the loop which itself is desirable enough to probably not be worth giving up (many programs will end very quickly)
// That said, highly controlled partial loop unrollment (essentially implemented as a nested loop which runs a constant number of times) is also an option which is worth exploring,
// the inner loop would essentially no-op on each loop execution after it has short-circuted.
// I suppose it is possible that cuda would try something like this anyway, and while I find it unlikely that it is smart enough to figure it out in this case it should be investigated before attempting alternatives
//
// An interesting (crazy) thought is to attempt to directly do speculative execution and/or branch prediction with the bf program's control flow.
// This may not be as intractable as it first seems due to the fact that the control flow is both quite simple and represented directly (all cases are a zero/non-zero jump),
// as well as the fact that the theoretical pipelining _should_ be somewhat straightforward
//
// Another thought is to actually directly compile the bf programs to gpu assembly before execution. My intuition is that this per-program compilation overhead would quickly overwhelm the increased "runtime" optimization
// if this were to be done the compilation would have to be extremely simple, probably close to 1-to-1
// That said, bf is simple enough that a somewhat 1-to-1 compilation would theoretically be done quite quickly, so depending on how fast the gpu assembly can emit this _maybe_ could work
// Another huge confounder is that this would require separate executable memory to be uploaded per thread/block, I have no idea if that is even possible without multiple host calls
// I suppose to the last point, this could also be done with regular cpu assembly or an intermediate like llvm, which might be an interesting avenue
//
// Having a good profiling baseline will be very important if we actually get serious about attempting to optimize this down
// Input format includes the following
// chunk_count_per_program:
// represents the number of program chunks per program
// 1d array of uint8, dimension [PROGRAM_COUNT_PER_BLOCK]
// data_ops:
// represents a sequence of +-<> ops
// 3d array of uint8, dimension [DATA_CELL_COUNT+1][MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK]
// each program member array at index [program_index] of dimension [DATA_CELL_COUNT+1][MAX_CHUNK_COUNT_PER_PROGRAM] only has a meaningful size of chunk_count_per_program[program_index]
// each program chunk member array at index [program_index][program_chunk_index] of dimension [DATA_CELL_COUNT+1] has the following format:
// [ data_ptr_diff, data_cell_0_diff, data_cell_1_diff, ..., data_cell_{DATA_CELL_COUNT-1}_diff ]
// the first element is data_ptr_diff, this could be moved into its own array, however my thought is that locality of data is relevant here, especially since modification is a simple add similar to data modification
// that said there may not be caching concerns as this should probably be only relevant in a single thread, and the volume of data per program is relatively small, worth testing a version which separates this
// io_ops:
// represents a . or , op
// 2d array of IO_OP_t, dimension [MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK]
// each program member array at index [program_index] of dimension [MAX_CHUNK_COUNT_PER_PROGRAM] only has a meaningful size of chunk_count_per_program[program_index]
// the members of this array each associated with a single program chunk represent a read from input (,) to the current data cell or a write to output (.) from the current data cell
// in both cases, the respective io pointer will be incremented and wrap
// control_ops:
// represents a [ or ] op
// 3d array of uint8, dimension [2][MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK]
// each program member array at index [program_index] of dimension [2][MAX_CHUNK_COUNT_PER_PROGRAM] only has a meaningful size of chunk_count_per_program[program_index]
// each program chunk member array at index [program_index][program_chunk_index] of dimension [2] represents the next chunk if the current data cell is 0, and the next chunk if the current data cell is nonzero
// these members are chunk indexes within the current program and are guaranteed to be less than or equal to chunk_count_per_program[program_index]
// if the next index is equal to chunk_count_per_program[program_index] the program terminates
// NOTE: the following could be made significantly more memory efficient if we wish to have the same input data for each block, or even for the entire block grid, though there may be data access speed implications, especially in the latter case
// input_data_count_per_program
// represents the length of input_data per program
// 1d array of uint8, dimension [PROGRAM_COUNT_PER_BLOCK]
// input_data
// represents the sequence to be read using ,
// 2d array of uint8, dimension [MAX_INPUT_DATA_COUNT][PROGRAM_COUNT_PER_BLOCK]
// each program member array at index [program_index] of dimension [MAX_INPUT_DATA_COUNT] is 0 terminated and only has a meaningful size up to the first 0
// the input_data_ptr will wrap to the beginning whenever it encounters a 0
// Execution notes
// The execution flow is as follows:
// data_ops[current_chunk_index] is applied to the working data_cell_array
// io_ops[current_chunk_index] is applied if present, copying between the current data cell, input stream, or output stream as necessary
// control_ops[current_chunk_index] is used to find the next chunk to execute, if the next chunk is out of bounds or if the chunks_executed_counter reaches MAX_CHUNKS_EXECUTED_COUNTER the program is terminated
// it might be worth experimenting with uint16 or uint32 rather than uint8 for potential speedups regarding data alignment,
// I do not know how important data alignment for integral operations is on the gpu as opposed to the cpu, however my intuition is that the gpu should support per byte operations decently well, and that the memory overhead would not justify these changes
// in regards to this remember that memory overhead can translate to performance overhead very easily when factoring in per thread or per block processor caches as well as actual coprocessor data transfer
#include <cooperative_groups.h>
using namespace cooperative_groups;
// NOTE: We use preprocessing macros purely for performance reasons
#define PROGRAM_COUNT_PER_BLOCK 256
#define DATA_CELL_COUNT 31 /* NOTE: this plus 1 should be 4 byte aligned; might be particularly worth attempting 15, as cuda can handle up to 16 bytes in a single instruction */
#define MAX_CHUNK_COUNT_PER_PROGRAM 16 /* NOTE: we can increase this if necessary, but every programs' memory scales linearly with this no matter how much of it they use */
#define MAX_INPUT_DATA_COUNT 16
#define MAX_OUTPUT_DATA_COUNT 16
#define MAX_CHUNKS_EXECUTED_COUNTER 1024 /* NOTE: we can experiment a good deal with this value */
// TODO: enum is probably the same as this after compile and definitely better practice
#define IO_OP_t uint8_t
#define IO_OP_NONE 0
#define IO_OP_INPUT 1
#define IO_OP_OUTPUT 2
#define WORKING_DATA_PTR_OFFSET DATA_CELL_COUNT
#define DATA_CELL_MIN (DATA_CELL_COUNT / 2)
#define DATA_CELL_MAX (3 * DATA_CELL_COUNT / 2)
#define DATA_OP_OFFEST (DATA_CELL_COUNT / 2)
// TODO: current version has a lot of problems as discussed in various comments and the wall of text above, however I want to get a simple running version before prematurely addressing them
// Interprets one brainfuck program per thread.
// Grid/block layout assumed by the host wrapper: <<<1, PROGRAM_COUNT_PER_BLOCK>>>,
// so threadIdx.x is the program index.
// All input arrays keep the program index in the innermost (fastest-varying)
// dimension, matching the layout documented at the top of this file, so a warp's
// accesses to the same chunk field coalesce across programs.
// Fixes vs. the previous revision:
//  - the data-op loop iterated and incremented the outer budget counter `i`
//    instead of `working_data_index` (skipping cells and corrupting the budget);
//  - every array access used [program][chunk][field] order, contradicting the
//    declared [field][chunk][program] dimensions;
//  - `current_output_data` / `program_chunk_index` were undefined names;
//  - the working cells were never zero-initialized (bf requires zeroed cells);
//  - trailing comma in the parameter list and a missing semicolon.
__global__ void ExecuteBfKernal(
    uint8_t chunk_count_per_program[PROGRAM_COUNT_PER_BLOCK],
    uint8_t data_ops[DATA_CELL_COUNT+1][MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK],
    IO_OP_t io_ops[MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK],
    uint8_t control_ops[2][MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK],
    uint8_t input_data_count_per_program[PROGRAM_COUNT_PER_BLOCK],
    uint8_t input_data[MAX_INPUT_DATA_COUNT][PROGRAM_COUNT_PER_BLOCK],
    uint8_t output_data[MAX_OUTPUT_DATA_COUNT][PROGRAM_COUNT_PER_BLOCK])
{
    uint8_t program_index = threadIdx.x;
    uint8_t current_chunk_ptr = 0;
    uint8_t current_working_data_ptr = WORKING_DATA_PTR_OFFSET;
    // bf semantics require the tape to start zeroed; local arrays are not
    // zero-initialized implicitly.
    uint8_t current_working_data[DATA_CELL_COUNT * 2] = {0};
    uint8_t current_input_data_ptr = 0;
    uint8_t current_output_data_ptr = 0;
    // NOTE: see high level notes for my thoughts on the potential performance optimizations of this loop
    for (uint32_t i = MAX_CHUNKS_EXECUTED_COUNTER; i > 0; i--)
    {
        // execute data ops: op index 0 is the data-pointer diff (applied below),
        // indices 1..DATA_CELL_COUNT are the per-cell diffs.
        // NOTE: this might be worth explicitly splitting up over a warp, or some form of cooperative group, though this is certainly non-trivial
        for (uint32_t working_data_index = 1; working_data_index <= DATA_CELL_COUNT; working_data_index++)
        {
            current_working_data[current_working_data_ptr + working_data_index - DATA_OP_OFFEST - 1] += data_ops[working_data_index][current_chunk_ptr][program_index];
        }
        current_working_data_ptr += data_ops[0][current_chunk_ptr][program_index];
        // verify data pointer is still in bounds
        // NOTE: this worries me perf-wise
        if (current_working_data_ptr < DATA_CELL_MIN || current_working_data_ptr >= DATA_CELL_MAX)
        {
            break;
        }
        // execute io ops
        // NOTE: cuda should be smart enough not to emit diverging branches here, but this should probably be confirmed
        IO_OP_t io_op = io_ops[current_chunk_ptr][program_index];
        if (io_op == IO_OP_INPUT)
        {
            // NOTE(review): assumes input_data_count_per_program[program_index] != 0
            // for any program that executes ',' — confirm the host-side translator
            // guarantees this, otherwise this modulo divides by zero.
            current_working_data[current_working_data_ptr] = input_data[current_input_data_ptr][program_index];
            current_input_data_ptr = (current_input_data_ptr + 1) % input_data_count_per_program[program_index];
        }
        if (io_op == IO_OP_OUTPUT)
        {
            output_data[current_output_data_ptr][program_index] = current_working_data[current_working_data_ptr];
            current_output_data_ptr = (current_output_data_ptr + 1) % MAX_OUTPUT_DATA_COUNT;
        }
        // execute control flow ops: element [0] is the next chunk when the
        // current cell is zero, element [1] when it is non-zero.
        current_chunk_ptr = control_ops[ current_working_data[current_working_data_ptr] != 0 ][current_chunk_ptr][program_index];
        // Check if program has regularly terminated (a next-chunk index equal to
        // the chunk count means "halt", per the input format notes above).
        // NOTE: this worries me perf-wise
        if (current_chunk_ptr >= chunk_count_per_program[program_index])
        {
            break;
        }
    }
}
// Host-side bundle of all per-block program inputs/outputs, mirroring the
// kernel's parameter layout exactly (see the input-format notes at the top of
// the file). The previous revision separated members with commas and lacked
// the closing semicolon, which does not compile.
struct ExecuteBfParams
{
    uint8_t chunk_count_per_program[PROGRAM_COUNT_PER_BLOCK];
    uint8_t data_ops[DATA_CELL_COUNT+1][MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK];
    IO_OP_t io_ops[MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK];
    uint8_t control_ops[2][MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK];
    uint8_t input_data_count_per_program[PROGRAM_COUNT_PER_BLOCK];
    uint8_t input_data[MAX_INPUT_DATA_COUNT][PROGRAM_COUNT_PER_BLOCK];
    // Filled by the kernel; zeroed on the device before launch.
    uint8_t output_data[MAX_OUTPUT_DATA_COUNT][PROGRAM_COUNT_PER_BLOCK];
};
// Uploads the program set to the device, runs one block of bf interpreters,
// and copies the output stream back into params.output_data.
// Fixes vs. the previous revision: the parameter type referenced the undefined
// `ExecuteBfCudaParams` (the struct is `ExecuteBfParams`), the launch named the
// nonexistent `ExecuteBf` (the kernel is `ExecuteBfKernal`), the call had a
// trailing comma, and the flat device pointers were passed where the kernel
// expects multi-dimensional array pointers (now bridged with reinterpret_cast).
// NOTE(review): cudaMalloc/cudaMemcpy return codes are still unchecked — TODO.
void ExecuteBfCuda(ExecuteBfParams & params)
{
    // Essentially just moves data from host to device and back after execution
    uint8_t* DEVICE_chunk_count_per_program;
    uint8_t* DEVICE_data_ops;
    IO_OP_t* DEVICE_io_ops;
    uint8_t* DEVICE_control_ops;
    uint8_t* DEVICE_input_data_count_per_program;
    uint8_t* DEVICE_input_data;
    uint8_t* DEVICE_output_data;
    cudaMalloc(&DEVICE_chunk_count_per_program, PROGRAM_COUNT_PER_BLOCK);
    cudaMalloc(&DEVICE_data_ops, (DATA_CELL_COUNT+1) * MAX_CHUNK_COUNT_PER_PROGRAM * PROGRAM_COUNT_PER_BLOCK);
    cudaMalloc(&DEVICE_io_ops, MAX_CHUNK_COUNT_PER_PROGRAM * PROGRAM_COUNT_PER_BLOCK);
    cudaMalloc(&DEVICE_control_ops, 2 * MAX_CHUNK_COUNT_PER_PROGRAM * PROGRAM_COUNT_PER_BLOCK);
    cudaMalloc(&DEVICE_input_data_count_per_program, PROGRAM_COUNT_PER_BLOCK);
    cudaMalloc(&DEVICE_input_data, MAX_INPUT_DATA_COUNT * PROGRAM_COUNT_PER_BLOCK);
    cudaMalloc(&DEVICE_output_data, MAX_OUTPUT_DATA_COUNT * PROGRAM_COUNT_PER_BLOCK);
    cudaMemcpy(DEVICE_chunk_count_per_program, params.chunk_count_per_program, PROGRAM_COUNT_PER_BLOCK, cudaMemcpyHostToDevice);
    cudaMemcpy(DEVICE_data_ops, params.data_ops, (DATA_CELL_COUNT+1) * MAX_CHUNK_COUNT_PER_PROGRAM * PROGRAM_COUNT_PER_BLOCK, cudaMemcpyHostToDevice);
    cudaMemcpy(DEVICE_io_ops, params.io_ops, MAX_CHUNK_COUNT_PER_PROGRAM * PROGRAM_COUNT_PER_BLOCK, cudaMemcpyHostToDevice);
    cudaMemcpy(DEVICE_control_ops, params.control_ops, 2 * MAX_CHUNK_COUNT_PER_PROGRAM * PROGRAM_COUNT_PER_BLOCK, cudaMemcpyHostToDevice);
    cudaMemcpy(DEVICE_input_data_count_per_program, params.input_data_count_per_program, PROGRAM_COUNT_PER_BLOCK, cudaMemcpyHostToDevice);
    cudaMemcpy(DEVICE_input_data, params.input_data, MAX_INPUT_DATA_COUNT * PROGRAM_COUNT_PER_BLOCK, cudaMemcpyHostToDevice);
    // Output starts zeroed so untouched slots read back as 0.
    cudaMemset(DEVICE_output_data, 0, MAX_OUTPUT_DATA_COUNT * PROGRAM_COUNT_PER_BLOCK);
    // One block, one thread per program; the casts restore the array shapes the
    // kernel parameters decay to.
    ExecuteBfKernal<<<1, PROGRAM_COUNT_PER_BLOCK>>>(
        DEVICE_chunk_count_per_program,
        reinterpret_cast<uint8_t (*)[MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK]>(DEVICE_data_ops),
        reinterpret_cast<IO_OP_t (*)[PROGRAM_COUNT_PER_BLOCK]>(DEVICE_io_ops),
        reinterpret_cast<uint8_t (*)[MAX_CHUNK_COUNT_PER_PROGRAM][PROGRAM_COUNT_PER_BLOCK]>(DEVICE_control_ops),
        DEVICE_input_data_count_per_program,
        reinterpret_cast<uint8_t (*)[PROGRAM_COUNT_PER_BLOCK]>(DEVICE_input_data),
        reinterpret_cast<uint8_t (*)[PROGRAM_COUNT_PER_BLOCK]>(DEVICE_output_data));
    // Blocking copy; also synchronizes with the kernel on the default stream.
    cudaMemcpy(params.output_data, DEVICE_output_data, MAX_OUTPUT_DATA_COUNT * PROGRAM_COUNT_PER_BLOCK, cudaMemcpyDeviceToHost);
    cudaFree(DEVICE_chunk_count_per_program);
    cudaFree(DEVICE_data_ops);
    cudaFree(DEVICE_io_ops);
    cudaFree(DEVICE_control_ops);
    cudaFree(DEVICE_input_data_count_per_program);
    cudaFree(DEVICE_input_data);
    cudaFree(DEVICE_output_data);
}
// Translates bf source text into the chunked representation for one program
// slot of `params`.
// NOTE(review): this translator is still incomplete — it only handles + - < >
// and commits a chunk on '.'; ',' '[' ']' are not emitted, io_ops/control_ops/
// chunk_count_per_program are never populated, and current_chunk_index is never
// advanced. Left pending the full implementation.
// Fixes vs. the previous revision: the commit used memcpy with source and
// destination swapped and assumed a [program][chunk][cell] layout, while the
// struct declares [cell][chunk][program] (program innermost), so cells are not
// contiguous and are now written with a strided loop; the '.' case also lacked
// braces around its local declaration, a semicolon, and a break; the '>' bound
// check rejected the last valid cell index.
void BfSourceToExecuteParams(char const * const source, ExecuteBfParams& params, uint32_t program_index)
{
    char source_char;
    uint32_t source_index = 0;
    uint8_t current_chunk_index = 0;
    // Net +/- accumulated per cell for the chunk under construction.
    uint8_t current_chunk_data_op[DATA_CELL_COUNT];
    memset(current_chunk_data_op, 0, DATA_CELL_COUNT);
    uint8_t current_chunk_data_ptr = DATA_OP_OFFEST;
    while ((source_char = source[source_index++]))
    {
        switch (source_char)
        {
            case '+':
                current_chunk_data_op[current_chunk_data_ptr] += 1;
                break;
            case '-':
                current_chunk_data_op[current_chunk_data_ptr] -= 1;
                break;
            case '>':
                current_chunk_data_ptr += 1;
                // DATA_CELL_COUNT-1 is the last valid index; only reject once we
                // step past it.
                if (current_chunk_data_ptr == DATA_CELL_COUNT)
                {
                    // TODO: fail gracefully
                    throw 1;
                }
                break;
            case '<':
                if (current_chunk_data_ptr == 0)
                {
                    // TODO: fail gracefully
                    throw 1;
                }
                current_chunk_data_ptr -= 1;
                break;
            case '.':
            {
                // commit data op: slot 0 holds the data pointer value, slots
                // 1..DATA_CELL_COUNT the per-cell diffs.
                // NOTE(review): the format notes call slot 0 a data_ptr_diff;
                // the absolute scratch pointer is stored here as before —
                // confirm which is intended.
                params.data_ops[0][current_chunk_index][program_index] = current_chunk_data_ptr;
                for (uint32_t cell_index = 0; cell_index < DATA_CELL_COUNT; cell_index++)
                {
                    params.data_ops[cell_index + 1][current_chunk_index][program_index] = current_chunk_data_op[cell_index];
                }
                // clear data op for the next chunk
                memset(current_chunk_data_op, 0, DATA_CELL_COUNT);
                current_chunk_data_ptr = DATA_OP_OFFEST;
                break;
            }
        }
    }
}
// This should not be used for actual execution as it loads only a single program onto the gpu, it's only purpose is a quick test functionality
void ExecuteBfSingle(char const * const program)
{
    // TODO: not yet implemented — presumably this will translate `program` via
    // BfSourceToExecuteParams and run it through ExecuteBfCuda (confirm intent).
}
int main()
{
    // TODO: entry point is still empty; nothing is executed yet.
}
|
54bb73c18a00be806e3732cd9173c52ccfae4abf.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <gtest/gtest.h>
#include "fbgemm_gpu/batched_unary_embedding_wrappers.cuh"
#include "fbgemm_gpu/cuda_utils.cuh"
// Shared fixture for the batched-unary-embedding GPU tests below.
// SetUpTestCase builds one small fixed problem on the host: T=2 tables
// (hash_sizes rows each), B=3, num_task=2, plus hard-coded reference values
// that forward_test / backward_test compare device results against.
class FBGEMMGPUBatchUnaryEmbeddingTest : public ::testing::Test {
 protected:
  static long* indices;          // indices_size lookup indices uploaded in both tests
  static long* offsets;          // indices_size+1 offsets uploaded in both tests
  static float* weights;         // embedding table contents for all tasks
  static int* hash_sizes;        // per-table row counts {2, 3}
  static float* output_ref;      // expected forward output (forward_test)
  static long* table_offsets;    // T+1 table boundary offsets
  static float* grad_weight_ref; // expected weight gradient (backward_test)
  static float* grad_output_ref; // upstream gradient fed to backward_test
  constexpr static int indices_size = 6;
  constexpr static int T = 2;
  constexpr static int num_task = 2;
  constexpr static int B = 3;
  // Allocates and fills all shared host-side fixture data once per test suite.
  static void SetUpTestCase() {
    hash_sizes = new int[T]{2, 3};
    table_offsets = new long[T + 1]{0, 2, 5};
    offsets = new long[indices_size + 1]{0, 1, 2, 3, 4, 5, 6};
    indices = new long[indices_size]{1, 1, 1, 0, 2, 1};
    weights = new float[(hash_sizes[0] + hash_sizes[1]) * num_task]{-0.1264,
                                                                    0.2836,
                                                                    -0.5619,
                                                                    0.5717,
                                                                    0.1725,
                                                                    -0.2929,
                                                                    0.2342,
                                                                    0.0099,
                                                                    -0.5364,
                                                                    -0.4393};
    output_ref = new float[T * num_task * B]{0.2836,
                                             -0.5619,
                                             0.2836,
                                             0.1725,
                                             0.2836,
                                             0.5717,
                                             0.2342,
                                             0.0099,
                                             0.2342,
                                             -0.4393,
                                             0.2342,
                                             -0.5364};
    grad_output_ref = new float[T * num_task * B]{-0.1434,
                                                  0.0576,
                                                  -0.0076,
                                                  -0.1141,
                                                  -0.0708,
                                                  -0.0876,
                                                  -0.0649,
                                                  -0.0071,
                                                  -0.0240,
                                                  0.1031,
                                                  0.2131,
                                                  -0.1412};
    grad_weight_ref =
        new float[(hash_sizes[0] + hash_sizes[1]) * num_task]{-0.0000,
                                                              -0.2218,
                                                              0.0576,
                                                              -0.0876,
                                                              -0.1141,
                                                              0.0000,
                                                              0.1243,
                                                              -0.0071,
                                                              -0.1412,
                                                              0.1031};
  }
  // Releases everything allocated in SetUpTestCase.
  static void TearDownTestCase() {
    delete[] indices;
    delete[] offsets;
    delete[] weights;
    delete[] hash_sizes;
    delete[] output_ref;
    delete[] table_offsets;
    delete[] grad_weight_ref;
    delete[] grad_output_ref;
  }
};
// Out-of-line definitions for the fixture's static data members.
long* FBGEMMGPUBatchUnaryEmbeddingTest::indices;
long* FBGEMMGPUBatchUnaryEmbeddingTest::offsets;
int* FBGEMMGPUBatchUnaryEmbeddingTest::hash_sizes;
float* FBGEMMGPUBatchUnaryEmbeddingTest::weights;
float* FBGEMMGPUBatchUnaryEmbeddingTest::output_ref;
long* FBGEMMGPUBatchUnaryEmbeddingTest::table_offsets;
float* FBGEMMGPUBatchUnaryEmbeddingTest::grad_weight_ref;
float* FBGEMMGPUBatchUnaryEmbeddingTest::grad_output_ref;
// Runs the forward kernel wrapper on the fixture problem and compares the
// device output element-wise against output_ref.
TEST_F(FBGEMMGPUBatchUnaryEmbeddingTest, forward_test) {
  // Zero-init so a failing hipGetDeviceCount still takes the skip path instead
  // of reading an indeterminate value.
  int device_cnt = 0;
  hipGetDeviceCount(&device_cnt);
  if (device_cnt == 0) {
    GTEST_SKIP();
  }
  // gpu ptrs
  long* offsets_gpu_ptr;
  long* indices_gpu_ptr;
  long* table_offsets_gpu_ptr;
  float* embedding_table_gpu_ptr;
  float* output_gpu_ptr;
  // cpu ptrs
  float* output_cpu_ptr = new float[T * num_task * B];
  CUDA_CHECK(
      hipMalloc((void**)&offsets_gpu_ptr, (indices_size + 1) * sizeof(long)));
  CUDA_CHECK(hipMalloc((void**)&indices_gpu_ptr, indices_size * sizeof(long)));
  CUDA_CHECK(hipMalloc(
      (void**)&embedding_table_gpu_ptr,
      (hash_sizes[0] + hash_sizes[1]) * num_task * sizeof(float)));
  CUDA_CHECK(
      hipMalloc((void**)&table_offsets_gpu_ptr, (T + 1) * sizeof(long)));
  CUDA_CHECK(
      hipMalloc((void**)&output_gpu_ptr, T * num_task * B * sizeof(float)));
  // Upload all inputs.
  CUDA_CHECK(hipMemcpy(
      offsets_gpu_ptr,
      offsets,
      (indices_size + 1) * sizeof(long),
      hipMemcpyKind::hipMemcpyHostToDevice));
  CUDA_CHECK(hipMemcpy(
      indices_gpu_ptr,
      indices,
      indices_size * sizeof(long),
      hipMemcpyKind::hipMemcpyHostToDevice));
  CUDA_CHECK(hipMemcpy(
      embedding_table_gpu_ptr,
      weights,
      (hash_sizes[0] + hash_sizes[1]) * num_task * sizeof(float),
      hipMemcpyKind::hipMemcpyHostToDevice));
  CUDA_CHECK(hipMemcpy(
      table_offsets_gpu_ptr,
      table_offsets,
      (T + 1) * sizeof(long),
      hipMemcpyKind::hipMemcpyHostToDevice));
  fbgemm_gpu_test::batched_unary_embeddings_forward(
      num_task,
      B,
      T,
      embedding_table_gpu_ptr,
      table_offsets_gpu_ptr,
      offsets_gpu_ptr,
      indices_gpu_ptr,
      output_gpu_ptr);
  // Blocking copy; also synchronizes with the kernel above.
  CUDA_CHECK(hipMemcpy(
      output_cpu_ptr,
      output_gpu_ptr,
      T * num_task * B * sizeof(float),
      hipMemcpyKind::hipMemcpyDeviceToHost));
  // Check the frees too: unchecked hipFree can hide a sticky error from the
  // preceding kernel launch (was unchecked before).
  CUDA_CHECK(hipFree(offsets_gpu_ptr));
  CUDA_CHECK(hipFree(indices_gpu_ptr));
  CUDA_CHECK(hipFree(embedding_table_gpu_ptr));
  CUDA_CHECK(hipFree(table_offsets_gpu_ptr));
  CUDA_CHECK(hipFree(output_gpu_ptr));
  for (int i = 0; i < T * num_task * B; i++) {
    ASSERT_FLOAT_EQ(output_cpu_ptr[i], output_ref[i]);
  }
  delete[] output_cpu_ptr;
}
// Runs the backward kernel wrapper with grad_output_ref and compares the
// resulting weight gradient against grad_weight_ref within a small tolerance.
TEST_F(FBGEMMGPUBatchUnaryEmbeddingTest, backward_test) {
  // Zero-init so a failing hipGetDeviceCount still takes the skip path instead
  // of reading an indeterminate value.
  int device_cnt = 0;
  hipGetDeviceCount(&device_cnt);
  if (device_cnt == 0) {
    GTEST_SKIP();
  }
  // gpu ptrs
  long* offsets_gpu_ptr;
  long* indices_gpu_ptr;
  long* table_offsets_gpu_ptr;
  float* grad_output_gpu_ptr;
  float* grad_weight_gpu_ptr;
  // cpu ptrs
  float* grad_weight_cpu_ptr =
      new float[(hash_sizes[0] + hash_sizes[1]) * num_task];
  CUDA_CHECK(
      hipMalloc((void**)&offsets_gpu_ptr, (indices_size + 1) * sizeof(long)));
  CUDA_CHECK(hipMalloc((void**)&indices_gpu_ptr, indices_size * sizeof(long)));
  CUDA_CHECK(hipMalloc(
      (void**)&grad_output_gpu_ptr, T * num_task * B * sizeof(float)));
  CUDA_CHECK(
      hipMalloc((void**)&table_offsets_gpu_ptr, (T + 1) * sizeof(long)));
  CUDA_CHECK(hipMalloc(
      (void**)&grad_weight_gpu_ptr,
      (hash_sizes[0] + hash_sizes[1]) * num_task * sizeof(float)));
  // Upload all inputs.
  CUDA_CHECK(hipMemcpy(
      offsets_gpu_ptr,
      offsets,
      (indices_size + 1) * sizeof(long),
      hipMemcpyKind::hipMemcpyHostToDevice));
  CUDA_CHECK(hipMemcpy(
      indices_gpu_ptr,
      indices,
      indices_size * sizeof(long),
      hipMemcpyKind::hipMemcpyHostToDevice));
  CUDA_CHECK(hipMemcpy(
      grad_output_gpu_ptr,
      grad_output_ref,
      T * num_task * B * sizeof(float),
      hipMemcpyKind::hipMemcpyHostToDevice));
  CUDA_CHECK(hipMemcpy(
      table_offsets_gpu_ptr,
      table_offsets,
      (T + 1) * sizeof(long),
      hipMemcpyKind::hipMemcpyHostToDevice));
  fbgemm_gpu_test::batched_unary_embeddings_backward(
      num_task,
      B,
      T,
      grad_output_gpu_ptr,
      table_offsets_gpu_ptr,
      offsets_gpu_ptr,
      indices_gpu_ptr,
      grad_weight_gpu_ptr);
  // Blocking copy; also synchronizes with the kernel above.
  CUDA_CHECK(hipMemcpy(
      grad_weight_cpu_ptr,
      grad_weight_gpu_ptr,
      (hash_sizes[0] + hash_sizes[1]) * num_task * sizeof(float),
      hipMemcpyKind::hipMemcpyDeviceToHost));
  // Check the frees too: unchecked hipFree can hide a sticky error from the
  // preceding kernel launch (was unchecked before).
  CUDA_CHECK(hipFree(offsets_gpu_ptr));
  CUDA_CHECK(hipFree(indices_gpu_ptr));
  CUDA_CHECK(hipFree(grad_output_gpu_ptr));
  CUDA_CHECK(hipFree(table_offsets_gpu_ptr));
  CUDA_CHECK(hipFree(grad_weight_gpu_ptr));
  for (int i = 0; i < (hash_sizes[0] + hash_sizes[1]) * num_task; i++) {
    ASSERT_NEAR(grad_weight_cpu_ptr[i], grad_weight_ref[i], 0.0002);
  }
  delete[] grad_weight_cpu_ptr;
}
| 54bb73c18a00be806e3732cd9173c52ccfae4abf.cu | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <gtest/gtest.h>
#include "fbgemm_gpu/batched_unary_embedding_wrappers.cuh"
#include "fbgemm_gpu/cuda_utils.cuh"
// Shared fixture for the batched-unary-embedding GPU tests below.
// SetUpTestCase builds one small fixed problem on the host: T=2 tables
// (hash_sizes rows each), B=3, num_task=2, plus hard-coded reference values
// that forward_test / backward_test compare device results against.
class FBGEMMGPUBatchUnaryEmbeddingTest : public ::testing::Test {
 protected:
  static long* indices;          // indices_size lookup indices uploaded in both tests
  static long* offsets;          // indices_size+1 offsets uploaded in both tests
  static float* weights;         // embedding table contents for all tasks
  static int* hash_sizes;        // per-table row counts {2, 3}
  static float* output_ref;      // expected forward output (forward_test)
  static long* table_offsets;    // T+1 table boundary offsets
  static float* grad_weight_ref; // expected weight gradient (backward_test)
  static float* grad_output_ref; // upstream gradient fed to backward_test
  constexpr static int indices_size = 6;
  constexpr static int T = 2;
  constexpr static int num_task = 2;
  constexpr static int B = 3;
  // Allocates and fills all shared host-side fixture data once per test suite.
  static void SetUpTestCase() {
    hash_sizes = new int[T]{2, 3};
    table_offsets = new long[T + 1]{0, 2, 5};
    offsets = new long[indices_size + 1]{0, 1, 2, 3, 4, 5, 6};
    indices = new long[indices_size]{1, 1, 1, 0, 2, 1};
    weights = new float[(hash_sizes[0] + hash_sizes[1]) * num_task]{-0.1264,
                                                                    0.2836,
                                                                    -0.5619,
                                                                    0.5717,
                                                                    0.1725,
                                                                    -0.2929,
                                                                    0.2342,
                                                                    0.0099,
                                                                    -0.5364,
                                                                    -0.4393};
    output_ref = new float[T * num_task * B]{0.2836,
                                             -0.5619,
                                             0.2836,
                                             0.1725,
                                             0.2836,
                                             0.5717,
                                             0.2342,
                                             0.0099,
                                             0.2342,
                                             -0.4393,
                                             0.2342,
                                             -0.5364};
    grad_output_ref = new float[T * num_task * B]{-0.1434,
                                                  0.0576,
                                                  -0.0076,
                                                  -0.1141,
                                                  -0.0708,
                                                  -0.0876,
                                                  -0.0649,
                                                  -0.0071,
                                                  -0.0240,
                                                  0.1031,
                                                  0.2131,
                                                  -0.1412};
    grad_weight_ref =
        new float[(hash_sizes[0] + hash_sizes[1]) * num_task]{-0.0000,
                                                              -0.2218,
                                                              0.0576,
                                                              -0.0876,
                                                              -0.1141,
                                                              0.0000,
                                                              0.1243,
                                                              -0.0071,
                                                              -0.1412,
                                                              0.1031};
  }
  // Releases everything allocated in SetUpTestCase.
  static void TearDownTestCase() {
    delete[] indices;
    delete[] offsets;
    delete[] weights;
    delete[] hash_sizes;
    delete[] output_ref;
    delete[] table_offsets;
    delete[] grad_weight_ref;
    delete[] grad_output_ref;
  }
};
// Out-of-line definitions for the fixture's static data members.
long* FBGEMMGPUBatchUnaryEmbeddingTest::indices;
long* FBGEMMGPUBatchUnaryEmbeddingTest::offsets;
int* FBGEMMGPUBatchUnaryEmbeddingTest::hash_sizes;
float* FBGEMMGPUBatchUnaryEmbeddingTest::weights;
float* FBGEMMGPUBatchUnaryEmbeddingTest::output_ref;
long* FBGEMMGPUBatchUnaryEmbeddingTest::table_offsets;
float* FBGEMMGPUBatchUnaryEmbeddingTest::grad_weight_ref;
float* FBGEMMGPUBatchUnaryEmbeddingTest::grad_output_ref;
// Runs the forward kernel wrapper on the fixture problem and compares the
// device output element-wise against output_ref.
TEST_F(FBGEMMGPUBatchUnaryEmbeddingTest, forward_test) {
  // Zero-init so a failing cudaGetDeviceCount still takes the skip path
  // instead of reading an indeterminate value.
  int device_cnt = 0;
  cudaGetDeviceCount(&device_cnt);
  if (device_cnt == 0) {
    GTEST_SKIP();
  }
  // gpu ptrs
  long* offsets_gpu_ptr;
  long* indices_gpu_ptr;
  long* table_offsets_gpu_ptr;
  float* embedding_table_gpu_ptr;
  float* output_gpu_ptr;
  // cpu ptrs
  float* output_cpu_ptr = new float[T * num_task * B];
  CUDA_CHECK(
      cudaMalloc((void**)&offsets_gpu_ptr, (indices_size + 1) * sizeof(long)));
  CUDA_CHECK(cudaMalloc((void**)&indices_gpu_ptr, indices_size * sizeof(long)));
  CUDA_CHECK(cudaMalloc(
      (void**)&embedding_table_gpu_ptr,
      (hash_sizes[0] + hash_sizes[1]) * num_task * sizeof(float)));
  CUDA_CHECK(
      cudaMalloc((void**)&table_offsets_gpu_ptr, (T + 1) * sizeof(long)));
  CUDA_CHECK(
      cudaMalloc((void**)&output_gpu_ptr, T * num_task * B * sizeof(float)));
  // Upload all inputs.
  CUDA_CHECK(cudaMemcpy(
      offsets_gpu_ptr,
      offsets,
      (indices_size + 1) * sizeof(long),
      cudaMemcpyKind::cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMemcpy(
      indices_gpu_ptr,
      indices,
      indices_size * sizeof(long),
      cudaMemcpyKind::cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMemcpy(
      embedding_table_gpu_ptr,
      weights,
      (hash_sizes[0] + hash_sizes[1]) * num_task * sizeof(float),
      cudaMemcpyKind::cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMemcpy(
      table_offsets_gpu_ptr,
      table_offsets,
      (T + 1) * sizeof(long),
      cudaMemcpyKind::cudaMemcpyHostToDevice));
  fbgemm_gpu_test::batched_unary_embeddings_forward(
      num_task,
      B,
      T,
      embedding_table_gpu_ptr,
      table_offsets_gpu_ptr,
      offsets_gpu_ptr,
      indices_gpu_ptr,
      output_gpu_ptr);
  // Blocking copy; also synchronizes with the kernel above.
  CUDA_CHECK(cudaMemcpy(
      output_cpu_ptr,
      output_gpu_ptr,
      T * num_task * B * sizeof(float),
      cudaMemcpyKind::cudaMemcpyDeviceToHost));
  // Check the frees too: unchecked cudaFree can hide a sticky error from the
  // preceding kernel launch (was unchecked before).
  CUDA_CHECK(cudaFree(offsets_gpu_ptr));
  CUDA_CHECK(cudaFree(indices_gpu_ptr));
  CUDA_CHECK(cudaFree(embedding_table_gpu_ptr));
  CUDA_CHECK(cudaFree(table_offsets_gpu_ptr));
  CUDA_CHECK(cudaFree(output_gpu_ptr));
  for (int i = 0; i < T * num_task * B; i++) {
    ASSERT_FLOAT_EQ(output_cpu_ptr[i], output_ref[i]);
  }
  delete[] output_cpu_ptr;
}
// Runs the backward kernel wrapper with grad_output_ref and compares the
// resulting weight gradient against grad_weight_ref within a small tolerance.
TEST_F(FBGEMMGPUBatchUnaryEmbeddingTest, backward_test) {
  // Zero-init so a failing cudaGetDeviceCount still takes the skip path
  // instead of reading an indeterminate value.
  int device_cnt = 0;
  cudaGetDeviceCount(&device_cnt);
  if (device_cnt == 0) {
    GTEST_SKIP();
  }
  // gpu ptrs
  long* offsets_gpu_ptr;
  long* indices_gpu_ptr;
  long* table_offsets_gpu_ptr;
  float* grad_output_gpu_ptr;
  float* grad_weight_gpu_ptr;
  // cpu ptrs
  float* grad_weight_cpu_ptr =
      new float[(hash_sizes[0] + hash_sizes[1]) * num_task];
  CUDA_CHECK(
      cudaMalloc((void**)&offsets_gpu_ptr, (indices_size + 1) * sizeof(long)));
  CUDA_CHECK(cudaMalloc((void**)&indices_gpu_ptr, indices_size * sizeof(long)));
  CUDA_CHECK(cudaMalloc(
      (void**)&grad_output_gpu_ptr, T * num_task * B * sizeof(float)));
  CUDA_CHECK(
      cudaMalloc((void**)&table_offsets_gpu_ptr, (T + 1) * sizeof(long)));
  CUDA_CHECK(cudaMalloc(
      (void**)&grad_weight_gpu_ptr,
      (hash_sizes[0] + hash_sizes[1]) * num_task * sizeof(float)));
  // Upload all inputs.
  CUDA_CHECK(cudaMemcpy(
      offsets_gpu_ptr,
      offsets,
      (indices_size + 1) * sizeof(long),
      cudaMemcpyKind::cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMemcpy(
      indices_gpu_ptr,
      indices,
      indices_size * sizeof(long),
      cudaMemcpyKind::cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMemcpy(
      grad_output_gpu_ptr,
      grad_output_ref,
      T * num_task * B * sizeof(float),
      cudaMemcpyKind::cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMemcpy(
      table_offsets_gpu_ptr,
      table_offsets,
      (T + 1) * sizeof(long),
      cudaMemcpyKind::cudaMemcpyHostToDevice));
  fbgemm_gpu_test::batched_unary_embeddings_backward(
      num_task,
      B,
      T,
      grad_output_gpu_ptr,
      table_offsets_gpu_ptr,
      offsets_gpu_ptr,
      indices_gpu_ptr,
      grad_weight_gpu_ptr);
  // Blocking copy; also synchronizes with the kernel above.
  CUDA_CHECK(cudaMemcpy(
      grad_weight_cpu_ptr,
      grad_weight_gpu_ptr,
      (hash_sizes[0] + hash_sizes[1]) * num_task * sizeof(float),
      cudaMemcpyKind::cudaMemcpyDeviceToHost));
  // Check the frees too: unchecked cudaFree can hide a sticky error from the
  // preceding kernel launch (was unchecked before).
  CUDA_CHECK(cudaFree(offsets_gpu_ptr));
  CUDA_CHECK(cudaFree(indices_gpu_ptr));
  CUDA_CHECK(cudaFree(grad_output_gpu_ptr));
  CUDA_CHECK(cudaFree(table_offsets_gpu_ptr));
  CUDA_CHECK(cudaFree(grad_weight_gpu_ptr));
  for (int i = 0; i < (hash_sizes[0] + hash_sizes[1]) * num_task; i++) {
    ASSERT_NEAR(grad_weight_cpu_ptr[i], grad_weight_ref[i], 0.0002);
  }
  delete[] grad_weight_cpu_ptr;
}
|
atomic_add_bw.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <algorithm>
#define THREADS_PER_BLOCK 1024
#define THREADS_PER_SM 2048
#define BLOCKS_NUM 160
#define TOTAL_THREADS (THREADS_PER_BLOCK*BLOCKS_NUM)
#define WARP_SIZE 32
#define REPEAT_TIMES 16
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a failed HIP call with its source location; when `abort` is set
// (the default) the process exits with the error code as its status.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true){
    if (code == hipSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Micro-benchmark kernel: each thread performs REPEAT_TIMES atomicAdd
// operations on its own element of `data1`, sampling the clock counter before
// and after so the host can derive an aggregate atomic-add bandwidth.
// startClk/stopClk/data1/res are indexed per global thread id.
template <class T>
__global__ void max_flops(uint32_t *startClk, uint32_t *stopClk, T *data1, T *res) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    //register T s1 = data1[gid];
    //register T s2 = data2[gid];
    //register T result = 0;
    // synchronize all threads (bar.sync 0 is a block-wide barrier, aligning the
    // block's threads before the start clock is sampled)
    asm volatile ("bar.sync 0;");
    // start timing: read the per-SM cycle counter
    uint32_t start = 0;
    asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
    // timed section: each thread hits a distinct address, so there is no
    // inter-thread contention on the atomics
    for (int j=0 ; j<REPEAT_TIMES ; ++j) {
        atomicAdd(&data1[gid], 10);
    }
    // synchronize all threads
    asm volatile("bar.sync 0;");
    // stop timing
    uint32_t stop = 0;
    asm volatile("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
    // write time and data back to memory; res is written from data1[0],
    // presumably so the atomic loop stays observable and cannot be elided
    // (NOTE(review): confirm intent)
    startClk[gid] = start;
    stopClk[gid] = stop;
    res[gid] = data1[0];
}
int main(){
uint32_t *startClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
uint32_t *stopClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
int32_t *data1 = (int32_t*) malloc(TOTAL_THREADS*sizeof(int32_t));
//int32_t *data2 = (int32_t*) malloc(TOTAL_THREADS*sizeof(int32_t));
int32_t *res = (int32_t*) malloc(TOTAL_THREADS*sizeof(int32_t));
uint32_t *startClk_g;
uint32_t *stopClk_g;
int32_t *data1_g;
//int32_t *data2_g;
int32_t *res_g;
for (uint32_t i=0; i<TOTAL_THREADS; i++) {
data1[i] = (int32_t)i;
//data2[i] = (int32_t)i;
}
gpuErrchk( hipMalloc(&startClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
gpuErrchk( hipMalloc(&stopClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
gpuErrchk( hipMalloc(&data1_g, TOTAL_THREADS*sizeof(int32_t)) );
//gpuErrchk( hipMalloc(&data2_g, TOTAL_THREADS*sizeof(int32_t)) );
gpuErrchk( hipMalloc(&res_g, TOTAL_THREADS*sizeof(int32_t)) );
gpuErrchk( hipMemcpy(data1_g, data1, TOTAL_THREADS*sizeof(int32_t), hipMemcpyHostToDevice) );
//gpuErrchk( hipMemcpy(data2_g, data2, TOTAL_THREADS*sizeof(int32_t), hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( max_flops<int32_t>), dim3(BLOCKS_NUM),dim3(THREADS_PER_BLOCK), 0, 0, startClk_g, stopClk_g, data1_g, res_g);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipMemcpy(startClk, startClk_g, TOTAL_THREADS*sizeof(uint32_t), hipMemcpyDeviceToHost) );
gpuErrchk( hipMemcpy(stopClk, stopClk_g, TOTAL_THREADS*sizeof(uint32_t), hipMemcpyDeviceToHost) );
gpuErrchk( hipMemcpy(res, res_g, TOTAL_THREADS*sizeof(int32_t), hipMemcpyDeviceToHost) );
float bw;
uint32_t total_time = *std::max_element(&stopClk[0],&stopClk[TOTAL_THREADS-1])-*std::min_element(&startClk[0],&startClk[TOTAL_THREADS-1]);
bw = ((float)(REPEAT_TIMES*TOTAL_THREADS*4)/(float)(total_time));
printf("int32 bendwidth = %f (byte/clk)\n", bw);
printf("Total Clk number = %u \n", total_time);
return 0;
}
| atomic_add_bw.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <iostream>
#include <algorithm>
#define THREADS_PER_BLOCK 1024
#define THREADS_PER_SM 2048
#define BLOCKS_NUM 160
#define TOTAL_THREADS (THREADS_PER_BLOCK*BLOCKS_NUM)
#define WARP_SIZE 32
#define REPEAT_TIMES 16
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a failed CUDA call with its source location; when `abort` is set
// (the default) the process exits with the error code as its status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Micro-benchmark kernel: each thread performs REPEAT_TIMES atomicAdd
// operations on its own element of `data1`, sampling the clock counter before
// and after so the host can derive an aggregate atomic-add bandwidth.
// startClk/stopClk/data1/res are indexed per global thread id.
template <class T>
__global__ void max_flops(uint32_t *startClk, uint32_t *stopClk, T *data1, T *res) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    //register T s1 = data1[gid];
    //register T s2 = data2[gid];
    //register T result = 0;
    // synchronize all threads (bar.sync 0 is a block-wide barrier, aligning the
    // block's threads before the start clock is sampled)
    asm volatile ("bar.sync 0;");
    // start timing: read the per-SM cycle counter
    uint32_t start = 0;
    asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
    // timed section: each thread hits a distinct address, so there is no
    // inter-thread contention on the atomics
    for (int j=0 ; j<REPEAT_TIMES ; ++j) {
        atomicAdd(&data1[gid], 10);
    }
    // synchronize all threads
    asm volatile("bar.sync 0;");
    // stop timing
    uint32_t stop = 0;
    asm volatile("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
    // write time and data back to memory; res is written from data1[0],
    // presumably so the atomic loop stays observable and cannot be elided
    // (NOTE(review): confirm intent)
    startClk[gid] = start;
    stopClk[gid] = stop;
    res[gid] = data1[0];
}
// Host driver: fills data1, runs the max_flops kernel, and reports the
// aggregate atomic-add bandwidth in bytes per clock over the span between the
// earliest start clock and the latest stop clock of any thread.
int main(){
    uint32_t *startClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
    uint32_t *stopClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
    int32_t *data1 = (int32_t*) malloc(TOTAL_THREADS*sizeof(int32_t));
    //int32_t *data2 = (int32_t*) malloc(TOTAL_THREADS*sizeof(int32_t));
    int32_t *res = (int32_t*) malloc(TOTAL_THREADS*sizeof(int32_t));
    uint32_t *startClk_g;
    uint32_t *stopClk_g;
    int32_t *data1_g;
    //int32_t *data2_g;
    int32_t *res_g;
    for (uint32_t i=0; i<TOTAL_THREADS; i++) {
        data1[i] = (int32_t)i;
        //data2[i] = (int32_t)i;
    }
    gpuErrchk( cudaMalloc(&startClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
    gpuErrchk( cudaMalloc(&stopClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
    gpuErrchk( cudaMalloc(&data1_g, TOTAL_THREADS*sizeof(int32_t)) );
    //gpuErrchk( cudaMalloc(&data2_g, TOTAL_THREADS*sizeof(int32_t)) );
    gpuErrchk( cudaMalloc(&res_g, TOTAL_THREADS*sizeof(int32_t)) );
    gpuErrchk( cudaMemcpy(data1_g, data1, TOTAL_THREADS*sizeof(int32_t), cudaMemcpyHostToDevice) );
    //gpuErrchk( cudaMemcpy(data2_g, data2, TOTAL_THREADS*sizeof(int32_t), cudaMemcpyHostToDevice) );
    max_flops<int32_t><<<BLOCKS_NUM,THREADS_PER_BLOCK>>>(startClk_g, stopClk_g, data1_g, res_g);
    gpuErrchk( cudaPeekAtLastError() );
    // Blocking copies below also synchronize with the kernel.
    gpuErrchk( cudaMemcpy(startClk, startClk_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(stopClk, stopClk_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(res, res_g, TOTAL_THREADS*sizeof(int32_t), cudaMemcpyDeviceToHost) );
    float bw;
    // std::min/max_element take half-open [first, last) ranges; the previous
    // end iterator &arr[TOTAL_THREADS-1] silently excluded the last thread's
    // clock values from the reduction.
    uint32_t total_time = *std::max_element(stopClk, stopClk + TOTAL_THREADS)
                        - *std::min_element(startClk, startClk + TOTAL_THREADS);
    // 4 bytes are atomically added per thread per repeat.
    bw = ((float)(REPEAT_TIMES*TOTAL_THREADS*4)/(float)(total_time));
    printf("int32 bandwidth = %f (byte/clk)\n", bw);
    printf("Total Clk number = %u \n", total_time);
    // Release device and host buffers (previously all leaked).
    gpuErrchk( cudaFree(startClk_g) );
    gpuErrchk( cudaFree(stopClk_g) );
    gpuErrchk( cudaFree(data1_g) );
    gpuErrchk( cudaFree(res_g) );
    free(startClk);
    free(stopClk);
    free(data1);
    free(res);
    return 0;
}
|
19aa6268a25f25918de643b14c2d51756d05c75f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_scalarMulf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for each requested matrix size and each of 20 block
// shapes, launches the vec_scalarMulf kernel 1000 times and prints
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
// Usage: prog <num_matrix_sizes>  (selects the first N rows of matrices_[])
int main(int argc, char **argv) {
    // BUG FIX: validate CLI input before dereferencing argv[1] and before
    // indexing matrices_ (7 rows) -- the original crashed / read OOB on bad input.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <num_matrix_sizes (0-7)>\n", argv[0]);
        return 1;
    }
    hipSetDevice(0);
    char *endp;
    int matrix_len = (int)strtol(argv[1], &endp, 10);
    if (matrix_len < 0 || matrix_len > 7) {
        fprintf(stderr, "num_matrix_sizes must be in [0,7]\n");
        return 1;
    }
    for (int mi = 0; mi < matrix_len; mi++) {
        for (int bi = 0; bi < 20; bi++) {
            const int XSIZE = matrices_[mi][0], YSIZE = matrices_[mi][1];
            const int BLOCKX = blocks_[bi][0], BLOCKY = blocks_[bi][1];
            size_t n = (size_t)XSIZE * (size_t)YSIZE;
            // BUG FIX: the original passed XSIZE*YSIZE as the *byte* count,
            // under-allocating 4x for buffers of n floats.
            float *result = NULL;
            hipMalloc(&result, n * sizeof(float));
            float x = 1;
            float *y = NULL;
            hipMalloc(&y, n * sizeof(float));
            // Round the launch domain up to a multiple of the block shape.
            int iXSIZE = XSIZE, iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) iXSIZE++;
            while (iYSIZE % BLOCKY != 0) iYSIZE++;
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // harmless call used only to force runtime/context init
            // One launch + sync, then a 10-iteration warm-up.
            hipLaunchKernelGGL(vec_scalarMulf, gridBlock, threadBlock, 0, 0, n, result, x, y);
            hipDeviceSynchronize();
            for (int warm = 0; warm < 10; ++warm) {
                hipLaunchKernelGGL(vec_scalarMulf, gridBlock, threadBlock, 0, 0, n, result, x, y);
            }
            // BUG FIX: launches are asynchronous -- without draining the queue
            // before and after, the timed section measured enqueue overhead plus
            // leftover warm-up work instead of 1000 kernel executions.
            hipDeviceSynchronize();
            auto start = steady_clock::now();
            for (int it = 0; it < 1000; it++) {
                hipLaunchKernelGGL(vec_scalarMulf, gridBlock, threadBlock, 0, 0, n, result, x, y);
            }
            hipDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUG FIX: the original leaked both device buffers on every
            // configuration (up to 140 allocations never freed).
            hipFree(result);
            hipFree(y);
        }
}} | 19aa6268a25f25918de643b14c2d51756d05c75f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_scalarMulf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for each requested matrix size and each of 20 block
// shapes, launches the vec_scalarMulf kernel 1000 times and prints
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
// Usage: prog <num_matrix_sizes>  (selects the first N rows of matrices_[])
int main(int argc, char **argv) {
    // BUG FIX: validate CLI input before dereferencing argv[1] and before
    // indexing matrices_ (7 rows) -- the original crashed / read OOB on bad input.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <num_matrix_sizes (0-7)>\n", argv[0]);
        return 1;
    }
    cudaSetDevice(0);
    char *endp;
    int matrix_len = (int)strtol(argv[1], &endp, 10);
    if (matrix_len < 0 || matrix_len > 7) {
        fprintf(stderr, "num_matrix_sizes must be in [0,7]\n");
        return 1;
    }
    for (int mi = 0; mi < matrix_len; mi++) {
        for (int bi = 0; bi < 20; bi++) {
            const int XSIZE = matrices_[mi][0], YSIZE = matrices_[mi][1];
            const int BLOCKX = blocks_[bi][0], BLOCKY = blocks_[bi][1];
            size_t n = (size_t)XSIZE * (size_t)YSIZE;
            // BUG FIX: the original passed XSIZE*YSIZE as the *byte* count,
            // under-allocating 4x for buffers of n floats.
            float *result = NULL;
            cudaMalloc(&result, n * sizeof(float));
            float x = 1;
            float *y = NULL;
            cudaMalloc(&y, n * sizeof(float));
            // Round the launch domain up to a multiple of the block shape.
            int iXSIZE = XSIZE, iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) iXSIZE++;
            while (iYSIZE % BLOCKY != 0) iYSIZE++;
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // harmless call used only to force context creation
            // One launch + sync, then a 10-iteration warm-up.
            vec_scalarMulf<<<gridBlock, threadBlock>>>(n, result, x, y);
            cudaDeviceSynchronize();
            for (int warm = 0; warm < 10; ++warm) {
                vec_scalarMulf<<<gridBlock, threadBlock>>>(n, result, x, y);
            }
            // BUG FIX: launches are asynchronous -- without draining the queue
            // before and after, the timed section measured enqueue overhead plus
            // leftover warm-up work instead of 1000 kernel executions.
            cudaDeviceSynchronize();
            auto start = steady_clock::now();
            for (int it = 0; it < 1000; it++) {
                vec_scalarMulf<<<gridBlock, threadBlock>>>(n, result, x, y);
            }
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUG FIX: the original leaked both device buffers on every
            // configuration (up to 140 allocations never freed).
            cudaFree(result);
            cudaFree(y);
        }
}} |
0b8612b55d1fc433815cdbd99b30421cf9a47496.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kLogistic2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for each requested matrix size and each of 20 block
// shapes, launches the kLogistic2 kernel 1000 times and prints
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
// Usage: prog <num_matrix_sizes>  (selects the first N rows of matrices_[])
int main(int argc, char **argv) {
    // BUG FIX: validate CLI input before dereferencing argv[1] and before
    // indexing matrices_ (7 rows) -- the original crashed / read OOB on bad input.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <num_matrix_sizes (0-7)>\n", argv[0]);
        return 1;
    }
    hipSetDevice(0);
    char *endp;
    int matrix_len = (int)strtol(argv[1], &endp, 10);
    if (matrix_len < 0 || matrix_len > 7) {
        fprintf(stderr, "num_matrix_sizes must be in [0,7]\n");
        return 1;
    }
    for (int mi = 0; mi < matrix_len; mi++) {
        for (int bi = 0; bi < 20; bi++) {
            const int XSIZE = matrices_[mi][0], YSIZE = matrices_[mi][1];
            const int BLOCKX = blocks_[bi][0], BLOCKY = blocks_[bi][1];
            // BUG FIX: the original passed XSIZE*YSIZE as the *byte* count,
            // under-allocating 4x for float buffers of XSIZE*YSIZE elements.
            size_t bytes = (size_t)XSIZE * (size_t)YSIZE * sizeof(float);
            float *gData = NULL;
            hipMalloc(&gData, bytes);
            float *target = NULL;
            hipMalloc(&target, bytes);
            unsigned int numElements = 1;
            // Round the launch domain up to a multiple of the block shape.
            int iXSIZE = XSIZE, iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) iXSIZE++;
            while (iYSIZE % BLOCKY != 0) iYSIZE++;
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // harmless call used only to force runtime/context init
            // One launch + sync, then a 10-iteration warm-up.
            hipLaunchKernelGGL(kLogistic2, gridBlock, threadBlock, 0, 0, gData, target, numElements);
            hipDeviceSynchronize();
            for (int warm = 0; warm < 10; ++warm) {
                hipLaunchKernelGGL(kLogistic2, gridBlock, threadBlock, 0, 0, gData, target, numElements);
            }
            // BUG FIX: launches are asynchronous -- without draining the queue
            // before and after, the timed section measured enqueue overhead plus
            // leftover warm-up work instead of 1000 kernel executions.
            hipDeviceSynchronize();
            auto start = steady_clock::now();
            for (int it = 0; it < 1000; it++) {
                hipLaunchKernelGGL(kLogistic2, gridBlock, threadBlock, 0, 0, gData, target, numElements);
            }
            hipDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUG FIX: the original leaked both device buffers on every
            // configuration (up to 140 allocations never freed).
            hipFree(gData);
            hipFree(target);
        }
}} | 0b8612b55d1fc433815cdbd99b30421cf9a47496.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kLogistic2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for each requested matrix size and each of 20 block
// shapes, launches the kLogistic2 kernel 1000 times and prints
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
// Usage: prog <num_matrix_sizes>  (selects the first N rows of matrices_[])
int main(int argc, char **argv) {
    // BUG FIX: validate CLI input before dereferencing argv[1] and before
    // indexing matrices_ (7 rows) -- the original crashed / read OOB on bad input.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <num_matrix_sizes (0-7)>\n", argv[0]);
        return 1;
    }
    cudaSetDevice(0);
    char *endp;
    int matrix_len = (int)strtol(argv[1], &endp, 10);
    if (matrix_len < 0 || matrix_len > 7) {
        fprintf(stderr, "num_matrix_sizes must be in [0,7]\n");
        return 1;
    }
    for (int mi = 0; mi < matrix_len; mi++) {
        for (int bi = 0; bi < 20; bi++) {
            const int XSIZE = matrices_[mi][0], YSIZE = matrices_[mi][1];
            const int BLOCKX = blocks_[bi][0], BLOCKY = blocks_[bi][1];
            // BUG FIX: the original passed XSIZE*YSIZE as the *byte* count,
            // under-allocating 4x for float buffers of XSIZE*YSIZE elements.
            size_t bytes = (size_t)XSIZE * (size_t)YSIZE * sizeof(float);
            float *gData = NULL;
            cudaMalloc(&gData, bytes);
            float *target = NULL;
            cudaMalloc(&target, bytes);
            unsigned int numElements = 1;
            // Round the launch domain up to a multiple of the block shape.
            int iXSIZE = XSIZE, iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) iXSIZE++;
            while (iYSIZE % BLOCKY != 0) iYSIZE++;
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // harmless call used only to force context creation
            // One launch + sync, then a 10-iteration warm-up.
            kLogistic2<<<gridBlock, threadBlock>>>(gData, target, numElements);
            cudaDeviceSynchronize();
            for (int warm = 0; warm < 10; ++warm) {
                kLogistic2<<<gridBlock, threadBlock>>>(gData, target, numElements);
            }
            // BUG FIX: launches are asynchronous -- without draining the queue
            // before and after, the timed section measured enqueue overhead plus
            // leftover warm-up work instead of 1000 kernel executions.
            cudaDeviceSynchronize();
            auto start = steady_clock::now();
            for (int it = 0; it < 1000; it++) {
                kLogistic2<<<gridBlock, threadBlock>>>(gData, target, numElements);
            }
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUG FIX: the original leaked both device buffers on every
            // configuration (up to 140 allocations never freed).
            cudaFree(gData);
            cudaFree(target);
        }
}} |
cc3635e0f927190f78b7091b990f47964324ff82.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sigmoidActivationForward.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for each requested matrix size and each of 20 block
// shapes, launches the sigmoidActivationForward kernel 1000 times and prints
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
// Usage: prog <num_matrix_sizes>  (selects the first N rows of matrices_[])
int main(int argc, char **argv) {
    // BUG FIX: validate CLI input before dereferencing argv[1] and before
    // indexing matrices_ (7 rows) -- the original crashed / read OOB on bad input.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <num_matrix_sizes (0-7)>\n", argv[0]);
        return 1;
    }
    hipSetDevice(0);
    char *endp;
    int matrix_len = (int)strtol(argv[1], &endp, 10);
    if (matrix_len < 0 || matrix_len > 7) {
        fprintf(stderr, "num_matrix_sizes must be in [0,7]\n");
        return 1;
    }
    for (int mi = 0; mi < matrix_len; mi++) {
        for (int bi = 0; bi < 20; bi++) {
            const int XSIZE = matrices_[mi][0], YSIZE = matrices_[mi][1];
            const int BLOCKX = blocks_[bi][0], BLOCKY = blocks_[bi][1];
            // BUG FIX: the original passed XSIZE*YSIZE as the *byte* count,
            // under-allocating 4x for float buffers of XSIZE*YSIZE elements.
            size_t bytes = (size_t)XSIZE * (size_t)YSIZE * sizeof(float);
            float *Z = NULL;
            hipMalloc(&Z, bytes);
            float *A = NULL;
            hipMalloc(&A, bytes);
            int Z_x_dim = 1;
            int Z_y_dim = 1;
            // Round the launch domain up to a multiple of the block shape.
            int iXSIZE = XSIZE, iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) iXSIZE++;
            while (iYSIZE % BLOCKY != 0) iYSIZE++;
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // harmless call used only to force runtime/context init
            // One launch + sync, then a 10-iteration warm-up.
            hipLaunchKernelGGL(sigmoidActivationForward, gridBlock, threadBlock, 0, 0, Z, A, Z_x_dim, Z_y_dim);
            hipDeviceSynchronize();
            for (int warm = 0; warm < 10; ++warm) {
                hipLaunchKernelGGL(sigmoidActivationForward, gridBlock, threadBlock, 0, 0, Z, A, Z_x_dim, Z_y_dim);
            }
            // BUG FIX: launches are asynchronous -- without draining the queue
            // before and after, the timed section measured enqueue overhead plus
            // leftover warm-up work instead of 1000 kernel executions.
            hipDeviceSynchronize();
            auto start = steady_clock::now();
            for (int it = 0; it < 1000; it++) {
                hipLaunchKernelGGL(sigmoidActivationForward, gridBlock, threadBlock, 0, 0, Z, A, Z_x_dim, Z_y_dim);
            }
            hipDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUG FIX: the original leaked both device buffers on every
            // configuration (up to 140 allocations never freed).
            hipFree(Z);
            hipFree(A);
        }
}} | cc3635e0f927190f78b7091b990f47964324ff82.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sigmoidActivationForward.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for each requested matrix size and each of 20 block
// shapes, launches the sigmoidActivationForward kernel 1000 times and prints
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
// Usage: prog <num_matrix_sizes>  (selects the first N rows of matrices_[])
int main(int argc, char **argv) {
    // BUG FIX: validate CLI input before dereferencing argv[1] and before
    // indexing matrices_ (7 rows) -- the original crashed / read OOB on bad input.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <num_matrix_sizes (0-7)>\n", argv[0]);
        return 1;
    }
    cudaSetDevice(0);
    char *endp;
    int matrix_len = (int)strtol(argv[1], &endp, 10);
    if (matrix_len < 0 || matrix_len > 7) {
        fprintf(stderr, "num_matrix_sizes must be in [0,7]\n");
        return 1;
    }
    for (int mi = 0; mi < matrix_len; mi++) {
        for (int bi = 0; bi < 20; bi++) {
            const int XSIZE = matrices_[mi][0], YSIZE = matrices_[mi][1];
            const int BLOCKX = blocks_[bi][0], BLOCKY = blocks_[bi][1];
            // BUG FIX: the original passed XSIZE*YSIZE as the *byte* count,
            // under-allocating 4x for float buffers of XSIZE*YSIZE elements.
            size_t bytes = (size_t)XSIZE * (size_t)YSIZE * sizeof(float);
            float *Z = NULL;
            cudaMalloc(&Z, bytes);
            float *A = NULL;
            cudaMalloc(&A, bytes);
            int Z_x_dim = 1;
            int Z_y_dim = 1;
            // Round the launch domain up to a multiple of the block shape.
            int iXSIZE = XSIZE, iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) iXSIZE++;
            while (iYSIZE % BLOCKY != 0) iYSIZE++;
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // harmless call used only to force context creation
            // One launch + sync, then a 10-iteration warm-up.
            sigmoidActivationForward<<<gridBlock, threadBlock>>>(Z, A, Z_x_dim, Z_y_dim);
            cudaDeviceSynchronize();
            for (int warm = 0; warm < 10; ++warm) {
                sigmoidActivationForward<<<gridBlock, threadBlock>>>(Z, A, Z_x_dim, Z_y_dim);
            }
            // BUG FIX: launches are asynchronous -- without draining the queue
            // before and after, the timed section measured enqueue overhead plus
            // leftover warm-up work instead of 1000 kernel executions.
            cudaDeviceSynchronize();
            auto start = steady_clock::now();
            for (int it = 0; it < 1000; it++) {
                sigmoidActivationForward<<<gridBlock, threadBlock>>>(Z, A, Z_x_dim, Z_y_dim);
            }
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUG FIX: the original leaked both device buffers on every
            // configuration (up to 140 allocations never freed).
            cudaFree(Z);
            cudaFree(A);
        }
}} |
7df9b10f4b73a3925c5de0b44b14d33cadd24f57.hip | // !!! This is a file automatically generated by hipify!!!
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// DEFINE / INCLUDE
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
//======================================================================================================================================================
// LIBRARIES
//======================================================================================================================================================
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <avilib.h>
#include <avimod.h>
#include <hip/hip_runtime.h>
//======================================================================================================================================================
// STRUCTURES, GLOBAL STRUCTURE VARIABLES
//======================================================================================================================================================
#include "define.c"
params_common_change common_change;
__constant__ params_common_change d_common_change;
params_common common;
__constant__ params_common d_common;
params_unique unique[ALL_POINTS]; // cannot determine size dynamically so choose more than usually needed
__constant__ params_unique d_unique[ALL_POINTS];
//======================================================================================================================================================
// KERNEL CODE
//======================================================================================================================================================
#include "kernel.hip"
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// MAIN FUNCTION
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
int main(int argc, char *argv []){
//======================================================================================================================================================
// VARIABLES
//======================================================================================================================================================
// CUDA kernel execution parameters
dim3 threads;
dim3 blocks;
// counter
int i;
int frames_processed;
// frames
char* video_file_name;
avi_t* frames;
fp* frame;
//======================================================================================================================================================
// FRAME
//======================================================================================================================================================
if(argc!=4){
printf("ERROR: usage: heartwall <inputfile> <num of frames> <goldfile>\n");
exit(1);
}
const char* goldfile = argv[3];
// open movie file
video_file_name = argv[1];
frames = (avi_t*)AVI_open_input_file(video_file_name, 1); // added casting
if (frames == NULL) {
AVI_print_error((char *) "Error with AVI_open_input_file");
return -1;
}
// common
common.no_frames = AVI_video_frames(frames);
common.frame_rows = AVI_video_height(frames);
common.frame_cols = AVI_video_width(frames);
common.frame_elem = common.frame_rows * common.frame_cols;
common.frame_mem = sizeof(fp) * common.frame_elem;
// pointers
hipMalloc((void **)&common_change.d_frame, common.frame_mem);
//======================================================================================================================================================
// CHECK INPUT ARGUMENTS
//======================================================================================================================================================
frames_processed = atoi(argv[2]);
if(frames_processed<0 || frames_processed>common.no_frames){
printf("ERROR: %d is an incorrect number of frames specified, select in the range of 0-%d\n", frames_processed, common.no_frames);
return 0;
}
//======================================================================================================================================================
// HARDCODED INPUTS FROM MATLAB
//======================================================================================================================================================
//====================================================================================================
// CONSTANTS
//====================================================================================================
common.sSize = 40;
common.tSize = 25;
common.maxMove = 10;
common.alpha = 0.87;
//====================================================================================================
// ENDO POINTS
//====================================================================================================
common.endoPoints = ENDO_POINTS;
common.endo_mem = sizeof(int) * common.endoPoints;
common.endoRow = (int *)malloc(common.endo_mem);
common.endoRow[ 0] = 369;
common.endoRow[ 1] = 400;
common.endoRow[ 2] = 429;
common.endoRow[ 3] = 452;
common.endoRow[ 4] = 476;
common.endoRow[ 5] = 486;
common.endoRow[ 6] = 479;
common.endoRow[ 7] = 458;
common.endoRow[ 8] = 433;
common.endoRow[ 9] = 404;
common.endoRow[10] = 374;
common.endoRow[11] = 346;
common.endoRow[12] = 318;
common.endoRow[13] = 294;
common.endoRow[14] = 277;
common.endoRow[15] = 269;
common.endoRow[16] = 275;
common.endoRow[17] = 287;
common.endoRow[18] = 311;
common.endoRow[19] = 339;
hipMalloc((void **)&common.d_endoRow, common.endo_mem);
hipMemcpy(common.d_endoRow, common.endoRow, common.endo_mem, hipMemcpyHostToDevice);
common.endoCol = (int *)malloc(common.endo_mem);
common.endoCol[ 0] = 408;
common.endoCol[ 1] = 406;
common.endoCol[ 2] = 397;
common.endoCol[ 3] = 383;
common.endoCol[ 4] = 354;
common.endoCol[ 5] = 322;
common.endoCol[ 6] = 294;
common.endoCol[ 7] = 270;
common.endoCol[ 8] = 250;
common.endoCol[ 9] = 237;
common.endoCol[10] = 235;
common.endoCol[11] = 241;
common.endoCol[12] = 254;
common.endoCol[13] = 273;
common.endoCol[14] = 300;
common.endoCol[15] = 328;
common.endoCol[16] = 356;
common.endoCol[17] = 383;
common.endoCol[18] = 401;
common.endoCol[19] = 411;
hipMalloc((void **)&common.d_endoCol, common.endo_mem);
hipMemcpy(common.d_endoCol, common.endoCol, common.endo_mem, hipMemcpyHostToDevice);
common.tEndoRowLoc = (int *)calloc(common.endo_mem * common.no_frames, 1);
hipMalloc((void **)&common.d_tEndoRowLoc, common.endo_mem * common.no_frames);
hipMemset((void *)common.d_tEndoRowLoc, 0, common.endo_mem * common.no_frames);
common.tEndoColLoc = (int *)calloc(common.endo_mem * common.no_frames, 1);
hipMalloc((void **)&common.d_tEndoColLoc, common.endo_mem * common.no_frames);
hipMemset((void *)common.d_tEndoColLoc, 0, common.endo_mem * common.no_frames);
//====================================================================================================
// EPI POINTS
//====================================================================================================
common.epiPoints = EPI_POINTS;
common.epi_mem = sizeof(int) * common.epiPoints;
common.epiRow = (int *)malloc(common.epi_mem);
common.epiRow[ 0] = 390;
common.epiRow[ 1] = 419;
common.epiRow[ 2] = 448;
common.epiRow[ 3] = 474;
common.epiRow[ 4] = 501;
common.epiRow[ 5] = 519;
common.epiRow[ 6] = 535;
common.epiRow[ 7] = 542;
common.epiRow[ 8] = 543;
common.epiRow[ 9] = 538;
common.epiRow[10] = 528;
common.epiRow[11] = 511;
common.epiRow[12] = 491;
common.epiRow[13] = 466;
common.epiRow[14] = 438;
common.epiRow[15] = 406;
common.epiRow[16] = 376;
common.epiRow[17] = 347;
common.epiRow[18] = 318;
common.epiRow[19] = 291;
common.epiRow[20] = 275;
common.epiRow[21] = 259;
common.epiRow[22] = 256;
common.epiRow[23] = 252;
common.epiRow[24] = 252;
common.epiRow[25] = 257;
common.epiRow[26] = 266;
common.epiRow[27] = 283;
common.epiRow[28] = 305;
common.epiRow[29] = 331;
common.epiRow[30] = 360;
hipMalloc((void **)&common.d_epiRow, common.epi_mem);
hipMemcpy(common.d_epiRow, common.epiRow, common.epi_mem, hipMemcpyHostToDevice);
common.epiCol = (int *)malloc(common.epi_mem);
common.epiCol[ 0] = 457;
common.epiCol[ 1] = 454;
common.epiCol[ 2] = 446;
common.epiCol[ 3] = 431;
common.epiCol[ 4] = 411;
common.epiCol[ 5] = 388;
common.epiCol[ 6] = 361;
common.epiCol[ 7] = 331;
common.epiCol[ 8] = 301;
common.epiCol[ 9] = 273;
common.epiCol[10] = 243;
common.epiCol[11] = 218;
common.epiCol[12] = 196;
common.epiCol[13] = 178;
common.epiCol[14] = 166;
common.epiCol[15] = 157;
common.epiCol[16] = 155;
common.epiCol[17] = 165;
common.epiCol[18] = 177;
common.epiCol[19] = 197;
common.epiCol[20] = 218;
common.epiCol[21] = 248;
common.epiCol[22] = 276;
common.epiCol[23] = 304;
common.epiCol[24] = 333;
common.epiCol[25] = 361;
common.epiCol[26] = 391;
common.epiCol[27] = 415;
common.epiCol[28] = 434;
common.epiCol[29] = 448;
common.epiCol[30] = 455;
hipMalloc((void **)&common.d_epiCol, common.epi_mem);
hipMemcpy(common.d_epiCol, common.epiCol, common.epi_mem, hipMemcpyHostToDevice);
common.tEpiRowLoc = (int *)calloc(common.epi_mem * common.no_frames, 1);
hipMalloc((void **)&common.d_tEpiRowLoc, common.epi_mem * common.no_frames);
hipMemset((void *)common.d_tEpiRowLoc, 0, common.epi_mem * common.no_frames);
common.tEpiColLoc = (int *)calloc(common.epi_mem * common.no_frames, 1);
hipMalloc((void **)&common.d_tEpiColLoc, common.epi_mem * common.no_frames);
hipMemset((void *)common.d_tEpiColLoc, 0, common.epi_mem * common.no_frames);
//====================================================================================================
// ALL POINTS
//====================================================================================================
common.allPoints = ALL_POINTS;
//======================================================================================================================================================
// TEMPLATE SIZES
//======================================================================================================================================================
// common
common.in_rows = common.tSize + 1 + common.tSize;
common.in_cols = common.in_rows;
common.in_elem = common.in_rows * common.in_cols;
common.in_mem = sizeof(fp) * common.in_elem;
//======================================================================================================================================================
// CREATE ARRAY OF TEMPLATES FOR ALL POINTS
//======================================================================================================================================================
// common
hipMalloc((void **)&common.d_endoT, common.in_mem * common.endoPoints);
hipMalloc((void **)&common.d_epiT, common.in_mem * common.epiPoints);
//======================================================================================================================================================
// SPECIFIC TO ENDO OR EPI TO BE SET HERE
//======================================================================================================================================================
for(i=0; i<common.endoPoints; i++){
unique[i].point_no = i;
unique[i].d_Row = common.d_endoRow;
unique[i].d_Col = common.d_endoCol;
unique[i].d_tRowLoc = common.d_tEndoRowLoc;
unique[i].d_tColLoc = common.d_tEndoColLoc;
unique[i].d_T = common.d_endoT;
}
for(i=common.endoPoints; i<common.allPoints; i++){
unique[i].point_no = i-common.endoPoints;
unique[i].d_Row = common.d_epiRow;
unique[i].d_Col = common.d_epiCol;
unique[i].d_tRowLoc = common.d_tEpiRowLoc;
unique[i].d_tColLoc = common.d_tEpiColLoc;
unique[i].d_T = common.d_epiT;
}
//======================================================================================================================================================
// RIGHT TEMPLATE FROM TEMPLATE ARRAY
//======================================================================================================================================================
// pointers
for(i=0; i<common.allPoints; i++){
unique[i].in_pointer = unique[i].point_no * common.in_elem;
}
//======================================================================================================================================================
// AREA AROUND POINT FROM FRAME
//======================================================================================================================================================
// common
common.in2_rows = 2 * common.sSize + 1;
common.in2_cols = 2 * common.sSize + 1;
common.in2_elem = common.in2_rows * common.in2_cols;
common.in2_mem = sizeof(float) * common.in2_elem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2, common.in2_mem);
}
//======================================================================================================================================================
// CONVOLUTION
//======================================================================================================================================================
// common
common.conv_rows = common.in_rows + common.in2_rows - 1; // number of rows in I
common.conv_cols = common.in_cols + common.in2_cols - 1; // number of columns in I
common.conv_elem = common.conv_rows * common.conv_cols; // number of elements
common.conv_mem = sizeof(float) * common.conv_elem;
common.ioffset = 0;
common.joffset = 0;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_conv, common.conv_mem);
}
//======================================================================================================================================================
// CUMULATIVE SUM
//======================================================================================================================================================
//====================================================================================================
// PADDING OF ARRAY, VERTICAL CUMULATIVE SUM
//====================================================================================================
// common
common.in2_pad_add_rows = common.in_rows;
common.in2_pad_add_cols = common.in_cols;
common.in2_pad_cumv_rows = common.in2_rows + 2*common.in2_pad_add_rows;
common.in2_pad_cumv_cols = common.in2_cols + 2*common.in2_pad_add_cols;
common.in2_pad_cumv_elem = common.in2_pad_cumv_rows * common.in2_pad_cumv_cols;
common.in2_pad_cumv_mem = sizeof(float) * common.in2_pad_cumv_elem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_pad_cumv, common.in2_pad_cumv_mem);
}
//====================================================================================================
// SELECTION
//====================================================================================================
// common
common.in2_pad_cumv_sel_rowlow = 1 + common.in_rows; // (1 to n+1)
common.in2_pad_cumv_sel_rowhig = common.in2_pad_cumv_rows - 1;
common.in2_pad_cumv_sel_collow = 1;
common.in2_pad_cumv_sel_colhig = common.in2_pad_cumv_cols;
common.in2_pad_cumv_sel_rows = common.in2_pad_cumv_sel_rowhig - common.in2_pad_cumv_sel_rowlow + 1;
common.in2_pad_cumv_sel_cols = common.in2_pad_cumv_sel_colhig - common.in2_pad_cumv_sel_collow + 1;
common.in2_pad_cumv_sel_elem = common.in2_pad_cumv_sel_rows * common.in2_pad_cumv_sel_cols;
common.in2_pad_cumv_sel_mem = sizeof(float) * common.in2_pad_cumv_sel_elem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_pad_cumv_sel, common.in2_pad_cumv_sel_mem);
}
//====================================================================================================
// SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM
//====================================================================================================
// common
common.in2_pad_cumv_sel2_rowlow = 1;
common.in2_pad_cumv_sel2_rowhig = common.in2_pad_cumv_rows - common.in_rows - 1;
common.in2_pad_cumv_sel2_collow = 1;
common.in2_pad_cumv_sel2_colhig = common.in2_pad_cumv_cols;
common.in2_sub_cumh_rows = common.in2_pad_cumv_sel2_rowhig - common.in2_pad_cumv_sel2_rowlow + 1;
common.in2_sub_cumh_cols = common.in2_pad_cumv_sel2_colhig - common.in2_pad_cumv_sel2_collow + 1;
common.in2_sub_cumh_elem = common.in2_sub_cumh_rows * common.in2_sub_cumh_cols;
common.in2_sub_cumh_mem = sizeof(float) * common.in2_sub_cumh_elem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_sub_cumh, common.in2_sub_cumh_mem);
}
//====================================================================================================
// SELECTION
//====================================================================================================
// common
common.in2_sub_cumh_sel_rowlow = 1;
common.in2_sub_cumh_sel_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel_collow = 1 + common.in_cols;
common.in2_sub_cumh_sel_colhig = common.in2_sub_cumh_cols - 1;
common.in2_sub_cumh_sel_rows = common.in2_sub_cumh_sel_rowhig - common.in2_sub_cumh_sel_rowlow + 1;
common.in2_sub_cumh_sel_cols = common.in2_sub_cumh_sel_colhig - common.in2_sub_cumh_sel_collow + 1;
common.in2_sub_cumh_sel_elem = common.in2_sub_cumh_sel_rows * common.in2_sub_cumh_sel_cols;
common.in2_sub_cumh_sel_mem = sizeof(float) * common.in2_sub_cumh_sel_elem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_sub_cumh_sel, common.in2_sub_cumh_sel_mem);
}
//====================================================================================================
// SELECTION 2, SUBTRACTION
//====================================================================================================
// common
common.in2_sub_cumh_sel2_rowlow = 1;
common.in2_sub_cumh_sel2_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel2_collow = 1;
common.in2_sub_cumh_sel2_colhig = common.in2_sub_cumh_cols - common.in_cols - 1;
common.in2_sub2_rows = common.in2_sub_cumh_sel2_rowhig - common.in2_sub_cumh_sel2_rowlow + 1;
common.in2_sub2_cols = common.in2_sub_cumh_sel2_colhig - common.in2_sub_cumh_sel2_collow + 1;
common.in2_sub2_elem = common.in2_sub2_rows * common.in2_sub2_cols;
common.in2_sub2_mem = sizeof(float) * common.in2_sub2_elem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_sub2, common.in2_sub2_mem);
}
//======================================================================================================================================================
// CUMULATIVE SUM 2
//======================================================================================================================================================
//====================================================================================================
// MULTIPLICATION
//====================================================================================================
// common: the squared area has the same shape as the area itself, so reuse its dimensions
common.in2_sqr_rows = common.in2_rows;
common.in2_sqr_cols = common.in2_cols;
common.in2_sqr_elem = common.in2_elem;
common.in2_sqr_mem = common.in2_mem;
// pointers: per-point device buffer for the element-wise square
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_sqr, common.in2_sqr_mem);
}
//====================================================================================================
// SELECTION 2, SUBTRACTION
//====================================================================================================
// common: same selection geometry as the first cumulative-sum pass, so reuse in2_sub2 dimensions
common.in2_sqr_sub2_rows = common.in2_sub2_rows;
common.in2_sqr_sub2_cols = common.in2_sub2_cols;
common.in2_sqr_sub2_elem = common.in2_sub2_elem;
common.in2_sqr_sub2_mem = common.in2_sub2_mem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in2_sqr_sub2, common.in2_sqr_sub2_mem);
}
//======================================================================================================================================================
// FINAL
//======================================================================================================================================================
// common: squared template has the same shape as the template
common.in_sqr_rows = common.in_rows;
common.in_sqr_cols = common.in_cols;
common.in_sqr_elem = common.in_elem;
common.in_sqr_mem = common.in_mem;
// pointers
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_in_sqr, common.in_sqr_mem);
}
//======================================================================================================================================================
// TEMPLATE MASK CREATE
//======================================================================================================================================================
// common: mask is square, sized for the template (in_rows) convolved with the
// (sSize+1+sSize)-wide search window: full-convolution size = a + b - 1
common.tMask_rows = common.in_rows + (common.sSize+1+common.sSize) - 1;
common.tMask_cols = common.tMask_rows;
common.tMask_elem = common.tMask_rows * common.tMask_cols;
common.tMask_mem = sizeof(float) * common.tMask_elem;
// pointers: per-point device buffer for the template mask
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_tMask, common.tMask_mem);
}
//======================================================================================================================================================
// POINT MASK INITIALIZE
//======================================================================================================================================================
// common: square point mask sized by the maximum allowed per-frame movement.
// Host-side sizes only — no device buffer is allocated for the mask in this section.
common.mask_rows = common.maxMove;
common.mask_cols = common.mask_rows;
common.mask_elem = common.mask_rows * common.mask_cols;
common.mask_mem = sizeof(float) * common.mask_elem;
//======================================================================================================================================================
// MASK CONVOLUTION
//======================================================================================================================================================
// common
common.mask_conv_rows = common.tMask_rows; // number of rows in I
common.mask_conv_cols = common.tMask_cols; // number of columns in I
common.mask_conv_elem = common.mask_conv_rows * common.mask_conv_cols; // number of elements
common.mask_conv_mem = sizeof(float) * common.mask_conv_elem;
// ioffset/joffset = ceil((mask_dim-1)/2): integer division floors, then the
// "% 2 > 0.5" test (an int 0/1 compared against 0.5) adds 1 back when (dim-1) is odd
common.mask_conv_ioffset = (common.mask_rows-1)/2;
if((common.mask_rows-1) % 2 > 0.5){
common.mask_conv_ioffset = common.mask_conv_ioffset + 1;
}
common.mask_conv_joffset = (common.mask_cols-1)/2;
if((common.mask_cols-1) % 2 > 0.5){
common.mask_conv_joffset = common.mask_conv_joffset + 1;
}
// pointers: per-point device buffer for the mask convolution result
for(i=0; i<common.allPoints; i++){
hipMalloc((void **)&unique[i].d_mask_conv, common.mask_conv_mem);
}
//======================================================================================================================================================
// KERNEL
//======================================================================================================================================================
//====================================================================================================
// THREAD BLOCK
//====================================================================================================
// All operations inside the kernel share one thread count: the block is sized for the
// largest (padded-matrix) operation and smaller operations use a subset of the threads.
// Launch shape: 1-D grid with one block per tracked point, NUMBER_THREADS threads per block.
threads.x = NUMBER_THREADS; // define the number of threads in the block
threads.y = 1;
blocks.x = common.allPoints; // define the number of blocks in the grid
blocks.y = 1;
//====================================================================================================
// COPY ARGUMENTS
//====================================================================================================
// Publish the fully initialized parameter structs to __constant__ device symbols;
// the kernel is launched with no arguments (see below), so these symbols carry its inputs.
hipMemcpyToSymbol(d_common, &common, sizeof(params_common));
hipMemcpyToSymbol(d_unique, &unique, sizeof(params_unique)*ALL_POINTS);
//====================================================================================================
// PRINT FRAME PROGRESS START
//====================================================================================================
printf("frame progress: ");
fflush(NULL);
//====================================================================================================
// LAUNCH
//====================================================================================================
// Per-frame processing loop: fetch frame, upload, update per-frame constants, run kernel.
for(common_change.frame_no=0; common_change.frame_no<frames_processed; common_change.frame_no++){
// Extract the current frame (frame_no) from the video file
frame = get_frame( frames, // pointer to video file
common_change.frame_no, // number of frame that needs to be returned
0, // cropped?
0, // scaled?
1); // converted
// copy frame to GPU memory (synchronous, default stream)
hipMemcpy(common_change.d_frame, frame, common.frame_mem, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_common_change, &common_change, sizeof(params_common_change));
// launch GPU kernel
// NOTE(review): no hipGetLastError()/hipDeviceSynchronize() after the launch —
// launch or execution failures would go unnoticed until a later blocking call
hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(threads), 0, 0, );
// free frame after each loop iteration, since AVI library allocates memory for every frame fetched
free(frame);
// print frame progress
printf("%d ", common_change.frame_no);
fflush(NULL);
}
//====================================================================================================
// PRINT FRAME PROGRESS END
//====================================================================================================
printf("\n");
fflush(NULL);
//====================================================================================================
// OUTPUT
//====================================================================================================
// Copy tracked row/col locations back to host. These are blocking copies on the default
// stream, so they also serialize after the kernels queued above. Sizes are in bytes
// (endo_mem/epi_mem already include sizeof(int)) times the number of frames.
hipMemcpy(common.tEndoRowLoc, common.d_tEndoRowLoc, common.endo_mem * common.no_frames, hipMemcpyDeviceToHost);
hipMemcpy(common.tEndoColLoc, common.d_tEndoColLoc, common.endo_mem * common.no_frames, hipMemcpyDeviceToHost);
hipMemcpy(common.tEpiRowLoc, common.d_tEpiRowLoc, common.epi_mem * common.no_frames, hipMemcpyDeviceToHost);
hipMemcpy(common.tEpiColLoc, common.d_tEpiColLoc, common.epi_mem * common.no_frames, hipMemcpyDeviceToHost);
// Dump every tracked location to stdout and to result.txt.
// Loop bound divides the byte count by sizeof(int) to recover the element count.
// NOTE(review): fopen("result.txt","w") result is not NULL-checked before use.
FILE *ofile = fopen("result.txt", "w");
for (int x = 0; x < common.endo_mem * common.no_frames / sizeof(int); x++) {
printf("common.tEndoRowLoc[%d] = %d\n", x, common.tEndoRowLoc[x]);
printf("common.tEndoColLoc[%d] = %d\n", x, common.tEndoColLoc[x]);
fprintf(ofile, "common.tEndoRowLoc[%d] = %d\n", x, common.tEndoRowLoc[x]);
fprintf(ofile, "common.tEndoColLoc[%d] = %d\n", x, common.tEndoColLoc[x]);
}
for (int x = 0; x < common.epi_mem * common.no_frames / sizeof(int); x++) {
printf("common.tEpiRowLoc[%d] = %d\n", x, common.tEpiRowLoc[x]);
printf("common.tEpiColLoc[%d] = %d\n", x, common.tEpiColLoc[x]);
fprintf(ofile, "common.tEpiRowLoc[%d] = %d\n", x, common.tEpiRowLoc[x]);
fprintf(ofile, "common.tEpiColLoc[%d] = %d\n", x, common.tEpiColLoc[x]);
}
fclose(ofile);
// Optional golden-output comparison: byte-for-byte compare of result.txt against goldfile.
// NOTE(review): neither fopen result is NULL-checked; a bad goldfile path would pass
// NULL to feof/fgetc (undefined behavior).
if(goldfile){
FILE *gold = fopen(goldfile, "r");
FILE *result = fopen("result.txt", "r");
int result_error=0;
while(!feof(gold)&&!feof(result)){
if (fgetc(gold)!=fgetc(result)) {
result_error = 1;
break;
}
}
// FAIL if the files differ in content or one ended before the other.
// NOTE(review): feof() returns an unspecified nonzero value; XOR-ing two feof()
// results assumes the implementation returns the same value for both streams.
if((feof(gold)^feof(result)) | result_error) {
printf("\nFAILED\n");
} else {
printf("\nPASSED\n");
}
fclose(gold);
fclose(result);
}
//======================================================================================================================================================
// DEALLOCATION
//======================================================================================================================================================
//====================================================================================================
// COMMON
//====================================================================================================
// frame: device-side frame buffer
hipFree(common_change.d_frame);
// endo points: host arrays (malloc/calloc) and their device counterparts
free(common.endoRow);
free(common.endoCol);
free(common.tEndoRowLoc);
free(common.tEndoColLoc);
hipFree(common.d_endoRow);
hipFree(common.d_endoCol);
hipFree(common.d_tEndoRowLoc);
hipFree(common.d_tEndoColLoc);
hipFree(common.d_endoT);
// epi points: host arrays and their device counterparts
free(common.epiRow);
free(common.epiCol);
free(common.tEpiRowLoc);
free(common.tEpiColLoc);
hipFree(common.d_epiRow);
hipFree(common.d_epiCol);
hipFree(common.d_tEpiRowLoc);
hipFree(common.d_tEpiColLoc);
hipFree(common.d_epiT);
//====================================================================================================
// POINTERS
//====================================================================================================
// Release the twelve per-point device scratch buffers allocated in the setup sections above.
for(i=0; i<common.allPoints; i++){
hipFree(unique[i].d_in2);
hipFree(unique[i].d_conv);
hipFree(unique[i].d_in2_pad_cumv);
hipFree(unique[i].d_in2_pad_cumv_sel);
hipFree(unique[i].d_in2_sub_cumh);
hipFree(unique[i].d_in2_sub_cumh_sel);
hipFree(unique[i].d_in2_sub2);
hipFree(unique[i].d_in2_sqr);
hipFree(unique[i].d_in2_sqr_sub2);
hipFree(unique[i].d_in_sqr);
hipFree(unique[i].d_tMask);
hipFree(unique[i].d_mask_conv);
}
// end of enclosing function (header is above this view)
}
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// MAIN FUNCTION
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
| 7df9b10f4b73a3925c5de0b44b14d33cadd24f57.cu | //===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// DEFINE / INCLUDE
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
//======================================================================================================================================================
// LIBRARIES
//======================================================================================================================================================
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <avilib.h>
#include <avimod.h>
#include <cuda.h>
//======================================================================================================================================================
// STRUCTURES, GLOBAL STRUCTURE VARIABLES
//======================================================================================================================================================
#include "define.c"
params_common_change common_change;
__constant__ params_common_change d_common_change;
params_common common;
__constant__ params_common d_common;
params_unique unique[ALL_POINTS]; // cannot determine size dynamically so choose more than usually needed
__constant__ params_unique d_unique[ALL_POINTS];
//======================================================================================================================================================
// KERNEL CODE
//======================================================================================================================================================
#include "kernel.cu"
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// MAIN FUNCTION
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// Entry point. Usage: heartwall <inputfile.avi> <num_of_frames> <goldfile>
// Opens the AVI input, validates the frame count, then (below this span) sets up
// buffers and runs the per-frame tracking kernel.
int main(int argc, char *argv []){
//======================================================================================================================================================
// VARIABLES
//======================================================================================================================================================
// CUDA kernel execution parameters
dim3 threads;
dim3 blocks;
// counter
int i;
int frames_processed;
// frames
char* video_file_name;
avi_t* frames;
fp* frame;
//======================================================================================================================================================
// FRAME
//======================================================================================================================================================
// require exactly three arguments: input file, frame count, gold file
if(argc!=4){
printf("ERROR: usage: heartwall <inputfile> <num of frames> <goldfile>\n");
exit(1);
}
const char* goldfile = argv[3];
// open movie file
video_file_name = argv[1];
frames = (avi_t*)AVI_open_input_file(video_file_name, 1); // added casting
if (frames == NULL) {
AVI_print_error((char *) "Error with AVI_open_input_file");
return -1;
}
// common: video geometry drives the frame buffer size (fp elements, rows*cols)
common.no_frames = AVI_video_frames(frames);
common.frame_rows = AVI_video_height(frames);
common.frame_cols = AVI_video_width(frames);
common.frame_elem = common.frame_rows * common.frame_cols;
common.frame_mem = sizeof(fp) * common.frame_elem;
// pointers: device-side frame buffer, reused for every frame
// NOTE(review): cudaMalloc return code is not checked
cudaMalloc((void **)&common_change.d_frame, common.frame_mem);
//======================================================================================================================================================
// CHECK INPUT ARGUMENTS
//======================================================================================================================================================
// NOTE(review): atoi reports no conversion errors; non-numeric input parses as 0
frames_processed = atoi(argv[2]);
if(frames_processed<0 || frames_processed>common.no_frames){
printf("ERROR: %d is an incorrect number of frames specified, select in the range of 0-%d\n", frames_processed, common.no_frames);
return 0;
}
//======================================================================================================================================================
// HARDCODED INPUTS FROM MATLAB
//======================================================================================================================================================
//====================================================================================================
// CONSTANTS
//====================================================================================================
common.sSize = 40;
common.tSize = 25;
common.maxMove = 10;
common.alpha = 0.87;
//====================================================================================================
// ENDO POINTS
//====================================================================================================
common.endoPoints = ENDO_POINTS;
common.endo_mem = sizeof(int) * common.endoPoints;
common.endoRow = (int *)malloc(common.endo_mem);
common.endoRow[ 0] = 369;
common.endoRow[ 1] = 400;
common.endoRow[ 2] = 429;
common.endoRow[ 3] = 452;
common.endoRow[ 4] = 476;
common.endoRow[ 5] = 486;
common.endoRow[ 6] = 479;
common.endoRow[ 7] = 458;
common.endoRow[ 8] = 433;
common.endoRow[ 9] = 404;
common.endoRow[10] = 374;
common.endoRow[11] = 346;
common.endoRow[12] = 318;
common.endoRow[13] = 294;
common.endoRow[14] = 277;
common.endoRow[15] = 269;
common.endoRow[16] = 275;
common.endoRow[17] = 287;
common.endoRow[18] = 311;
common.endoRow[19] = 339;
cudaMalloc((void **)&common.d_endoRow, common.endo_mem);
cudaMemcpy(common.d_endoRow, common.endoRow, common.endo_mem, cudaMemcpyHostToDevice);
common.endoCol = (int *)malloc(common.endo_mem);
common.endoCol[ 0] = 408;
common.endoCol[ 1] = 406;
common.endoCol[ 2] = 397;
common.endoCol[ 3] = 383;
common.endoCol[ 4] = 354;
common.endoCol[ 5] = 322;
common.endoCol[ 6] = 294;
common.endoCol[ 7] = 270;
common.endoCol[ 8] = 250;
common.endoCol[ 9] = 237;
common.endoCol[10] = 235;
common.endoCol[11] = 241;
common.endoCol[12] = 254;
common.endoCol[13] = 273;
common.endoCol[14] = 300;
common.endoCol[15] = 328;
common.endoCol[16] = 356;
common.endoCol[17] = 383;
common.endoCol[18] = 401;
common.endoCol[19] = 411;
cudaMalloc((void **)&common.d_endoCol, common.endo_mem);
cudaMemcpy(common.d_endoCol, common.endoCol, common.endo_mem, cudaMemcpyHostToDevice);
common.tEndoRowLoc = (int *)calloc(common.endo_mem * common.no_frames, 1);
cudaMalloc((void **)&common.d_tEndoRowLoc, common.endo_mem * common.no_frames);
cudaMemset((void *)common.d_tEndoRowLoc, 0, common.endo_mem * common.no_frames);
common.tEndoColLoc = (int *)calloc(common.endo_mem * common.no_frames, 1);
cudaMalloc((void **)&common.d_tEndoColLoc, common.endo_mem * common.no_frames);
cudaMemset((void *)common.d_tEndoColLoc, 0, common.endo_mem * common.no_frames);
//====================================================================================================
// EPI POINTS
//====================================================================================================
common.epiPoints = EPI_POINTS;
common.epi_mem = sizeof(int) * common.epiPoints;
common.epiRow = (int *)malloc(common.epi_mem);
common.epiRow[ 0] = 390;
common.epiRow[ 1] = 419;
common.epiRow[ 2] = 448;
common.epiRow[ 3] = 474;
common.epiRow[ 4] = 501;
common.epiRow[ 5] = 519;
common.epiRow[ 6] = 535;
common.epiRow[ 7] = 542;
common.epiRow[ 8] = 543;
common.epiRow[ 9] = 538;
common.epiRow[10] = 528;
common.epiRow[11] = 511;
common.epiRow[12] = 491;
common.epiRow[13] = 466;
common.epiRow[14] = 438;
common.epiRow[15] = 406;
common.epiRow[16] = 376;
common.epiRow[17] = 347;
common.epiRow[18] = 318;
common.epiRow[19] = 291;
common.epiRow[20] = 275;
common.epiRow[21] = 259;
common.epiRow[22] = 256;
common.epiRow[23] = 252;
common.epiRow[24] = 252;
common.epiRow[25] = 257;
common.epiRow[26] = 266;
common.epiRow[27] = 283;
common.epiRow[28] = 305;
common.epiRow[29] = 331;
common.epiRow[30] = 360;
cudaMalloc((void **)&common.d_epiRow, common.epi_mem);
cudaMemcpy(common.d_epiRow, common.epiRow, common.epi_mem, cudaMemcpyHostToDevice);
common.epiCol = (int *)malloc(common.epi_mem);
common.epiCol[ 0] = 457;
common.epiCol[ 1] = 454;
common.epiCol[ 2] = 446;
common.epiCol[ 3] = 431;
common.epiCol[ 4] = 411;
common.epiCol[ 5] = 388;
common.epiCol[ 6] = 361;
common.epiCol[ 7] = 331;
common.epiCol[ 8] = 301;
common.epiCol[ 9] = 273;
common.epiCol[10] = 243;
common.epiCol[11] = 218;
common.epiCol[12] = 196;
common.epiCol[13] = 178;
common.epiCol[14] = 166;
common.epiCol[15] = 157;
common.epiCol[16] = 155;
common.epiCol[17] = 165;
common.epiCol[18] = 177;
common.epiCol[19] = 197;
common.epiCol[20] = 218;
common.epiCol[21] = 248;
common.epiCol[22] = 276;
common.epiCol[23] = 304;
common.epiCol[24] = 333;
common.epiCol[25] = 361;
common.epiCol[26] = 391;
common.epiCol[27] = 415;
common.epiCol[28] = 434;
common.epiCol[29] = 448;
common.epiCol[30] = 455;
cudaMalloc((void **)&common.d_epiCol, common.epi_mem);
cudaMemcpy(common.d_epiCol, common.epiCol, common.epi_mem, cudaMemcpyHostToDevice);
common.tEpiRowLoc = (int *)calloc(common.epi_mem * common.no_frames, 1);
cudaMalloc((void **)&common.d_tEpiRowLoc, common.epi_mem * common.no_frames);
cudaMemset((void *)common.d_tEpiRowLoc, 0, common.epi_mem * common.no_frames);
common.tEpiColLoc = (int *)calloc(common.epi_mem * common.no_frames, 1);
cudaMalloc((void **)&common.d_tEpiColLoc, common.epi_mem * common.no_frames);
cudaMemset((void *)common.d_tEpiColLoc, 0, common.epi_mem * common.no_frames);
//====================================================================================================
// ALL POINTS
//====================================================================================================
// total number of tracked points (endo + epi)
common.allPoints = ALL_POINTS;
//======================================================================================================================================================
// TEMPLATE SIZES
//======================================================================================================================================================
// common: square template of side (2*tSize + 1), i.e. tSize pixels on each side of the center
common.in_rows = common.tSize + 1 + common.tSize;
common.in_cols = common.in_rows;
common.in_elem = common.in_rows * common.in_cols;
common.in_mem = sizeof(fp) * common.in_elem;
//======================================================================================================================================================
// CREATE ARRAY OF TEMPLATES FOR ALL POINTS
//======================================================================================================================================================
// common: one contiguous device array of templates per point group (endo and epi)
// NOTE(review): cudaMalloc return codes are not checked
cudaMalloc((void **)&common.d_endoT, common.in_mem * common.endoPoints);
cudaMalloc((void **)&common.d_epiT, common.in_mem * common.epiPoints);
// SPECIFIC TO ENDO OR EPI TO BE SET HERE
//======================================================================================================================================================
for(i=0; i<common.endoPoints; i++){
unique[i].point_no = i;
unique[i].d_Row = common.d_endoRow;
unique[i].d_Col = common.d_endoCol;
unique[i].d_tRowLoc = common.d_tEndoRowLoc;
unique[i].d_tColLoc = common.d_tEndoColLoc;
unique[i].d_T = common.d_endoT;
}
for(i=common.endoPoints; i<common.allPoints; i++){
unique[i].point_no = i-common.endoPoints;
unique[i].d_Row = common.d_epiRow;
unique[i].d_Col = common.d_epiCol;
unique[i].d_tRowLoc = common.d_tEpiRowLoc;
unique[i].d_tColLoc = common.d_tEpiColLoc;
unique[i].d_T = common.d_epiT;
}
//======================================================================================================================================================
// RIGHT TEMPLATE FROM TEMPLATE ARRAY
//======================================================================================================================================================
// pointers
for(i=0; i<common.allPoints; i++){
unique[i].in_pointer = unique[i].point_no * common.in_elem;
}
//======================================================================================================================================================
// AREA AROUND POINT FROM FRAME
//======================================================================================================================================================
// common: square area of side (2*sSize + 1) extracted from the frame around each point
common.in2_rows = 2 * common.sSize + 1;
common.in2_cols = 2 * common.sSize + 1;
common.in2_elem = common.in2_rows * common.in2_cols;
common.in2_mem = sizeof(float) * common.in2_elem;
// pointers: one device buffer per tracked point
// NOTE(review): cudaMalloc return codes are not checked in these allocation loops
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2, common.in2_mem);
}
//======================================================================================================================================================
// CONVOLUTION
//======================================================================================================================================================
// common: full 2-D convolution of template (in) with area (in2): size = a + b - 1 per dimension
common.conv_rows = common.in_rows + common.in2_rows - 1; // number of rows in I
common.conv_cols = common.in_cols + common.in2_cols - 1; // number of columns in I
common.conv_elem = common.conv_rows * common.conv_cols; // number of elements
common.conv_mem = sizeof(float) * common.conv_elem;
// no offset into the convolution output for this step
common.ioffset = 0;
common.joffset = 0;
// pointers: per-point device buffer for the convolution result
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_conv, common.conv_mem);
}
//======================================================================================================================================================
// CUMULATIVE SUM
//======================================================================================================================================================
//====================================================================================================
// PADDING OF ARRAY, VERTICAL CUMULATIVE SUM
//====================================================================================================
// common
common.in2_pad_add_rows = common.in_rows;
common.in2_pad_add_cols = common.in_cols;
common.in2_pad_cumv_rows = common.in2_rows + 2*common.in2_pad_add_rows;
common.in2_pad_cumv_cols = common.in2_cols + 2*common.in2_pad_add_cols;
common.in2_pad_cumv_elem = common.in2_pad_cumv_rows * common.in2_pad_cumv_cols;
common.in2_pad_cumv_mem = sizeof(float) * common.in2_pad_cumv_elem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_pad_cumv, common.in2_pad_cumv_mem);
}
//====================================================================================================
// SELECTION
//====================================================================================================
// common
common.in2_pad_cumv_sel_rowlow = 1 + common.in_rows; // (1 to n+1)
common.in2_pad_cumv_sel_rowhig = common.in2_pad_cumv_rows - 1;
common.in2_pad_cumv_sel_collow = 1;
common.in2_pad_cumv_sel_colhig = common.in2_pad_cumv_cols;
common.in2_pad_cumv_sel_rows = common.in2_pad_cumv_sel_rowhig - common.in2_pad_cumv_sel_rowlow + 1;
common.in2_pad_cumv_sel_cols = common.in2_pad_cumv_sel_colhig - common.in2_pad_cumv_sel_collow + 1;
common.in2_pad_cumv_sel_elem = common.in2_pad_cumv_sel_rows * common.in2_pad_cumv_sel_cols;
common.in2_pad_cumv_sel_mem = sizeof(float) * common.in2_pad_cumv_sel_elem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_pad_cumv_sel, common.in2_pad_cumv_sel_mem);
}
//====================================================================================================
// SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM
//====================================================================================================
// common
common.in2_pad_cumv_sel2_rowlow = 1;
common.in2_pad_cumv_sel2_rowhig = common.in2_pad_cumv_rows - common.in_rows - 1;
common.in2_pad_cumv_sel2_collow = 1;
common.in2_pad_cumv_sel2_colhig = common.in2_pad_cumv_cols;
common.in2_sub_cumh_rows = common.in2_pad_cumv_sel2_rowhig - common.in2_pad_cumv_sel2_rowlow + 1;
common.in2_sub_cumh_cols = common.in2_pad_cumv_sel2_colhig - common.in2_pad_cumv_sel2_collow + 1;
common.in2_sub_cumh_elem = common.in2_sub_cumh_rows * common.in2_sub_cumh_cols;
common.in2_sub_cumh_mem = sizeof(float) * common.in2_sub_cumh_elem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_sub_cumh, common.in2_sub_cumh_mem);
}
//====================================================================================================
// SELECTION
//====================================================================================================
// common
common.in2_sub_cumh_sel_rowlow = 1;
common.in2_sub_cumh_sel_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel_collow = 1 + common.in_cols;
common.in2_sub_cumh_sel_colhig = common.in2_sub_cumh_cols - 1;
common.in2_sub_cumh_sel_rows = common.in2_sub_cumh_sel_rowhig - common.in2_sub_cumh_sel_rowlow + 1;
common.in2_sub_cumh_sel_cols = common.in2_sub_cumh_sel_colhig - common.in2_sub_cumh_sel_collow + 1;
common.in2_sub_cumh_sel_elem = common.in2_sub_cumh_sel_rows * common.in2_sub_cumh_sel_cols;
common.in2_sub_cumh_sel_mem = sizeof(float) * common.in2_sub_cumh_sel_elem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_sub_cumh_sel, common.in2_sub_cumh_sel_mem);
}
//====================================================================================================
// SELECTION 2, SUBTRACTION
//====================================================================================================
// common
common.in2_sub_cumh_sel2_rowlow = 1;
common.in2_sub_cumh_sel2_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel2_collow = 1;
common.in2_sub_cumh_sel2_colhig = common.in2_sub_cumh_cols - common.in_cols - 1;
common.in2_sub2_rows = common.in2_sub_cumh_sel2_rowhig - common.in2_sub_cumh_sel2_rowlow + 1;
common.in2_sub2_cols = common.in2_sub_cumh_sel2_colhig - common.in2_sub_cumh_sel2_collow + 1;
common.in2_sub2_elem = common.in2_sub2_rows * common.in2_sub2_cols;
common.in2_sub2_mem = sizeof(float) * common.in2_sub2_elem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_sub2, common.in2_sub2_mem);
}
//======================================================================================================================================================
// CUMULATIVE SUM 2
//======================================================================================================================================================
//====================================================================================================
// MULTIPLICATION
//====================================================================================================
// common
common.in2_sqr_rows = common.in2_rows;
common.in2_sqr_cols = common.in2_cols;
common.in2_sqr_elem = common.in2_elem;
common.in2_sqr_mem = common.in2_mem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_sqr, common.in2_sqr_mem);
}
//====================================================================================================
// SELECTION 2, SUBTRACTION
//====================================================================================================
// common
common.in2_sqr_sub2_rows = common.in2_sub2_rows;
common.in2_sqr_sub2_cols = common.in2_sub2_cols;
common.in2_sqr_sub2_elem = common.in2_sub2_elem;
common.in2_sqr_sub2_mem = common.in2_sub2_mem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in2_sqr_sub2, common.in2_sqr_sub2_mem);
}
//======================================================================================================================================================
// FINAL
//======================================================================================================================================================
// common
common.in_sqr_rows = common.in_rows;
common.in_sqr_cols = common.in_cols;
common.in_sqr_elem = common.in_elem;
common.in_sqr_mem = common.in_mem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_in_sqr, common.in_sqr_mem);
}
//======================================================================================================================================================
// TEMPLATE MASK CREATE
//======================================================================================================================================================
// common
common.tMask_rows = common.in_rows + (common.sSize+1+common.sSize) - 1;
common.tMask_cols = common.tMask_rows;
common.tMask_elem = common.tMask_rows * common.tMask_cols;
common.tMask_mem = sizeof(float) * common.tMask_elem;
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_tMask, common.tMask_mem);
}
//======================================================================================================================================================
// POINT MASK INITIALIZE
//======================================================================================================================================================
// common
common.mask_rows = common.maxMove;
common.mask_cols = common.mask_rows;
common.mask_elem = common.mask_rows * common.mask_cols;
common.mask_mem = sizeof(float) * common.mask_elem;
//======================================================================================================================================================
// MASK CONVOLUTION
//======================================================================================================================================================
// common
common.mask_conv_rows = common.tMask_rows; // number of rows in I
common.mask_conv_cols = common.tMask_cols; // number of columns in I
common.mask_conv_elem = common.mask_conv_rows * common.mask_conv_cols; // number of elements
common.mask_conv_mem = sizeof(float) * common.mask_conv_elem;
common.mask_conv_ioffset = (common.mask_rows-1)/2;
if((common.mask_rows-1) % 2 > 0.5){
common.mask_conv_ioffset = common.mask_conv_ioffset + 1;
}
common.mask_conv_joffset = (common.mask_cols-1)/2;
if((common.mask_cols-1) % 2 > 0.5){
common.mask_conv_joffset = common.mask_conv_joffset + 1;
}
// pointers
for(i=0; i<common.allPoints; i++){
cudaMalloc((void **)&unique[i].d_mask_conv, common.mask_conv_mem);
}
//======================================================================================================================================================
// KERNEL
//======================================================================================================================================================
//====================================================================================================
// THREAD BLOCK
//====================================================================================================
// All kernels operations within kernel use same max size of threads. Size of block size is set to the size appropriate for max size operation (on padded matrix). Other use subsets of that.
threads.x = NUMBER_THREADS; // define the number of threads in the block
threads.y = 1;
blocks.x = common.allPoints; // define the number of blocks in the grid
blocks.y = 1;
//====================================================================================================
// COPY ARGUMENTS
//====================================================================================================
cudaMemcpyToSymbol(d_common, &common, sizeof(params_common));
cudaMemcpyToSymbol(d_unique, &unique, sizeof(params_unique)*ALL_POINTS);
//====================================================================================================
// PRINT FRAME PROGRESS START
//====================================================================================================
printf("frame progress: ");
fflush(NULL);
//====================================================================================================
// LAUNCH
//====================================================================================================
for(common_change.frame_no=0; common_change.frame_no<frames_processed; common_change.frame_no++){
// Extract a cropped version of the first frame from the video file
frame = get_frame( frames, // pointer to video file
common_change.frame_no, // number of frame that needs to be returned
0, // cropped?
0, // scaled?
1); // converted
// copy frame to GPU memory
cudaMemcpy(common_change.d_frame, frame, common.frame_mem, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_common_change, &common_change, sizeof(params_common_change));
// launch GPU kernel
kernel<<<blocks, threads>>>();
// free frame after each loop iteration, since AVI library allocates memory for every frame fetched
free(frame);
// print frame progress
printf("%d ", common_change.frame_no);
fflush(NULL);
}
//====================================================================================================
// PRINT FRAME PROGRESS END
//====================================================================================================
printf("\n");
fflush(NULL);
//====================================================================================================
// OUTPUT
//====================================================================================================
cudaMemcpy(common.tEndoRowLoc, common.d_tEndoRowLoc, common.endo_mem * common.no_frames, cudaMemcpyDeviceToHost);
cudaMemcpy(common.tEndoColLoc, common.d_tEndoColLoc, common.endo_mem * common.no_frames, cudaMemcpyDeviceToHost);
cudaMemcpy(common.tEpiRowLoc, common.d_tEpiRowLoc, common.epi_mem * common.no_frames, cudaMemcpyDeviceToHost);
cudaMemcpy(common.tEpiColLoc, common.d_tEpiColLoc, common.epi_mem * common.no_frames, cudaMemcpyDeviceToHost);
FILE *ofile = fopen("result.txt", "w");
for (int x = 0; x < common.endo_mem * common.no_frames / sizeof(int); x++) {
printf("common.tEndoRowLoc[%d] = %d\n", x, common.tEndoRowLoc[x]);
printf("common.tEndoColLoc[%d] = %d\n", x, common.tEndoColLoc[x]);
fprintf(ofile, "common.tEndoRowLoc[%d] = %d\n", x, common.tEndoRowLoc[x]);
fprintf(ofile, "common.tEndoColLoc[%d] = %d\n", x, common.tEndoColLoc[x]);
}
for (int x = 0; x < common.epi_mem * common.no_frames / sizeof(int); x++) {
printf("common.tEpiRowLoc[%d] = %d\n", x, common.tEpiRowLoc[x]);
printf("common.tEpiColLoc[%d] = %d\n", x, common.tEpiColLoc[x]);
fprintf(ofile, "common.tEpiRowLoc[%d] = %d\n", x, common.tEpiRowLoc[x]);
fprintf(ofile, "common.tEpiColLoc[%d] = %d\n", x, common.tEpiColLoc[x]);
}
fclose(ofile);
if(goldfile){
FILE *gold = fopen(goldfile, "r");
FILE *result = fopen("result.txt", "r");
int result_error=0;
while(!feof(gold)&&!feof(result)){
if (fgetc(gold)!=fgetc(result)) {
result_error = 1;
break;
}
}
if((feof(gold)^feof(result)) | result_error) {
printf("\nFAILED\n");
} else {
printf("\nPASSED\n");
}
fclose(gold);
fclose(result);
}
//======================================================================================================================================================
// DEALLOCATION
//======================================================================================================================================================
//====================================================================================================
// COMMON
//====================================================================================================
// frame
cudaFree(common_change.d_frame);
// endo points
free(common.endoRow);
free(common.endoCol);
free(common.tEndoRowLoc);
free(common.tEndoColLoc);
cudaFree(common.d_endoRow);
cudaFree(common.d_endoCol);
cudaFree(common.d_tEndoRowLoc);
cudaFree(common.d_tEndoColLoc);
cudaFree(common.d_endoT);
// epi points
free(common.epiRow);
free(common.epiCol);
free(common.tEpiRowLoc);
free(common.tEpiColLoc);
cudaFree(common.d_epiRow);
cudaFree(common.d_epiCol);
cudaFree(common.d_tEpiRowLoc);
cudaFree(common.d_tEpiColLoc);
cudaFree(common.d_epiT);
//====================================================================================================
// POINTERS
//====================================================================================================
for(i=0; i<common.allPoints; i++){
cudaFree(unique[i].d_in2);
cudaFree(unique[i].d_conv);
cudaFree(unique[i].d_in2_pad_cumv);
cudaFree(unique[i].d_in2_pad_cumv_sel);
cudaFree(unique[i].d_in2_sub_cumh);
cudaFree(unique[i].d_in2_sub_cumh_sel);
cudaFree(unique[i].d_in2_sub2);
cudaFree(unique[i].d_in2_sqr);
cudaFree(unique[i].d_in2_sqr_sub2);
cudaFree(unique[i].d_in_sqr);
cudaFree(unique[i].d_tMask);
cudaFree(unique[i].d_mask_conv);
}
}
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// MAIN FUNCTION
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
|
6b1715ff682aacc3f1eb0133cc44fa636c0aa224.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// filename: vadd.cu
// a simple CUDA kernel to add two vectors
extern "C" // ensure function name to be exactly "vadd"
{
// Element-wise vector addition: c[i] = a[i] + b[i], one thread per element.
// NOTE(review): there is no bounds guard, so the launch configuration must
// cover exactly the array length (gridDim.x * blockDim.x == n) — confirm at
// the call site before reusing this kernel with padded grids.
__global__ void vadd(const float *a, const float *b, float *c)
{
// Flat global index of this thread.
int i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] = a[i] + b[i];
}
}
| 6b1715ff682aacc3f1eb0133cc44fa636c0aa224.cu | // filename: vadd.cu
// a simple CUDA kernel to add two vectors
extern "C" // ensure function name to be exactly "vadd"
{
// Element-wise vector addition: c[i] = a[i] + b[i], one thread per element.
// NOTE(review): there is no bounds guard, so the launch configuration must
// cover exactly the array length (gridDim.x * blockDim.x == n) — confirm at
// the call site before reusing this kernel with padded grids.
__global__ void vadd(const float *a, const float *b, float *c)
{
// Flat global index of this thread.
int i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] = a[i] + b[i];
}
}
|
443e6454a60dd1199e14d0bafe1fa34765638e44.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <ATen/ATen.h>
#include <ATen/hip/Exceptions.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include "fbgemm_gpu/fbgemm_cuda_utils.cuh"
using namespace at;
using namespace fbgemm_gpu;
// Registered CUDA managed memory (UVM) deleter
static void CUDAManagedDeleter(void* ptr) {
// Releases memory obtained from hipMallocManaged in the allocator below.
AT_CUDA_CHECK(hipFree(ptr));
}
// Wrapper for CUDA managed memory (UVM) allocator
// at::Allocator backed by unified (managed) memory: pages live on the host by
// preference but are directly addressable from the current GPU.
struct CUDAManagedAllocator final : public at::Allocator {
at::DataPtr allocate(size_t size) const override {
void* ptr;
AT_CUDA_CHECK(hipMallocManaged(&ptr, size));
// User hints with "preferred location": Here the kernel will page fault
// and generate direct mapping to data on the CPU.
AT_CUDA_CHECK(hipMemAdvise(
ptr, size, hipMemAdviseSetPreferredLocation, hipCpuDeviceId));
// User hints with "accessed by": GPU will establish direct mapping of data
// in CPU memory, no page faults will be generated
AT_CUDA_CHECK(hipMemAdvise(
ptr, size, hipMemAdviseSetAccessedBy, at::hip::current_device()));
// DataPtr fields: data pointer, context, deleter, and the device the
// allocation is reported as belonging to (the current CUDA device).
return {ptr,
ptr,
&CUDAManagedDeleter,
{at::DeviceType::CUDA, at::hip::current_device()}};
}
at::DeleterFnPtr raw_deleter() const override {
return &CUDAManagedDeleter;
}
};
// Registered CUDA host-mapped memory deleter
static void CUDAHostMappedDeleter(void* ptr) {
// Releases pinned host memory obtained from hipHostMalloc; note the DataPtr
// context holds the HOST pointer, which is what must be freed here.
AT_CUDA_CHECK(hipHostFree(ptr));
}
// Wrapper for CUDA host-mapped memory allocator
// at::Allocator backed by pinned, write-combined host memory mapped into the
// GPU address space. The DataPtr's data field is the DEVICE alias of the
// allocation while the context field keeps the host pointer for the deleter.
struct CUDAHostMappedAllocator final : public at::Allocator {
at::DataPtr allocate(size_t size) const override {
void* ptr;
AT_CUDA_CHECK(hipHostMalloc(
&ptr, size, hipHostMallocWriteCombined | hipHostMallocMapped));
void* dev_ptr;
// Translate the host pointer to its device-visible alias.
AT_CUDA_CHECK(hipHostGetDevicePointer(&dev_ptr, ptr, 0));
return {dev_ptr,
ptr,
&CUDAHostMappedDeleter,
{at::DeviceType::CUDA, at::hip::current_device()}};
}
at::DeleterFnPtr raw_deleter() const override {
return &CUDAHostMappedDeleter;
}
};
// Process-wide allocator singletons; their addresses also act as identity
// tags checked by is_uvm_tensor() below.
static CUDAManagedAllocator g_managed_allocator;
static CUDAHostMappedAllocator g_host_mapped_allocator;
// Get the default strides from the input Tensor dimensions
// Compute contiguous (row-major) strides for the given sizes: the innermost
// dimension has stride 1 and each outer stride is the product of all inner
// dimension sizes. An empty `sizes` yields an empty stride vector.
std::vector<int64_t> defaultStrides(IntArrayRef sizes) {
const size_t ndim = sizes.size();
std::vector<int64_t> strides(ndim);
int64_t running = 1;
for (size_t d = ndim; d-- > 0;) {
strides[d] = running;
running *= sizes[d];
}
return strides;
}
// Allocate the ATen Tensor with unified managed memory (UVM)
// Builds a tensor of the requested sizes whose storage is allocated with
// unified managed memory (UVM), on the same device and with the same options
// (dtype etc.) as `self`. `self` is used only as a template; its data is not
// copied.
Tensor new_managed_tensor(Tensor self, std::vector<std::int64_t> sizes) {
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(self.get_device());
auto strides = defaultStrides(sizes);
// Storage sized in bytes for a contiguous layout of `sizes` elements.
auto storage = Storage(
Storage::use_byte_size_t(),
at::detail::computeStorageNbytes(sizes, strides, self.dtype().itemsize()),
&g_managed_allocator,
/*resizable=*/false);
// Wrap the custom storage in a tensor with the template's options.
auto tensor = at::empty({0}, self.options()).set_(storage, 0, sizes, strides);
return tensor;
}
// Allocate the ATen Tensor with host-mapped memory
// Builds a tensor of the requested sizes whose storage is pinned host memory
// mapped into the GPU address space, using `self` only as an options/device
// template (no data is copied).
Tensor new_host_mapped_tensor(Tensor self, std::vector<std::int64_t> sizes) {
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(self.get_device());
auto strides = defaultStrides(sizes);
// Storage sized in bytes for a contiguous layout of `sizes` elements.
auto storage = Storage(
Storage::use_byte_size_t(),
at::detail::computeStorageNbytes(sizes, strides, self.dtype().itemsize()),
&g_host_mapped_allocator,
/*resizable=*/false);
auto tensor = at::empty({0}, self.options()).set_(storage, 0, sizes, strides);
return tensor;
}
// Check if a tensor is allocated with UVM or host-mapped memory
// True iff `t`'s storage came from one of this file's allocators (UVM or
// host-mapped), identified by comparing allocator addresses.
bool is_uvm_tensor(Tensor t) {
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(t.get_device());
return t.storage().allocator() == &g_managed_allocator ||
t.storage().allocator() == &g_host_mapped_allocator;
}
// Convert a UVM tensor to a CPU tensor
// Reinterprets a UVM tensor as a CPU tensor over the same memory. The result
// aliases `t`'s storage (no copy), so `t` must outlive the returned tensor —
// from_blob does not take ownership.
Tensor uvm_to_cpu(Tensor t) {
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(t.get_device());
TORCH_CHECK(is_uvm_tensor(t));
// Does not copy the storage: wrap the existing pointer as a CPU tensor.
return at::from_blob(
t.data_ptr(), t.sizes(), t.strides(), t.options().device(kCPU));
}
| 443e6454a60dd1199e14d0bafe1fa34765638e44.cu | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <ATen/ATen.h>
#include <ATen/cuda/Exceptions.h>
#include <c10/cuda/CUDAGuard.h>
#include "fbgemm_gpu/fbgemm_cuda_utils.cuh"
using namespace at;
using namespace fbgemm_gpu;
// Registered CUDA managed memory (UVM) deleter
static void CUDAManagedDeleter(void* ptr) {
// Releases memory obtained from cudaMallocManaged in the allocator below.
AT_CUDA_CHECK(cudaFree(ptr));
}
// Wrapper for CUDA managed memory (UVM) allocator
// at::Allocator backed by unified (managed) memory: pages live on the host by
// preference but are directly addressable from the current GPU.
struct CUDAManagedAllocator final : public at::Allocator {
at::DataPtr allocate(size_t size) const override {
void* ptr;
AT_CUDA_CHECK(cudaMallocManaged(&ptr, size));
// User hints with "preferred location": Here the kernel will page fault
// and generate direct mapping to data on the CPU.
AT_CUDA_CHECK(cudaMemAdvise(
ptr, size, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId));
// User hints with "accessed by": GPU will establish direct mapping of data
// in CPU memory, no page faults will be generated
AT_CUDA_CHECK(cudaMemAdvise(
ptr, size, cudaMemAdviseSetAccessedBy, at::cuda::current_device()));
// DataPtr fields: data pointer, context, deleter, and the device the
// allocation is reported as belonging to (the current CUDA device).
return {ptr,
ptr,
&CUDAManagedDeleter,
{at::DeviceType::CUDA, at::cuda::current_device()}};
}
at::DeleterFnPtr raw_deleter() const override {
return &CUDAManagedDeleter;
}
};
// Registered CUDA host-mapped memory deleter
static void CUDAHostMappedDeleter(void* ptr) {
// Releases pinned host memory obtained from cudaHostAlloc; note the DataPtr
// context holds the HOST pointer, which is what must be freed here.
AT_CUDA_CHECK(cudaFreeHost(ptr));
}
// Wrapper for CUDA host-mapped memory allocator
// at::Allocator backed by pinned, write-combined host memory mapped into the
// GPU address space. The DataPtr's data field is the DEVICE alias of the
// allocation while the context field keeps the host pointer for the deleter.
struct CUDAHostMappedAllocator final : public at::Allocator {
at::DataPtr allocate(size_t size) const override {
void* ptr;
AT_CUDA_CHECK(cudaHostAlloc(
&ptr, size, cudaHostAllocWriteCombined | cudaHostAllocMapped));
void* dev_ptr;
// Translate the host pointer to its device-visible alias.
AT_CUDA_CHECK(cudaHostGetDevicePointer(&dev_ptr, ptr, 0));
return {dev_ptr,
ptr,
&CUDAHostMappedDeleter,
{at::DeviceType::CUDA, at::cuda::current_device()}};
}
at::DeleterFnPtr raw_deleter() const override {
return &CUDAHostMappedDeleter;
}
};
// Process-wide allocator singletons; their addresses also act as identity
// tags checked by is_uvm_tensor() below.
static CUDAManagedAllocator g_managed_allocator;
static CUDAHostMappedAllocator g_host_mapped_allocator;
// Get the default strides from the input Tensor dimensions
// Compute contiguous (row-major) strides for the given sizes: the innermost
// dimension has stride 1 and each outer stride is the product of all inner
// dimension sizes. An empty `sizes` yields an empty stride vector.
std::vector<int64_t> defaultStrides(IntArrayRef sizes) {
const size_t ndim = sizes.size();
std::vector<int64_t> strides(ndim);
int64_t running = 1;
for (size_t d = ndim; d-- > 0;) {
strides[d] = running;
running *= sizes[d];
}
return strides;
}
// Allocate the ATen Tensor with unified managed memory (UVM)
// Builds a tensor of the requested sizes whose storage is allocated with
// unified managed memory (UVM), on the same device and with the same options
// (dtype etc.) as `self`. `self` is used only as a template; its data is not
// copied.
Tensor new_managed_tensor(Tensor self, std::vector<std::int64_t> sizes) {
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(self.get_device());
auto strides = defaultStrides(sizes);
// Storage sized in bytes for a contiguous layout of `sizes` elements.
auto storage = Storage(
Storage::use_byte_size_t(),
at::detail::computeStorageNbytes(sizes, strides, self.dtype().itemsize()),
&g_managed_allocator,
/*resizable=*/false);
// Wrap the custom storage in a tensor with the template's options.
auto tensor = at::empty({0}, self.options()).set_(storage, 0, sizes, strides);
return tensor;
}
// Allocate the ATen Tensor with host-mapped memory
// Builds a tensor of the requested sizes whose storage is pinned host memory
// mapped into the GPU address space, using `self` only as an options/device
// template (no data is copied).
Tensor new_host_mapped_tensor(Tensor self, std::vector<std::int64_t> sizes) {
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(self.get_device());
auto strides = defaultStrides(sizes);
// Storage sized in bytes for a contiguous layout of `sizes` elements.
auto storage = Storage(
Storage::use_byte_size_t(),
at::detail::computeStorageNbytes(sizes, strides, self.dtype().itemsize()),
&g_host_mapped_allocator,
/*resizable=*/false);
auto tensor = at::empty({0}, self.options()).set_(storage, 0, sizes, strides);
return tensor;
}
// Check if a tensor is allocated with UVM or host-mapped memory
// True iff `t`'s storage came from one of this file's allocators (UVM or
// host-mapped), identified by comparing allocator addresses.
bool is_uvm_tensor(Tensor t) {
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(t.get_device());
return t.storage().allocator() == &g_managed_allocator ||
t.storage().allocator() == &g_host_mapped_allocator;
}
// Convert a UVM tensor to a CPU tensor
// Reinterprets a UVM tensor as a CPU tensor over the same memory. The result
// aliases `t`'s storage (no copy), so `t` must outlive the returned tensor —
// from_blob does not take ownership.
Tensor uvm_to_cpu(Tensor t) {
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(t.get_device());
TORCH_CHECK(is_uvm_tensor(t));
// Does not copy the storage: wrap the existing pointer as a CPU tensor.
return at::from_blob(
t.data_ptr(), t.sizes(), t.strides(), t.options().device(kCPU));
}
|
0b7e839102045a6493cf532ad8556422ed7c862b.hip | // !!! This is a file automatically generated by hipify!!!
// This file defines a CUDA benchmark which spins for a set number of
// iterations. Like timer_spin, it is very simple, but unlike timer_spin it
// should perform a constant amount of processing work, rather than simply
// waiting for a set amount of time. Therefore, this benchmark's runtime should
// be subject to other workloads running on the GPU.
//
// The specific number of loop iterations to run is given as an integer value
// in the "additional_info" configuration object. If this value isn't set, then
// the benchmark will execute an arbitrary constant number of operations.
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "benchmark_gpu_utilities.h"
#include "library_interface.h"
// If no number is provided, execute this number of operations.
#define DEFAULT_LOOP_ITERATIONS (1 * 1000 * 1000)
// Holds the local state for one instance of this benchmark.
typedef struct {
// Per-instance state. Initialize() zeroes the whole struct first, which is
// what lets Cleanup() run safely at any point during partial construction.
// The CUDA stream with which all operations will be associated.
hipStream_t stream;
// This will be set to 0 if the CUDA stream hasn't been created yet. This is
// useful because it allows us to unconditionally call Cleanup on error
// without needing to worry about calling hipStreamDestroy twice.
int stream_created;
// Holds the device copy of the overall start and end time of the kernel.
uint64_t *device_kernel_times;
// Holds the device copy of the start and end times of each block.
uint64_t *device_block_times;
// Holds the device copy of the SMID each block was assigned to.
uint32_t *device_block_smids;
// The number of iterations the kernel's loop should spin for.
uint64_t loop_iterations;
// Holds the grid dimension to use, set during initialization.
int block_count;
int thread_count;
// Holds host-side times that are shared with the calling process.
KernelTimes spin_kernel_times;
} BenchmarkState;
// Implements the cleanup function required by the library interface, but is
// also called internally (only during Initialize()) to clean up after errors.
// Releases every resource held by the given BenchmarkState and frees the
// state itself. Safe on a partially-initialized (zeroed) state: every free
// is guarded by a non-NULL / flag check. `data` must not be used afterwards.
static void Cleanup(void *data) {
BenchmarkState *state = (BenchmarkState *) data;
KernelTimes *host_times = &state->spin_kernel_times;
// Free device memory.
if (state->device_kernel_times) hipFree(state->device_kernel_times);
if (state->device_block_times) hipFree(state->device_block_times);
if (state->device_block_smids) hipFree(state->device_block_smids);
// Free host memory.
if (host_times->kernel_times) hipHostFree(host_times->kernel_times);
if (host_times->block_times) hipHostFree(host_times->block_times);
if (host_times->block_smids) hipHostFree(host_times->block_smids);
if (state->stream_created) {
// Call CheckCUDAError here to print a message, even though we won't check
// the return value.
CheckCUDAError(hipStreamDestroy(state->stream));
}
// Zero before freeing so stale pointers can't be reused accidentally.
memset(state, 0, sizeof(*state));
free(state);
}
// Allocates GPU and CPU memory. Returns 0 on error, 1 otherwise.
// Allocates the device buffers (kernel times, per-block times, per-block
// SMIDs) and their pinned host counterparts, sized from state->block_count.
// Returns 1 on success, 0 on failure; on failure the caller is expected to
// run Cleanup(), which frees whatever subset was allocated.
static int AllocateMemory(BenchmarkState *state) {
// Two timestamps (start, end) per block; one SMID per block.
uint64_t block_times_size = state->block_count * sizeof(uint64_t) * 2;
uint64_t block_smids_size = state->block_count * sizeof(uint32_t);
KernelTimes *host_times = &state->spin_kernel_times;
// Allocate device memory
if (!CheckCUDAError(hipMalloc(&(state->device_kernel_times),
2 * sizeof(uint64_t)))) {
return 0;
}
if (!CheckCUDAError(hipMalloc(&(state->device_block_times),
block_times_size))) {
return 0;
}
if (!CheckCUDAError(hipMalloc(&(state->device_block_smids),
block_smids_size))) {
return 0;
}
// Allocate host memory.
if (!CheckCUDAError(hipHostMalloc(&host_times->kernel_times, 2 *
sizeof(uint64_t)))) {
return 0;
}
if (!CheckCUDAError(hipHostMalloc(&host_times->block_times,
block_times_size))) {
return 0;
}
if (!CheckCUDAError(hipHostMalloc(&host_times->block_smids,
block_smids_size))) {
return 0;
}
return 1;
}
// If the given argument is a non-NULL, non-empty string, attempts to set the
// loop_iterations by parsing it as a number of operations. Otherwise, this
// function will set loop_iterations to a default value. Returns 0 if the
// argument has been set to an invalid number, or nonzero on success.
// Parses `arg` as a base-10 iteration count into state->loop_iterations;
// NULL/empty falls back to DEFAULT_LOOP_ITERATIONS. Returns 1 on success,
// 0 (with a message) on malformed or negative input.
// NOTE(review): values overflowing int64 are clamped by strtoll and accepted
// silently (errno is not checked) — confirm whether that matters for callers.
static int SetLoopIterations(const char *arg, BenchmarkState *state) {
int64_t parsed_value;
if (!arg || (strlen(arg) == 0)) {
state->loop_iterations = DEFAULT_LOOP_ITERATIONS;
return 1;
}
char *end = NULL;
parsed_value = strtoll(arg, &end, 10);
// Reject trailing garbage and negative counts.
if ((*end != 0) || (parsed_value < 0)) {
printf("Invalid operations count: %s\n", arg);
return 0;
}
state->loop_iterations = (uint64_t) parsed_value;
return 1;
}
// Creates one benchmark instance: selects the CUDA device, records the grid
// configuration, allocates host/device buffers, parses the loop-iteration
// count, and creates the stream. Returns the opaque state pointer, or NULL
// on failure with all partially-acquired resources released.
static void* Initialize(InitializationParameters *params) {
BenchmarkState *state = NULL;
state = (BenchmarkState *) malloc(sizeof(*state));
if (!state) return NULL;
// Zeroing makes Cleanup() safe to call at any point below.
memset(state, 0, sizeof(*state));
if (!CheckCUDAError(hipSetDevice(params->cuda_device))) {
// Fix: previously returned NULL here without freeing `state`, leaking the
// allocation made above.
Cleanup(state);
return NULL;
}
state->thread_count = params->thread_count;
state->block_count = params->block_count;
if (!AllocateMemory(state)) {
Cleanup(state);
return NULL;
}
if (!SetLoopIterations(params->additional_info, state)) {
Cleanup(state);
return NULL;
}
if (!CheckCUDAError(CreateCUDAStreamWithPriority(params->stream_priority,
&(state->stream)))) {
Cleanup(state);
return NULL;
}
state->stream_created = 1;
return state;
}
// Nothing needs to be copied in for this benchmark.
static int CopyIn(void *data) {
// No inputs to stage for this benchmark; report success.
return 1;
}
// Spins in a loop until the set number of loop iterations have completed. The
// throwaway argument can be NULL; it's only used to prevent optimizing out the
// loop body.
// Spins every thread for `iterations` loop iterations, doing a constant
// amount of work. kernel_times[0]/[1] receive the overall start/end GPU
// timestamps; block_times holds per-block {start, end} pairs; block_smids
// records the SM each block ran on. `throwaway` may be NULL — it exists only
// so the compiler cannot prove the loop result unused and delete the loop.
static __global__ void CounterSpin(uint64_t iterations, uint64_t *kernel_times,
uint64_t *block_times, uint32_t *block_smids, uint64_t *throwaway) {
uint64_t start_time = GlobalTimer64();
uint64_t i;
// Fix: `accumulator` was previously declared uninitialized and then read via
// `+=`, which is undefined behavior and made any value stored through
// `throwaway` garbage. Start it at zero.
uint64_t accumulator = 0;
// Start by recording the kernel and block start times
if (threadIdx.x == 0) {
if (blockIdx.x == 0) kernel_times[0] = start_time;
block_times[blockIdx.x * 2] = start_time;
block_smids[blockIdx.x] = GetSMID();
}
__syncthreads();
for (i = 0; i < iterations; i++) {
accumulator += blockIdx.x;
}
// By leaving the possibility that the value may be used, we prevent the loop
// from being removed.
if (throwaway) *throwaway = accumulator;
block_times[blockIdx.x * 2 + 1] = GlobalTimer64();
kernel_times[1] = GlobalTimer64();
}
// Launches one CounterSpin kernel on this instance's stream and blocks until
// it finishes. Returns 1 on success, 0 if the synchronize reports an error.
static int Execute(void *data) {
BenchmarkState *state = (BenchmarkState *) data;
// throwaway is NULL: the kernel's loop result is intentionally discarded.
hipLaunchKernelGGL(( CounterSpin), dim3(state->block_count), dim3(state->thread_count), 0, state->stream,
state->loop_iterations, state->device_kernel_times,
state->device_block_times, state->device_block_smids, NULL);
if (!CheckCUDAError(hipStreamSynchronize(state->stream))) return 0;
return 1;
}
// Copies kernel/block timing data and SMIDs back to the pinned host buffers
// and fills in `times` for the caller. The returned kernel_info points into
// this instance's state, so it is only valid until Cleanup(). Returns 1 on
// success, 0 on any copy or synchronize error.
static int CopyOut(void *data, TimingInformation *times) {
BenchmarkState *state = (BenchmarkState *) data;
KernelTimes *host_times = &state->spin_kernel_times;
uint64_t block_times_count = state->block_count * 2;
uint64_t block_smids_count = state->block_count;
memset(times, 0, sizeof(*times));
if (!CheckCUDAError(hipMemcpyAsync(host_times->kernel_times,
state->device_kernel_times, 2 * sizeof(uint64_t),
hipMemcpyDeviceToHost, state->stream))) {
return 0;
}
if (!CheckCUDAError(hipMemcpyAsync(host_times->block_times,
state->device_block_times, block_times_count * sizeof(uint64_t),
hipMemcpyDeviceToHost, state->stream))) {
return 0;
}
if (!CheckCUDAError(hipMemcpyAsync(host_times->block_smids,
state->device_block_smids, block_smids_count * sizeof(uint32_t),
hipMemcpyDeviceToHost, state->stream))) {
return 0;
}
// Wait for all three async copies before the host buffers are read.
if (!CheckCUDAError(hipStreamSynchronize(state->stream))) return 0;
host_times->kernel_name = "CounterSpin";
host_times->block_count = state->block_count;
host_times->thread_count = state->thread_count;
times->kernel_count = 1;
times->kernel_info = host_times;
return 1;
}
static const char* GetName(void) {
// Human-readable benchmark name; static string, caller must not free it.
return "Counter Spin";
}
// This should be the only function we export from the library, to provide
// pointers to all of the other functions.
// Library entry point: fills the harness's function table with this
// benchmark's callbacks. Always returns 1 (success).
int RegisterFunctions(BenchmarkLibraryFunctions *functions) {
functions->initialize = Initialize;
functions->copy_in = CopyIn;
functions->execute = Execute;
functions->copy_out = CopyOut;
functions->cleanup = Cleanup;
functions->get_name = GetName;
return 1;
}
| 0b7e839102045a6493cf532ad8556422ed7c862b.cu | // This file defines a CUDA benchmark which spins for a set number of
// iterations. Like timer_spin, it is very simple, but unlike timer_spin it
// should perform a constant amount of processing work, rather than simply
// waiting for a set amount of time. Therefore, this benchmark's runtime should
// be subject to other workloads running on the GPU.
//
// The specific number of loop iterations to run is given as an integer value
// in the "additional_info" configuration object. If this value isn't set, then
// the benchmark will execute an arbitrary constant number of operations.
#include <cuda_runtime.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "benchmark_gpu_utilities.h"
#include "library_interface.h"
// If no number is provided, execute this number of operations.
#define DEFAULT_LOOP_ITERATIONS (1 * 1000 * 1000)
// Holds the local state for one instance of this benchmark.
typedef struct {
// Per-instance state. Initialize() zeroes the whole struct first, which is
// what lets Cleanup() run safely at any point during partial construction.
// The CUDA stream with which all operations will be associated.
cudaStream_t stream;
// This will be set to 0 if the CUDA stream hasn't been created yet. This is
// useful because it allows us to unconditionally call Cleanup on error
// without needing to worry about calling cudaStreamDestroy twice.
int stream_created;
// Holds the device copy of the overall start and end time of the kernel.
uint64_t *device_kernel_times;
// Holds the device copy of the start and end times of each block.
uint64_t *device_block_times;
// Holds the device copy of the SMID each block was assigned to.
uint32_t *device_block_smids;
// The number of iterations the kernel's loop should spin for.
uint64_t loop_iterations;
// Holds the grid dimension to use, set during initialization.
int block_count;
int thread_count;
// Holds host-side times that are shared with the calling process.
KernelTimes spin_kernel_times;
} BenchmarkState;
// Implements the cleanup function required by the library interface, but is
// also called internally (only during Initialize()) to clean up after errors.
// Releases every resource held by the given BenchmarkState and frees the
// state itself. Safe on a partially-initialized (zeroed) state: every free
// is guarded by a non-NULL / flag check. `data` must not be used afterwards.
static void Cleanup(void *data) {
BenchmarkState *state = (BenchmarkState *) data;
KernelTimes *host_times = &state->spin_kernel_times;
// Free device memory.
if (state->device_kernel_times) cudaFree(state->device_kernel_times);
if (state->device_block_times) cudaFree(state->device_block_times);
if (state->device_block_smids) cudaFree(state->device_block_smids);
// Free host memory.
if (host_times->kernel_times) cudaFreeHost(host_times->kernel_times);
if (host_times->block_times) cudaFreeHost(host_times->block_times);
if (host_times->block_smids) cudaFreeHost(host_times->block_smids);
if (state->stream_created) {
// Call CheckCUDAError here to print a message, even though we won't check
// the return value.
CheckCUDAError(cudaStreamDestroy(state->stream));
}
// Zero before freeing so stale pointers can't be reused accidentally.
memset(state, 0, sizeof(*state));
free(state);
}
// Allocates GPU and CPU memory. Returns 0 on error, 1 otherwise.
// Allocates the device buffers (kernel times, per-block times, per-block
// SMIDs) and their pinned host counterparts, sized from state->block_count.
// Returns 1 on success, 0 on failure; on failure the caller is expected to
// run Cleanup(), which frees whatever subset was allocated.
static int AllocateMemory(BenchmarkState *state) {
// Two timestamps (start, end) per block; one SMID per block.
uint64_t block_times_size = state->block_count * sizeof(uint64_t) * 2;
uint64_t block_smids_size = state->block_count * sizeof(uint32_t);
KernelTimes *host_times = &state->spin_kernel_times;
// Allocate device memory
if (!CheckCUDAError(cudaMalloc(&(state->device_kernel_times),
2 * sizeof(uint64_t)))) {
return 0;
}
if (!CheckCUDAError(cudaMalloc(&(state->device_block_times),
block_times_size))) {
return 0;
}
if (!CheckCUDAError(cudaMalloc(&(state->device_block_smids),
block_smids_size))) {
return 0;
}
// Allocate host memory.
if (!CheckCUDAError(cudaMallocHost(&host_times->kernel_times, 2 *
sizeof(uint64_t)))) {
return 0;
}
if (!CheckCUDAError(cudaMallocHost(&host_times->block_times,
block_times_size))) {
return 0;
}
if (!CheckCUDAError(cudaMallocHost(&host_times->block_smids,
block_smids_size))) {
return 0;
}
return 1;
}
// If the given argument is a non-NULL, non-empty string, attempts to set the
// loop_iterations by parsing it as a number of operations. Otherwise, this
// function will set loop_iterations to a default value. Returns 0 if the
// argument has been set to an invalid number, or nonzero on success.
// Parses `arg` as a base-10 iteration count into state->loop_iterations;
// NULL/empty falls back to DEFAULT_LOOP_ITERATIONS. Returns 1 on success,
// 0 (with a message) on malformed or negative input.
// NOTE(review): values overflowing int64 are clamped by strtoll and accepted
// silently (errno is not checked) — confirm whether that matters for callers.
static int SetLoopIterations(const char *arg, BenchmarkState *state) {
int64_t parsed_value;
if (!arg || (strlen(arg) == 0)) {
state->loop_iterations = DEFAULT_LOOP_ITERATIONS;
return 1;
}
char *end = NULL;
parsed_value = strtoll(arg, &end, 10);
// Reject trailing garbage and negative counts.
if ((*end != 0) || (parsed_value < 0)) {
printf("Invalid operations count: %s\n", arg);
return 0;
}
state->loop_iterations = (uint64_t) parsed_value;
return 1;
}
// Creates one benchmark instance: selects the CUDA device, records the grid
// configuration, allocates host/device buffers, parses the loop-iteration
// count, and creates the stream. Returns the opaque state pointer, or NULL
// on failure with all partially-acquired resources released.
static void* Initialize(InitializationParameters *params) {
BenchmarkState *state = NULL;
state = (BenchmarkState *) malloc(sizeof(*state));
if (!state) return NULL;
// Zeroing makes Cleanup() safe to call at any point below.
memset(state, 0, sizeof(*state));
if (!CheckCUDAError(cudaSetDevice(params->cuda_device))) {
// Fix: previously returned NULL here without freeing `state`, leaking the
// allocation made above.
Cleanup(state);
return NULL;
}
state->thread_count = params->thread_count;
state->block_count = params->block_count;
if (!AllocateMemory(state)) {
Cleanup(state);
return NULL;
}
if (!SetLoopIterations(params->additional_info, state)) {
Cleanup(state);
return NULL;
}
if (!CheckCUDAError(CreateCUDAStreamWithPriority(params->stream_priority,
&(state->stream)))) {
Cleanup(state);
return NULL;
}
state->stream_created = 1;
return state;
}
// Nothing needs to be copied in for this benchmark; always succeeds.
static int CopyIn(void *data) {
  // The spin kernel takes no input data, so there is nothing to transfer.
  (void) data;
  return 1;
}
// Spins in a loop until the set number of loop iterations have completed. The
// throwaway argument can be NULL; it's only used to prevent optimizing out the
// loop body. Records per-block start/end timestamps (block_times holds
// [start, end] pairs indexed by blockIdx.x), the SM each block ran on, and the
// overall kernel start/end in kernel_times[0..1].
static __global__ void CounterSpin(uint64_t iterations, uint64_t *kernel_times,
    uint64_t *block_times, uint32_t *block_smids, uint64_t *throwaway) {
  uint64_t start_time = GlobalTimer64();
  uint64_t i;
  // Fix: accumulator was previously declared without an initializer and then
  // read via "+=", which is undefined behavior.
  uint64_t accumulator = 0;
  // Start by recording the kernel and block start times
  if (threadIdx.x == 0) {
    if (blockIdx.x == 0) kernel_times[0] = start_time;
    block_times[blockIdx.x * 2] = start_time;
    block_smids[blockIdx.x] = GetSMID();
  }
  __syncthreads();
  for (i = 0; i < iterations; i++) {
    accumulator += blockIdx.x;
  }
  // By leaving the possibility that the value may be used, we prevent the loop
  // from being removed.
  if (throwaway) *throwaway = accumulator;
  // Every thread stores an end timestamp and the last store wins; these
  // racing writes are an approximation of the block/kernel end times.
  block_times[blockIdx.x * 2 + 1] = GlobalTimer64();
  kernel_times[1] = GlobalTimer64();
}
// Launches the CounterSpin kernel with the configured block/thread counts on
// the benchmark's stream and waits for it to finish. Returns 0 on any CUDA
// error, nonzero on success.
static int Execute(void *data) {
  BenchmarkState *state = (BenchmarkState *) data;
  CounterSpin<<<state->block_count, state->thread_count, 0, state->stream>>>(
    state->loop_iterations, state->device_kernel_times,
    state->device_block_times, state->device_block_smids, NULL);
  // Fix: kernel launches don't return an error directly; check for
  // launch-time failures (e.g. an invalid thread count) before synchronizing.
  if (!CheckCUDAError(cudaGetLastError())) return 0;
  if (!CheckCUDAError(cudaStreamSynchronize(state->stream))) return 0;
  return 1;
}
// Copies the kernel, per-block, and per-SM timing data back to the pinned
// host buffers and fills in the caller's TimingInformation. All three copies
// are enqueued on the benchmark's stream, then a single synchronize waits for
// them. Returns 0 on any CUDA error, nonzero on success.
static int CopyOut(void *data, TimingInformation *times) {
  BenchmarkState *state = (BenchmarkState *) data;
  KernelTimes *host_times = &state->spin_kernel_times;
  // block_times holds a [start, end] pair per block; block_smids one entry
  // per block.
  uint64_t block_times_count = state->block_count * 2;
  uint64_t block_smids_count = state->block_count;
  memset(times, 0, sizeof(*times));
  if (!CheckCUDAError(cudaMemcpyAsync(host_times->kernel_times,
    state->device_kernel_times, 2 * sizeof(uint64_t),
    cudaMemcpyDeviceToHost, state->stream))) {
    return 0;
  }
  if (!CheckCUDAError(cudaMemcpyAsync(host_times->block_times,
    state->device_block_times, block_times_count * sizeof(uint64_t),
    cudaMemcpyDeviceToHost, state->stream))) {
    return 0;
  }
  if (!CheckCUDAError(cudaMemcpyAsync(host_times->block_smids,
    state->device_block_smids, block_smids_count * sizeof(uint32_t),
    cudaMemcpyDeviceToHost, state->stream))) {
    return 0;
  }
  // Wait for the three async copies above before the host reads the buffers.
  if (!CheckCUDAError(cudaStreamSynchronize(state->stream))) return 0;
  host_times->kernel_name = "CounterSpin";
  host_times->block_count = state->block_count;
  host_times->thread_count = state->thread_count;
  times->kernel_count = 1;
  times->kernel_info = host_times;
  return 1;
}
// Returns the human-readable name of this benchmark.
static const char* GetName(void) {
  static const char *benchmark_name = "Counter Spin";
  return benchmark_name;
}
// This should be the only function we export from the library. It fills the
// caller's table with pointers to this benchmark's implementation functions.
int RegisterFunctions(BenchmarkLibraryFunctions *functions) {
  functions->get_name = GetName;
  functions->initialize = Initialize;
  functions->copy_in = CopyIn;
  functions->execute = Execute;
  functions->copy_out = CopyOut;
  functions->cleanup = Cleanup;
  return 1;
}
|
a4dca9c59e6d9b5f3f443efdf4eaec50da4da77a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
// Converts one RGBA pixel per thread to a single greyscale byte using the
// NTSC luma weights I = .299f*R + .587f*G + .114f*B (alpha ignored).
// Launch layout expected by the host wrapper: one block per row (blockIdx.x),
// one thread per column (threadIdx.x).
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  int row = blockIdx.x;
  int col = threadIdx.x;
  // Fix: guard against launches whose grid/block don't exactly match the
  // image dimensions; previously this could read/write out of bounds.
  if (row >= numRows || col >= numCols) return;
  uchar4 pixel = rgbaImage[row * numCols + col];
  float greyPix = 0.299f * pixel.x + 0.587f * pixel.y + 0.114f * pixel.z;
  greyImage[row * numCols + col] = (unsigned char)greyPix;
}
// Host wrapper: launches rgba_to_greyscale with one block per image row and
// one thread per column, then blocks until the kernel completes.
// h_rgbaImage is unused here (kept for the assignment's fixed signature).
// NOTE(review): blockSize.x == numCols, so this fails for images wider than
// the hardware threads-per-block limit (1024) -- confirm expected input sizes.
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  const dim3 blockSize(numCols, 1, 1);
  const dim3 gridSize( numRows, 1, 1);
  hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
  // Synchronize first so any asynchronous kernel error is surfaced here.
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| a4dca9c59e6d9b5f3f443efdf4eaec50da4da77a.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
// Converts one RGBA pixel per thread to a greyscale byte via the NTSC luma
// formula I = .299f*R + .587f*G + .114f*B; the alpha channel is ignored.
// The host wrapper launches one block per row and one thread per column.
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  // blockIdx.x selects the row, threadIdx.x the column.
  const int idx = blockIdx.x * numCols + threadIdx.x;
  const uchar4 rgba = rgbaImage[idx];
  greyImage[idx] = (unsigned char)(0.299f * rgba.x + 0.587f * rgba.y +
                                   0.114f * rgba.z);
}
// Host wrapper: launches rgba_to_greyscale with one block per image row and
// one thread per column, then blocks until the kernel completes.
// h_rgbaImage is unused here (kept for the assignment's fixed signature).
// NOTE(review): blockSize.x == numCols, so this fails for images wider than
// the hardware threads-per-block limit (1024) -- confirm expected input sizes.
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  const dim3 blockSize(numCols, 1, 1);
  const dim3 gridSize( numRows, 1, 1);
  rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
  // Synchronize first so any asynchronous kernel error is surfaced here.
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
fe3615e146d798aa06e89fcf4dede48b3d9b2df5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Element-wise vector add: c[i] = a[i] + b[i], one thread per element.
// Indexing uses only threadIdx.x, so distinct blocks would all write the same
// elements; intended for single-block launches.
__global__ void BuildFrameIndex(float *a, float *b, float *c)
{
  const int i = threadIdx.x;
  c[i] = a[i] + b[i];
}
| fe3615e146d798aa06e89fcf4dede48b3d9b2df5.cu | __global__ void BuildFrameIndex(float *a, float *b, float *c)
{
int index = threadIdx.x;
c[index] = a[index] + b[index];
}
|
decccaa7922c775db568e99b29635eeb03f44739.hip | // !!! This is a file automatically generated by hipify!!!
#include <iomanip>
#include <memory>
#include <chrono>
#include <vector>
#include <tuple>
#include <hip/hip_runtime.h>
#include <cudnn.h>
#include <hiprand/hiprand.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include "tensor.h"
#include "cudnn_helper.h"
#include "conv_problems.h"
#define USE_GET 0
#ifndef PAD_KERNELS
#define PAD_KERNELS 1
#endif
#ifndef USE_TENSOR_CORES
#if CUDNN_MAJOR >= 7
#define USE_TENSOR_CORES 1
#else
#define USE_TENSOR_CORES 0
#endif
#endif
/*
Usage:
The default precision is set based on the architecture and mode.
By default, the program runs the benchmark in training mode.
bin/conv_bench
To run inference mode, use the following command:
bin/conv_bench inference
To change the precision for training/inference, use:
bin/conv_bench train <precision>
bin/conv_bench inference <precision>
Supported precision types:
For Maxwell GPUS:
float for training and inference
For Pascal GPUS:
float, half for training
float, half, int8 for inference
*/
// T1 is used as the data type for inputs, weights and outputs.
// T2 is used to describe the compute precision. This is used in inference mode in the INT8_CONFIG
template <typename T1, typename T2>
class cudnnCNN {
TensorDescriptor4d<T1> x_desc_;
TensorDescriptor4d<T1> h_desc_;
FilterDescriptor4d<T1> w_desc_;
std::vector<int> output_dims_;
int num_repeats_;
size_t fwd_workspace_size_;
size_t bwd_inputs_workspace_size_;
size_t bwd_params_workspace_size_;
Tensor<float> fwd_workspace_;
Tensor<float> bwd_inputs_workspace_;
Tensor<float> bwd_params_workspace_;
cudnnConvolutionFwdAlgo_t fwd_algo_;
cudnnConvolutionBwdDataAlgo_t bwd_inputs_algo_;
cudnnConvolutionBwdFilterAlgo_t bwd_params_algo_;
const float alpha_ = 1.f;
const float beta_ = 0.f;
ConvolutionDescriptor<T2> conv_desc_;
CudnnHandle cudnn_handle_;
public:
cudnnCNN(int w, int h, int c, int n, int k, int r, int s,
int pad_w, int pad_h, int wstride, int hstride,
int group_count, int inference)
:
cudnn_handle_(),
conv_desc_(pad_h, pad_w, hstride, wstride)
{
int out_h, out_w, out_c, out_n;
cudnnTensorFormat_t format;
// For int8 inference, the supported format is NHWC
if (std::is_same<T1, uint8_t>::value) {
format = CUDNN_TENSOR_NHWC;
} else {
format = CUDNN_TENSOR_NCHW;
}
x_desc_ = TensorDescriptor4d<T1>(format, n, c, h, w);
w_desc_ = FilterDescriptor4d<T1>(format, k, c/group_count, r, s);
// Set group count
CHECK_CUDNN_ERROR(cudnnSetConvolutionGroupCount(conv_desc_.desc(),
group_count));
#if (CUDNN_MAJOR >= 7) && (USE_TENSOR_CORES)
cudnnSetConvolutionMathType(conv_desc_.desc(), CUDNN_TENSOR_OP_MATH);
#endif
// Get output dimensions
CHECK_CUDNN_ERROR(cudnnGetConvolution2dForwardOutputDim(conv_desc_.desc(),
x_desc_.desc(),
w_desc_.desc(),
&out_n,
&out_c,
&out_h,
&out_w));
h_desc_ = TensorDescriptor4d<T1>(format, out_n, out_c, out_h, out_w);
output_dims_ = {out_w, out_h, out_c, out_n};
#if USE_GET
if (std::is_same<T1, uint8_t>::value) {
//Note: cuDNN only supports IMPLICIT_PRECOMP_GEMM for int8 data type.
fwd_algo_ = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
} else {
// Pick forward convolution algorithm
CHECK_CUDNN_ERROR(cudnnGetConvolutionForwardAlgorithm(cudnn_handle_.handle(),
x_desc_.desc(),
w_desc_.desc(),
conv_desc_.desc(),
h_desc_.desc(),
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&fwd_algo_));
}
#else
// Pick forward convolution algorithm
cudnnConvolutionFwdAlgoPerf_t fwd_perf;
int ret_count;
if (std::is_same<T1, uint8_t>::value) {
//Note: cuDNN only supports IMPLICIT_PRECOMP_GEMM for int8 data type.
fwd_algo_ = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
} else {
CHECK_CUDNN_ERROR(cudnnFindConvolutionForwardAlgorithm(cudnn_handle_.handle(),
x_desc_.desc(),
w_desc_.desc(),
conv_desc_.desc(),
h_desc_.desc(),
1,
&ret_count,
&fwd_perf));
fwd_algo_ = fwd_perf.algo;
}
#endif
#if (CUDNN_MAJOR >= 7) && (USE_TENSOR_CORES)
// Tensor Op math only supports IMPLICIT_PRECOMP_GEMM algorithm
fwd_algo_ = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
#endif
if (std::is_same<T1, uint8_t>::value) {
//Note: cudnn workspace size function doesn't work for INT8_CONFIG
fwd_workspace_size_= 1073741824;
} else {
// Set fwd workspace size
CHECK_CUDNN_ERROR(cudnnGetConvolutionForwardWorkspaceSize(cudnn_handle_.handle(),
x_desc_.desc(),
w_desc_.desc(),
conv_desc_.desc(),
h_desc_.desc(),
fwd_algo_,
&fwd_workspace_size_));
}
fwd_workspace_ = zeros<float>(std::vector<int>{static_cast<int>(fwd_workspace_size_ / sizeof(float)), 1});
if (!inference) {
#if USE_GET
// Pick backward convolution algorithm
CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardFilterAlgorithm(cudnn_handle_.handle(),
x_desc_.desc(),
h_desc_.desc(),
conv_desc_.desc(),
w_desc_.desc(),
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST,
0,
&bwd_params_algo_));
#else
cudnnConvolutionBwdFilterAlgoPerf_t filter_perf;
if (std::is_same<T1, uint8_t>::value) {
fwd_algo_ = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
}
CHECK_CUDNN_ERROR(cudnnFindConvolutionBackwardFilterAlgorithm(cudnn_handle_.handle(),
x_desc_.desc(),
h_desc_.desc(),
conv_desc_.desc(),
w_desc_.desc(),
1,
&ret_count,
&filter_perf));
bwd_params_algo_ = filter_perf.algo;
#endif
#if (CUDNN_MAJOR >= 7) && (USE_TENSOR_CORES)
// Tensor Op math only supports this algorithm.
bwd_params_algo_ = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
#endif
// Backward params workspace
CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnn_handle_.handle(),
x_desc_.desc(),
h_desc_.desc(),
conv_desc_.desc(),
w_desc_.desc(),
bwd_params_algo_,
&bwd_params_workspace_size_));
bwd_params_workspace_ = zeros<float>(std::vector<int>{static_cast<int>(bwd_params_workspace_size_ / sizeof(float)), 1});
#if USE_GET
// Pick backward wrt inputs convolution algorithm
CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardDataAlgorithm(cudnn_handle_.handle(),
w_desc_.desc(),
h_desc_.desc(),
conv_desc_.desc(),
x_desc_.desc(),
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST,
0,
&bwd_inputs_algo_));
#else
cudnnConvolutionBwdDataAlgoPerf_t data_perf;
CHECK_CUDNN_ERROR(cudnnFindConvolutionBackwardDataAlgorithm(cudnn_handle_.handle(),
w_desc_.desc(),
h_desc_.desc(),
conv_desc_.desc(),
x_desc_.desc(),
1,
&ret_count,
&data_perf));
bwd_inputs_algo_ = data_perf.algo;
#endif
#if (CUDNN_MAJOR >= 7) && (USE_TENSOR_CORES)
//Tensor Op math only supports this algorithm.
bwd_inputs_algo_ = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
#endif
CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardDataWorkspaceSize(cudnn_handle_.handle(),
w_desc_.desc(),
h_desc_.desc(),
conv_desc_.desc(),
x_desc_.desc(),
bwd_inputs_algo_,
&bwd_inputs_workspace_size_));
bwd_inputs_workspace_ = zeros<float>(std::vector<int>{static_cast<int>(bwd_inputs_workspace_size_ / sizeof(float)), 1});
}
}
// Returns the convolution output dimensions as {w, h, c, n}.
std::vector<int> get_output_dims() { return output_dims_; }
// Maps the forward algorithm chosen in the constructor to a printable name;
// throws if the enum value is not one cuDNN is known to return.
std::string get_fwd_algo_string() {
    switch (fwd_algo_) {
        case CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM:         return "IMPLICIT_GEMM";
        case CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM: return "IMPLICIT_PRECOMP_GEMM";
        case CUDNN_CONVOLUTION_FWD_ALGO_GEMM:                  return "GEMM";
        case CUDNN_CONVOLUTION_FWD_ALGO_DIRECT:                return "DIRECT";
        case CUDNN_CONVOLUTION_FWD_ALGO_FFT:                   return "FFT";
        case CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING:            return "FFT_TILING";
        case CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD:              return "WINOGRAD";
#if CUDNN_MAJOR >= 6
        case CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED:     return "WINOGRAD_NONFUSED";
#endif
        default: break;
    }
    std::stringstream ss;
    ss << "Illegal algorithm passed to get_fwd_algo_string. Algo: " << fwd_algo_ << std::endl;
    throw std::runtime_error(ss.str());
}
// Maps the backward-data (gradient wrt inputs) algorithm to a printable name;
// throws on an unrecognized enum value.
std::string get_bwd_inputs_algo_string() {
    switch (bwd_inputs_algo_) {
        case CUDNN_CONVOLUTION_BWD_DATA_ALGO_0:                 return "AL0";
        case CUDNN_CONVOLUTION_BWD_DATA_ALGO_1:                 return "AL1";
        case CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT:               return "FFT";
        case CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING:        return "FFT_TILING";
        case CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD:          return "WINOGRAD";
        case CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED: return "WINOGRAD_NONFUSED";
        default: break;
    }
    std::stringstream ss;
    ss << "Illegal algorithm passed to get_bwd_inputs_algo_string. Algo: " << bwd_inputs_algo_ << std::endl;
    throw std::runtime_error(ss.str());
}
// Maps the backward-filter (gradient wrt weights) algorithm to a printable
// name; throws on an unrecognized enum value.
std::string get_bwd_params_algo_string() {
    switch (bwd_params_algo_) {
        case CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0:                 return "AL0";
        case CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1:                 return "AL1";
        case CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT:               return "FFT";
        case CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3:                 return "AL3";
        case CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED: return "WINOGRAD_NONFUSED";
        case CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING:        return "FFT_TILING";
        default: break;
    }
    std::stringstream ss;
    ss << "Illegal algorithm passed to get_bwd_params_algo_string. Algo: " << bwd_params_algo_ << std::endl;
    throw std::runtime_error(ss.str());
}
// Forward convolution: h = conv(x, filter), using the algorithm and workspace
// selected in the constructor. alpha_ is 1 and beta_ is 0, so h is fully
// overwritten.
void forward(Tensor<T1> x, Tensor<T1> filter, Tensor<T1> h) {
    // Convolution forward.
    CHECK_CUDNN_ERROR(cudnnConvolutionForward(cudnn_handle_.handle(),
                                              &alpha_,
                                              x_desc_.desc(),
                                              x.begin(),
                                              w_desc_.desc(),
                                              filter.begin(),
                                              conv_desc_.desc(),
                                              fwd_algo_,
                                              fwd_workspace_.begin(),
                                              fwd_workspace_size_,
                                              &beta_,
                                              h_desc_.desc(),
                                              h.begin()));
}
// Backward pass wrt the filter: dW = conv_backward_filter(x, delta), using
// the pre-selected backward-filter algorithm and workspace (beta_ == 0, so
// dW is overwritten rather than accumulated).
void backward_params(Tensor<T1> x, Tensor<T1> delta, Tensor<T1> dW) {
    CHECK_CUDNN_ERROR(cudnnConvolutionBackwardFilter(cudnn_handle_.handle(),
                                                     &alpha_,
                                                     x_desc_.desc(),
                                                     x.begin(),
                                                     h_desc_.desc(),
                                                     delta.begin(),
                                                     conv_desc_.desc(),
                                                     bwd_params_algo_,
                                                     bwd_params_workspace_.begin(),
                                                     bwd_params_workspace_size_,
                                                     &beta_,
                                                     w_desc_.desc(),
                                                     dW.begin()));
}
// Backward pass wrt the inputs: dX = conv_backward_data(filter, delta), using
// the pre-selected backward-data algorithm and workspace (beta_ == 0, so dX
// is overwritten rather than accumulated).
void backward_inputs(Tensor<T1> filter, Tensor<T1> delta, Tensor<T1> dX) {
    CHECK_CUDNN_ERROR(cudnnConvolutionBackwardData(cudnn_handle_.handle(),
                                                   &alpha_,
                                                   w_desc_.desc(),
                                                   filter.begin(),
                                                   h_desc_.desc(),
                                                   delta.begin(),
                                                   conv_desc_.desc(),
                                                   bwd_inputs_algo_,
                                                   bwd_inputs_workspace_.begin(),
                                                   bwd_inputs_workspace_size_,
                                                   &beta_,
                                                   x_desc_.desc(),
                                                   dX.begin()));
}
};
// Benchmarks one convolution problem. Builds a cudnnCNN for the given shape,
// fills the input and filter with random data, then times the forward pass
// (and, when inference == 0, the backward-filter and backward-data passes),
// each averaged over num_repeats runs after one untimed warm-up. Returns
// {fwd_us, bwd_inputs_us, bwd_params_us, fwd_algo, bwd_inputs_algo,
// bwd_params_algo}; the backward times are 0 in inference mode.
template <typename T1, typename T2>
std::tuple<int, int, int, std::string, std::string, std::string> time_cnn(
         int k, int c, int r, int s,
         int n, int h, int w,
         int pad_h, int pad_w,
         int hstride, int wstride,
         int group_count,
         int num_repeats,
         hiprandGenerator_t curand_gen,
         int inference
        ) {
    cudnnCNN<T1, T2> cnn(w, h, c, n, k, r, s, pad_w, pad_h, wstride, hstride, group_count, inference);
    // Allocate memory for filter
    auto filter = rand<T1>(std::vector<int>{s, r, c, k}, curand_gen);
    // Allocate memory for input
    auto input = rand<T1>(std::vector<int>{w, h, c, n}, curand_gen);
    // Allocate memory for output tensor
    auto output = zeros<T1>(cnn.get_output_dims());
    std::string fwd_algo_s = cnn.get_fwd_algo_string();
    std::string bwd_inputs_algo_s = cnn.get_bwd_inputs_algo_string();
    std::string bwd_params_algo_s = cnn.get_bwd_params_algo_string();
    // Warm up (excluded from the timed region).
    cnn.forward(input, filter, output);
    hipDeviceSynchronize();
    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < num_repeats; ++i) {
        cnn.forward(input, filter, output);
    }
    // Synchronize before reading the clock so all queued work is counted.
    hipDeviceSynchronize();
    auto end = std::chrono::steady_clock::now();
    int fwd_time = static_cast<int>(std::chrono::duration<double, std::micro>(end - start).count() / num_repeats);
    int bwd_inputs_time = 0;
    int bwd_params_time = 0;
    if (!inference) {
        // Allocate memory for backward pass wrt weights
        auto delta = rand<T1>(cnn.get_output_dims(), curand_gen);
        auto dW = zeros<T1>(std::vector<int>{s, r, c, k});
        // Warm up backward
        cnn.backward_params(input, delta, dW);
        hipDeviceSynchronize();
        start = std::chrono::steady_clock::now();
        for (int i = 0; i < num_repeats; ++i) {
            // Backward pass wrt weights
            cnn.backward_params(input, delta, dW);
        }
        hipDeviceSynchronize();
        end = std::chrono::steady_clock::now();
        bwd_params_time = static_cast<int>(std::chrono::duration<double, std::micro>(end - start).count() / num_repeats);
        // Allocate memory for backward pass wrt inputs
        auto dX = zeros<T1>(std::vector<int>{w, h, c, n});
        // Warm up backward inputs
        cnn.backward_inputs(filter, delta, dX);
        hipDeviceSynchronize();
        start = std::chrono::steady_clock::now();
        for (int i = 0; i < num_repeats; ++i) {
            // Backward pass wrt inputs
            cnn.backward_inputs(filter, delta, dX);
        }
        hipDeviceSynchronize();
        end = std::chrono::steady_clock::now();
        bwd_inputs_time = static_cast<int>(std::chrono::duration<double, std::micro>(end - start).count() / num_repeats);
    }
    return std::tuple<int, int, int, std::string, std::string, std::string>(fwd_time, bwd_inputs_time, bwd_params_time, fwd_algo_s, bwd_inputs_algo_s, bwd_params_algo_s);
}
// Entry point. Parses the optional mode argument ("inference" selects the
// inference problem set) and precision argument ("float"/"half"/"int8"),
// then times every convolution problem in the selected set and prints a
// formatted results table. int8 problems whose dimensions violate cuDNN's
// alignment requirements are padded (PAD_KERNELS) or skipped.
int main(int argc, char **argv) {
    int num_repeats = 300;
    int inference = 0;
    if (argc > 1) {
        std::string inf = "inference";
        inference = argv[1] == inf ? 1 : 0;
    }
#if CUDNN_MAJOR >= 6
    std::string precision;
    if (inference)
        precision = "int8";
    else
        precision = "half";
#else
    std::string precision = "float";
#endif
    if (argc > 2) {
        precision = argv[2];
    }
    // Handles to various cuda libraries, structures
    hiprandGenerator_t curand_gen;
    // Establish the device context up front.
    hipFree(0);
    // Initialize curand_gen and set appropriate seed.
    hiprandCreateGenerator(&curand_gen, HIPRAND_RNG_PSEUDO_DEFAULT);
    hiprandSetPseudoRandomGeneratorSeed(curand_gen, 123ULL);
    if (inference) {
        std::cout << std::setw(45) << "Running inference benchmark " << std::endl;
    } else {
        std::cout << std::setw(45) << "Running training benchmark " << std::endl;
    }
    std::cout << std::setw(30) << "Times" << std::endl;
    std::cout << std::setfill('-') << std::setw(190) << "-" << std::endl;
    std::cout << std::setfill(' ');
    std::cout << " w h c n k f_w f_h pad_w pad_h stride_w stride_h group precision fwd_time (usec) ";
    if (!inference) {
        std::cout << "bwd_inputs_time (usec) bwd_params_time (usec) ";
        std::cout << "total_time (usec)";
    }
    if (PAD_KERNELS && ((precision == "int8" && inference) || (USE_TENSOR_CORES && !inference)))
        // Fix: header previously printed the misspelled "pad_kerenels".
        std::cout << " pad_kernels ";
    std::cout << " fwd_algo bwd_inputs_algo bwd_params_algo" << std::endl;
    std::cout << std::setfill('-') << std::setw(200) << "-" << std::endl;
    std::cout << std::setfill(' ');
    int pad_kernels_count = 0;
    for (const auto &problem : (inference ? inference_server_set : training_set)) {
        // Filter parameters
        int k, c, r, s; // r - filter_h (f_h), s - filter_w (f_w)
        // Input parameters
        int n, w, h;
        // Padding
        int pad_w, pad_h;
        // Stride
        int wstride, hstride;
        // Groups
        int group_count;
        std::tie(w, h, c, n, k, s, r, pad_w, pad_h, wstride, hstride, group_count) = problem;
        bool skip_kernel = false;
        bool need_padding = false;
#if CUDNN_MAJOR >= 6
        int padded_c, padded_w, padded_h;
        int pad_value;
        padded_c = c;
        padded_h = h;
        padded_w = w;
        if (precision == "int8") {
            // int8 requires c/w/h to be multiples of 4; pad or skip.
            pad_value = 4;
            if (c % pad_value || w % pad_value || h % pad_value) {
                pad_kernels_count++;
                if (PAD_KERNELS) {
                    pad_dim(padded_c, pad_value);
                    pad_dim(padded_h, pad_value);
                    pad_dim(padded_w, pad_value);
                    need_padding = true;
                } else {
                    skip_kernel = true;
                }
            }
        }
#if (USE_TENSOR_CORES)
        // Tensor cores need channels to be a multiple of 8. So, added padding for some kernels.
        if (!inference) {
            pad_value = 8;
            if (c % pad_value) {
                pad_kernels_count++;
                if (PAD_KERNELS) {
                    pad_dim(padded_c, pad_value);
                    need_padding = true;
                } else {
                    skip_kernel = true;
                }
            }
        }
#endif
#endif
        // Fix: these were previously uninitialized and then printed/summed
        // when an int8 kernel was skipped (skip_kernel == true), which is
        // undefined behavior. Zero matches time_cnn's "no backward" defaults.
        int fwd_time = 0, bwd_inputs_time = 0, bwd_params_time = 0;
        std::string fwd_algo_s;
        std::string bwd_inputs_algo_s;
        std::string bwd_params_algo_s;
        std::stringstream ss;
        ss << "Unsupported precision requested. Precision: " << precision << " Inference: " << inference;
#if CUDNN_MAJOR >= 6
        if (precision == "float") {
            std::tie(fwd_time, bwd_inputs_time, bwd_params_time, fwd_algo_s, bwd_inputs_algo_s, bwd_params_algo_s) =
                time_cnn<float, float>(k, padded_c, r, s, n, padded_h, padded_w, pad_h, pad_w, hstride, wstride, group_count, num_repeats, curand_gen, inference);
        } else if (precision == "half") {
            std::tie(fwd_time, bwd_inputs_time, bwd_params_time, fwd_algo_s, bwd_inputs_algo_s, bwd_params_algo_s) =
                time_cnn<uint16_t, uint16_t>(k, padded_c, r, s, n, padded_h, padded_w, pad_h, pad_w, hstride, wstride, group_count, num_repeats, curand_gen, inference);
        } else if ((precision == "int8") && inference) {
            if (!skip_kernel) {
                std::tie(fwd_time, bwd_inputs_time, bwd_params_time, fwd_algo_s, bwd_inputs_algo_s, bwd_params_algo_s) =
                    time_cnn<uint8_t, int>(k, padded_c, r, s, n, padded_h, padded_w, pad_h, pad_w, hstride, wstride, group_count, num_repeats, curand_gen, inference);
            }
        } else {
            throw std::runtime_error(ss.str());
        }
#else
        if (precision != "float")
            throw std::runtime_error(ss.str());
        std::tie(fwd_time, bwd_inputs_time, bwd_params_time, fwd_algo_s, bwd_inputs_algo_s, bwd_params_algo_s) =
            time_cnn<float, float>(k, c, r, s, n, h, w, pad_h, pad_w, hstride, wstride, group_count, num_repeats, curand_gen, inference);
#endif
        std::cout << std::setw(5) << w;
        std::cout << std::setw(7) << h;
        std::cout << std::setw(7) << c;
        std::cout << std::setw(7) << n;
        std::cout << std::setw(7) << k;
        std::cout << std::setw(7) << s;
        std::cout << std::setw(7) << r;
        std::cout << std::setw(7) << pad_w;
        std::cout << std::setw(8) << pad_h;
        std::cout << std::setw(10) << wstride;
        std::cout << std::setw(10) << hstride;
        std::cout << std::setw(10) << group_count;
        std::cout << std::setw(10) << precision;
        std::cout << std::setw(15) << std::setprecision(7);
        if (skip_kernel) {
            std::cout << "Not Supported";
        } else {
            std::cout << fwd_time;
        }
        if (PAD_KERNELS && precision == "int8" && inference) {
            std::cout << std::setw(15) << need_padding;
        }
        if (!inference) {
            std::cout << std::setw(24) << std::setprecision(7) << bwd_inputs_time;
            std::cout << std::setw(24) << std::setprecision(7) << bwd_params_time;
            std::cout << std::setw(19) << std::setprecision(8) << fwd_time + bwd_inputs_time + bwd_params_time;
        }
        if (USE_TENSOR_CORES && PAD_KERNELS && !inference) {
            std::cout << std::setw(15) << need_padding;
        }
        std::cout << std::setw(25) << fwd_algo_s;
        std::cout << std::setw(25) << bwd_inputs_algo_s;
        std::cout << std::setw(25) << bwd_params_algo_s;
        std::cout << std::endl;
    }
    if (precision == "int8") {
        std::cout << " Total kernels ";
        if (PAD_KERNELS)
            std::cout << "padded: " << pad_kernels_count << std::endl;
        else
            std::cout << "skipped: " << pad_kernels_count << std::endl;
        std::cout << " Total kernels: " << inference_server_set.size() << std::endl;
    }
    // Destroy all the handles
    hiprandDestroyGenerator(curand_gen);
    return 0;
}
| decccaa7922c775db568e99b29635eeb03f44739.cu | #include <iomanip>
#include <memory>
#include <chrono>
#include <vector>
#include <tuple>
#include <cuda.h>
#include <cudnn.h>
#include <curand.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include "tensor.h"
#include "cudnn_helper.h"
#include "conv_problems.h"
#define USE_GET 0
#ifndef PAD_KERNELS
#define PAD_KERNELS 1
#endif
#ifndef USE_TENSOR_CORES
#if CUDNN_MAJOR >= 7
#define USE_TENSOR_CORES 1
#else
#define USE_TENSOR_CORES 0
#endif
#endif
/*
Usage:
The default precision is set based on the architecture and mode.
By default, the program runs the benchmark in training mode.
bin/conv_bench
To run inference mode, use the following command:
bin/conv_bench inference
To change the precision for training/inference, use:
bin/conv_bench train <precision>
bin/conv_bench inference <precision>
Supported precision types:
For Maxwell GPUS:
float for training and inference
For Pascal GPUS:
float, half for training
float, half, int8 for inference
*/
// T1 is used as the data type for inputs, weights and outputs.
// T2 is used to describe the compute precision. This is used in inference mode in the INT8_CONFIG
template <typename T1, typename T2>
class cudnnCNN {
TensorDescriptor4d<T1> x_desc_;
TensorDescriptor4d<T1> h_desc_;
FilterDescriptor4d<T1> w_desc_;
std::vector<int> output_dims_;
int num_repeats_;
size_t fwd_workspace_size_;
size_t bwd_inputs_workspace_size_;
size_t bwd_params_workspace_size_;
Tensor<float> fwd_workspace_;
Tensor<float> bwd_inputs_workspace_;
Tensor<float> bwd_params_workspace_;
cudnnConvolutionFwdAlgo_t fwd_algo_;
cudnnConvolutionBwdDataAlgo_t bwd_inputs_algo_;
cudnnConvolutionBwdFilterAlgo_t bwd_params_algo_;
const float alpha_ = 1.f;
const float beta_ = 0.f;
ConvolutionDescriptor<T2> conv_desc_;
CudnnHandle cudnn_handle_;
public:
cudnnCNN(int w, int h, int c, int n, int k, int r, int s,
int pad_w, int pad_h, int wstride, int hstride,
int group_count, int inference)
:
cudnn_handle_(),
conv_desc_(pad_h, pad_w, hstride, wstride)
{
int out_h, out_w, out_c, out_n;
cudnnTensorFormat_t format;
// For int8 inference, the supported format is NHWC
if (std::is_same<T1, uint8_t>::value) {
format = CUDNN_TENSOR_NHWC;
} else {
format = CUDNN_TENSOR_NCHW;
}
x_desc_ = TensorDescriptor4d<T1>(format, n, c, h, w);
w_desc_ = FilterDescriptor4d<T1>(format, k, c/group_count, r, s);
// Set group count
CHECK_CUDNN_ERROR(cudnnSetConvolutionGroupCount(conv_desc_.desc(),
group_count));
#if (CUDNN_MAJOR >= 7) && (USE_TENSOR_CORES)
cudnnSetConvolutionMathType(conv_desc_.desc(), CUDNN_TENSOR_OP_MATH);
#endif
// Get output dimensions
CHECK_CUDNN_ERROR(cudnnGetConvolution2dForwardOutputDim(conv_desc_.desc(),
x_desc_.desc(),
w_desc_.desc(),
&out_n,
&out_c,
&out_h,
&out_w));
h_desc_ = TensorDescriptor4d<T1>(format, out_n, out_c, out_h, out_w);
output_dims_ = {out_w, out_h, out_c, out_n};
#if USE_GET
if (std::is_same<T1, uint8_t>::value) {
//Note: cuDNN only supports IMPLICIT_PRECOMP_GEMM for int8 data type.
fwd_algo_ = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
} else {
// Pick forward convolution algorithm
CHECK_CUDNN_ERROR(cudnnGetConvolutionForwardAlgorithm(cudnn_handle_.handle(),
x_desc_.desc(),
w_desc_.desc(),
conv_desc_.desc(),
h_desc_.desc(),
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&fwd_algo_));
}
#else
// Pick forward convolution algorithm
cudnnConvolutionFwdAlgoPerf_t fwd_perf;
int ret_count;
if (std::is_same<T1, uint8_t>::value) {
//Note: cuDNN only supports IMPLICIT_PRECOMP_GEMM for int8 data type.
fwd_algo_ = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
} else {
CHECK_CUDNN_ERROR(cudnnFindConvolutionForwardAlgorithm(cudnn_handle_.handle(),
x_desc_.desc(),
w_desc_.desc(),
conv_desc_.desc(),
h_desc_.desc(),
1,
&ret_count,
&fwd_perf));
fwd_algo_ = fwd_perf.algo;
}
#endif
#if (CUDNN_MAJOR >= 7) && (USE_TENSOR_CORES)
// Tensor Op math only supports IMPLICIT_PRECOMP_GEMM algorithm
fwd_algo_ = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
#endif
if (std::is_same<T1, uint8_t>::value) {
//Note: cudnn workspace size function doesn't work for INT8_CONFIG
fwd_workspace_size_= 1073741824;
} else {
// Set fwd workspace size
CHECK_CUDNN_ERROR(cudnnGetConvolutionForwardWorkspaceSize(cudnn_handle_.handle(),
x_desc_.desc(),
w_desc_.desc(),
conv_desc_.desc(),
h_desc_.desc(),
fwd_algo_,
&fwd_workspace_size_));
}
fwd_workspace_ = zeros<float>(std::vector<int>{static_cast<int>(fwd_workspace_size_ / sizeof(float)), 1});
if (!inference) {
#if USE_GET
// Pick backward convolution algorithm
CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardFilterAlgorithm(cudnn_handle_.handle(),
x_desc_.desc(),
h_desc_.desc(),
conv_desc_.desc(),
w_desc_.desc(),
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST,
0,
&bwd_params_algo_));
#else
cudnnConvolutionBwdFilterAlgoPerf_t filter_perf;
if (std::is_same<T1, uint8_t>::value) {
fwd_algo_ = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
}
CHECK_CUDNN_ERROR(cudnnFindConvolutionBackwardFilterAlgorithm(cudnn_handle_.handle(),
x_desc_.desc(),
h_desc_.desc(),
conv_desc_.desc(),
w_desc_.desc(),
1,
&ret_count,
&filter_perf));
bwd_params_algo_ = filter_perf.algo;
#endif
#if (CUDNN_MAJOR >= 7) && (USE_TENSOR_CORES)
// Tensor Op math only supports this algorithm.
bwd_params_algo_ = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
#endif
// Backward params workspace
CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnn_handle_.handle(),
x_desc_.desc(),
h_desc_.desc(),
conv_desc_.desc(),
w_desc_.desc(),
bwd_params_algo_,
&bwd_params_workspace_size_));
bwd_params_workspace_ = zeros<float>(std::vector<int>{static_cast<int>(bwd_params_workspace_size_ / sizeof(float)), 1});
#if USE_GET
// Pick backward wrt inputs convolution algorithm
CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardDataAlgorithm(cudnn_handle_.handle(),
w_desc_.desc(),
h_desc_.desc(),
conv_desc_.desc(),
x_desc_.desc(),
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST,
0,
&bwd_inputs_algo_));
#else
cudnnConvolutionBwdDataAlgoPerf_t data_perf;
CHECK_CUDNN_ERROR(cudnnFindConvolutionBackwardDataAlgorithm(cudnn_handle_.handle(),
w_desc_.desc(),
h_desc_.desc(),
conv_desc_.desc(),
x_desc_.desc(),
1,
&ret_count,
&data_perf));
bwd_inputs_algo_ = data_perf.algo;
#endif
#if (CUDNN_MAJOR >= 7) && (USE_TENSOR_CORES)
//Tensor Op math only supports this algorithm.
bwd_inputs_algo_ = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
#endif
CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardDataWorkspaceSize(cudnn_handle_.handle(),
w_desc_.desc(),
h_desc_.desc(),
conv_desc_.desc(),
x_desc_.desc(),
bwd_inputs_algo_,
&bwd_inputs_workspace_size_));
bwd_inputs_workspace_ = zeros<float>(std::vector<int>{static_cast<int>(bwd_inputs_workspace_size_ / sizeof(float)), 1});
}
}
// Dimensions of the convolution output tensor, as stored by this object.
std::vector<int> get_output_dims() { return output_dims_; }
// Map the selected forward-convolution algorithm enum to a printable name.
// Throws std::runtime_error for any value outside the known enumeration.
std::string get_fwd_algo_string() {
    switch (fwd_algo_) {
    case CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM:
        return "IMPLICIT_GEMM";
    case CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM:
        return "IMPLICIT_PRECOMP_GEMM";
    case CUDNN_CONVOLUTION_FWD_ALGO_GEMM:
        return "GEMM";
    case CUDNN_CONVOLUTION_FWD_ALGO_DIRECT:
        return "DIRECT";
    case CUDNN_CONVOLUTION_FWD_ALGO_FFT:
        return "FFT";
    case CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING:
        return "FFT_TILING";
    case CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD:
        return "WINOGRAD";
#if CUDNN_MAJOR >= 6
    case CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED:
        return "WINOGRAD_NONFUSED";
#endif
    default:
        break;
    }
    std::stringstream ss;
    ss << "Illegal algorithm passed to get_fwd_algo_string. Algo: " << fwd_algo_ << std::endl;
    throw std::runtime_error(ss.str());
}
// Map the selected backward-data (wrt inputs) algorithm enum to a printable
// name. Throws std::runtime_error for any unrecognized value.
std::string get_bwd_inputs_algo_string() {
    switch (bwd_inputs_algo_) {
    case CUDNN_CONVOLUTION_BWD_DATA_ALGO_0:
        return "AL0";
    case CUDNN_CONVOLUTION_BWD_DATA_ALGO_1:
        return "AL1";
    case CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT:
        return "FFT";
    case CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING:
        return "FFT_TILING";
    case CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD:
        return "WINOGRAD";
    case CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED:
        return "WINOGRAD_NONFUSED";
    default:
        break;
    }
    std::stringstream ss;
    ss << "Illegal algorithm passed to get_bwd_inputs_algo_string. Algo: " << bwd_inputs_algo_ << std::endl;
    throw std::runtime_error(ss.str());
}
// Map the selected backward-filter (wrt weights) algorithm enum to a printable
// name. Throws std::runtime_error for any unrecognized value.
std::string get_bwd_params_algo_string() {
    switch (bwd_params_algo_) {
    case CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0:
        return "AL0";
    case CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1:
        return "AL1";
    case CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT:
        return "FFT";
    case CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3:
        return "AL3";
    case CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED:
        return "WINOGRAD_NONFUSED";
    case CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING:
        return "FFT_TILING";
    default:
        break;
    }
    std::stringstream ss;
    ss << "Illegal algorithm passed to get_bwd_params_algo_string. Algo: " << bwd_params_algo_ << std::endl;
    throw std::runtime_error(ss.str());
}
// Forward convolution: h = alpha_ * conv(x, filter) + beta_ * h, using the
// algorithm (fwd_algo_) and scratch workspace selected at construction time.
// Asynchronous with respect to the host; caller must synchronize before
// reading h.
void forward(Tensor<T1> x, Tensor<T1> filter, Tensor<T1> h) {
    // Convolution forward.
    CHECK_CUDNN_ERROR(cudnnConvolutionForward(cudnn_handle_.handle(),
                                              &alpha_,
                                              x_desc_.desc(),
                                              x.begin(),
                                              w_desc_.desc(),
                                              filter.begin(),
                                              conv_desc_.desc(),
                                              fwd_algo_,
                                              fwd_workspace_.begin(),
                                              fwd_workspace_size_,
                                              &beta_,
                                              h_desc_.desc(),
                                              h.begin()));
}
// Backward pass wrt the filter weights: dW = gradient of the loss wrt the
// filter, given input x and the output gradient delta. Uses the
// bwd_params_algo_ and workspace chosen at construction time.
void backward_params(Tensor<T1> x, Tensor<T1> delta, Tensor<T1> dW) {
    CHECK_CUDNN_ERROR(cudnnConvolutionBackwardFilter(cudnn_handle_.handle(),
                                                     &alpha_,
                                                     x_desc_.desc(),
                                                     x.begin(),
                                                     h_desc_.desc(),
                                                     delta.begin(),
                                                     conv_desc_.desc(),
                                                     bwd_params_algo_,
                                                     bwd_params_workspace_.begin(),
                                                     bwd_params_workspace_size_,
                                                     &beta_,
                                                     w_desc_.desc(),
                                                     dW.begin()));
}
// Backward pass wrt the inputs: dX = gradient of the loss wrt the input,
// given the filter and the output gradient delta. Uses the bwd_inputs_algo_
// and workspace chosen at construction time.
void backward_inputs(Tensor<T1> filter, Tensor<T1> delta, Tensor<T1> dX) {
    CHECK_CUDNN_ERROR(cudnnConvolutionBackwardData(cudnn_handle_.handle(),
                                                   &alpha_,
                                                   w_desc_.desc(),
                                                   filter.begin(),
                                                   h_desc_.desc(),
                                                   delta.begin(),
                                                   conv_desc_.desc(),
                                                   bwd_inputs_algo_,
                                                   bwd_inputs_workspace_.begin(),
                                                   bwd_inputs_workspace_size_,
                                                   &beta_,
                                                   x_desc_.desc(),
                                                   dX.begin()));
}
};
// Benchmark one convolution problem. Builds the cuDNN plan, then times the
// forward pass — and, when inference is false, both backward passes — averaged
// over num_repeats iterations. Returns (fwd_us, bwd_inputs_us, bwd_params_us,
// fwd_algo_name, bwd_inputs_algo_name, bwd_params_algo_name); the backward
// times are 0 in inference mode. Each timed loop is preceded by one untimed
// warm-up call and bracketed by device synchronization, since kernel launches
// are asynchronous.
template <typename T1, typename T2>
std::tuple<int, int, int, std::string, std::string, std::string> time_cnn(
    int k, int c, int r, int s,
    int n, int h, int w,
    int pad_h, int pad_w,
    int hstride, int wstride,
    int group_count,
    int num_repeats,
    curandGenerator_t curand_gen,
    int inference
) {
    cudnnCNN<T1, T2> cnn(w, h, c, n, k, r, s, pad_w, pad_h, wstride, hstride, group_count, inference);
    // Allocate memory for filter
    auto filter = rand<T1>(std::vector<int>{s, r, c, k}, curand_gen);
    // Allocate memory for input
    auto input = rand<T1>(std::vector<int>{w, h, c, n}, curand_gen);
    // Allocate memory for output tensor
    auto output = zeros<T1>(cnn.get_output_dims());
    std::string fwd_algo_s = cnn.get_fwd_algo_string();
    std::string bwd_inputs_algo_s = cnn.get_bwd_inputs_algo_string();
    std::string bwd_params_algo_s = cnn.get_bwd_params_algo_string();
    // Warm up (not timed: first launch pays one-time setup costs)
    cnn.forward(input, filter, output);
    cudaDeviceSynchronize();
    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < num_repeats; ++i) {
        cnn.forward(input, filter, output);
    }
    // Launches are async; wait for completion before stopping the clock.
    cudaDeviceSynchronize();
    auto end = std::chrono::steady_clock::now();
    int fwd_time = static_cast<int>(std::chrono::duration<double, std::micro>(end - start).count() / num_repeats);
    int bwd_inputs_time = 0;
    int bwd_params_time = 0;
    if (!inference) {
        // Allocate memory for backward pass wrt weights
        auto delta = rand<T1>(cnn.get_output_dims(), curand_gen);
        auto dW = zeros<T1>(std::vector<int>{s, r, c, k});
        // Warm up backward
        cnn.backward_params(input, delta, dW);
        cudaDeviceSynchronize();
        start = std::chrono::steady_clock::now();
        for (int i = 0; i < num_repeats; ++i) {
            // Backward pass wrt weights
            cnn.backward_params(input, delta, dW);
        }
        cudaDeviceSynchronize();
        end = std::chrono::steady_clock::now();
        bwd_params_time = static_cast<int>(std::chrono::duration<double, std::micro>(end - start).count() / num_repeats);
        // Allocate memory for backward pass wrt inputs
        auto dX = zeros<T1>(std::vector<int>{w, h, c, n});
        // Warm up backward inputs
        cnn.backward_inputs(filter, delta, dX);
        cudaDeviceSynchronize();
        start = std::chrono::steady_clock::now();
        for (int i = 0; i < num_repeats; ++i) {
            // Backward pass wrt inputs
            cnn.backward_inputs(filter, delta, dX);
        }
        cudaDeviceSynchronize();
        end = std::chrono::steady_clock::now();
        bwd_inputs_time = static_cast<int>(std::chrono::duration<double, std::micro>(end - start).count() / num_repeats);
    }
    return std::tuple<int, int, int, std::string, std::string, std::string>(fwd_time, bwd_inputs_time, bwd_params_time, fwd_algo_s, bwd_inputs_algo_s, bwd_params_algo_s);
}
// Benchmark driver. argv[1] == "inference" runs the inference problem set
// (default precision "int8" on cuDNN >= 6); otherwise the training set runs
// (default "half" on cuDNN >= 6, "float" otherwise). argv[2] optionally
// overrides the precision. Prints one result row per problem.
int main(int argc, char **argv) {
    int num_repeats = 300;
    int inference = 0;
    if (argc > 1) {
        std::string inf = "inference";
        inference = argv[1] == inf ? 1 : 0;
    }
#if CUDNN_MAJOR >= 6
    std::string precision;
    if (inference)
        precision = "int8";
    else
        precision = "half";
#else
    std::string precision = "float";
#endif
    if (argc > 2) {
        precision = argv[2];
    }
    // Handles to various cuda libraries, structures
    curandGenerator_t curand_gen;
    // Force CUDA context creation up front so it is not charged to a timed call.
    cudaFree(0);
    // Initialize curand_gen and set appropriate seed.
    curandCreateGenerator(&curand_gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(curand_gen, 123ULL);
    if (inference) {
        std::cout << std::setw(45) << "Running inference benchmark " << std::endl;
    } else {
        std::cout << std::setw(45) << "Running training benchmark " << std::endl;
    }
    std::cout << std::setw(30) << "Times" << std::endl;
    std::cout << std::setfill('-') << std::setw(190) << "-" << std::endl;
    std::cout << std::setfill(' ');
    std::cout << " w h c n k f_w f_h pad_w pad_h stride_w stride_h group precision fwd_time (usec) ";
    if (!inference) {
        std::cout << "bwd_inputs_time (usec) bwd_params_time (usec) ";
        std::cout << "total_time (usec)";
    }
    if (PAD_KERNELS && ((precision == "int8" && inference) || (USE_TENSOR_CORES && !inference)))
        std::cout << " pad_kernels "; // fixed header typo ("pad_kerenels")
    std::cout << " fwd_algo bwd_inputs_algo bwd_params_algo" << std::endl;
    std::cout << std::setfill('-') << std::setw(200) << "-" << std::endl;
    std::cout << std::setfill(' ');
    int pad_kernels_count = 0;
    for (const auto &problem : (inference ? inference_server_set : training_set)) {
        // Filter parameters
        int k, c, r, s; // r - filter_h (f_h), s - filter_w (f_w)
        // Input parameters
        int n, w, h;
        // Padding
        int pad_w, pad_h;
        // Stride
        int wstride, hstride;
        // Groups
        int group_count;
        std::tie(w, h, c, n, k, s, r, pad_w, pad_h, wstride, hstride, group_count) = problem;
        bool skip_kernel = false;
        bool need_padding = false;
#if CUDNN_MAJOR >= 6
        int padded_c, padded_w, padded_h;
        int pad_value;
        padded_c = c;
        padded_h = h;
        padded_w = w;
        if (precision == "int8") {
            // int8 convolutions require c/w/h to be multiples of 4.
            pad_value = 4;
            if (c % pad_value || w % pad_value || h % pad_value) {
                pad_kernels_count++;
                if (PAD_KERNELS) {
                    pad_dim(padded_c, pad_value);
                    pad_dim(padded_h, pad_value);
                    pad_dim(padded_w, pad_value);
                    need_padding = true;
                } else {
                    skip_kernel = true;
                }
            }
        }
#if (USE_TENSOR_CORES)
        // Tensor cores need channels to be a multiple of 8. So, added padding for some kernels.
        if (!inference) {
            pad_value = 8;
            if (c % pad_value) {
                pad_kernels_count++;
                if (PAD_KERNELS) {
                    pad_dim(padded_c, pad_value);
                    need_padding = true;
                } else {
                    skip_kernel = true;
                }
            }
        }
#endif
#endif
        // Zero-initialize so a skipped kernel prints 0 instead of reading
        // indeterminate values (previously these were left uninitialized).
        int fwd_time = 0;
        int bwd_inputs_time = 0;
        int bwd_params_time = 0;
        std::string fwd_algo_s;
        std::string bwd_inputs_algo_s;
        std::string bwd_params_algo_s;
        std::stringstream ss;
        ss << "Unsupported precision requested. Precision: " << precision << " Inference: " << inference;
#if CUDNN_MAJOR >= 6
        if (precision == "float") {
            std::tie(fwd_time, bwd_inputs_time, bwd_params_time, fwd_algo_s, bwd_inputs_algo_s, bwd_params_algo_s) =
                time_cnn<float, float>(k, padded_c, r, s, n, padded_h, padded_w, pad_h, pad_w, hstride, wstride, group_count, num_repeats, curand_gen, inference);
        } else if (precision == "half") {
            std::tie(fwd_time, bwd_inputs_time, bwd_params_time, fwd_algo_s, bwd_inputs_algo_s, bwd_params_algo_s) =
                time_cnn<uint16_t, uint16_t>(k, padded_c, r, s, n, padded_h, padded_w, pad_h, pad_w, hstride, wstride, group_count, num_repeats, curand_gen, inference);
        } else if ((precision == "int8") && inference) {
            if (!skip_kernel) {
                std::tie(fwd_time, bwd_inputs_time, bwd_params_time, fwd_algo_s, bwd_inputs_algo_s, bwd_params_algo_s) =
                    time_cnn<uint8_t, int>(k, padded_c, r, s, n, padded_h, padded_w, pad_h, pad_w, hstride, wstride, group_count, num_repeats, curand_gen, inference);
            }
        } else {
            throw std::runtime_error(ss.str());
        }
#else
        if (precision != "float")
            throw std::runtime_error(ss.str());
        std::tie(fwd_time, bwd_inputs_time, bwd_params_time, fwd_algo_s, bwd_inputs_algo_s, bwd_params_algo_s) =
            time_cnn<float, float>(k, c, r, s, n, h, w, pad_h, pad_w, hstride, wstride, group_count, num_repeats, curand_gen, inference);
#endif
        std::cout << std::setw(5) << w;
        std::cout << std::setw(7) << h;
        std::cout << std::setw(7) << c;
        std::cout << std::setw(7) << n;
        std::cout << std::setw(7) << k;
        std::cout << std::setw(7) << s;
        std::cout << std::setw(7) << r;
        std::cout << std::setw(7) << pad_w;
        std::cout << std::setw(8) << pad_h;
        std::cout << std::setw(10) << wstride;
        std::cout << std::setw(10) << hstride;
        std::cout << std::setw(10) << group_count;
        std::cout << std::setw(10) << precision;
        std::cout << std::setw(15) << std::setprecision(7);
        if (skip_kernel) {
            std::cout << "Not Supported";
        } else {
            std::cout << fwd_time;
        }
        if (PAD_KERNELS && precision == "int8" && inference) {
            std::cout << std::setw(15) << need_padding;
        }
        if (!inference) {
            std::cout << std::setw(24) << std::setprecision(7) << bwd_inputs_time;
            std::cout << std::setw(24) << std::setprecision(7) << bwd_params_time;
            std::cout << std::setw(19) << std::setprecision(8) << fwd_time + bwd_inputs_time + bwd_params_time;
        }
        if (USE_TENSOR_CORES && PAD_KERNELS && !inference) {
            std::cout << std::setw(15) << need_padding;
        }
        std::cout << std::setw(25) << fwd_algo_s;
        std::cout << std::setw(25) << bwd_inputs_algo_s;
        std::cout << std::setw(25) << bwd_params_algo_s;
        std::cout << std::endl;
    }
    if (precision == "int8") {
        std::cout << " Total kernels ";
        if (PAD_KERNELS)
            std::cout << "padded: " << pad_kernels_count << std::endl;
        else
            std::cout << "skipped: " << pad_kernels_count << std::endl;
        std::cout << " Total kernels: " << inference_server_set.size() << std::endl;
    }
    // Destroy all the handles
    curandDestroyGenerator(curand_gen);
    return 0;
}
|
30b22fb119fbccae7762b3e7a07726a1f04ca610.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __cplusplus
extern "C" {
#include "pfb.h"
}
#endif
// data ptrs (the _d suffix marks device-resident buffers allocated with
// hipMalloc in initPFB; unsuffixed pointers are host allocations)
char2* g_pc2InBuf = NULL;
char2* g_pc2InBufRead = NULL;
char2* g_pc2Data_d = NULL;       // state-padded input buffer on the device
char2* g_pc2DataRead_d = NULL;   // moving read cursor into g_pc2Data_d
float2* g_pf2FFTIn_d = NULL;     // PFB output / FFT input
float2* g_pf2FFTOut_d = NULL;    // FFT output, copied back to the host
float *g_pfPFBCoeff = NULL;      // filter coefficients (host copy, read from file)
float *g_pfPFBCoeff_d = NULL;    // filter coefficients (device copy)
char* g_pcInputData_d = NULL;    // raw input staging buffer (currently unused by runPFB)
// pfb params
int g_iNFFT = DEF_LEN_SPEC;
int g_iNTaps = NUM_TAPS;
int g_iNumSubBands = PFB_CHANNELS * DEF_NUM_ELEMENTS;
// process flags
int g_IsDataReadDone = FALSE;
int g_IsProcDone = FALSE;
// size vars
int g_iSizeFile = 0;
int g_iReadCount = 0;
int g_iSizeRead = DEF_SIZE_READ;
int g_iFileCoeff = 0;            // file descriptor for the coefficient file
char g_acFileCoeff[256] = {0};   // coefficient file path, built in initPFB
// GPU params (launch geometry, filled in by initPFB)
dim3 g_dimBPFB(1, 1, 1);
dim3 g_dimGPFB(1, 1);
dim3 g_dimBCopy(1, 1, 1);
dim3 g_dimGCopy(1, 1);
dim3 mapGSize(1,1,1);
dim3 mapBSize(1,1,1);
dim3 saveGSize(1, 1, 1 ); // (5, 256, 1)
dim3 saveBSize(1, 1, 1); // (64, 1, 1)
hipfftHandle g_stPlan = {0};
int g_iMaxThreadsPerBlock = 0;
int g_iMaxPhysThreads = 0;
// Run one polyphase-filter-bank + FFT pass over inputData_h and copy the
// resulting spectra to outputData_h. The device buffer g_pc2Data_d keeps a
// leading region of nfft*taps samples of filter state from the previous call;
// new input is copied in after that region, and saveData copies the tail back
// to the front for the next call. Returns EXIT_SUCCESS/EXIT_FAILURE.
int runPFB(signed char* inputData_h, float* outputData_h, params pfbParams) {
    g_IsProcDone = FALSE;
    int iRet = EXIT_SUCCESS;
    long lProcData = 0;
    // Total samples available = new samples plus the retained filter state.
    long ltotData = pfbParams.fine_channels*pfbParams.elements*(pfbParams.samples + pfbParams.nfft*pfbParams.taps);
    // Offset past the filter-state region at the head of the buffer.
    int start = pfbParams.fine_channels*pfbParams.elements*pfbParams.nfft*pfbParams.taps;
    int countFFT = 0;
    int cpySize = pfbParams.fine_channels*pfbParams.elements*pfbParams.samples*(2*sizeof(char));
    // copy data to device
    //CUDASafeCallWithCleanUp(hipMemcpy(g_pcInputData_d, inputData_h, g_iSizeRead, hipMemcpyHostToDevice)); //g_iSizeRead = samples*coarse_channels*elements*(2*sizeof(char));
    CUDASafeCallWithCleanUp(hipMemcpy(&g_pc2Data_d[start], inputData_h, cpySize, hipMemcpyHostToDevice));
    // map - extract channel data from full data stream and load into buffer.
    //map<<<mapGSize, mapBSize>>>(g_pcInputData_d, &g_pc2Data_d[start], pfbParams.select, pfbParams);
    //CUDASafeCallWithCleanUp(hipGetLastError());
    // Begin PFB
    g_pc2DataRead_d = g_pc2Data_d; // p_pc2Data_d contains all the data. DataRead will update with each pass through the PFB.
    int pfb_on = 1; // Enable pfb flag. Extendable.
    if(pfb_on) {
        // PFB: weighted sum over taps, output feeds the FFT input buffer.
        hipLaunchKernelGGL(( PFB_kernel), dim3(g_dimGPFB), dim3(g_dimBPFB), 0, 0, g_pc2DataRead_d, g_pf2FFTIn_d, g_pfPFBCoeff_d, pfbParams);
        CUDASafeCallWithCleanUp(hipDeviceSynchronize());
    } else {
        // Bypass: copy raw samples straight into the FFT input buffer.
        hipLaunchKernelGGL(( CopyDataForFFT), dim3(g_dimGPFB), dim3(g_dimBPFB), 0, 0, g_pc2DataRead_d, g_pf2FFTIn_d);
        CUDASafeCallWithCleanUp(hipGetLastError());
    }
    //float2* fftOutPtr = g_pf2FFTOut_d;
    while(!g_IsProcDone) {
        // FFT one block of g_iNumSubBands * g_iNFFT samples.
        iRet = doFFT();
        if(iRet != EXIT_SUCCESS) {
            (void) fprintf(stderr, "ERROR: FFT failed\n");
            cleanUp();
            return EXIT_FAILURE;
        }
        CUDASafeCallWithCleanUp(hipGetLastError());
        ++countFFT;
        // step input and output buffers.
        g_pf2FFTIn_d += g_iNumSubBands * g_iNFFT;
        g_pf2FFTOut_d += g_iNumSubBands * g_iNFFT;
        lProcData += g_iNumSubBands * g_iNFFT;
        if(lProcData >= ltotData - NUM_TAPS*g_iNumSubBands*g_iNFFT){ // >= process 117 ffts leaving 256 time samples, > process 118 ffts leaving 224 time samples.
            g_IsProcDone = TRUE;
        }
    }
    // prepare next filter: save the unprocessed tail as state for the next call.
    g_pc2DataRead_d += countFFT*g_iNumSubBands*g_iNFFT;
    hipLaunchKernelGGL(( saveData), dim3(saveGSize), dim3(saveBSize), 0, 0, g_pc2DataRead_d, g_pc2Data_d);
    CUDASafeCallWithCleanUp(hipGetLastError());
    // copy back to host.
    // wind back in/out ptrs - should put in another pointer as a process read ptr instead of updating the global ptr.
    g_pf2FFTOut_d = g_pf2FFTOut_d - countFFT*g_iNumSubBands*g_iNFFT;
    g_pf2FFTIn_d = g_pf2FFTIn_d - countFFT*g_iNumSubBands*g_iNFFT;
    int outDataSize = countFFT * g_iNumSubBands * g_iNFFT;
    //CUDASafeCallWithCleanUp(hipMemcpy(outputData_h, fftOutPtr, outDataSize*sizeof(hipfftComplex), hipMemcpyDeviceToHost));
    CUDASafeCallWithCleanUp(hipMemcpy(outputData_h, g_pf2FFTOut_d, outDataSize*sizeof(hipfftComplex), hipMemcpyDeviceToHost));
    return iRet;
}
// Zero the leading filter-state region of the device data buffer
// (nfft*taps samples for every fine_channel/element pair).
void flushBuffer(params pfbParams) {
    int stateSamples = pfbParams.nfft * pfbParams.taps;
    int start = pfbParams.fine_channels * pfbParams.elements * stateSamples;
    CUDASafeCallWithCleanUp(hipMemset((void *) g_pc2Data_d, 0, start * 2 * sizeof(char)));
}
// return true or false upon successful setup.
// Select and configure the GPU, load the PFB filter coefficients from disk,
// allocate all device buffers, set kernel launch geometry, and create the
// FFT plan. Returns EXIT_SUCCESS, or EXIT_FAILURE on any error.
int initPFB(int iCudaDevice, params pfbParams){
    int iRet = EXIT_SUCCESS;
    // set pfb params from input parameters.
    pfbParams.subbands = pfbParams.elements*pfbParams.fine_channels;
    g_iNFFT = pfbParams.nfft;
    g_iNTaps = pfbParams.taps;
    g_iNumSubBands = pfbParams.subbands; // equal to elements*fine_channels. (The fine channels are the channels processed.)
    g_iSizeRead = pfbParams.samples*pfbParams.coarse_channels*pfbParams.elements*(2*sizeof(char));
    char* coeffLoc = pfbParams.coeffPath;
    int iDevCount = 0;
    hipDeviceProp_t stDevProp = {0};
    hipfftResult iCUFFTRet = HIPFFT_SUCCESS;
    int i = 0;
    //Register signal handlers?
    /********************************************/
    /* Look for eligable Cuda Device and select */
    /********************************************/
    (void) fprintf(stdout, "Querying CUDA devices.\n");
    (void) hipGetDeviceCount(&iDevCount);
    if (0 == iDevCount) {
        (void) fprintf(stderr, "ERROR: No CUDA-capable device found!\n");
        return EXIT_FAILURE;
    }
    // Look for requested device (if applicable)
    if (iCudaDevice >= iDevCount) {
        (void) fprintf(stderr,
                "ERROR: Requested device %d not found in present %d device list.\n",
                iCudaDevice,
                iDevCount);
        return EXIT_FAILURE;
    }
    // Query devices and setup selected device.
    for(i = 0; i < iDevCount; i++) {
        CUDASafeCallWithCleanUp(hipGetDeviceProperties(&stDevProp, i));
        printf("\tDevice %d: %s, Compute Capability %d.%d, %d physical threads %s\n",
                i,
                stDevProp.name, stDevProp.major, stDevProp.minor,
                stDevProp.multiProcessorCount * stDevProp.maxThreadsPerMultiProcessor,
                (iCudaDevice == i) ? "<<SELECTED>>" : "");
    }
    CUDASafeCallWithCleanUp(hipSetDevice(iCudaDevice));
    // Setup block and thread paramters
    // BUGFIX: query the device that was actually selected (was hard-coded to 0).
    CUDASafeCallWithCleanUp(hipGetDeviceProperties(&stDevProp, iCudaDevice));
    g_iMaxThreadsPerBlock = stDevProp.maxThreadsPerBlock;
    g_iMaxPhysThreads = stDevProp.multiProcessorCount * stDevProp.maxThreadsPerMultiProcessor;
    // Check if valid operation lengths. i.e. The input buffer is long enough (should this be done here or elsewhere?)
    // Set malloc size - lTotCUDAMalloc is used only to calculate the total amount of memory not used for the allocation.
    size_t cudaMem_total, cudaMem_available;
    size_t lTotCUDAMalloc = 0;
    hipMemGetInfo(&cudaMem_available, &cudaMem_total);
    lTotCUDAMalloc += g_iSizeRead; // size data
    // BUGFIX: sizeof(float(2)) evaluated to sizeof(float) (4 bytes); the FFT
    // buffers hold float2 elements (8 bytes each).
    lTotCUDAMalloc += (g_iNumSubBands * pfbParams.samples * sizeof(float2)); // size of FFT input array
    lTotCUDAMalloc += (g_iNumSubBands * pfbParams.samples * sizeof(float2)); // size of FFT output array
    // BUGFIX: the coefficient buffer holds taps*nfft floats per subband (matches sizePFB below).
    lTotCUDAMalloc += (g_iNumSubBands * g_iNTaps * g_iNFFT * sizeof(float)); // size of PFB Coefficients
    // Check CUDA device can handle the memory request
    if(lTotCUDAMalloc > stDevProp.totalGlobalMem) {
        (void) fprintf(stderr,
                "ERROR: Total memory requested on GPU is %g MB of %g possible MB (Total Global Memory: %g MB).\n"
                "\t**** Memory breakdown *****\n"
                "\tInput data buffer:\t%g MB\n"
                "\tFFT in array:\t%g MB\n"
                "\tFFT out array:\t%g MB\n"
                "\tPFB Coefficients: %f KB\n",
                ((float) lTotCUDAMalloc) / (1024*1024),
                ((float) cudaMem_available) / (1024*1024), //stDevProp.totalGlobalMem
                ((float) cudaMem_total) / (1024*1024),
                ((float) g_iSizeRead) / (1024 * 1024),
                ((float) g_iNumSubBands * pfbParams.samples * sizeof(float2)) / (1024 * 1024),
                ((float) g_iNumSubBands * pfbParams.samples * sizeof(float2)) / (1024 * 1024),
                ((float) g_iNumSubBands * g_iNTaps * g_iNFFT * sizeof(float)));
        return EXIT_FAILURE;
    }
    // print memory usage report.
    (void) fprintf(stdout,
            "INFO: Total memory requested on GPU is %g MB of %g possible MB (Total Global Memory: %g MB).\n"
            "\t**** Memory breakdown ****\n"
            "\tInput data buffer:\t%g MB\n"
            "\tFFT in array:\t%g MB\n"
            "\tFFT out array:\t%g MB\n"
            "\tPFB Coefficients: %f KB\n",
            ((float) lTotCUDAMalloc) / (1024*1024),
            ((float) cudaMem_available) / (1024*1024), //stDevProp.totalGlobalMem
            ((float) cudaMem_total) / (1024*1024),
            ((float) g_iSizeRead) / (1024 * 1024),
            ((float) g_iNumSubBands * pfbParams.samples * sizeof(float2)) / (1024 * 1024),
            ((float) g_iNumSubBands * pfbParams.samples * sizeof(float2)) / (1024 * 1024),
            ((float) g_iNumSubBands * g_iNTaps * g_iNFFT * sizeof(float)));
    /*************************/
    /* Load PFB coefficients */
    /*************************/
    (void) fprintf(stdout, "\nSetting up PFB filter coefficients...\n");
    int sizePFB = g_iNumSubBands * g_iNTaps * g_iNFFT * sizeof(float);
    // Allocate memory for PFB coefficients to be read in
    g_pfPFBCoeff = (float *) malloc(sizePFB); // allocate the memory needed for the size of one pfb pass through
    if(NULL == g_pfPFBCoeff) {
        (void) fprintf(stderr, "ERROR: Memory allocation for the PFB coefficients failed. %s\n",
                strerror(errno));
        return EXIT_FAILURE;
    }
    // Read filter coefficients from file
    (void) fprintf(stdout, "\tReading in coefficients...\n");
    (void) sprintf(g_acFileCoeff,
            "%s%s_%s_%d_%d_%d%s",
            coeffLoc,
            FILE_COEFF_PREFIX,
            FILE_COEFF_DATATYPE,
            g_iNTaps,
            g_iNFFT,
            g_iNumSubBands,
            FILE_COEFF_SUFFIX);
    g_iFileCoeff = open(g_acFileCoeff, O_RDONLY);
    if(g_iFileCoeff < EXIT_SUCCESS) {
        (void) fprintf(stderr, "ERROR: Failed to open coefficient file %s. %s\n",
                g_acFileCoeff,
                strerror(errno));
        free(g_pfPFBCoeff);   // don't leak the host buffer on failure
        g_pfPFBCoeff = NULL;
        return EXIT_FAILURE;
    }
    iRet = read(g_iFileCoeff, g_pfPFBCoeff, sizePFB);
    if(iRet != sizePFB) {
        (void) fprintf(stderr, "ERROR: Failed reading filter coefficients. %s\n", strerror(errno));
        (void) close(g_iFileCoeff);   // close the descriptor on the error path too
        free(g_pfPFBCoeff);
        g_pfPFBCoeff = NULL;
        return EXIT_FAILURE;
    }
    (void) close(g_iFileCoeff);
    /********************************************/
    /* Allocate memory and setup on CUDA device */
    /********************************************/
    (void) fprintf(stdout, "\nSetting up CUDA device.\n");
    //malloc map array and copy data to device
    (void) fprintf(stdout, "\tAllocating memory for MAP...\n");
    // creates a size that is paddedd in the front to store the filter state. Worth one 256 (nfft*taps) time sample amount of data
    int sizeMap = pfbParams.samples * pfbParams.fine_channels * pfbParams.elements * (2*sizeof(char)) + pfbParams.fine_channels*pfbParams.elements*pfbParams.nfft*pfbParams.taps * (2*sizeof(char));
    CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pcInputData_d, g_iSizeRead));
    CUDASafeCallWithCleanUp(hipMemset((void *) g_pcInputData_d, 0, g_iSizeRead));
    CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pc2Data_d, sizeMap));
    CUDASafeCallWithCleanUp(hipMemset((void *) g_pc2Data_d, 0, sizeMap));
    // allocate memory for pfb coefficients on GPU
    (void) fprintf(stdout, "\tAllocating memory for PFB...\n");
    CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pfPFBCoeff_d, sizePFB));
    // copy coeff to device
    (void) fprintf(stdout, "\tCopying filter coefficients...\n");
    CUDASafeCallWithCleanUp(hipMemcpy(g_pfPFBCoeff_d, g_pfPFBCoeff, sizePFB, hipMemcpyHostToDevice));
    // allocate memory for FFT in and out arrays
    (void) fprintf(stdout, "\tAllocate memory for FFT arrays...\n");
    //int sizeDataBlock_in = g_iNumSubBands * g_iNFFT * sizeof(float2);
    int sizeDataBlock_in = pfbParams.samples*g_iNumSubBands * sizeof(float2);
    int sizeTotalDataBlock_out = pfbParams.samples*g_iNumSubBands * sizeof(float2); // output fft array same size as output data for convinence the full size is not used. In the pfb function the output data will be the fft counter times block amount in the fft.
    CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf2FFTIn_d, sizeDataBlock_in));
    CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf2FFTOut_d, sizeTotalDataBlock_out)); // goal will be to update the output ptr each time it fires.
    CUDASafeCallWithCleanUp(hipMemset((void *) g_pf2FFTIn_d, 0, sizeDataBlock_in));
    CUDASafeCallWithCleanUp(hipMemset((void *) g_pf2FFTOut_d, 0, sizeTotalDataBlock_out));
    // set kernel parameters
    (void) fprintf(stdout, "\tSetting kernel parameters...\n");
    if(g_iNFFT < g_iMaxThreadsPerBlock) {
        g_dimBPFB.x = g_iNFFT;
        g_dimBCopy.x = g_iNFFT;
    } else {
        g_dimBPFB.x = g_iMaxThreadsPerBlock;
        g_dimBCopy.x = g_iMaxThreadsPerBlock;
    }
    g_dimGPFB.x = (g_iNumSubBands * g_iNFFT) / g_dimBPFB.x;
    g_dimGCopy.x = (g_iNumSubBands * g_iNFFT) / g_dimBCopy.x;
    g_dimGPFB.y = 125;
    g_dimGCopy.y = 125;
    // map kernel params
    mapGSize.x = pfbParams.samples;
    mapGSize.y = pfbParams.fine_channels;
    mapGSize.z = 1;
    mapBSize.x = 1;
    mapBSize.y = pfbParams.elements;
    mapBSize.z = 1;
    // copy kernel params
    saveGSize.x = pfbParams.fine_channels;
    saveGSize.y = pfbParams.nfft*pfbParams.taps;
    saveGSize.z = 1;
    saveBSize.x = pfbParams.elements;
    saveBSize.y = 1;
    saveBSize.z = 1;
    (void) fprintf(stdout, "\t\tPFB Kernel Parmaters are:\n\t\tgridDim(%d,%d,%d) blockDim(%d,%d,%d)\n\n",
            g_dimGPFB.x, g_dimGPFB.y, g_dimGPFB.z,
            g_dimBPFB.x, g_dimBPFB.y, g_dimBPFB.z);
    (void) fprintf(stdout, "\t\tMAP Kernel Parmaters are:\n\t\tgridDim(%d,%d,%d) blockDim(%d,%d,%d)\n\n",
            mapGSize.x, mapGSize.y, mapGSize.z,
            mapBSize.x, mapBSize.y, mapBSize.z);
    (void) fprintf(stdout, "\t\tSave Kernel Parmaters are:\n\t\tgridDim(%d,%d,%d) blockDim(%d,%d,%d)\n",
            saveGSize.x, saveGSize.y, saveGSize.z,
            saveBSize.x, saveBSize.y, saveBSize.z);
    // create a CUFFT plan
    (void) fprintf(stdout, "\tCreating cuFFT plan...\n");
    iCUFFTRet = hipfftPlanMany(&g_stPlan,
            FFTPLAN_RANK,
            &g_iNFFT,
            &g_iNFFT,
            FFTPLAN_ISTRIDE,
            FFTPLAN_IDIST,
            &g_iNFFT,
            FFTPLAN_OSTRIDE,
            FFTPLAN_ODIST,
            HIPFFT_C2C,
            FFTPLAN_BATCH);
    if(iCUFFTRet != HIPFFT_SUCCESS) {
        (void) fprintf(stderr, "ERROR: Plan creation failed!\n");
        return EXIT_FAILURE;
    }
    fprintf(stdout, "\nDevice for PFB successfully initialized!\n");
    return EXIT_SUCCESS;
}
// Tear down the current device context. Returns EXIT_SUCCESS/EXIT_FAILURE.
int resetDevice() {
    if (hipDeviceReset() != hipSuccess) {
        fprintf(stderr, "Device Reset Failed.\n");
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;
}
/* do fft on pfb data */
int doFFT()
{
hipfftResult iCUFFTRet = HIPFFT_SUCCESS;
/* execute plan */
iCUFFTRet = hipfftExecC2C(g_stPlan,
(hipfftComplex*) g_pf2FFTIn_d,
(hipfftComplex*) g_pf2FFTOut_d,
HIPFFT_FORWARD);
if (iCUFFTRet != HIPFFT_SUCCESS)
{
(void) fprintf(stderr, "ERROR! FFT failed!\n");
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
/* Error-check helper: on any HIP failure, print the failing file/line and
 * error string, invoke the supplied clean-up callback to release resources,
 * and terminate the process. No-op on hipSuccess. */
void __CUDASafeCallWithCleanUp(hipError_t iRet,
                               const char* pcFile,
                               const int iLine,
                               void (*pcleanUp)(void))
{
    if (hipSuccess == iRet)
    {
        return;
    }
    (void) fprintf(stderr,
                   "ERROR: File <%s>, Line %d: %s\n",
                   pcFile,
                   iLine,
                   hipGetErrorString(iRet));
    /* free resources */
    (*pcleanUp)();
    exit(EXIT_FAILURE);
}
/* Release every buffer owned by this module and destroy the FFT plan.
 * All pointers are NULLed after release so the function is safe to call
 * more than once. */
void cleanUp() {
    /* free resources */
    if (g_pc2InBuf != NULL) {
        free(g_pc2InBuf);
        g_pc2InBuf = NULL;
    }
    if (g_pc2Data_d != NULL) {
        (void) hipFree(g_pc2Data_d);
        g_pc2Data_d = NULL;
    }
    /* BUGFIX: g_pcInputData_d is allocated in initPFB but was never freed. */
    if (g_pcInputData_d != NULL) {
        (void) hipFree(g_pcInputData_d);
        g_pcInputData_d = NULL;
    }
    if (g_pf2FFTIn_d != NULL) {
        (void) hipFree(g_pf2FFTIn_d);
        g_pf2FFTIn_d = NULL;
    }
    if (g_pf2FFTOut_d != NULL) {
        (void) hipFree(g_pf2FFTOut_d);
        g_pf2FFTOut_d = NULL;
    }
    /* free(NULL) is a no-op, but NULL the pointers to avoid double-frees. */
    if (g_pfPFBCoeff != NULL) {
        free(g_pfPFBCoeff);
        g_pfPFBCoeff = NULL;
    }
    if (g_pfPFBCoeff_d != NULL) {
        (void) hipFree(g_pfPFBCoeff_d);
        g_pfPFBCoeff_d = NULL;
    }
    /* destroy plan */
    /* TODO: check for plan */
    (void) hipfftDestroy(g_stPlan);
    return;
}
| 30b22fb119fbccae7762b3e7a07726a1f04ca610.cu | #ifdef __cplusplus
extern "C" {
#include "pfb.h"
}
#endif
// data ptrs (the _d suffix marks device-resident buffers allocated with
// cudaMalloc in initPFB; unsuffixed pointers are host allocations)
char2* g_pc2InBuf = NULL;
char2* g_pc2InBufRead = NULL;
char2* g_pc2Data_d = NULL;       // state-padded input buffer on the device
char2* g_pc2DataRead_d = NULL;   // moving read cursor into g_pc2Data_d
float2* g_pf2FFTIn_d = NULL;     // PFB output / FFT input
float2* g_pf2FFTOut_d = NULL;    // FFT output, copied back to the host
float *g_pfPFBCoeff = NULL;      // filter coefficients (host copy, read from file)
float *g_pfPFBCoeff_d = NULL;    // filter coefficients (device copy)
char* g_pcInputData_d = NULL;    // raw input staging buffer (currently unused by runPFB)
// pfb params
int g_iNFFT = DEF_LEN_SPEC;
int g_iNTaps = NUM_TAPS;
int g_iNumSubBands = PFB_CHANNELS * DEF_NUM_ELEMENTS;
// process flags
int g_IsDataReadDone = FALSE;
int g_IsProcDone = FALSE;
// size vars
int g_iSizeFile = 0;
int g_iReadCount = 0;
int g_iSizeRead = DEF_SIZE_READ;
int g_iFileCoeff = 0;            // file descriptor for the coefficient file
char g_acFileCoeff[256] = {0};   // coefficient file path, built in initPFB
// GPU params (launch geometry, filled in by initPFB)
dim3 g_dimBPFB(1, 1, 1);
dim3 g_dimGPFB(1, 1);
dim3 g_dimBCopy(1, 1, 1);
dim3 g_dimGCopy(1, 1);
dim3 mapGSize(1,1,1);
dim3 mapBSize(1,1,1);
dim3 saveGSize(1, 1, 1 ); // (5, 256, 1)
dim3 saveBSize(1, 1, 1); // (64, 1, 1)
cufftHandle g_stPlan = {0};
int g_iMaxThreadsPerBlock = 0;
int g_iMaxPhysThreads = 0;
// Run one polyphase-filter-bank + FFT pass over inputData_h and copy the
// resulting spectra to outputData_h. The device buffer g_pc2Data_d keeps a
// leading region of nfft*taps samples of filter state from the previous call;
// new input is copied in after that region, and saveData copies the tail back
// to the front for the next call. Returns EXIT_SUCCESS/EXIT_FAILURE.
int runPFB(signed char* inputData_h, float* outputData_h, params pfbParams) {
    g_IsProcDone = FALSE;
    int iRet = EXIT_SUCCESS;
    long lProcData = 0;
    // Total samples available = new samples plus the retained filter state.
    long ltotData = pfbParams.fine_channels*pfbParams.elements*(pfbParams.samples + pfbParams.nfft*pfbParams.taps);
    // Offset past the filter-state region at the head of the buffer.
    int start = pfbParams.fine_channels*pfbParams.elements*pfbParams.nfft*pfbParams.taps;
    int countFFT = 0;
    int cpySize = pfbParams.fine_channels*pfbParams.elements*pfbParams.samples*(2*sizeof(char));
    // copy data to device
    //CUDASafeCallWithCleanUp(cudaMemcpy(g_pcInputData_d, inputData_h, g_iSizeRead, cudaMemcpyHostToDevice)); //g_iSizeRead = samples*coarse_channels*elements*(2*sizeof(char));
    CUDASafeCallWithCleanUp(cudaMemcpy(&g_pc2Data_d[start], inputData_h, cpySize, cudaMemcpyHostToDevice));
    // map - extract channel data from full data stream and load into buffer.
    //map<<<mapGSize, mapBSize>>>(g_pcInputData_d, &g_pc2Data_d[start], pfbParams.select, pfbParams);
    //CUDASafeCallWithCleanUp(cudaGetLastError());
    // Begin PFB
    g_pc2DataRead_d = g_pc2Data_d; // p_pc2Data_d contains all the data. DataRead will update with each pass through the PFB.
    int pfb_on = 1; // Enable pfb flag. Extendable.
    if(pfb_on) {
        // PFB: weighted sum over taps, output feeds the FFT input buffer.
        PFB_kernel<<<g_dimGPFB, g_dimBPFB>>>(g_pc2DataRead_d, g_pf2FFTIn_d, g_pfPFBCoeff_d, pfbParams);
        // BUGFIX: cudaThreadSynchronize() is deprecated; use the equivalent
        // cudaDeviceSynchronize() instead.
        CUDASafeCallWithCleanUp(cudaDeviceSynchronize());
    } else {
        // Bypass: copy raw samples straight into the FFT input buffer.
        CopyDataForFFT<<<g_dimGPFB, g_dimBPFB>>>(g_pc2DataRead_d, g_pf2FFTIn_d);
        CUDASafeCallWithCleanUp(cudaGetLastError());
    }
    //float2* fftOutPtr = g_pf2FFTOut_d;
    while(!g_IsProcDone) {
        // FFT one block of g_iNumSubBands * g_iNFFT samples.
        iRet = doFFT();
        if(iRet != EXIT_SUCCESS) {
            (void) fprintf(stderr, "ERROR: FFT failed\n");
            cleanUp();
            return EXIT_FAILURE;
        }
        CUDASafeCallWithCleanUp(cudaGetLastError());
        ++countFFT;
        // step input and output buffers.
        g_pf2FFTIn_d += g_iNumSubBands * g_iNFFT;
        g_pf2FFTOut_d += g_iNumSubBands * g_iNFFT;
        lProcData += g_iNumSubBands * g_iNFFT;
        if(lProcData >= ltotData - NUM_TAPS*g_iNumSubBands*g_iNFFT){ // >= process 117 ffts leaving 256 time samples, > process 118 ffts leaving 224 time samples.
            g_IsProcDone = TRUE;
        }
    }
    // prepare next filter: save the unprocessed tail as state for the next call.
    g_pc2DataRead_d += countFFT*g_iNumSubBands*g_iNFFT;
    saveData<<<saveGSize, saveBSize>>>(g_pc2DataRead_d, g_pc2Data_d);
    CUDASafeCallWithCleanUp(cudaGetLastError());
    // copy back to host.
    // wind back in/out ptrs - should put in another pointer as a process read ptr instead of updating the global ptr.
    g_pf2FFTOut_d = g_pf2FFTOut_d - countFFT*g_iNumSubBands*g_iNFFT;
    g_pf2FFTIn_d = g_pf2FFTIn_d - countFFT*g_iNumSubBands*g_iNFFT;
    int outDataSize = countFFT * g_iNumSubBands * g_iNFFT;
    //CUDASafeCallWithCleanUp(cudaMemcpy(outputData_h, fftOutPtr, outDataSize*sizeof(cufftComplex), cudaMemcpyDeviceToHost));
    CUDASafeCallWithCleanUp(cudaMemcpy(outputData_h, g_pf2FFTOut_d, outDataSize*sizeof(cufftComplex), cudaMemcpyDeviceToHost));
    return iRet;
}
// Zero the leading filter-state region of the device data buffer
// (nfft*taps samples for every fine_channel/element pair).
void flushBuffer(params pfbParams) {
    int stateSamples = pfbParams.nfft * pfbParams.taps;
    int start = pfbParams.fine_channels * pfbParams.elements * stateSamples;
    CUDASafeCallWithCleanUp(cudaMemset((void *) g_pc2Data_d, 0, start * 2 * sizeof(char)));
}
// return true or false upon successful setup.
// Initialize the PFB engine on the requested CUDA device.
//
// Selects and reports the device, estimates the GPU memory footprint,
// loads the filter coefficients from disk, allocates and zeroes all device
// buffers, derives the kernel launch geometry, and creates the batched
// cuFFT plan.
//
// iCudaDevice - index of the CUDA device to run on.
// pfbParams   - PFB configuration (samples, channels, elements, nfft, taps,
//               coefficient file location).
// Returns EXIT_SUCCESS on success, EXIT_FAILURE otherwise. Error paths rely
// on the caller invoking cleanUp() to release partially allocated resources.
int initPFB(int iCudaDevice, params pfbParams){
    int iRet = EXIT_SUCCESS;

    // Derive global processing parameters from the input parameter set.
    pfbParams.subbands = pfbParams.elements*pfbParams.fine_channels;
    g_iNFFT = pfbParams.nfft;
    g_iNTaps = pfbParams.taps;
    g_iNumSubBands = pfbParams.subbands; // equal to elements*fine_channels. (The fine channels are the channels processed.)
    g_iSizeRead = pfbParams.samples*pfbParams.coarse_channels*pfbParams.elements*(2*sizeof(char));

    char* coeffLoc = pfbParams.coeffPath;

    int iDevCount = 0;
    cudaDeviceProp stDevProp = {0};
    cufftResult iCUFFTRet = CUFFT_SUCCESS;
    int i = 0;

    /********************************************/
    /* Look for eligible Cuda Device and select */
    /********************************************/
    (void) fprintf(stdout, "Querying CUDA devices.\n");
    (void) cudaGetDeviceCount(&iDevCount);
    if (0 == iDevCount) {
        (void) fprintf(stderr, "ERROR: No CUDA-capable device found!\n");
        return EXIT_FAILURE;
    }
    // Validate the requested device index against the device count.
    if (iCudaDevice >= iDevCount) {
        // FIX: corrected typo in the error message ("no found" -> "not found").
        (void) fprintf(stderr,
                       "ERROR: Requested device %d not found in present %d device list.\n",
                       iCudaDevice,
                       iDevCount);
        return EXIT_FAILURE;
    }
    // Report every device and mark the one about to be selected.
    for(i = 0; i < iDevCount; i++) {
        CUDASafeCallWithCleanUp(cudaGetDeviceProperties(&stDevProp, i));
        printf("\tDevice %d: %s, Compute Capability %d.%d, %d physical threads %s\n",
               i,
               stDevProp.name, stDevProp.major, stDevProp.minor,
               stDevProp.multiProcessorCount * stDevProp.maxThreadsPerMultiProcessor,
               (iCudaDevice == i) ? "<<SELECTED>>" : "");
    }
    CUDASafeCallWithCleanUp(cudaSetDevice(iCudaDevice));

    // Record launch-geometry limits of the device actually selected.
    // FIX: previously queried device 0 regardless of which device was chosen.
    CUDASafeCallWithCleanUp(cudaGetDeviceProperties(&stDevProp, iCudaDevice));
    g_iMaxThreadsPerBlock = stDevProp.maxThreadsPerBlock;
    g_iMaxPhysThreads = stDevProp.multiProcessorCount * stDevProp.maxThreadsPerMultiProcessor;

    // Estimate the total device-memory footprint before allocating.
    // lTotCUDAMalloc is only used for this report/check, not for allocation.
    size_t cudaMem_total, cudaMem_available;
    size_t lTotCUDAMalloc = 0;
    cudaMemGetInfo(&cudaMem_available, &cudaMem_total);
    lTotCUDAMalloc += g_iSizeRead; // size of raw input data
    // FIX: sizeof(float(2)) evaluated to sizeof(float) (a functional cast of
    // the value 2); the FFT buffers hold float2 elements, so the estimate was
    // half the real requirement. Use sizeof(float2), matching the report below.
    lTotCUDAMalloc += (g_iNumSubBands * pfbParams.samples * sizeof(float2)); // size of FFT input array
    lTotCUDAMalloc += (g_iNumSubBands * pfbParams.samples * sizeof(float2)); // size of FFT output array
    lTotCUDAMalloc += (g_iNumSubBands * g_iNFFT * sizeof(float)); // size of PFB Coefficients

    // Check that the CUDA device can handle the memory request.
    if(lTotCUDAMalloc > stDevProp.totalGlobalMem) {
        (void) fprintf(stderr,
                       "ERROR: Total memory requested on GPU is %g MB of %g possible MB (Total Global Memory: %g MB).\n"
                       "\t**** Memory breakdown *****\n"
                       "\tInput data buffer:\t%g MB\n"
                       "\tFFT in array:\t%g MB\n"
                       "\tFFT out array:\t%g MB\n"
                       "\tPFB Coefficients: %f KB\n",
                       ((float) lTotCUDAMalloc) / (1024*1024),
                       ((float) cudaMem_available) / (1024*1024), //stDevProp.totalGlobalMem
                       ((float) cudaMem_total) / (1024*1024),
                       ((float) g_iSizeRead) / (1024 * 1024),
                       ((float) g_iNumSubBands * pfbParams.samples * sizeof(float2)) / (1024 * 1024),
                       ((float) g_iNumSubBands * pfbParams.samples * sizeof(float2)) / (1024 * 1024),
                       ((float) g_iNumSubBands * g_iNFFT * sizeof(float)));
        return EXIT_FAILURE;
    }

    // Print the memory usage report.
    (void) fprintf(stdout,
                   "INFO: Total memory requested on GPU is %g MB of %g possible MB (Total Global Memory: %g MB).\n"
                   "\t**** Memory breakdown ****\n"
                   "\tInput data buffer:\t%g MB\n"
                   "\tFFT in array:\t%g MB\n"
                   "\tFFT out array:\t%g MB\n"
                   "\tPFB Coefficients: %f KB\n",
                   ((float) lTotCUDAMalloc) / (1024*1024),
                   ((float) cudaMem_available) / (1024*1024), //stDevProp.totalGlobalMem
                   ((float) cudaMem_total) / (1024*1024),
                   ((float) g_iSizeRead) / (1024 * 1024),
                   ((float) g_iNumSubBands * pfbParams.samples * sizeof(float2)) / (1024 * 1024),
                   ((float) g_iNumSubBands * pfbParams.samples * sizeof(float2)) / (1024 * 1024),
                   ((float) g_iNumSubBands * g_iNFFT * sizeof(float)));

    /*************************/
    /* Load PFB coefficients */
    /*************************/
    (void) fprintf(stdout, "\nSetting up PFB filter coefficients...\n");
    int sizePFB = g_iNumSubBands * g_iNTaps * g_iNFFT * sizeof(float);

    // Host-side staging buffer holding one full set of filter coefficients.
    g_pfPFBCoeff = (float *) malloc(sizePFB);
    if(NULL == g_pfPFBCoeff) {
        (void) fprintf(stderr, "ERROR: Memory allocation for the PFB coefficients failed. %s\n",
                       strerror(errno));
        return EXIT_FAILURE;
    }

    // Build the coefficient file name from the filter geometry and read it in.
    (void) fprintf(stdout, "\tReading in coefficients...\n");
    (void) sprintf(g_acFileCoeff,
                   "%s%s_%s_%d_%d_%d%s",
                   coeffLoc,
                   FILE_COEFF_PREFIX,
                   FILE_COEFF_DATATYPE,
                   g_iNTaps,
                   g_iNFFT,
                   g_iNumSubBands,
                   FILE_COEFF_SUFFIX);
    g_iFileCoeff = open(g_acFileCoeff, O_RDONLY);
    if(g_iFileCoeff < EXIT_SUCCESS) {
        (void) fprintf(stderr, "ERROR: Failed to open coefficient file %s. %s\n",
                       g_acFileCoeff,
                       strerror(errno));
        return EXIT_FAILURE;
    }
    iRet = read(g_iFileCoeff, g_pfPFBCoeff, sizePFB);
    if(iRet != sizePFB) {
        (void) fprintf(stderr, "ERROR: Failed reading filter coefficients. %s\n", strerror(errno));
        // FIX: close the descriptor on the error path (was leaked).
        (void) close(g_iFileCoeff);
        return EXIT_FAILURE;
    }
    (void) close(g_iFileCoeff);

    /********************************************/
    /* Allocate memory and setup on CUDA device */
    /********************************************/
    (void) fprintf(stdout, "\nSetting up CUDA device.\n");

    // malloc map array and copy data to device
    (void) fprintf(stdout, "\tAllocating memory for MAP...\n");
    // sizeMap is padded at the front to store the filter state: one
    // nfft*taps time-sample block of fine_channels*elements complex data.
    int sizeMap = pfbParams.samples * pfbParams.fine_channels * pfbParams.elements * (2*sizeof(char)) + pfbParams.fine_channels*pfbParams.elements*pfbParams.nfft*pfbParams.taps * (2*sizeof(char));
    CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pcInputData_d, g_iSizeRead));
    CUDASafeCallWithCleanUp(cudaMemset((void *) g_pcInputData_d, 0, g_iSizeRead));
    CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pc2Data_d, sizeMap));
    CUDASafeCallWithCleanUp(cudaMemset((void *) g_pc2Data_d, 0, sizeMap));

    // Allocate memory for PFB coefficients on the GPU and copy them over.
    (void) fprintf(stdout, "\tAllocating memory for PFB...\n");
    CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pfPFBCoeff_d, sizePFB));
    (void) fprintf(stdout, "\tCopying filter coefficients...\n");
    CUDASafeCallWithCleanUp(cudaMemcpy(g_pfPFBCoeff_d, g_pfPFBCoeff, sizePFB, cudaMemcpyHostToDevice));

    // Allocate memory for FFT in and out arrays. The output array is sized
    // the same as the input for convenience; the full size is not used (the
    // PFB loop advances the output pointer one FFT block per iteration).
    (void) fprintf(stdout, "\tAllocate memory for FFT arrays...\n");
    int sizeDataBlock_in = pfbParams.samples*g_iNumSubBands * sizeof(float2);
    int sizeTotalDataBlock_out = pfbParams.samples*g_iNumSubBands * sizeof(float2);
    CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pf2FFTIn_d, sizeDataBlock_in));
    CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pf2FFTOut_d, sizeTotalDataBlock_out));
    CUDASafeCallWithCleanUp(cudaMemset((void *) g_pf2FFTIn_d, 0, sizeDataBlock_in));
    CUDASafeCallWithCleanUp(cudaMemset((void *) g_pf2FFTOut_d, 0, sizeTotalDataBlock_out));

    // Set kernel launch parameters: one thread per FFT point, capped at the
    // device's max block size; grid covers all sub-bands.
    (void) fprintf(stdout, "\tSetting kernel parameters...\n");
    if(g_iNFFT < g_iMaxThreadsPerBlock) {
        g_dimBPFB.x = g_iNFFT;
        g_dimBCopy.x = g_iNFFT;
    } else {
        g_dimBPFB.x = g_iMaxThreadsPerBlock;
        g_dimBCopy.x = g_iMaxThreadsPerBlock;
    }
    g_dimGPFB.x = (g_iNumSubBands * g_iNFFT) / g_dimBPFB.x;
    g_dimGCopy.x = (g_iNumSubBands * g_iNFFT) / g_dimBCopy.x;
    g_dimGPFB.y = 125;
    g_dimGCopy.y = 125;

    // map kernel params
    mapGSize.x = pfbParams.samples;
    mapGSize.y = pfbParams.fine_channels;
    mapGSize.z = 1;
    mapBSize.x = 1;
    mapBSize.y = pfbParams.elements;
    mapBSize.z = 1;

    // save (copy) kernel params
    saveGSize.x = pfbParams.fine_channels;
    saveGSize.y = pfbParams.nfft*pfbParams.taps;
    saveGSize.z = 1;
    saveBSize.x = pfbParams.elements;
    saveBSize.y = 1;
    saveBSize.z = 1;

    (void) fprintf(stdout, "\t\tPFB Kernel Parmaters are:\n\t\tgridDim(%d,%d,%d) blockDim(%d,%d,%d)\n\n",
                   g_dimGPFB.x, g_dimGPFB.y, g_dimGPFB.z,
                   g_dimBPFB.x, g_dimBPFB.y, g_dimBPFB.z);
    (void) fprintf(stdout, "\t\tMAP Kernel Parmaters are:\n\t\tgridDim(%d,%d,%d) blockDim(%d,%d,%d)\n\n",
                   mapGSize.x, mapGSize.y, mapGSize.z,
                   mapBSize.x, mapBSize.y, mapBSize.z);
    (void) fprintf(stdout, "\t\tSave Kernel Parmaters are:\n\t\tgridDim(%d,%d,%d) blockDim(%d,%d,%d)\n",
                   saveGSize.x, saveGSize.y, saveGSize.z,
                   saveBSize.x, saveBSize.y, saveBSize.z);

    // Create a batched complex-to-complex CUFFT plan.
    (void) fprintf(stdout, "\tCreating cuFFT plan...\n");
    iCUFFTRet = cufftPlanMany(&g_stPlan,
                              FFTPLAN_RANK,
                              &g_iNFFT,
                              &g_iNFFT,
                              FFTPLAN_ISTRIDE,
                              FFTPLAN_IDIST,
                              &g_iNFFT,
                              FFTPLAN_OSTRIDE,
                              FFTPLAN_ODIST,
                              CUFFT_C2C,
                              FFTPLAN_BATCH);
    if(iCUFFTRet != CUFFT_SUCCESS) {
        (void) fprintf(stderr, "ERROR: Plan creation failed!\n");
        return EXIT_FAILURE;
    }

    fprintf(stdout, "\nDevice for PFB successfully initialized!\n");
    return EXIT_SUCCESS;
}
// Reset the currently selected CUDA device, destroying its context.
// Returns EXIT_SUCCESS on success, EXIT_FAILURE on any CUDA error.
int resetDevice() {
    if (cudaDeviceReset() != cudaSuccess) {
        fprintf(stderr, "Device Reset Failed.\n");
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;
}
/* do fft on pfb data */
int doFFT()
{
cufftResult iCUFFTRet = CUFFT_SUCCESS;
/* execute plan */
iCUFFTRet = cufftExecC2C(g_stPlan,
(cufftComplex*) g_pf2FFTIn_d,
(cufftComplex*) g_pf2FFTOut_d,
CUFFT_FORWARD);
if (iCUFFTRet != CUFFT_SUCCESS)
{
(void) fprintf(stderr, "ERROR! FFT failed!\n");
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
/* Backing implementation of the CUDASafeCallWithCleanUp() macro: on any CUDA
 * error, report file/line/message, run the supplied clean-up callback to
 * release resources, and terminate the process. No-op on cudaSuccess. */
void __CUDASafeCallWithCleanUp(cudaError_t iRet,
                               const char* pcFile,
                               const int iLine,
                               void (*pcleanUp)(void))
{
    if (cudaSuccess == iRet)
    {
        return;
    }
    (void) fprintf(stderr,
                   "ERROR: File <%s>, Line %d: %s\n",
                   pcFile,
                   iLine,
                   cudaGetErrorString(iRet));
    /* free resources before exiting */
    (*pcleanUp)();
    exit(EXIT_FAILURE);
}
// Release all host and device resources owned by the PFB engine.
// Every pointer is guarded and reset to NULL so the function is safe to
// call more than once (it is invoked both on normal shutdown and from
// __CUDASafeCallWithCleanUp on error).
void cleanUp() {
    /* host buffers */
    if (g_pc2InBuf != NULL) {
        free(g_pc2InBuf);
        g_pc2InBuf = NULL;
    }
    // FIX: guard and reset the coefficient buffer; it was freed
    // unconditionally without being nulled, so a second call to cleanUp()
    // would double-free it.
    if (g_pfPFBCoeff != NULL) {
        free(g_pfPFBCoeff);
        g_pfPFBCoeff = NULL;
    }
    /* device buffers */
    // FIX: g_pcInputData_d is allocated in initPFB() but was never freed here.
    if (g_pcInputData_d != NULL) {
        (void) cudaFree(g_pcInputData_d);
        g_pcInputData_d = NULL;
    }
    if (g_pc2Data_d != NULL) {
        (void) cudaFree(g_pc2Data_d);
        g_pc2Data_d = NULL;
    }
    if (g_pf2FFTIn_d != NULL) {
        (void) cudaFree(g_pf2FFTIn_d);
        g_pf2FFTIn_d = NULL;
    }
    if (g_pf2FFTOut_d != NULL) {
        (void) cudaFree(g_pf2FFTOut_d);
        g_pf2FFTOut_d = NULL;
    }
    if (g_pfPFBCoeff_d != NULL) {
        (void) cudaFree(g_pfPFBCoeff_d);
        g_pfPFBCoeff_d = NULL;
    }
    /* destroy plan */
    /* TODO: check that the plan was actually created before destroying it */
    (void) cufftDestroy(g_stPlan);
    return;
}
|
44b27c419dfedcac010e55dd7f735d43122afeda.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ATen/ATen.h"
#include "ATen/hip/HIPApplyUtils.cuh"
#include "ATen/hip/HIPContext.h"
#include "ATen/NativeFunctions.h"
#include "ATen/TensorUtils.h"
#include "ATen/Utils.h"
#include "c10/util/Exception.h"
#include <THH/THHAtomics.cuh>
#include <THH/THHGeneral.h>
#include "THH/THHNumerics.cuh"
#include <ATen/native/hip/LaunchUtils.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <algorithm>
#include <cfloat>
#include <cmath>
#define START_IND(a,b,c) (int)::floor((float)(a * c) / b)
#define END_IND(a,b,c) (int)::ceil((float)((a + 1) * c) / b)
#define START_IND_INT(a,b,c) ((a * c) / b)
#define END_IND_INT(a,b,c) (((a + 1) * c + b - 1) / b)
// #define START_IND(a,b,c) a * c / b
// #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0
#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
#define BLOCK_STRIDE 2 // increasing block_stride to lower # of blocks launched
namespace at {
namespace native {
namespace {
// 4d tensor B x D x H x W
// All kernels view batch dim B and feature dim D as collapsed.
/*
* Description:
* this function adaptively average pools an input 4D tensor along dimensions 2 and 3
* 4D input, 4D output
*/
// Forward adaptive average pooling, NCHW layout.
// Launch layout (see host launcher below): blockIdx.x indexes one collapsed
// (batch*feature) plane; threads tile the output as W -> threadIdx.x,
// H -> blockIdx.y*blockDim.y + threadIdx.y, striding to cover the plane.
// Output planes are assumed densely packed (osizeH*osizeW per plane); the
// input plane is addressed through explicit strides istrideD/H/W.
template <typename T>
__global__ void adaptive_average_pool(T *input, T *output,
int isizeH, int isizeW,
int osizeH, int osizeW,
int64_t istrideD, int64_t istrideH, int64_t istrideW)
{
// iterators on output pixels
int oh, ow;
// select input/output plane based on thread/block ID
int o_plane = blockIdx.x;
int i_plane = o_plane;
output = output + o_plane*osizeH*osizeW;
input = input + i_plane*istrideD;
// grid-stride ranges over the output plane for this thread
int ostartH = blockDim.y*blockIdx.y + threadIdx.y;
int oendH = osizeH;
const int ostepH = blockDim.y*gridDim.y;
int ostartW = threadIdx.x;
int oendW = osizeW;
const int ostepW = blockDim.x;
// For all output pixels...
for(oh = ostartH; oh < oendH; oh += ostepH) {
// input rows [istartH, iendH) pool into output row oh
int istartH = START_IND(oh, osizeH, isizeH);
int iendH = END_IND(oh, osizeH, isizeH);
int kH = iendH - istartH;
for(ow = ostartW; ow < oendW; ow += ostepW) {
int istartW = START_IND(ow, osizeW, isizeW);
int iendW = END_IND(ow, osizeW, isizeW);
int kW = iendW - istartW;
// Compute the average pooling over corresponding input pixels
T *ptr_input = input + istartH*istrideH + istartW*istrideW;
T *ptr_output = output + oh*osizeW + ow;
T sum = ScalarConvert<int, T>::to(0);
int ih, iw;
for(ih = 0; ih < kH; ++ih) {
for(iw = 0; iw < kW; ++iw) {
T val = ptr_input[iw*istrideW];
sum += val;
}
ptr_input += istrideH; // next input line
}
// Update output with the mean over the kH x kW window
*ptr_output = sum / kH / kW;
}
}
}
/*
* Description:
* this function computes the gradInput from gradOutput
*/
// Backward adaptive average pooling, NCHW layout, non-atomic variant.
// blockIdx.x indexes one collapsed (batch*feature) plane; threads tile the
// INPUT as W -> threadIdx.x, H -> blockIdx.y*blockDim.y + threadIdx.y, so
// each input pixel is owned by exactly one thread (no atomics needed).
// Both gradInput and gradOutput planes are assumed densely packed.
template <typename T>
__global__ void adaptive_average_gradinput(
T *gradInput, T *gradOutput,
int isizeH, int isizeW, int osizeH, int osizeW
)
{
// iterators on input pixels
int ih, iw;
// select input/output plane based on thread/block ID
int i_plane = blockIdx.x;
int o_plane = i_plane;
gradOutput = gradOutput + o_plane*osizeH*osizeW;
gradInput = gradInput + i_plane*isizeH*isizeW;
// grid-stride ranges over the input plane for this thread
int istartH = blockDim.y*blockIdx.y + threadIdx.y;
int iendH = isizeH;
int istepH = blockDim.y*gridDim.y;
int istartW = threadIdx.x;
int iendW = isizeW;
int istepW = blockDim.x;
// compute gradInput: each input pixel accumulates the contributions of
// every output pixel whose pooling window covered it.
for(ih = istartH; ih < iendH; ih += istepH) {
int ostartH = START_IND(ih, isizeH, osizeH);
int oendH = END_IND(ih, isizeH, osizeH);
for(iw = istartW; iw < iendW; iw += istepW) {
int ostartW = START_IND(iw, isizeW, osizeW);
int oendW = END_IND(iw, isizeW, osizeW);
// Compute the gradients over corresponding output pixels
T *ptr_gradInput = gradInput + ih*isizeW + iw;
int oh, ow;
for(oh = ostartH; oh < oendH; ++oh) {
// FIX (clarity/hazard): kH and kW were computed as START - END, i.e. the
// NEGATED pooling-window sizes; the two sign errors cancelled in
// "/ kH / kW" so results were numerically correct, but editing only one
// of the two expressions would silently flip the gradient sign. Compute
// the positive window sizes directly — behavior is identical.
int kH = END_IND(oh, osizeH, isizeH) - START_IND(oh, osizeH, isizeH);
for(ow = ostartW; ow < oendW; ++ow) {
int kW = END_IND(ow, osizeW, isizeW) - START_IND(ow, osizeW, isizeW);
// each output gradient is spread evenly over its kH x kW input window
T grad_delta = gradOutput[ow + oh*osizeW] / kH / kW;
*ptr_gradInput += grad_delta;
}
}
}
}
}
/*
* Description:
* this function computes the gradInput from gradOutput
* (uses atomic add)
*/
// Backward adaptive average pooling, NCHW layout, atomic variant.
// blockIdx.x indexes one collapsed (batch*feature) plane; threads tile the
// OUTPUT (W -> threadIdx.x, H -> blockIdx.y*blockDim.y + threadIdx.y), so
// overlapping pooling windows can write the same input pixel from several
// threads — hence the gpuAtomicAdd. gradInput must be pre-zeroed by the
// caller, since contributions are accumulated into it.
template <typename T>
__global__ void atomic_adaptive_average_gradinput(
T *gradInput, T *gradOutput,
int isizeH, int isizeW, int osizeH, int osizeW
)
{
// iterators on output indices
int oh, ow;
// select input/output plane based on thread/block ID
int o_plane = blockIdx.x;
int i_plane = o_plane;
gradOutput = gradOutput + o_plane*osizeW*osizeH;
gradInput = gradInput + i_plane*isizeW*isizeH;
// grid-stride ranges over the output plane for this thread
int ostartH = blockDim.y*blockIdx.y + threadIdx.y;
int oendH = osizeH;
int ostepH = blockDim.y*gridDim.y;
int ostartW = threadIdx.x;
int oendW = osizeW;
int ostepW = blockDim.x;
// For all output pixels...
for(oh = ostartH; oh < oendH; oh += ostepH) {
// input rows [istartH, iendH) belong to output row oh's window
int istartH = START_IND(oh, osizeH, isizeH);
int iendH = END_IND(oh, osizeH, isizeH);
int kH = iendH - istartH;
for(ow = ostartW; ow < oendW; ow += ostepW) {
int istartW = START_IND(ow, osizeW, isizeW);
int iendW = END_IND(ow, osizeW, isizeW);
int kW = iendW - istartW;
// Compute the gradients for over corresponding input pixels:
// spread this output gradient evenly over its kH x kW window.
T *ptr_gradInput = gradInput + istartH*isizeW + istartW;
T *ptr_gradOutput = gradOutput + oh*osizeW + ow;
T grad_delta = *ptr_gradOutput / kW / kH;
int ih, iw;
for(ih = 0; ih < kH; ++ih) {
for(iw = 0; iw < kW; ++iw) {
// atomic add since different threads could update same variable
gpuAtomicAdd(&(ptr_gradInput[iw]), grad_delta);
}
ptr_gradInput += isizeW; // next input line
}
}
}
}
/*
* Description:
* this function adaptively average pools an input 4D tensor along dimensions 2 and 3
* NHWC layout for both input and output tensor
* 4D input, 4D output
*/
// Forward adaptive average pooling, NHWC (channels_last) layout.
// Expected launch layout (set up by the host launcher below):
//   grid.x = sizeB * kernel_stride_C (batch, plus striding over C)
//   grid.z * block.z tiles output H, grid.y * block.y tiles output W,
//   block.x covers channels.
// Dynamic shared memory: kernel_size_C * blockDim.x*y*z elements of scalar_t,
// used as per-thread accumulators to reduce register pressure.
template <typename index_t, typename scalar_t>
C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS)
__global__ void adaptive_average_pool_nhwc(const scalar_t* __restrict__ input, scalar_t* __restrict__ output,
int sizeB, int sizeC,
int isizeH, int isizeW,
int osizeH, int osizeW,
int kernel_stride_C, int kernel_size_C,
index_t istrideB, index_t istrideC,
index_t istrideH, index_t istrideW)
{
extern __shared__ int smem[];
scalar_t *out_cached = reinterpret_cast<scalar_t*>(smem);
// flattening cta for pre-computation & smem initialization;
int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
int block_size = blockDim.x * blockDim.y * blockDim.z;
// use shared memory to store temporary output value. This is simply to
// reduce register usage.
for (index_t i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) {
out_cached[i] = scalar_t(0.0);
}
__syncthreads();
// each CTA handles a portion of a single slice on batch dimension;
int batch_id = blockIdx.x % sizeB;
int channel_id = blockIdx.x / sizeB;
int channel_offset = threadIdx.x + channel_id * blockDim.x;
// each CTA handles a single slice on batch dimension;
// We use gridDim.x to handle striding on C as well.
// Output is written densely in NHWC order; input uses explicit strides.
output = output + batch_id * osizeH * osizeW * sizeC;
input = input + batch_id * istrideB;
// split out_cached and exclusively it assigned to each thread;
out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C * blockDim.x];
// iterate on output H & W.
// Each CTA handles a consecutive H & W section (TILE); Do NOT stride CTA on
// tile so there's a better chance to hit L1 cache.
index_t oH = (osizeH + gridDim.z-1) / gridDim.z;
index_t oW = (osizeW + gridDim.y-1) / gridDim.y;
index_t ostartH = threadIdx.z + blockIdx.z*oH;
index_t oendH = ::min(ostartH+oH, osizeH);
index_t ostartW = threadIdx.y + blockIdx.y*oW;
index_t oendW = ::min(ostartW+oW, osizeW);
// Stride for threads, each warp can reuse L1 as they go. So theoretically
// better chance to survive cache eviction.
for (int oh = ostartH; oh < oendH; oh+=blockDim.z) {
int istartH = START_IND_INT(oh, osizeH, isizeH);
int iendH = END_IND_INT(oh, osizeH, isizeH);
for (int ow = ostartW; ow < oendW; ow+=blockDim.y) {
int istartW = START_IND_INT(ow, osizeW, isizeW);
int iendW = END_IND_INT(ow, osizeW, isizeW);
// reciprocal of the pooling-window area; multiply once at the end
scalar_t factor = scalar_t(1.0) / ((iendH-istartH) * (iendW-istartW));
// loop on input: hierarchy h->w->c, use shared memory here hopefully
// would not stall global memory read;
for (index_t ih = istartH; ih < iendH; ih++) {
for (index_t iw = istartW; iw < iendW; iw++) {
int cached_index = threadIdx.x;
const scalar_t *ptr_input = input + ih*istrideH + iw*istrideW;
for (index_t c = channel_offset;
c < sizeC;
c += blockDim.x*kernel_stride_C) {
out_cached[cached_index] += ptr_input[c*istrideC];
cached_index += blockDim.x;
}
}
}
scalar_t *ptr_output = output + (oh * osizeW + ow) * sizeC;
int cached_index = threadIdx.x;
// write accumulated output to global memory;
for (index_t c = channel_offset;
c < sizeC;
c += blockDim.x*kernel_stride_C) {
// This causes numerical issueptr when unit test with NCHW kernel;
// switch to could verify the correctness;
// output[c] = out_cached[c] / (iendH-istartH) / (iendW-istartW);
ptr_output[c] = out_cached[cached_index] * factor;
out_cached[cached_index] = scalar_t(0.0);
cached_index += blockDim.x;
}
// no need to __syncthreads() since out_cached is not shared.
}
}
}
/*
* Description:
* this function computes the gradInput from gradOutput
* NHWC layout for both input and output tensor
* 4D input, 4D output
*/
// Backward adaptive average pooling, NHWC (channels_last) layout.
// Launch layout mirrors the forward NHWC kernel, but tiles the INPUT:
//   grid.x = sizeB * kernel_stride_C, grid.z*block.z tiles input H,
//   grid.y*block.y tiles input W, block.x covers channels.
// Dynamic shared memory holds (in order): ostartW/oendW per input column
// (2*isizeW ints), reciprocal window sizes per output row/col
// (osizeH + osizeW scalars), then the per-thread gradient accumulators.
template <typename index_t, typename scalar_t>
C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS)
__global__ void adaptive_average_gradinput_nhwc(scalar_t* __restrict__ gradInput, const scalar_t* __restrict__ gradOutput,
int sizeB, int sizeC,
int isizeH, int isizeW,
int osizeH, int osizeW,
int kernel_stride_C, int kernel_size_C,
index_t ostrideB, index_t ostrideC,
index_t ostrideH, index_t ostrideW)
{
extern __shared__ int smem[];
index_t *ostartW_cached = smem;
index_t *oendW_cached = &ostartW_cached[isizeW];
// be careful with alignment, in case scalar_t is fp16, we want to assign
// int pointers first.
scalar_t *r_kW_cached = reinterpret_cast<scalar_t*>(&oendW_cached[isizeW]);
scalar_t *r_kH_cached = &r_kW_cached[osizeW];
scalar_t *out_cached = &r_kH_cached[osizeH];
// flattening cta for pre-computation & smem initialization;
int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
int block_size = blockDim.x * blockDim.y * blockDim.z;
// Precompute output start/end index per input index on width dimension;
// Not doing this for height dimension, as that's our out-most loop.
for (index_t i = thread_id; i < isizeW; i+= block_size) {
ostartW_cached[i] = START_IND_INT(i, isizeW, osizeW);
oendW_cached[i] = END_IND_INT(i, isizeW, osizeW);
}
// Precompute pooling height/weight factor for each output element;
// This is used to weight output gradient when accumulate them on input
// gradient.
// Technically we don't have to compute it for the whole `osizeH`, since
// each cta only covers a consecutive portion of the entire output. But it's
// not going to save us from code divergence, and shared memory save is not
// an issue neither, so just leave it as is for now.
for (index_t i = thread_id; i < osizeH; i+= block_size) {
r_kH_cached[i] = scalar_t(1.0) / (END_IND_INT(i, osizeH, isizeH) - START_IND_INT(i, osizeH, isizeH));
}
for (index_t i = thread_id; i < osizeW; i+= block_size) {
r_kW_cached[i] = scalar_t(1.0) / (END_IND_INT(i, osizeW, isizeW) - START_IND_INT(i, osizeW, isizeW));
}
// each CTA handles a portion of a single slice on batch dimension;
int batch_id = blockIdx.x % sizeB;
int channel_id = blockIdx.x / sizeB;
int channel_offset = threadIdx.x + channel_id * blockDim.x;
// use shared memory to store temporary output value. This is simply to
// reduce register usage.
for (index_t i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) {
out_cached[i] = scalar_t(0.0);
}
// barrier covers both the precomputed tables and the zeroed accumulators
__syncthreads();
// each CTA handles a portion of a single slice on batch dimension;
// We use gridDim.x to handle striding on C as well.
// gradInput is written densely in NHWC order; gradOutput uses strides.
gradInput = gradInput + batch_id * isizeH * isizeW * sizeC;
gradOutput = gradOutput + batch_id * ostrideB;
// split out_cached and exclusively it assigned to each thread;
out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x * kernel_size_C];
// iterate on input H & W.
// Each CTA handles a consecutive H & W section (TILE); Do NOT stride CTA on
// tile so there's a better chance to hit L1 cache.
index_t iH = (isizeH + gridDim.z-1) / gridDim.z;
index_t iW = (isizeW + gridDim.y-1) / gridDim.y;
index_t istartH = threadIdx.z + blockIdx.z*iH;
index_t iendH = ::min(istartH+iH, isizeH);
index_t istartW = threadIdx.y + blockIdx.y*iW;
index_t iendW = ::min(istartW+iW, isizeW);
// Stride for threads, each warp can reuse L1 as they go. So theoretically
// better chance to survive cache eviction.
for (index_t ih = istartH; ih < iendH; ih+=blockDim.z) {
index_t ostartH = START_IND_INT(ih, isizeH, osizeH);
index_t oendH = END_IND_INT(ih, isizeH, osizeH);
for (index_t iw = istartW; iw < iendW; iw+=blockDim.y) {
// loop on output: hierarchy h->w->c, so we could reuse weight factor f
// because it remains the same for given oh & ow
for(index_t oh = ostartH; oh < oendH; ++oh) {
for(index_t ow = ostartW_cached[iw]; ow < oendW_cached[iw]; ++ow) {
scalar_t f = r_kW_cached[ow] * r_kH_cached[oh];
const scalar_t* ptr_gradOutput = gradOutput + oh*ostrideH + ow*ostrideW;
int cached_index = threadIdx.x;
for (index_t c = channel_offset;
c < sizeC;
c += blockDim.x*kernel_stride_C) {
out_cached[cached_index] += ptr_gradOutput[c*ostrideC] * f;
cached_index += blockDim.x;
}
}
}
scalar_t *ptr_gradInput = gradInput + (ih * isizeW + iw) * sizeC;
int cached_index = threadIdx.x;
// write accumulated gradIput to global memory;
for (index_t c = channel_offset;
c < sizeC;
c += blockDim.x*kernel_stride_C) {
ptr_gradInput[c] = out_cached[cached_index];
out_cached[cached_index] = scalar_t(0.0);
cached_index += blockDim.x;
}
// no need to __syncthreads() since out_cached is not shared.
}
}
}
// 4d tensor B x D x H x W
// Host-side dispatcher for the adaptive average pooling forward pass.
// Validates inputs, resizes `output` to `output_size`, picks the NHWC
// (channels_last) or NCHW (contiguous) kernel based on the input's
// suggested memory format, computes the launch configuration, and launches
// on the current stream. Raises (TORCH_CHECK) on empty spatial dims or an
// unsupported memory format.
void adaptive_avg_pool2d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef output_size)
{
TensorArg input_arg{ input, "input", 1 },
output_arg{ output, "output", 2 };
checkAllSameGPU("cudnn_adaptive_avg_pooling2d", {input_arg, output_arg});
// adaptive pooling windows are undefined for empty spatial dimensions
for (int64_t i = 0; i < input.ndimension(); i++) {
TORCH_CHECK(input.size(i) > 0,
"adaptive_avg_pooling2d(): expected input to have non-empty spatial dimensions, "
"but input has sizes ", input.sizes(), " with dimension ", i, " being "
"empty");
}
Tensor input_ = input;
switch (input.suggest_memory_format()) {
case at::MemoryFormat::ChannelsLast: {
// special case for tensor memory format in channels_last
TORCH_CHECK(input.ndimension() == 4,
"non-empty 4D (batch mode) tensor expected for input with channels_last layout");
int sizeB = input_.size(0);
int sizeC = input_.size(1);
int isizeH = input_.size(2);
int isizeW = input_.size(3);
int64_t istrideB = input_.stride(0);
int64_t istrideC = input_.stride(1);
int64_t istrideH = input_.stride(2);
int64_t istrideW = input_.stride(3);
int osizeH = output_size[0];
int osizeW = output_size[1];
// preserve channels_last stride on output tensor;
if (!output.is_contiguous(at::MemoryFormat::ChannelsLast)) {
// TODO: modify this after resize_ added `memory_format` tag
output.resize_({sizeB, sizeC, osizeH, osizeW}).as_strided_({sizeB, sizeC, osizeH, osizeW}, {sizeC*osizeH*osizeW, 1, osizeW*sizeC, sizeC});
}
const int max_threads = std::min<int>(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS);
int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim;
int* maxGridSize = at::cuda::getCurrentDeviceProperties()->maxGridSize;
size_t sharedMemPerBlock = at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock;
// Launch kernel on output tensor elements. Logic behind launch config:
// output tensor size NCHW, strides NHWC;
// Launch on:
// N -> grid.x
// H -> grid.z * block.z
// W -> grid.y * block.y
// C -> block.x
// encourage larger block_y & block_z for better cache hit while maintain
// reasonable block_x for coalesced memory access;
int block_x = std::min<int>(
maxThreadsDim[0], std::min<int>(lastPow2(sizeC), at::cuda::warp_size()));
int block_y = std::min<int>(
maxThreadsDim[1], std::min<int>(lastPow2(osizeW), max_threads / block_x));
int block_z = std::min<int>(
maxThreadsDim[2], std::min<int>(lastPow2(osizeH), max_threads / block_x / block_y));
// re-widen block_x now that block_y/block_z are fixed
block_x = std::min<int>(
maxThreadsDim[0], std::min<int>(lastPow2(sizeC), max_threads / block_y / block_z));
const dim3 block(block_x, block_y, block_z);
// stride over C via grid.x to bound shared-memory use per block
int kernel_stride_C = cuda::ATenCeilDiv(sizeC, block_x * 4);
int kernel_size_C = cuda::ATenCeilDiv(sizeC, block_x * kernel_stride_C);
// Do NOT clip grid_x, striding on Batch dimension is not in the kernel,
// although it could be easily implemented given current kernel.
int grid_x = sizeB*kernel_stride_C;
// it's OK to clip grid_y & grid_z, as we block the two dimensions in the kernel;
int grid_y = std::min<int>(
maxGridSize[1], cuda::ATenCeilDiv(osizeW, block_y*BLOCK_STRIDE));
int grid_z = std::min<int>(
maxGridSize[2], cuda::ATenCeilDiv(osizeH, block_z*BLOCK_STRIDE));
const dim3 grid(grid_x, grid_y, grid_z);
// we are dealing with packed tensor here. max index is the same as numel.
// TODO: to really support input tensor large enought to go beyond int32,
// we will need to restrict out shared memory usage and adjust the launch
// config;
AT_ASSERT(input_.numel() < std::numeric_limits<int32_t>::max());
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input_.scalar_type(), "adaptive_avg_pool2d_nhwc_cuda", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "adaptive_avg_pool2d_nhwc_cuda", [&] {
// dynamic shared memory: per-thread channel accumulators (see kernel)
size_t shmem_size = (kernel_size_C * block_x * block_y * block_z) * sizeof(scalar_t);
AT_ASSERT(shmem_size <= sharedMemPerBlock);
hipLaunchKernelGGL(( adaptive_average_pool_nhwc<int32_t>), dim3(grid), dim3(block), shmem_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input_.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(),
sizeB, sizeC, isizeH, isizeW, osizeH, osizeW,
kernel_stride_C, kernel_size_C,
istrideB, istrideC, istrideH, istrideW);
});
}
);
break;
}
case at::MemoryFormat::Contiguous: {
TORCH_CHECK((input.ndimension() == 3 || input.ndimension() == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input");
// grid.x indexes collapsed (batch*feature) planes for the NCHW kernel
int64_t grid_x = input.size(-3);
if (input.ndimension() == 4) {
input_ = input.contiguous();
grid_x *= input_.size(-4);
}
int64_t sizeD = input_.size(-3);
int64_t isizeH = input_.size(-2);
int64_t isizeW = input_.size(-1);
int64_t istrideD = input_.stride(-3);
int64_t istrideH = input_.stride(-2);
int64_t istrideW = input_.stride(-1);
int64_t osizeH = output_size[0];
int64_t osizeW = output_size[1];
if (input.ndimension() == 4) {
output.resize_({input_.size(-4), sizeD, osizeH, osizeW});
} else {
output.resize_({sizeD, osizeH, osizeW});
}
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input_.scalar_type(), "adaptive_avg_pool2d_cuda", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "adaptive_avg_pool2d_cuda", [&] {
scalar_t *input_data = input_.data_ptr<scalar_t>();
scalar_t *output_data = output.data_ptr<scalar_t>();
// cuda blocks & threads:
int blocksH = std::max<int64_t>((int)(16L / sizeD), 1);
dim3 blocks(grid_x, blocksH);
dim3 threads(32, 8);
// run averagepool kernel
hipLaunchKernelGGL(( adaptive_average_pool) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input_data, output_data,
isizeH, isizeW, osizeH, osizeW,
istrideD, istrideH, istrideW);
});
}
);
break;
}
default:
TORCH_CHECK(
false,
"Unsupported memory format. Supports only ChannelsLast, Contiguous");
}
// surface any launch-configuration error from the kernels above
THCudaCheck(hipGetLastError());
}
// Backward pass of 2-D adaptive average pooling (HIP build).
// Dispatches on the input's suggested memory format:
//  * ChannelsLast: dedicated NHWC kernel with a hand-tuned launch config;
//    gradInput is coerced to channels_last strides when necessary.
//  * Contiguous: classic NCHW kernel; gradients are accumulated atomically
//    into gradInput (caller is expected to provide a zeroed gradInput).
// Any other memory format raises via TORCH_CHECK.
void adaptive_avg_pool2d_backward_out_cuda_template(
  Tensor& gradInput,
  const Tensor& gradOutput_,
  const Tensor& input)
{
  TensorArg grad_input_arg{ gradInput, "gradInput", 1 },
    grad_output_arg{ gradOutput_, "gradOutput_", 2 },
    input_arg{ input, "input", 3 };
  checkAllSameGPU("cudnn_adaptive_avg_pooling2d_out",
    {grad_input_arg, grad_output_arg, input_arg});
  switch (input.suggest_memory_format()) {
    case at::MemoryFormat::ChannelsLast: {
      // special case for tensor memory format in channels_last
      TORCH_CHECK(input.ndimension() == 4,
        "non-empty 4D (batch mode) tensor expected for input with channels_last layout");
      int sizeB = input.size(0);
      int sizeC = input.size(1);
      int isizeH = input.size(2);
      int isizeW = input.size(3);
      Tensor gradOutput = gradOutput_;
      int64_t ostrideB = gradOutput.stride(0);
      int64_t ostrideC = gradOutput.stride(1);
      int64_t ostrideH = gradOutput.stride(2);
      int64_t ostrideW = gradOutput.stride(3);
      int osizeH = gradOutput.size(-2);
      int osizeW = gradOutput.size(-1);
      // preserve channels_last stride on input tensor;
      if (!gradInput.is_contiguous(at::MemoryFormat::ChannelsLast)) {
        gradInput.as_strided_(
          {sizeB, sizeC, isizeH, isizeW},
          {sizeC*isizeH*isizeW, 1, isizeW*sizeC, sizeC});
      }
      const int max_threads = std::min<int>(
        at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS);
      int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim;
      int* maxGridSize = at::cuda::getCurrentDeviceProperties()->maxGridSize;
      size_t sharedMemPerBlock = at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock;
      // Launch kernel on input tensor elements. Logic behind launch config:
      // input tensor size NCHW, strides NHWC;
      // Launch on:
      // N(C) -> grid.x (striding on C to reduce sh_mem usage)
      // H -> grid.z * block.z
      // W -> grid.y * block.y
      // C -> block.x
      // encourage larger block_y & block_z for better cache hit while maintain
      // reasonable block_x for coalesced memory access;
      int block_x = std::min<int>(
        maxThreadsDim[0], std::min<int>(lastPow2(sizeC), at::cuda::warp_size()));
      int block_y = std::min<int>(
        maxThreadsDim[1], std::min<int>(lastPow2(isizeW), max_threads / block_x));
      int block_z = std::min<int>(
        maxThreadsDim[2], std::min<int>(lastPow2(isizeH), max_threads / block_x / block_y));
      // block_x is recomputed so leftover budget from clamped y/z goes back to C;
      block_x = std::min<int>(
        maxThreadsDim[0], std::min<int>(lastPow2(sizeC), max_threads / block_y / block_z));
      const dim3 block(block_x, block_y, block_z);
      int kernel_stride_C = cuda::ATenCeilDiv(sizeC, block_x * 4);
      int kernel_size_C = cuda::ATenCeilDiv(sizeC, block_x * kernel_stride_C);
      // Do NOT clip grid_x, striding on Batch dimension is not in the kernel,
      // although it could be easily implemented given current kernel.
      int grid_x = sizeB*kernel_stride_C;
      // it's OK to clip grid_y & grid_z, as we block the two dimensions in the kernel;
      int grid_y = std::min<int>(
        maxGridSize[1], cuda::ATenCeilDiv(isizeW, block_y*BLOCK_STRIDE));
      int grid_z = std::min<int>(
        maxGridSize[2], cuda::ATenCeilDiv(isizeH, block_z*BLOCK_STRIDE));
      const dim3 grid(grid_x, grid_y, grid_z);
      // we are dealing with packed tensor here. max index is the same as numel.
      // TODO: to really support input tensor large enought to go beyond int32,
      // we will need to restrict out shared memory usage and adjust the launch
      // config;
      AT_ASSERT(input.numel() < std::numeric_limits<int32_t>::max());
      AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
        input.scalar_type(), "adaptive_avg_pool2d_backward_nhwc_cuda", [&] {
          AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "adaptive_avg_pool2d_backward_nhwc_cuda", [&] {
            // Shared memory budget: per-thread output cache + precomputed
            // per-row/col pooling factors + W-dimension window bounds;
            // this layout must match adaptive_average_gradinput_nhwc.
            size_t shmem_size = (kernel_size_C * block_x * block_y * block_z + osizeH + osizeW) * sizeof(scalar_t) + 2 * isizeW * sizeof(int32_t);
            AT_ASSERT(shmem_size <= sharedMemPerBlock);
            hipLaunchKernelGGL(( adaptive_average_gradinput_nhwc<int32_t>), dim3(grid), dim3(block), shmem_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
              gradInput.data_ptr<scalar_t>(),
              gradOutput.data_ptr<scalar_t>(),
              sizeB, sizeC, isizeH, isizeW, osizeH, osizeW,
              kernel_stride_C, kernel_size_C,
              ostrideB, ostrideC, ostrideH, ostrideW);
          });
        }
      );
      break;
    }
    case at::MemoryFormat::Contiguous: {
      bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests
      Tensor gradOutput = gradOutput_.contiguous();
      int64_t sizeD = input.size(-3);
      int64_t isizeH = input.size(-2);
      int64_t isizeW = input.size(-1);
      int64_t osizeH = gradOutput.size(-2);
      int64_t osizeW = gradOutput.size(-1);
      // grid.x covers one (batch*channel) plane per block-column;
      int64_t grid_x = sizeD;
      if (input.ndimension() == 4) grid_x *= input.size(-4);
      //bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0);
      AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
        input.scalar_type(), "adaptive_avg_pool2d_backward_cuda", [&] {
          AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "adaptive_avg_pool2d_backward_cuda", [&] {
            scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
            scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
            // cuda blocks & threads:
            int blocksH = ::max((int)(16L / sizeD), 1);
            dim3 blocks(grid_x, blocksH);
            dim3 threads(32, 8);
            if(atomic)
            {
              // run updateGradInput kernel, accumulate gradients atomically
              hipLaunchKernelGGL(( atomic_adaptive_average_gradinput) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                gradInput_data, gradOutput_data,
                isizeH, isizeW, osizeH, osizeW);
            }
            else
            {
              // run updateGradInput kernel
              hipLaunchKernelGGL(( adaptive_average_gradinput) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                gradInput_data, gradOutput_data,
                isizeH, isizeW, osizeH, osizeW);
            }
          });
        }
      );
      break;
    }
    default:
      TORCH_CHECK(
        false,
        "Unsupported memory format. Supports only ChannelsLast, Contiguous");
  }
  THCudaCheck(hipGetLastError());
}
} // namespace
// Out-variant entry point: pools `input` into the caller-supplied `output`
// (resized/re-strided inside the shared template) and returns `output`.
Tensor& adaptive_avg_pool2d_out_cuda(
  Tensor& output,
  const Tensor& input,
  IntArrayRef output_size)
{
  adaptive_avg_pool2d_out_cuda_template(
    output, input, output_size);
  return output;
}
// Functional entry point: allocates an empty result tensor with the input's
// options and delegates to the shared template, which sizes it correctly.
Tensor adaptive_avg_pool2d_cuda(
  at::Tensor const& input,
  IntArrayRef output_size)
{
  auto output = at::empty({0}, input.options());
  adaptive_avg_pool2d_out_cuda_template(
    output, input, output_size);
  return output;
}
// Out-variant backward: gradInput is resized to match input before the
// shared backward template fills it.
// NOTE(review): unlike the functional variant below, gradInput is not
// zeroed here; the contiguous path accumulates into it — callers are
// presumably expected to pass a zeroed buffer (TODO confirm).
Tensor& adaptive_avg_pool2d_backward_out_cuda(
  Tensor& gradInput,
  const Tensor& gradOutput,
  const Tensor& input)
{
  gradInput.resize_as_(input);
  adaptive_avg_pool2d_backward_out_cuda_template(
    gradInput, gradOutput, input);
  return gradInput;
}
// Functional backward: gradInput starts from zeros because the contiguous
// code path accumulates (atomicAdd / +=) into it rather than assigning.
Tensor adaptive_avg_pool2d_backward_cuda(
  const Tensor& gradOutput,
  const Tensor& input)
{
  auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  adaptive_avg_pool2d_backward_out_cuda_template(
    gradInput, gradOutput, input);
  return gradInput;
}
} // at::native
} // at
#undef BLOCK_STRIDE
#undef CUDA_MAX_THREADS
#undef START_IND
#undef END_IND
| 44b27c419dfedcac010e55dd7f735d43122afeda.cu | #include "ATen/ATen.h"
#include "ATen/cuda/CUDAApplyUtils.cuh"
#include "ATen/cuda/CUDAContext.h"
#include "ATen/NativeFunctions.h"
#include "ATen/TensorUtils.h"
#include "ATen/Utils.h"
#include "c10/util/Exception.h"
#include <THC/THCAtomics.cuh>
#include <THC/THCGeneral.h>
#include "THC/THCNumerics.cuh"
#include <ATen/native/cuda/LaunchUtils.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <algorithm>
#include <cfloat>
#include <cmath>
#define START_IND(a,b,c) (int)std::floor((float)(a * c) / b)
#define END_IND(a,b,c) (int)std::ceil((float)((a + 1) * c) / b)
#define START_IND_INT(a,b,c) ((a * c) / b)
#define END_IND_INT(a,b,c) (((a + 1) * c + b - 1) / b)
// #define START_IND(a,b,c) a * c / b
// #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0
#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
#define BLOCK_STRIDE 2 // increasing block_stride to lower # of blocks launched
namespace at {
namespace native {
namespace {
// 4d tensor B x D x H x W
// All kernels view batch dim B and feature dim D as collapsed.
/*
* Description:
* this function adaptively average pools an input 4D tensor along dimensions 2 and 3
* 4D input, 4D output
*/
// NCHW forward kernel. blockIdx.x selects one collapsed (batch*channel)
// plane; output rows are strided over (blockDim.y, gridDim.y) and output
// columns over blockDim.x. Each thread averages the input window that
// maps onto its output pixel.
template <typename T>
__global__ void adaptive_average_pool(T *input, T *output,
                        int isizeH, int isizeW,
                        int osizeH, int osizeW,
                        int64_t istrideD, int64_t istrideH, int64_t istrideW)
{
  // one plane per block.x slice
  const int plane = blockIdx.x;
  input = input + plane * istrideD;
  output = output + plane * osizeH * osizeW;

  const int rowStart = blockDim.y * blockIdx.y + threadIdx.y;
  const int rowStep  = blockDim.y * gridDim.y;
  const int colStart = threadIdx.x;
  const int colStep  = blockDim.x;

  for (int oh = rowStart; oh < osizeH; oh += rowStep) {
    const int hBegin = START_IND(oh, osizeH, isizeH);
    const int kH = END_IND(oh, osizeH, isizeH) - hBegin;
    for (int ow = colStart; ow < osizeW; ow += colStep) {
      const int wBegin = START_IND(ow, osizeW, isizeW);
      const int kW = END_IND(ow, osizeW, isizeW) - wBegin;

      // accumulate the kH x kW window feeding output pixel (oh, ow)
      T *in = input + hBegin * istrideH + wBegin * istrideW;
      T sum = ScalarConvert<int, T>::to(0);
      for (int ih = 0; ih < kH; ++ih) {
        for (int iw = 0; iw < kW; ++iw) {
          sum += in[iw * istrideW];
        }
        in += istrideH; // next input line
      }
      // same two-step division as before to keep results bit-identical
      output[oh * osizeW + ow] = sum / kH / kW;
    }
  }
}
/*
* Description:
* this function computes the gradInput from gradOutput
*/
// NCHW backward kernel (non-atomic variant): each thread owns a set of
// input pixels and gathers the gradients of every output pixel whose
// pooling window covers them.
//
// Fix: kH/kW used to be computed as START_IND - END_IND, i.e. as
// *negative* window sizes whose signs only cancelled because gradOutput
// is divided by both. They are now computed the conventional way
// (END - START >= 1). IEEE-754 sign flips are exact, so
// g / (-kH) / (-kW) == g / kH / kW bitwise — gradients are unchanged,
// but the intermediates are no longer misleading.
template <typename T>
__global__ void adaptive_average_gradinput(
  T *gradInput, T *gradOutput,
  int isizeH, int isizeW, int osizeH, int osizeW
)
{
  // iterators on input pixels
  int ih, iw;
  // select input/output plane based on thread/block ID
  int i_plane = blockIdx.x;
  int o_plane = i_plane;
  gradOutput = gradOutput + o_plane*osizeH*osizeW;
  gradInput = gradInput + i_plane*isizeH*isizeW;
  int istartH = blockDim.y*blockIdx.y + threadIdx.y;
  int iendH = isizeH;
  int istepH = blockDim.y*gridDim.y;
  int istartW = threadIdx.x;
  int iendW = isizeW;
  int istepW = blockDim.x;
  // compute gradInput
  for(ih = istartH; ih < iendH; ih += istepH) {
    // range of output rows whose windows include input row ih
    int ostartH = START_IND(ih, isizeH, osizeH);
    int oendH = END_IND(ih, isizeH, osizeH);
    for(iw = istartW; iw < iendW; iw += istepW) {
      int ostartW = START_IND(iw, isizeW, osizeW);
      int oendW = END_IND(iw, isizeW, osizeW);
      // Compute the gradients over corresponding output pixels
      T *ptr_gradInput = gradInput + ih*isizeW + iw;
      int oh, ow;
      for(oh = ostartH; oh < oendH; ++oh) {
        // height of output row oh's pooling window (>= 1)
        int kH = END_IND(oh, osizeH, isizeH) - START_IND(oh, osizeH, isizeH);
        for(ow = ostartW; ow < oendW; ++ow) {
          // width of output col ow's pooling window (>= 1)
          int kW = END_IND(ow, osizeW, isizeW) - START_IND(ow, osizeW, isizeW);
          T grad_delta = gradOutput[ow + oh*osizeW] / kH / kW;
          *ptr_gradInput += grad_delta;
        }
      }
    }
  }
}
/*
* Description:
* this function computes the gradInput from gradOutput
* (uses atomic add)
*/
// NCHW backward kernel (atomic variant): mirrors the forward launch —
// one block.x slice per (batch*channel) plane, threads stride over
// OUTPUT pixels and scatter each pixel's gradient back over its input
// window. Windows of neighbouring output pixels can overlap on the
// input, so the scatter must use gpuAtomicAdd.
template <typename T>
__global__ void atomic_adaptive_average_gradinput(
  T *gradInput, T *gradOutput,
  int isizeH, int isizeW, int osizeH, int osizeW
)
{
  // iterators on output indices
  int oh, ow;
  // select input/output plane based on thread/block ID
  int o_plane = blockIdx.x;
  int i_plane = o_plane;
  gradOutput = gradOutput + o_plane*osizeW*osizeH;
  gradInput = gradInput + i_plane*isizeW*isizeH;
  int ostartH = blockDim.y*blockIdx.y + threadIdx.y;
  int oendH = osizeH;
  int ostepH = blockDim.y*gridDim.y;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  int ostepW = blockDim.x;
  // For all output pixels...
  for(oh = ostartH; oh < oendH; oh += ostepH) {
    int istartH = START_IND(oh, osizeH, isizeH);
    int iendH = END_IND(oh, osizeH, isizeH);
    int kH = iendH - istartH;
    for(ow = ostartW; ow < oendW; ow += ostepW) {
      int istartW = START_IND(ow, osizeW, isizeW);
      int iendW = END_IND(ow, osizeW, isizeW);
      int kW = iendW - istartW;
      // Compute the gradients for over corresponding input pixels
      T *ptr_gradInput = gradInput + istartH*isizeW + istartW;
      T *ptr_gradOutput = gradOutput + oh*osizeW + ow;
      // each input pixel in the window receives grad / (window area)
      T grad_delta = *ptr_gradOutput / kW / kH;
      int ih, iw;
      for(ih = 0; ih < kH; ++ih) {
        for(iw = 0; iw < kW; ++iw) {
          // atomic add since different threads could update same variable
          gpuAtomicAdd(&(ptr_gradInput[iw]), grad_delta);
        }
        ptr_gradInput += isizeW; // next input line
      }
    }
  }
}
/*
* Description:
* this function adaptively average pools an input 4D tensor along dimensions 2 and 3
* NHWC layout for both input and output tensor
* 4D input, 4D output
*/
// NHWC forward kernel. Launch layout (see host template):
//   grid.x  = sizeB * kernel_stride_C (batch, striding over channels)
//   grid.y/block.y cover output W, grid.z/block.z cover output H,
//   block.x covers channels.
// Dynamic shared memory holds one per-thread accumulator strip of
// kernel_size_C values (sized by the host as
// kernel_size_C * blockDim.x * blockDim.y * blockDim.z scalars).
template <typename index_t, typename scalar_t>
C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS)
__global__ void adaptive_average_pool_nhwc(const scalar_t* __restrict__ input, scalar_t* __restrict__ output,
                        int sizeB, int sizeC,
                        int isizeH, int isizeW,
                        int osizeH, int osizeW,
                        int kernel_stride_C, int kernel_size_C,
                        index_t istrideB, index_t istrideC,
                        index_t istrideH, index_t istrideW)
{
  extern __shared__ int smem[];
  scalar_t *out_cached = reinterpret_cast<scalar_t*>(smem);
  // flattening cta for pre-computation & smem initialization;
  int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
  int block_size = blockDim.x * blockDim.y * blockDim.z;
  // use shared memory to store temporary output value. This is simply to
  // reduce register usage.
  for (index_t i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) {
    out_cached[i] = scalar_t(0.0);
  }
  __syncthreads();
  // each CTA handles a portion of a single slice on batch dimension;
  int batch_id = blockIdx.x % sizeB;
  int channel_id = blockIdx.x / sizeB;
  int channel_offset = threadIdx.x + channel_id * blockDim.x;
  // each CTA handles a single slice on batch dimension;
  // We use gridDim.x to handle striding on C as well.
  output = output + batch_id * osizeH * osizeW * sizeC;
  input = input + batch_id * istrideB;
  // split out_cached and exclusively it assigned to each thread;
  out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C * blockDim.x];
  // iterate on output H & W.
  // Each CTA handles a consecutive H & W section (TILE); Do NOT stride CTA on
  // tile so there's a better chance to hit L1 cache.
  index_t oH = (osizeH + gridDim.z-1) / gridDim.z;
  index_t oW = (osizeW + gridDim.y-1) / gridDim.y;
  index_t ostartH = threadIdx.z + blockIdx.z*oH;
  index_t oendH = ::min(ostartH+oH, osizeH);
  index_t ostartW = threadIdx.y + blockIdx.y*oW;
  index_t oendW = ::min(ostartW+oW, osizeW);
  // Stride for threads, each warp can reuse L1 as they go. So theoretically
  // better chance to survive cache eviction.
  for (int oh = ostartH; oh < oendH; oh+=blockDim.z) {
    int istartH = START_IND_INT(oh, osizeH, isizeH);
    int iendH = END_IND_INT(oh, osizeH, isizeH);
    for (int ow = ostartW; ow < oendW; ow+=blockDim.y) {
      int istartW = START_IND_INT(ow, osizeW, isizeW);
      int iendW = END_IND_INT(ow, osizeW, isizeW);
      // reciprocal of the pooling-window area for this output pixel
      scalar_t factor = scalar_t(1.0) / ((iendH-istartH) * (iendW-istartW));
      // loop on input: hierarchy h->w->c, use shared memory here hopefully
      // would not stall global memory read;
      for (index_t ih = istartH; ih < iendH; ih++) {
        for (index_t iw = istartW; iw < iendW; iw++) {
          int cached_index = threadIdx.x;
          const scalar_t *ptr_input = input + ih*istrideH + iw*istrideW;
          for (index_t c = channel_offset;
               c < sizeC;
               c += blockDim.x*kernel_stride_C) {
            out_cached[cached_index] += ptr_input[c*istrideC];
            cached_index += blockDim.x;
          }
        }
      }
      scalar_t *ptr_output = output + (oh * osizeW + ow) * sizeC;
      int cached_index = threadIdx.x;
      // write accumulated output to global memory;
      for (index_t c = channel_offset;
           c < sizeC;
           c += blockDim.x*kernel_stride_C) {
        // This causes numerical issueptr when unit test with NCHW kernel;
        // switch to could verify the correctness;
        // output[c] = out_cached[c] / (iendH-istartH) / (iendW-istartW);
        ptr_output[c] = out_cached[cached_index] * factor;
        out_cached[cached_index] = scalar_t(0.0);
        cached_index += blockDim.x;
      }
      // no need to __syncthreads() since out_cached is not shared.
    }
  }
}
/*
* Description:
* this function computes the gradInput from gradOutput
* NHWC layout for both input and output tensor
* 4D input, 4D output
*/
// NHWC backward kernel. Dynamic shared memory layout (must match the host
// side shmem_size computation):
//   [0, isizeW)            int:      per-input-col output window start
//   [isizeW, 2*isizeW)     int:      per-input-col output window end
//   then scalar_t:         osizeW reciprocal widths, osizeH reciprocal
//   heights, followed by the per-thread accumulator strips.
// int pointers are carved out first to keep fp16 alignment safe.
template <typename index_t, typename scalar_t>
C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS)
__global__ void adaptive_average_gradinput_nhwc(scalar_t* __restrict__ gradInput, const scalar_t* __restrict__ gradOutput,
                        int sizeB, int sizeC,
                        int isizeH, int isizeW,
                        int osizeH, int osizeW,
                        int kernel_stride_C, int kernel_size_C,
                        index_t ostrideB, index_t ostrideC,
                        index_t ostrideH, index_t ostrideW)
{
  extern __shared__ int smem[];
  index_t *ostartW_cached = smem;
  index_t *oendW_cached = &ostartW_cached[isizeW];
  // be careful with alignment, in case scalar_t is fp16, we want to assign
  // int pointers first.
  scalar_t *r_kW_cached = reinterpret_cast<scalar_t*>(&oendW_cached[isizeW]);
  scalar_t *r_kH_cached = &r_kW_cached[osizeW];
  scalar_t *out_cached = &r_kH_cached[osizeH];
  // flattening cta for pre-computation & smem initialization;
  int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
  int block_size = blockDim.x * blockDim.y * blockDim.z;
  // Precompute output start/end index per input index on width dimension;
  // Not doing this for height dimension, as that's our out-most loop.
  for (index_t i = thread_id; i < isizeW; i+= block_size) {
    ostartW_cached[i] = START_IND_INT(i, isizeW, osizeW);
    oendW_cached[i] = END_IND_INT(i, isizeW, osizeW);
  }
  // Precompute pooling height/weight factor for each output element;
  // This is used to weight output gradient when accumulate them on input
  // gradient.
  // Technically we don't have to compute it for the whole `osizeH`, since
  // each cta only covers a consecutive portion of the entire output. But it's
  // not going to save us from code divergence, and shared memory save is not
  // an issue neither, so just leave it as is for now.
  for (index_t i = thread_id; i < osizeH; i+= block_size) {
    r_kH_cached[i] = scalar_t(1.0) / (END_IND_INT(i, osizeH, isizeH) - START_IND_INT(i, osizeH, isizeH));
  }
  for (index_t i = thread_id; i < osizeW; i+= block_size) {
    r_kW_cached[i] = scalar_t(1.0) / (END_IND_INT(i, osizeW, isizeW) - START_IND_INT(i, osizeW, isizeW));
  }
  // each CTA handles a portion of a single slice on batch dimension;
  int batch_id = blockIdx.x % sizeB;
  int channel_id = blockIdx.x / sizeB;
  int channel_offset = threadIdx.x + channel_id * blockDim.x;
  // use shared memory to store temporary output value. This is simply to
  // reduce register usage.
  for (index_t i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) {
    out_cached[i] = scalar_t(0.0);
  }
  // barrier covers both the precomputed tables and the zeroed accumulators
  __syncthreads();
  // each CTA handles a portion of a single slice on batch dimension;
  // We use gridDim.x to handle striding on C as well.
  gradInput = gradInput + batch_id * isizeH * isizeW * sizeC;
  gradOutput = gradOutput + batch_id * ostrideB;
  // split out_cached and exclusively it assigned to each thread;
  out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x * kernel_size_C];
  // iterate on input H & W.
  // Each CTA handles a consecutive H & W section (TILE); Do NOT stride CTA on
  // tile so there's a better chance to hit L1 cache.
  index_t iH = (isizeH + gridDim.z-1) / gridDim.z;
  index_t iW = (isizeW + gridDim.y-1) / gridDim.y;
  index_t istartH = threadIdx.z + blockIdx.z*iH;
  index_t iendH = ::min(istartH+iH, isizeH);
  index_t istartW = threadIdx.y + blockIdx.y*iW;
  index_t iendW = ::min(istartW+iW, isizeW);
  // Stride for threads, each warp can reuse L1 as they go. So theoretically
  // better chance to survive cache eviction.
  for (index_t ih = istartH; ih < iendH; ih+=blockDim.z) {
    index_t ostartH = START_IND_INT(ih, isizeH, osizeH);
    index_t oendH = END_IND_INT(ih, isizeH, osizeH);
    for (index_t iw = istartW; iw < iendW; iw+=blockDim.y) {
      // loop on output: hierarchy h->w->c, so we could reuse weight factor f
      // because it remains the same for given oh & ow
      for(index_t oh = ostartH; oh < oendH; ++oh) {
        for(index_t ow = ostartW_cached[iw]; ow < oendW_cached[iw]; ++ow) {
          scalar_t f = r_kW_cached[ow] * r_kH_cached[oh];
          const scalar_t* ptr_gradOutput = gradOutput + oh*ostrideH + ow*ostrideW;
          int cached_index = threadIdx.x;
          for (index_t c = channel_offset;
               c < sizeC;
               c += blockDim.x*kernel_stride_C) {
            out_cached[cached_index] += ptr_gradOutput[c*ostrideC] * f;
            cached_index += blockDim.x;
          }
        }
      }
      scalar_t *ptr_gradInput = gradInput + (ih * isizeW + iw) * sizeC;
      int cached_index = threadIdx.x;
      // write accumulated gradIput to global memory;
      for (index_t c = channel_offset;
           c < sizeC;
           c += blockDim.x*kernel_stride_C) {
        ptr_gradInput[c] = out_cached[cached_index];
        out_cached[cached_index] = scalar_t(0.0);
        cached_index += blockDim.x;
      }
      // no need to __syncthreads() since out_cached is not shared.
    }
  }
}
// 4d tensor B x D x H x W
// Forward pass of 2-D adaptive average pooling (CUDA build).
// Validates non-empty dims, then dispatches on the input's suggested
// memory format: ChannelsLast gets the shared-memory NHWC kernel with a
// hand-tuned launch config; Contiguous gets the classic NCHW kernel.
// `output` is resized (and, for NHWC, re-strided) in place.
void adaptive_avg_pool2d_out_cuda_template(
  Tensor& output,
  const Tensor& input,
  IntArrayRef output_size)
{
  TensorArg input_arg{ input, "input", 1 },
    output_arg{ output, "output", 2 };
  checkAllSameGPU("cudnn_adaptive_avg_pooling2d", {input_arg, output_arg});
  for (int64_t i = 0; i < input.ndimension(); i++) {
    TORCH_CHECK(input.size(i) > 0,
      "adaptive_avg_pooling2d(): expected input to have non-empty spatial dimensions, "
      "but input has sizes ", input.sizes(), " with dimension ", i, " being "
      "empty");
  }
  Tensor input_ = input;
  switch (input.suggest_memory_format()) {
    case at::MemoryFormat::ChannelsLast: {
      // special case for tensor memory format in channels_last
      TORCH_CHECK(input.ndimension() == 4,
        "non-empty 4D (batch mode) tensor expected for input with channels_last layout");
      int sizeB = input_.size(0);
      int sizeC = input_.size(1);
      int isizeH = input_.size(2);
      int isizeW = input_.size(3);
      int64_t istrideB = input_.stride(0);
      int64_t istrideC = input_.stride(1);
      int64_t istrideH = input_.stride(2);
      int64_t istrideW = input_.stride(3);
      int osizeH = output_size[0];
      int osizeW = output_size[1];
      // preserve channels_last stride on output tensor;
      if (!output.is_contiguous(at::MemoryFormat::ChannelsLast)) {
        // TODO: modify this after resize_ added `memory_format` tag
        output.resize_({sizeB, sizeC, osizeH, osizeW}).as_strided_({sizeB, sizeC, osizeH, osizeW}, {sizeC*osizeH*osizeW, 1, osizeW*sizeC, sizeC});
      }
      const int max_threads = std::min<int>(
        at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS);
      int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim;
      int* maxGridSize = at::cuda::getCurrentDeviceProperties()->maxGridSize;
      size_t sharedMemPerBlock = at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock;
      // Launch kernel on output tensor elements. Logic behind launch config:
      // output tensor size NCHW, strides NHWC;
      // Launch on:
      // N -> grid.x
      // H -> grid.z * block.z
      // W -> grid.y * block.y
      // C -> block.x
      // encourage larger block_y & block_z for better cache hit while maintain
      // reasonable block_x for coalesced memory access;
      int block_x = std::min<int>(
        maxThreadsDim[0], std::min<int>(lastPow2(sizeC), at::cuda::warp_size()));
      int block_y = std::min<int>(
        maxThreadsDim[1], std::min<int>(lastPow2(osizeW), max_threads / block_x));
      int block_z = std::min<int>(
        maxThreadsDim[2], std::min<int>(lastPow2(osizeH), max_threads / block_x / block_y));
      // block_x is recomputed so budget unused by clamped y/z returns to C;
      block_x = std::min<int>(
        maxThreadsDim[0], std::min<int>(lastPow2(sizeC), max_threads / block_y / block_z));
      const dim3 block(block_x, block_y, block_z);
      int kernel_stride_C = cuda::ATenCeilDiv(sizeC, block_x * 4);
      int kernel_size_C = cuda::ATenCeilDiv(sizeC, block_x * kernel_stride_C);
      // Do NOT clip grid_x, striding on Batch dimension is not in the kernel,
      // although it could be easily implemented given current kernel.
      int grid_x = sizeB*kernel_stride_C;
      // it's OK to clip grid_y & grid_z, as we block the two dimensions in the kernel;
      int grid_y = std::min<int>(
        maxGridSize[1], cuda::ATenCeilDiv(osizeW, block_y*BLOCK_STRIDE));
      int grid_z = std::min<int>(
        maxGridSize[2], cuda::ATenCeilDiv(osizeH, block_z*BLOCK_STRIDE));
      const dim3 grid(grid_x, grid_y, grid_z);
      // we are dealing with packed tensor here. max index is the same as numel.
      // TODO: to really support input tensor large enought to go beyond int32,
      // we will need to restrict out shared memory usage and adjust the launch
      // config;
      AT_ASSERT(input_.numel() < std::numeric_limits<int32_t>::max());
      AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
        input_.scalar_type(), "adaptive_avg_pool2d_nhwc_cuda", [&] {
          AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "adaptive_avg_pool2d_nhwc_cuda", [&] {
            // one accumulator strip of kernel_size_C scalars per thread
            size_t shmem_size = (kernel_size_C * block_x * block_y * block_z) * sizeof(scalar_t);
            AT_ASSERT(shmem_size <= sharedMemPerBlock);
            adaptive_average_pool_nhwc<int32_t><<<grid, block, shmem_size, at::cuda::getCurrentCUDAStream()>>> (
              input_.data_ptr<scalar_t>(),
              output.data_ptr<scalar_t>(),
              sizeB, sizeC, isizeH, isizeW, osizeH, osizeW,
              kernel_stride_C, kernel_size_C,
              istrideB, istrideC, istrideH, istrideW);
          });
        }
      );
      break;
    }
    case at::MemoryFormat::Contiguous: {
      TORCH_CHECK((input.ndimension() == 3 || input.ndimension() == 4),
        "non-empty 3D or 4D (batch mode) tensor expected for input");
      // grid.x covers one (batch*channel) plane per block-column;
      int64_t grid_x = input.size(-3);
      if (input.ndimension() == 4) {
        input_ = input.contiguous();
        grid_x *= input_.size(-4);
      }
      int64_t sizeD = input_.size(-3);
      int64_t isizeH = input_.size(-2);
      int64_t isizeW = input_.size(-1);
      int64_t istrideD = input_.stride(-3);
      int64_t istrideH = input_.stride(-2);
      int64_t istrideW = input_.stride(-1);
      int64_t osizeH = output_size[0];
      int64_t osizeW = output_size[1];
      if (input.ndimension() == 4) {
        output.resize_({input_.size(-4), sizeD, osizeH, osizeW});
      } else {
        output.resize_({sizeD, osizeH, osizeW});
      }
      AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
        input_.scalar_type(), "adaptive_avg_pool2d_cuda", [&] {
          AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "adaptive_avg_pool2d_cuda", [&] {
            scalar_t *input_data = input_.data_ptr<scalar_t>();
            scalar_t *output_data = output.data_ptr<scalar_t>();
            // cuda blocks & threads:
            int blocksH = std::max<int64_t>((int)(16L / sizeD), 1);
            dim3 blocks(grid_x, blocksH);
            dim3 threads(32, 8);
            // run averagepool kernel
            adaptive_average_pool <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> (
              input_data, output_data,
              isizeH, isizeW, osizeH, osizeW,
              istrideD, istrideH, istrideW);
          });
        }
      );
      break;
    }
    default:
      TORCH_CHECK(
        false,
        "Unsupported memory format. Supports only ChannelsLast, Contiguous");
  }
  THCudaCheck(cudaGetLastError());
}
// Backward pass of 2-D adaptive average pooling (CUDA build).
// ChannelsLast inputs go through the shared-memory NHWC kernel;
// Contiguous inputs use the NCHW kernel, accumulating gradients
// atomically into gradInput (caller supplies a zeroed gradInput).
void adaptive_avg_pool2d_backward_out_cuda_template(
  Tensor& gradInput,
  const Tensor& gradOutput_,
  const Tensor& input)
{
  TensorArg grad_input_arg{ gradInput, "gradInput", 1 },
    grad_output_arg{ gradOutput_, "gradOutput_", 2 },
    input_arg{ input, "input", 3 };
  checkAllSameGPU("cudnn_adaptive_avg_pooling2d_out",
    {grad_input_arg, grad_output_arg, input_arg});
  switch (input.suggest_memory_format()) {
    case at::MemoryFormat::ChannelsLast: {
      // special case for tensor memory format in channels_last
      TORCH_CHECK(input.ndimension() == 4,
        "non-empty 4D (batch mode) tensor expected for input with channels_last layout");
      int sizeB = input.size(0);
      int sizeC = input.size(1);
      int isizeH = input.size(2);
      int isizeW = input.size(3);
      Tensor gradOutput = gradOutput_;
      int64_t ostrideB = gradOutput.stride(0);
      int64_t ostrideC = gradOutput.stride(1);
      int64_t ostrideH = gradOutput.stride(2);
      int64_t ostrideW = gradOutput.stride(3);
      int osizeH = gradOutput.size(-2);
      int osizeW = gradOutput.size(-1);
      // preserve channels_last stride on input tensor;
      if (!gradInput.is_contiguous(at::MemoryFormat::ChannelsLast)) {
        gradInput.as_strided_(
          {sizeB, sizeC, isizeH, isizeW},
          {sizeC*isizeH*isizeW, 1, isizeW*sizeC, sizeC});
      }
      const int max_threads = std::min<int>(
        at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS);
      int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim;
      int* maxGridSize = at::cuda::getCurrentDeviceProperties()->maxGridSize;
      size_t sharedMemPerBlock = at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock;
      // Launch kernel on input tensor elements. Logic behind launch config:
      // input tensor size NCHW, strides NHWC;
      // Launch on:
      // N(C) -> grid.x (striding on C to reduce sh_mem usage)
      // H -> grid.z * block.z
      // W -> grid.y * block.y
      // C -> block.x
      // encourage larger block_y & block_z for better cache hit while maintain
      // reasonable block_x for coalesced memory access;
      int block_x = std::min<int>(
        maxThreadsDim[0], std::min<int>(lastPow2(sizeC), at::cuda::warp_size()));
      int block_y = std::min<int>(
        maxThreadsDim[1], std::min<int>(lastPow2(isizeW), max_threads / block_x));
      int block_z = std::min<int>(
        maxThreadsDim[2], std::min<int>(lastPow2(isizeH), max_threads / block_x / block_y));
      // block_x is recomputed so budget unused by clamped y/z returns to C;
      block_x = std::min<int>(
        maxThreadsDim[0], std::min<int>(lastPow2(sizeC), max_threads / block_y / block_z));
      const dim3 block(block_x, block_y, block_z);
      int kernel_stride_C = cuda::ATenCeilDiv(sizeC, block_x * 4);
      int kernel_size_C = cuda::ATenCeilDiv(sizeC, block_x * kernel_stride_C);
      // Do NOT clip grid_x, striding on Batch dimension is not in the kernel,
      // although it could be easily implemented given current kernel.
      int grid_x = sizeB*kernel_stride_C;
      // it's OK to clip grid_y & grid_z, as we block the two dimensions in the kernel;
      int grid_y = std::min<int>(
        maxGridSize[1], cuda::ATenCeilDiv(isizeW, block_y*BLOCK_STRIDE));
      int grid_z = std::min<int>(
        maxGridSize[2], cuda::ATenCeilDiv(isizeH, block_z*BLOCK_STRIDE));
      const dim3 grid(grid_x, grid_y, grid_z);
      // we are dealing with packed tensor here. max index is the same as numel.
      // TODO: to really support input tensor large enought to go beyond int32,
      // we will need to restrict out shared memory usage and adjust the launch
      // config;
      AT_ASSERT(input.numel() < std::numeric_limits<int32_t>::max());
      AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
        input.scalar_type(), "adaptive_avg_pool2d_backward_nhwc_cuda", [&] {
          AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "adaptive_avg_pool2d_backward_nhwc_cuda", [&] {
            // accumulator strips + per-row/col factors + W window bounds;
            // layout must match adaptive_average_gradinput_nhwc.
            size_t shmem_size = (kernel_size_C * block_x * block_y * block_z + osizeH + osizeW) * sizeof(scalar_t) + 2 * isizeW * sizeof(int32_t);
            AT_ASSERT(shmem_size <= sharedMemPerBlock);
            adaptive_average_gradinput_nhwc<int32_t><<<grid, block, shmem_size, at::cuda::getCurrentCUDAStream()>>> (
              gradInput.data_ptr<scalar_t>(),
              gradOutput.data_ptr<scalar_t>(),
              sizeB, sizeC, isizeH, isizeW, osizeH, osizeW,
              kernel_stride_C, kernel_size_C,
              ostrideB, ostrideC, ostrideH, ostrideW);
          });
        }
      );
      break;
    }
    case at::MemoryFormat::Contiguous: {
      bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests
      Tensor gradOutput = gradOutput_.contiguous();
      int64_t sizeD = input.size(-3);
      int64_t isizeH = input.size(-2);
      int64_t isizeW = input.size(-1);
      int64_t osizeH = gradOutput.size(-2);
      int64_t osizeW = gradOutput.size(-1);
      // grid.x covers one (batch*channel) plane per block-column;
      int64_t grid_x = sizeD;
      if (input.ndimension() == 4) grid_x *= input.size(-4);
      //bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0);
      AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
        input.scalar_type(), "adaptive_avg_pool2d_backward_cuda", [&] {
          AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "adaptive_avg_pool2d_backward_cuda", [&] {
            scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
            scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
            // cuda blocks & threads:
            int blocksH = std::max((int)(16L / sizeD), 1);
            dim3 blocks(grid_x, blocksH);
            dim3 threads(32, 8);
            if(atomic)
            {
              // run updateGradInput kernel, accumulate gradients atomically
              atomic_adaptive_average_gradinput <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> (
                gradInput_data, gradOutput_data,
                isizeH, isizeW, osizeH, osizeW);
            }
            else
            {
              // run updateGradInput kernel
              adaptive_average_gradinput <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> (
                gradInput_data, gradOutput_data,
                isizeH, isizeW, osizeH, osizeW);
            }
          });
        }
      );
      break;
    }
    default:
      TORCH_CHECK(
        false,
        "Unsupported memory format. Supports only ChannelsLast, Contiguous");
  }
  THCudaCheck(cudaGetLastError());
}
} // namespace
// Out-variant entry point: fills the caller-supplied `output` via the
// shared template (which resizes it) and hands it back.
Tensor& adaptive_avg_pool2d_out_cuda(
  Tensor& output,
  const Tensor& input,
  IntArrayRef output_size)
{
  adaptive_avg_pool2d_out_cuda_template(output, input, output_size);
  return output;
}
// Functional entry point: allocates the result tensor and forwards to the
// shared out-variant implementation, which sizes it to output_size.
Tensor adaptive_avg_pool2d_cuda(
  at::Tensor const& input,
  IntArrayRef output_size)
{
  Tensor result = at::empty({0}, input.options());
  adaptive_avg_pool2d_out_cuda_template(result, input, output_size);
  return result;
}
// Out-variant backward: shapes gradInput like input, then delegates to the
// shared backward template.
Tensor& adaptive_avg_pool2d_backward_out_cuda(
  Tensor& gradInput,
  const Tensor& gradOutput,
  const Tensor& input)
{
  gradInput.resize_as_(input);
  adaptive_avg_pool2d_backward_out_cuda_template(gradInput, gradOutput, input);
  return gradInput;
}
// Functional backward: starts from zeros because the contiguous code path
// accumulates into gradInput instead of overwriting it.
Tensor adaptive_avg_pool2d_backward_cuda(
  const Tensor& gradOutput,
  const Tensor& input)
{
  Tensor result = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  adaptive_avg_pool2d_backward_out_cuda_template(result, gradOutput, input);
  return result;
}
} // at::native
} // at
#undef BLOCK_STRIDE
#undef CUDA_MAX_THREADS
#undef START_IND
#undef END_IND
|
c3d84c58786096ae9ac9bad43a88c625dfb60ec1.hip | // !!! This is a file automatically generated by hipify!!!
#include "neutron_cuda_kernel.h"
#include <hip/hip_runtime.h>
#include <hip/hip_cooperative_groups.h>
// https://devblogs.nvidia.com/cuda-pro-tip-optimized-filtering-warp-aggregated-atomics
using namespace cooperative_groups;
// Monte-Carlo neutron transport kernel (HIP).
// Each thread simulates up to `neutronsPerThread` neutrons through a slab
// of thickness `h` with total cross-section `c` and capture cross-section
// `c_c` (read from `params`). Per-neutron outcomes:
//   x < 0   -> counted in r (left the slab on the entry side)
//   x >= h  -> counted in t (left the slab on the far side)
//   capture -> counted in b; absorption position appended to `absorbed`
// Appends to `absorbed` use warp-aggregated atomics (one atomicAdd on
// `next_absorbed` per coalesced group — see the blog link at the top of
// the file); r/b/t totals are flushed to d_r/d_b/d_t once per thread.
// NOTE(review): assumes `seeds` has one entry per launched thread and
// that n <= (total threads) * neutronsPerThread — confirm at call site.
__global__
void neutron_cuda_kernel(
  long n,
  int neutronsPerThread,
  const ProblemParameters* params,
  unsigned long long int* next_absorbed,
  float* absorbed,
  unsigned long long int* d_r,
  unsigned long long int* d_b,
  unsigned long long int* d_t,
  unsigned long long* seeds
) {
  const long id = blockIdx.x*blockDim.x + threadIdx.x;
  hiprandState_t state;
  hiprand_init(seeds[id], 0, 0, &state);
  const float c = params->c;
  const float c_c = params->c_c;
  const float h = params->h;
  unsigned int r = 0, b = 0, t = 0; // int is enough for local counts
  // this thread owns neutrons [k, k+m) of the global population of n
  const long k = id*neutronsPerThread;
  const long m = min(static_cast<long>(neutronsPerThread), n-k);
  for (long i=0; i<m; i++) {
    float d = 0.0;  // direction angle; 0 == straight into the slab
    float x = 0.0;  // depth inside the slab
    float v;        // absorption position, or NO_VAL if not absorbed
    while (1) {
      // sample free-flight distance from an exponential distribution
      const float u = hiprand_uniform (&state);
      const float L = -(1 / c) * log(u);
      x = x + L * cos(d);
      v = NO_VAL;
      if (x < 0) {
        r++;
        break;
      }
      else if (x >= h) {
        t++;
        break;
      }
      else if (hiprand_uniform (&state) < c_c / c) {
        // captured: remember where, so it can be recorded below
        b++;
        v = x;
        break;
      }
      else {
        // scattered: draw a new direction angle in [0, pi)
        const float u = hiprand_uniform (&state);
        d = u * M_PI;
      }
    }
    // save values to global memory
    if (v != NO_VAL) {
      // warp-aggregated append: only rank 0 of the coalesced group pays
      // for the atomic; the base offset is then broadcast via shfl.
      auto g = coalesced_threads();
      unsigned long long int pos;
      if (g.thread_rank() == 0)
        pos = atomicAdd(next_absorbed, g.size());
      absorbed[g.shfl(pos, 0) + g.thread_rank()] = v;
    }
  }
  // flush per-thread tallies once, at the end
  atomicAdd(d_r, static_cast<unsigned long long int>(r));
  atomicAdd(d_b, static_cast<unsigned long long int>(b));
  atomicAdd(d_t, static_cast<unsigned long long int>(t));
}
| c3d84c58786096ae9ac9bad43a88c625dfb60ec1.cu | #include "neutron_cuda_kernel.h"
#include <cuda.h>
#include <cooperative_groups.h>
// https://devblogs.nvidia.com/cuda-pro-tip-optimized-filtering-warp-aggregated-atomics
using namespace cooperative_groups;
// Monte-Carlo neutron transport through a 1-D slab of thickness h (CUDA build;
// identical logic to the HIP variant above, using cuRAND). Each thread
// simulates up to neutronsPerThread neutrons (clamped so the grid covers
// exactly n in total) and counts reflected (r), transmitted (t) and absorbed
// (b) neutrons in registers; absorption depths are appended to `absorbed`
// with the warp-aggregated atomic pattern, and the tallies are folded into
// d_r/d_b/d_t with one atomicAdd each.
__global__
void neutron_cuda_kernel(
  long n,                                 // total neutron count across the grid
  int neutronsPerThread,                  // per-thread workload (tail threads may do fewer)
  const ProblemParameters* params,        // device-resident c, c_c, h
  unsigned long long int* next_absorbed,  // global append cursor into `absorbed`
  float* absorbed,                        // out: depths at which neutrons were absorbed
  unsigned long long int* d_r,            // global reflected counter
  unsigned long long int* d_b,            // global absorbed counter
  unsigned long long int* d_t,            // global transmitted counter
  unsigned long long* seeds               // per-thread RNG seeds; assumes >= gridDim.x*blockDim.x entries -- TODO confirm
) {
  const long id = blockIdx.x*blockDim.x + threadIdx.x;
  curandState state;
  curand_init(seeds[id], 0, 0, &state);
  const float c = params->c;      // presumably total interaction cross-section -- see ProblemParameters
  const float c_c = params->c_c;  // presumably capture cross-section (c_c/c = capture probability below)
  const float h = params->h;      // slab thickness
  unsigned int r = 0, b = 0, t = 0; // int is enough for local counts
  const long k = id*neutronsPerThread;                             // first neutron index owned by this thread
  const long m = min(static_cast<long>(neutronsPerThread), n-k);   // clamp at the grid tail
  for (long i=0; i<m; i++) {
    float d = 0.0;   // direction angle; 0 = straight into the slab
    float x = 0.0;   // current depth
    float v;
    while (1) {
      const float u = curand_uniform (&state);
      // exponential free-path sample: inverse-CDF of rate c
      // NOTE(review): log/cos below are double-precision overloads on float
      // operands; logf/cosf would avoid the promotion -- verify intent.
      const float L = -(1 / c) * log(u);
      x = x + L * cos(d);
      v = NO_VAL;  // sentinel: "not absorbed on this flight"
      if (x < 0) {
        r++;       // escaped backwards: reflected
        break;
      }
      else if (x >= h) {
        t++;       // escaped forwards: transmitted
        break;
      }
      else if (curand_uniform (&state) < c_c / c) {
        b++;       // captured: remember absorption depth
        v = x;
        break;
      }
      else {
        // scattered: draw a new direction angle in [0, pi)
        const float u = curand_uniform (&state);
        d = u * M_PI;
      }
    }
    // save values to global memory
    if (v != NO_VAL) {
      // warp-aggregated append: rank 0 of the coalesced group reserves
      // g.size() slots with one atomicAdd, broadcasts the base offset via
      // shfl, and each lane writes its own slot.
      auto g = coalesced_threads();
      unsigned long long int pos;
      if (g.thread_rank() == 0)
        pos = atomicAdd(next_absorbed, g.size());
      absorbed[g.shfl(pos, 0) + g.thread_rank()] = v;
    }
  }
  // fold the per-thread tallies into the global counters (one atomic each)
  atomicAdd(d_r, static_cast<unsigned long long int>(r));
  atomicAdd(d_b, static_cast<unsigned long long int>(b));
  atomicAdd(d_t, static_cast<unsigned long long int>(t));
}
|
72b0a9fa14e3a706299a1c990a4e63568187fb28.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* CuMatrixReductions.cu
*
* basic reduction kernels, exec ctx
* Author: reid
*/
#include "CuMatrix.h"
#include "Kernels.h"
#include "caps.h"
#include "util.h"
#include "CuDefs.h"
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BinaryOp> __host__ CUDART_DEVICE
T CuMatrix<T>::reduce(const DMatrix<T>& d_M, BinaryOp<T> op, T start, hipStream_t stream )
#else
template<typename T> template<int StateDim> __host__ CUDART_DEVICE
T CuMatrix<T>::reduce(const DMatrix<T>& d_M, MonoidF<T,StateDim> op, T start, hipStream_t stream )
#endif
{
	// Reduce all m*n elements of the device matrix d_M with `op`, seeded with
	// `start`, on `stream`; returns the scalar by value. Callable from host
	// and (with CDP) device code -- the #ifndef __CUDA_ARCH__ branches below
	// select the appropriate copy/sync primitives per side.
	long nP = d_M.m * d_M.n;
	if(nP == 1) {
		// Degenerate single-element matrix: just fetch the element.
		T result;
		CuTimer timer;
		timer.start();
#ifndef __CUDA_ARCH__
		cherr(hipMemcpy(&result, d_M.elements, sizeof(T), hipMemcpyDeviceToHost));
		//CuMatrix<T>::incDhCopy("CuMatrix<T>::reduce(long l)", sizeof(T),timer.stop());
#else
		memcpy(&result, d_M.elements, sizeof(T));
#endif
		return result;
	}
	int threads;
	int blocks;
	// choose a grid/block configuration for reducing nP elements
	::getReductionExecContext(blocks, threads, nP);
	if(checkDebug(debugRedux )) flprintf("CuMatrix<T>::reduce blocks %d threads %d np %d\n", blocks,threads, nP);
#ifdef CuMatrix_Enable_Cdp
	cherr(hipPeekAtLastError());
#endif
	//CuMatrix<T> res(blocks, 1, false, true);
	cherr(hipPeekAtLastError());
	if(checkDebug(debugRedux)) {
		prlocf("res ");
		//res.printShortString();
	}
	// scratch buffer: one partial result per block; freed before return
	DMatrix<T> d_Res(blocks,1);
	cherr(hipMalloc( &(d_Res.elements), blocks*sizeof(T)));
	//res.tile0(d_Res, false);
	if(checkDebug(debugRedux)) {
		prlocf("after res.tile0(..)\n");
	}
#ifndef __CUDA_ARCH__
	checkCudaError(hipDeviceSynchronize());
	if(checkDebug(debugRedux)) prlocf("after tile0");
	if(checkDebug(debugRedux)) prlocf("host \n");
#else
	if(checkDebug(debugRedux)) prlocf("dev \n");
#endif
	T total = 0;
	if(checkDebug(debugRedux)) flprintf("&total %p\n",&total);
	if(checkDebug(debugRedux)) flprintf("curr dev %d device of d_m.elems %d device of d_Res.elems %d\n",ExecCaps::currDev(),
			b_util::getDevice(d_M.elements), b_util::getDevice(d_Res.elements));
	if(checkDebug(debugRedux)) flprintf("d_M.m,d_M.n %d d_M.p %d stride %d\n",d_M.m, d_M.n, d_M.p ,d_M.n != d_M.p ? d_M.p : 1);
	// launch the reduction; reduceLauncher deposits the final scalar in `total`
	reduceLauncher(&total, d_Res, nP, d_M, op, start, 0, stream);
#ifndef __CUDA_ARCH__
	cherr(hipPeekAtLastError());
	cherr(hipStreamSynchronize(stream));
#else
#ifdef CuMatrix_Enable_Cdp
	cherr(hipPeekAtLastError());
#endif
	__syncthreads();
#endif
	if(checkDebug(debugRedux))flprintf("total now %f\n",total);
	cherr(hipFree( d_Res.elements));
	return total;
}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE float CuMatrix<float>::reduce<maxBinaryOp>(DMatrix<float> const&, maxBinaryOp<float>, float, ihipStream_t*);
template __host__ CUDART_DEVICE float CuMatrix<float>::reduce<minBinaryOp>(DMatrix<float> const&, minBinaryOp<float>, float, ihipStream_t*);
template __host__ CUDART_DEVICE double CuMatrix<double>::reduce<maxBinaryOp>(DMatrix<double> const&, maxBinaryOp<double>, double, ihipStream_t*);
template __host__ CUDART_DEVICE double CuMatrix<double>::reduce<minBinaryOp>(DMatrix<double> const&, minBinaryOp<double>, double, ihipStream_t*);
template __host__ CUDART_DEVICE ulong CuMatrix<ulong>::reduce<minBinaryOp>(DMatrix<ulong> const&, minBinaryOp<ulong>, ulong, ihipStream_t*);
template __host__ CUDART_DEVICE ulong CuMatrix<ulong>::reduce<maxBinaryOp>(DMatrix<ulong> const&, maxBinaryOp<ulong>, ulong, ihipStream_t*);
template __host__ CUDART_DEVICE int CuMatrix<int>::reduce<maxBinaryOp>(DMatrix<int> const&, maxBinaryOp<int>, int, ihipStream_t*);
template __host__ CUDART_DEVICE int CuMatrix<int>::reduce<minBinaryOp>(DMatrix<int> const&, minBinaryOp<int>, int, ihipStream_t*);
template __host__ CUDART_DEVICE unsigned int CuMatrix<unsigned int>::reduce<maxBinaryOp>(DMatrix<unsigned int> const&, maxBinaryOp<unsigned int>, unsigned int, ihipStream_t*);
template __host__ CUDART_DEVICE unsigned int CuMatrix<unsigned int>::reduce<minBinaryOp>(DMatrix<unsigned int> const&, minBinaryOp<unsigned int>, unsigned int, ihipStream_t*);
template __host__ CUDART_DEVICE float CuMatrix<float>::reduce<andBinaryOp>(DMatrix<float> const&, andBinaryOp<float>, float, ihipStream_t*);
template __host__ CUDART_DEVICE double CuMatrix<double>::reduce<andBinaryOp>(DMatrix<double> const&, andBinaryOp<double>, double, ihipStream_t*);
template __host__ CUDART_DEVICE int CuMatrix<int>::reduce<andBinaryOp>(DMatrix<int> const&, andBinaryOp<int>, int, ihipStream_t*);
template __host__ CUDART_DEVICE unsigned int CuMatrix<unsigned int>::reduce<andBinaryOp>(DMatrix<unsigned int> const&, andBinaryOp<unsigned int>, unsigned int, ihipStream_t*);
template __host__ CUDART_DEVICE long CuMatrix<long>::reduce<andBinaryOp>(DMatrix<long> const&, andBinaryOp<long>, long, ihipStream_t*);
template __host__ CUDART_DEVICE unsigned long CuMatrix<unsigned long>::reduce<andBinaryOp>(DMatrix<unsigned long> const&, andBinaryOp<unsigned long>, unsigned long, ihipStream_t*);
template __host__ CUDART_DEVICE float CuMatrix<float>::reduce<orBinaryOp>(DMatrix<float> const&, orBinaryOp<float>, float, ihipStream_t*);
template __host__ CUDART_DEVICE double CuMatrix<double>::reduce<orBinaryOp>(DMatrix<double> const&, orBinaryOp<double>, double, ihipStream_t*);
template __host__ CUDART_DEVICE int CuMatrix<int>::reduce<orBinaryOp>(DMatrix<int> const&, orBinaryOp<int>, int, ihipStream_t*);
template __host__ CUDART_DEVICE unsigned int CuMatrix<unsigned int>::reduce<orBinaryOp>(DMatrix<unsigned int> const&, orBinaryOp<unsigned int>, unsigned int, ihipStream_t*);
template __host__ CUDART_DEVICE long CuMatrix<long>::reduce<orBinaryOp>(DMatrix<long> const&, orBinaryOp<long>, long, ihipStream_t*);
template __host__ CUDART_DEVICE unsigned long CuMatrix<unsigned long>::reduce<orBinaryOp>(DMatrix<unsigned long> const&, orBinaryOp<unsigned long>, unsigned long, ihipStream_t*);
template __host__ CUDART_DEVICE float CuMatrix<float>::reduce<plusBinaryOp>(DMatrix<float> const&, plusBinaryOp<float>, float, ihipStream_t*);
template __host__ CUDART_DEVICE double CuMatrix<double>::reduce<plusBinaryOp>(DMatrix<double> const&, plusBinaryOp<double>, double, ihipStream_t*);
template __host__ CUDART_DEVICE ulong CuMatrix<ulong>::reduce<plusBinaryOp>(DMatrix<ulong> const&, plusBinaryOp<ulong>, ulong, ihipStream_t*);
template __host__ CUDART_DEVICE int CuMatrix<int>::reduce<plusBinaryOp>(DMatrix<int> const&, plusBinaryOp<int>, int, ihipStream_t*);
template __host__ CUDART_DEVICE unsigned int CuMatrix<unsigned int>::reduce<plusBinaryOp>(DMatrix<unsigned int> const&, plusBinaryOp<unsigned int>, unsigned int, ihipStream_t*);
template __host__ CUDART_DEVICE long CuMatrix<long>::reduce<plusBinaryOp>(DMatrix<long> const&, plusBinaryOp<long>, long, ihipStream_t*);
#else
template __host__ CUDART_DEVICE float CuMatrix<float>::reduce(DMatrix<float> const&, MonoidF<float,1>, float, ihipStream_t*);
template __host__ CUDART_DEVICE double CuMatrix<double>::reduce(DMatrix<double> const&, MonoidF<double,1>, double, ihipStream_t*);
template __host__ CUDART_DEVICE long CuMatrix<long>::reduce(DMatrix<long> const&, MonoidF<long,1>, long, ihipStream_t*);
template __host__ CUDART_DEVICE ulong CuMatrix<ulong>::reduce(DMatrix<ulong> const&, MonoidF<ulong,1>, ulong, ihipStream_t*);
template __host__ CUDART_DEVICE int CuMatrix<int>::reduce(DMatrix<int> const&, MonoidF<int,1>, int, ihipStream_t*);
template __host__ CUDART_DEVICE uint CuMatrix<uint>::reduce(DMatrix<uint> const&, MonoidF<uint,1>, uint, ihipStream_t*);
#endif
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BinaryOp> __host__ CUDART_DEVICE
void CuMatrix<T>::reduceColumn(T* total, const DMatrix<T>& d_M, BinaryOp<T> op, T start, int col, hipStream_t stream )
#else
template<typename T> template<int StateDim> __host__ CUDART_DEVICE
void CuMatrix<T>::reduceColumn(T* total, const DMatrix<T>& d_M, MonoidF<T,StateDim> op, T start, int col, hipStream_t stream )
#endif
{
	// Reduce the d_M.m entries of column `col` of device matrix d_M with `op`,
	// seeded with `start`; the result is written through `total` by
	// reduceLauncher. Note: unlike reduce() above, this variant does not
	// synchronize the stream before returning.
	long nP = d_M.m;	// one entry per row of the column
	int threads;
	int blocks;
	::getReductionExecContext(blocks, threads, nP);
	if(checkDebug(debugRedux))flprintf("CuMatrix<T>::reduceColumn blocks %d threads %d np %d\n", blocks,threads, nP);
#ifdef CuMatrix_Enable_Cdp
	cherr(hipPeekAtLastError());
#endif
	// per-block partial-result buffer, owned by the local CuMatrix `res`
	CuMatrix<T> res(blocks, 1, false, true);
	if(checkDebug(debugRedux)) {
		prlocf("res ");
		res.printShortString();
	}
	DMatrix<T> d_Res;
	res.tile0(d_Res, false);
	if(checkDebug(debugRedux)) {
		prlocf("after res.tile0(..)\n");
	}
#ifndef __CUDA_ARCH__
	checkCudaError(hipDeviceSynchronize());
	if(checkDebug(debugRedux)){ prlocf("host ");}
#else
	if(checkDebug(debugRedux)){ prlocf("dev ");}
#endif
	if(checkDebug(debugRedux)){ flprintf("&total %p\n",&total); }
	reduceLauncher(total, d_Res, nP, d_M, op, start, col, stream)
;}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceColumn<plusBinaryOp>(float*, DMatrix<float> const&, plusBinaryOp<float>, float, int, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceColumn<plusBinaryOp>(int*, DMatrix<int> const&, plusBinaryOp<int>, int, int, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<long>::reduceColumn<plusBinaryOp>(long*, DMatrix<long> const&, plusBinaryOp<long>, long, int, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<unsigned int>::reduceColumn<plusBinaryOp>(unsigned int*, DMatrix<unsigned int> const&, plusBinaryOp<unsigned int>, unsigned int, int, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceColumn<plusBinaryOp>(double*, DMatrix<double> const&, plusBinaryOp<double>, double, int, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<unsigned long>::reduceColumn<plusBinaryOp>(unsigned long*, DMatrix<unsigned long> const&, plusBinaryOp<unsigned long>, unsigned long, int, ihipStream_t*);
#else
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceColumn(float*, DMatrix<float> const&, MonoidF<float, 1>, float, int, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceColumn(double*, DMatrix<double> const&, MonoidF<double, 1>, double, int, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceColumn(int*, DMatrix<int> const&, MonoidF<int, 1>, int, int, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<uint>::reduceColumn(uint*, DMatrix<uint> const&, MonoidF<uint, 1>, uint, int, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<long>::reduceColumn(long*, DMatrix<long> const&, MonoidF<long, 1>, long, int, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<ulong>::reduceColumn(ulong*, DMatrix<ulong> const&, MonoidF<ulong, 1>, ulong, int, ihipStream_t*);
#endif
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BinaryOp> __host__ CUDART_DEVICE void CuMatrix<T>::reduceAsync(
		T* result, const DMatrix<T>& d_M, BinaryOp<T> op, T start, hipStream_t stream )
#else
template<typename T> template<int StateDim> __host__ CUDART_DEVICE void CuMatrix<T>::reduceAsync(
		T* result, const DMatrix<T>& d_M, MonoidF<T,StateDim> op, T start, hipStream_t stream )
#endif
{
	// Reduce all elements of d_M with `op` on `stream`, writing the scalar
	// through `result`. NOTE(review): despite the name, the host path below
	// blocks on hipStreamSynchronize before returning -- confirm whether
	// the "async" contract is intended.
	long nP = d_M.m * d_M.n;
	int threads;
	int blocks;
	::getReductionExecContext(blocks, threads, nP);
	if(checkDebug(debugExec)) flprintf("reduceAsync blocks %d\n", blocks);
	// per-block partial-result buffer, owned by the local CuMatrix `res`
	CuMatrix<T> res(blocks, 1, true, true);
	DMatrix<T> d_Res;
	res.tile0(d_Res, false);
	reduceLauncher(result, d_Res, nP, d_M, op, start, 0, stream);
#ifndef __CUDA_ARCH__
	checkCudaError(hipStreamSynchronize(stream));
#else
	__syncthreads();
#endif
}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceAsync<maxBinaryOp>(float*,DMatrix<float> const&, maxBinaryOp<float>, float, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceAsync<minBinaryOp>(float*,DMatrix<float> const&, minBinaryOp<float>, float, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceAsync<maxBinaryOp>(double*,DMatrix<double> const&, maxBinaryOp<double>, double, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceAsync<minBinaryOp>(double*,DMatrix<double> const&, minBinaryOp<double>, double, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<ulong>::reduceAsync<maxBinaryOp>(ulong*,DMatrix<ulong> const&, maxBinaryOp<ulong>, ulong, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<ulong>::reduceAsync<minBinaryOp>(ulong*,DMatrix<ulong> const&, minBinaryOp<ulong>, ulong, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceAsync<minBinaryOp>(int*, DMatrix<int> const&, minBinaryOp<int>, int, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceAsync<maxBinaryOp>(int*, DMatrix<int> const&, maxBinaryOp<int>, int, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<uint>::reduceAsync<minBinaryOp>(uint*, DMatrix<uint> const&, minBinaryOp<uint>, uint, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<uint>::reduceAsync<maxBinaryOp>(uint*, DMatrix<uint> const&, maxBinaryOp<uint>, uint, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<long>::reduceAsync<minBinaryOp>(long*, DMatrix<long> const&, minBinaryOp<long>, long, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<long>::reduceAsync<maxBinaryOp>(long*, DMatrix<long> const&, maxBinaryOp<long>, long, ihipStream_t*);
#else
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceAsync<1>(float*,DMatrix<float> const&, MonoidF<float,1>, float, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceAsync<1>(double*,DMatrix<double> const&, MonoidF<double,1>, double, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceAsync<1>(int*,DMatrix<int> const&, MonoidF<int,1>, int, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<uint>::reduceAsync<1>(uint*,DMatrix<uint> const&, MonoidF<uint,1>, uint, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<long>::reduceAsync<1>(long*,DMatrix<long> const&, MonoidF<long,1>, long, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<ulong>::reduceAsync<1>(ulong*,DMatrix<ulong> const&, MonoidF<ulong,1>, ulong, ihipStream_t*);
#endif
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BinaryOp> __host__ CUDART_DEVICE void CuMatrix<T>::reduceAsyncBuffer(
		T* result, DMatrix<T>& buffer, int blocks, int threads, long nP, const DMatrix<T>& d_M, BinaryOp<T> op, T start, hipStream_t stream )
#else
template<typename T> template<int StateDim> __host__ CUDART_DEVICE void CuMatrix<T>::reduceAsyncBuffer(
		T* result, DMatrix<T>& buffer, int blocks, int threads, long nP, const DMatrix<T>& d_M, MonoidF<T,StateDim> op, T start, hipStream_t stream )
#endif
{
	// Variant of reduceAsync where the caller supplies the per-block scratch
	// `buffer` (and precomputed blocks/threads/nP), avoiding a per-call
	// allocation. The scalar is written through `result`. NOTE(review):
	// `blocks` and `threads` are accepted but not passed on here --
	// reduceLauncher presumably derives its own launch config from nP;
	// confirm.
	if(checkDebug(debugExec)) flprintf("reduceAsyncBuffer blocks %d\n", blocks);
	reduceLauncher(result, buffer, nP, d_M, op, start, 0, stream);
#ifndef __CUDA_ARCH__
	checkCudaError(hipStreamSynchronize(stream));
#else
	__syncthreads();
#endif
}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceAsyncBuffer<maxBinaryOp>( float*,DMatrix<float>&,int, int, long, DMatrix<float> const&, maxBinaryOp<float>, float, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceAsyncBuffer<minBinaryOp>(float*,DMatrix<float>&,int, int, long, DMatrix<float> const&, minBinaryOp<float>, float, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceAsyncBuffer<maxBinaryOp>(double*,DMatrix<double>&,int, int, long, DMatrix<double> const&, maxBinaryOp<double>, double, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceAsyncBuffer<minBinaryOp>(double*,DMatrix<double>&,int, int, long, DMatrix<double> const&, minBinaryOp<double>, double, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<ulong>::reduceAsyncBuffer<maxBinaryOp>(ulong*,DMatrix<ulong>&,int, int, long, DMatrix<ulong> const&, maxBinaryOp<ulong>, ulong, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<ulong>::reduceAsyncBuffer<minBinaryOp>(ulong*,DMatrix<ulong>&,int, int, long, DMatrix<ulong> const&, minBinaryOp<ulong>, ulong, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceAsyncBuffer<minBinaryOp>(int*, DMatrix<int>&, int, int, long, DMatrix<int> const&, minBinaryOp<int>, int, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceAsyncBuffer<maxBinaryOp>(int*, DMatrix<int>&, int, int, long, DMatrix<int> const&, maxBinaryOp<int>, int, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<unsigned int>::reduceAsyncBuffer<minBinaryOp>(unsigned int*, DMatrix<unsigned int>&, int, int, long, DMatrix<unsigned int> const&, minBinaryOp<unsigned int>, unsigned int, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<unsigned int>::reduceAsyncBuffer<maxBinaryOp>(unsigned int*, DMatrix<unsigned int>&, int, int, long, DMatrix<unsigned int> const&, maxBinaryOp<unsigned int>, unsigned int, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<long>::reduceAsyncBuffer<minBinaryOp>(long*, DMatrix<long>&, int, int, long, DMatrix<long> const&, minBinaryOp<long>, long, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<long>::reduceAsyncBuffer<maxBinaryOp>(long*, DMatrix<long>&, int, int, long, DMatrix<long> const&, maxBinaryOp<long>, long, ihipStream_t*);
#else
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceAsyncBuffer(float*, DMatrix<float>&, int, int, long, DMatrix<float> const&, MonoidF<float, 1>, float, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceAsyncBuffer(double*, DMatrix<double>&, int, int, long, DMatrix<double> const&, MonoidF<double, 1>, double, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceAsyncBuffer(int*, DMatrix<int>&, int, int, long, DMatrix<int> const&, MonoidF<int, 1>, int, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<uint>::reduceAsyncBuffer(uint*, DMatrix<uint>&, int, int, long, DMatrix<uint> const&, MonoidF<uint, 1>, uint, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<long>::reduceAsyncBuffer(long*, DMatrix<long>&, int, int, long, DMatrix<long> const&, MonoidF<long, 1>, long, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<ulong>::reduceAsyncBuffer(ulong*, DMatrix<ulong>&, int, int, long, DMatrix<ulong> const&, MonoidF<ulong, 1>, ulong, ihipStream_t*);
#endif
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BinaryOp> __host__ CUDART_DEVICE T CuMatrix<T>::reduce(BinaryOp<T> op, T start, hipStream_t stream ) const
#else
template<typename T> template<int StateDim> __host__ CUDART_DEVICE T CuMatrix<T>::reduce(MonoidF<T,StateDim> op, T start, hipStream_t stream ) const
#endif
{
	// Reduce the whole (possibly tiled, possibly multi-GPU) matrix with `op`,
	// seeded with `start`: each tile is reduced on-device into resA[tile],
	// then (when there is more than one tile) a final device reduction folds
	// the per-tile scalars into a single value.
	//assert(lastMod != mod_host);
	DMatrix<T> d_A;
	if(checkDebug(debugRedux) ) flprintf("tiler.m_m %u, tiler.m_n %u, tiler.m_p %u m_size %lu tileSize %ld\n",
			tiler.m_m,tiler.m_n,tiler.m_p, tiler.m_size, tiler.tileSize);
	int roff=0, coff=0, tileM = 0, tileN = 0, tileP=0;
	int tileCount = tiler.getTileCount();
	T* resA;	// host-visible array of per-tile partial results
	T res;
#ifndef __CUDA_ARCH__
	cherr(hipHostMalloc(&resA,tileCount*sizeof(T),0));
#else
	resA = (T*) malloc(tileCount*sizeof(T));
#endif
	int lastGpu = -1;
	int orgDevice = ExecCaps::currDev();
	int gpuCount = tiler.countGpus();
	if(checkDebug(debugRedux) ) flprintf("orgDev %d gpuCount %d\n",orgDevice, gpuCount);
	hipStream_t* streams = null;
	if(gpuCount > 1) {
		assert(!stream);
		// BUG FIX: this statement used to re-declare `streams`, shadowing the
		// outer null pointer -- the tile loop below then dereferenced null and
		// the malloc'd array leaked. Assign to the outer variable instead.
		streams = (hipStream_t* ) malloc(gpuCount * sizeof(hipStream_t));
		for(int i =0 ; i < gpuCount; i++) {
			lastGpu = tiler.nextGpu(lastGpu);
			if(gpuCount> 1)
				ExecCaps_setDevice(lastGpu);
			//cherr(hipSetDevice(lastGpu));
			cherr(hipStreamCreateWithFlags(&streams[i],hipStreamNonBlocking));
		}
	}
	lastGpu = -1;
	if(checkDebug(debugRedux) ) flprintf("m %d, n %d, p %d, tileP %d, tiler.getTileCount() %d tileDir %s\n",m, n, p, tileP, tileCount, b_util::tileDir(tiler.tileD));
	for(int tile = 0; tile < tileCount; tile++) {
		if(gpuCount> 1)
			ExecCaps_setDevice(lastGpu);	// NOTE(review): lastGpu is -1 on the first pass -- verify tile1D updates it before use
		// BUG FIX: `streams` holds gpuCount entries but was indexed by `tile`,
		// overrunning the array whenever tileCount > gpuCount; round-robin
		// the tiles over the available per-GPU streams instead.
		hipStream_t tileStream = gpuCount > 1 ? streams[tile % gpuCount] : stream;
		tiler.tile1D( d_A,roff,coff,tileM, tileN, tileP, tile, tdRows,lastMod == mod_host, lastGpu,tileStream);
		if(checkDebug(debugRedux) ) util<T>::prdm("d_A", d_A);
		resA[tile] = reduce(d_A, op, start, tileStream);
	}
	if(gpuCount > 1) {
		for(int i =0 ; i < gpuCount; i++) {
			cherr(hipStreamDestroy(streams[i]));
		}
		free(streams);
	}
	if(tileCount > 1) {
		if(checkDebug(debugRedux) ) flprintf("reduce tileCount %d\n",tileCount);
		// reduce across tile reductions
		T* dres = null;
#ifndef __CUDA_ARCH__
		cherr(hipMalloc(&dres, tileCount *sizeof(T)));
		cherr(hipMemcpy(dres, resA, tileCount*sizeof(T), hipMemcpyHostToDevice));
#else
		dres = resA;
#endif
		d_A.elements = dres;
		d_A.m = tileCount;
		d_A.n = 1; d_A.p = 1;
		res = reduce(d_A, op, start, stream);
#ifndef __CUDA_ARCH__
		hipFree(dres);
#endif
	} else {
		if(checkDebug(debugRedux) ) flprintf("single tile reduction -> %f\n", (float) resA[0]);
		res = resA[0];
	}
#ifndef __CUDA_ARCH__
	if(checkDebug(debugDestr))flprintf("freeing host resA %p\n", resA);
	cherr(hipHostFree(resA));
#else
	free(resA);
#endif
	return res;
}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE float CuMatrix<float>::reduce<maxBinaryOp>(maxBinaryOp<float>, float, ihipStream_t*) const;
template __host__ CUDART_DEVICE float CuMatrix<float>::reduce<minBinaryOp>(minBinaryOp<float>, float, ihipStream_t*) const;
template __host__ CUDART_DEVICE double CuMatrix<double>::reduce<maxBinaryOp>(maxBinaryOp<double>, double, ihipStream_t*) const;
template __host__ CUDART_DEVICE double CuMatrix<double>::reduce<minBinaryOp>(minBinaryOp<double>, double, ihipStream_t*) const;
template __host__ CUDART_DEVICE int CuMatrix<int>::reduce<maxBinaryOp>(maxBinaryOp<int>, int, ihipStream_t*) const;
template __host__ CUDART_DEVICE int CuMatrix<int>::reduce<minBinaryOp>(minBinaryOp<int>, int, ihipStream_t*) const;
template __host__ CUDART_DEVICE long CuMatrix<long>::reduce<maxBinaryOp>(maxBinaryOp<long>, long, ihipStream_t*) const;
template __host__ CUDART_DEVICE long CuMatrix<long>::reduce<minBinaryOp>(minBinaryOp<long>, long, ihipStream_t*) const;
template __host__ CUDART_DEVICE unsigned int CuMatrix<unsigned int>::reduce<maxBinaryOp>(maxBinaryOp<unsigned int>, unsigned int, ihipStream_t*) const;
template __host__ CUDART_DEVICE unsigned int CuMatrix<unsigned int>::reduce<minBinaryOp>(minBinaryOp<unsigned int>, unsigned int, ihipStream_t*) const;
template __host__ CUDART_DEVICE unsigned long CuMatrix<unsigned long>::reduce<maxBinaryOp>(maxBinaryOp<unsigned long>, unsigned long, ihipStream_t*) const;
template __host__ CUDART_DEVICE unsigned long CuMatrix<unsigned long>::reduce<minBinaryOp>(minBinaryOp<unsigned long>, unsigned long, ihipStream_t*) const;
#else
template __host__ CUDART_DEVICE float CuMatrix<float>::reduce(MonoidF<float,1>, float, ihipStream_t*) const;
template __host__ CUDART_DEVICE double CuMatrix<double>::reduce(MonoidF<double,1>, double, ihipStream_t*) const;
template __host__ CUDART_DEVICE int CuMatrix<int>::reduce(MonoidF<int,1>, int, ihipStream_t*) const;
template __host__ CUDART_DEVICE uint CuMatrix<uint>::reduce(MonoidF<uint,1>, uint, ihipStream_t*) const;
template __host__ CUDART_DEVICE ulong CuMatrix<ulong>::reduce(MonoidF<ulong,1>, ulong, ihipStream_t*) const;
#endif
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BinaryOp> __host__ CUDART_DEVICE T CuMatrix<T>::reduceColumn(BinaryOp<T> op, T start, int col, hipStream_t stream ) const
#else
template<typename T> template<int StateDim> __host__ CUDART_DEVICE T CuMatrix<T>::reduceColumn(MonoidF<T,StateDim> op, T start, int col, hipStream_t stream ) const
#endif
{
	// Reduce column `col` across all row tiles of this matrix with `op`,
	// seeded with `start`; per-tile partials land in resA and, when there is
	// more than one tile, are folded with a final device reduction.
	// BUG FIX: the opening brace and the d_A declaration previously sat
	// inside the #else branch above, so the CuMatrix_Enable_KTS build did
	// not compile; they now follow the #endif and serve both signatures.
	DMatrix<T> d_A;
	T* resA;	// host-visible array of per-tile partial results
	T res;
	int lastGpu = 0;
	int orgDevice = ExecCaps::currDev();
	int gpuCount = tiler.countGpus();
	int tileCount = tiler.getTileCount();
	hipStream_t* streams = null;
	if(gpuCount > 1) {
		assert(!stream);
		streams = (hipStream_t* ) malloc(gpuCount * sizeof(hipStream_t));
		for(int i =0 ; i < gpuCount; i++) {
			lastGpu = tiler.nextGpu(lastGpu);
#ifndef __CUDA_ARCH__
			cherr(hipSetDevice(lastGpu));
#endif
			cherr(hipStreamCreateWithFlags(&streams[i],hipStreamNonBlocking));
		}
	}
	lastGpu = tiler.nextGpu(0);
	int roff, coff, tileM = 0, tileN = 0, tileP = 0;
#ifndef __CUDA_ARCH__
	cherr(hipHostMalloc(&resA,tileCount*sizeof(T),0));
#else
	resA = (T*) malloc(tileCount*sizeof(T));
#endif
	// NOTE(review): the per-GPU streams created above are not used by the
	// tile loop (everything runs on `stream`) -- presumably meant to mirror
	// reduce(); confirm before wiring them in.
	for(int tile = 0; tile < tileCount; tile++) {
		// tile1D(DMatrix<T>& dm, int& roff, int& coff,int& tileM, int& tileN, int& tileP, int t, TileDirection tileD = tdRows, bool copy = true, int lastGpu = -1, hipStream_t stream = 0)
		tiler.tile1D(d_A, roff, coff, tileM, tileN, tileP, tile, tdRows, true);
		reduceColumn(resA + tile, d_A, op, start, col, stream);
	}
	if(gpuCount > 1) {
		// BUG FIX: the per-GPU streams were previously leaked.
		for(int i = 0; i < gpuCount; i++) {
			cherr(hipStreamDestroy(streams[i]));
		}
		free(streams);
	}
	if(tileCount > 1) {
		// BUG FIX: format string had two %d conversions but only one argument
		if(checkDebug(debugRedux) ) flprintf("reduce across %d tile reductions\n",tileCount);
		// reduce across tile reductions
		T* dres = null;
#ifndef __CUDA_ARCH__
		cherr(hipMalloc(&dres, tileCount*sizeof(T)));
		cherr(hipMemcpy(dres, resA, tileCount*sizeof(T), hipMemcpyHostToDevice));
#else
		dres = resA;
#endif
		d_A.elements = dres;
		d_A.m = tileCount;
		d_A.n = 1; d_A.p = 1;
		res = reduce(d_A, op, start, stream);
#ifndef __CUDA_ARCH__
		hipFree(dres);
#endif
	} else {
		if(checkDebug(debugRedux) ) flprintf("single tile reduction -> %f\n", (float) resA[0]);
		res = resA[0];
	}
#ifndef __CUDA_ARCH__
	if(checkDebug(debugDestr))flprintf("freeing host resA %p\n", resA);
	cherr(hipHostFree(resA));
#else
	free(resA);
#endif
	return res;
}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE float CuMatrix<float>::reduceColumn<plusBinaryOp>(plusBinaryOp<float>, float, int, ihipStream_t*) const;
template __host__ CUDART_DEVICE double CuMatrix<double>::reduceColumn<plusBinaryOp>(plusBinaryOp<double>, double, int, ihipStream_t*) const;
template __host__ CUDART_DEVICE unsigned long CuMatrix<unsigned long>::reduceColumn<plusBinaryOp>(plusBinaryOp<unsigned long>, unsigned long, int, ihipStream_t*) const;
#else
template __host__ CUDART_DEVICE float CuMatrix<float>::reduceColumn<1>(MonoidF<float,1>, float, int, ihipStream_t*) const;
template __host__ CUDART_DEVICE double CuMatrix<double>::reduceColumn<1>(MonoidF<double,1>, double, int, ihipStream_t*) const;
template __host__ CUDART_DEVICE int CuMatrix<int>::reduceColumn<1>(MonoidF<int,1>, int, int, ihipStream_t*) const;
template __host__ CUDART_DEVICE uint CuMatrix<uint>::reduceColumn<1>(MonoidF<uint,1>, uint, int, ihipStream_t*) const;
template __host__ CUDART_DEVICE ulong CuMatrix<ulong>::reduceColumn<1>(MonoidF<ulong,1>, ulong, int, ihipStream_t*) const;
#endif
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BinaryOp> __host__ CUDART_DEVICE void CuMatrix<T>::reduceAsync(T* result, BinaryOp<T> op, T start, hipStream_t stream ) const
#else
template<typename T> template<int StateDim> __host__ CUDART_DEVICE void CuMatrix<T>::reduceAsync(T* result, MonoidF<T,StateDim> op, T start, hipStream_t stream ) const
#endif
{
	// Whole-matrix reduction delivering its scalar through `result`.
	// BUG FIX: the return value of reduce() was previously discarded, so
	// *result was never written; store it as the other reduceAsync
	// overloads (which pass `result` into reduceLauncher) do.
	*result = reduce(op, start, stream);
}
#ifdef CuMatrix_Enable_KTS
#else
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceAsync<1>(float*, MonoidF<float, 1>, float, ihipStream_t*) const;
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceAsync<1>(double*, MonoidF<double, 1>, double, ihipStream_t*) const;
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceAsync<1>(int*, MonoidF<int, 1>, int, ihipStream_t*) const;
template __host__ CUDART_DEVICE void CuMatrix<uint>::reduceAsync<1>(uint*, MonoidF<uint, 1>, uint, ihipStream_t*) const;
template __host__ CUDART_DEVICE void CuMatrix<ulong>::reduceAsync<1>(ulong*, MonoidF<ulong, 1>, ulong, ihipStream_t*) const;
#endif
// Sum of every element: a plus-reduction over the whole (tiled) matrix,
// seeded with 0, running on `stream`.
template<typename T> __host__ CUDART_DEVICE T CuMatrix<T>::sum(hipStream_t stream ) const {
	//T res[factor];
#ifndef __CUDA_ARCH__
	if(checkDebug(debugRedux)){
		// host-only debug dump of matrix geometry and leading elements
		flprintf("this %p %dx%dx%d elems %p dev %p lastMod %s\n", this, m,n,p,elements, tiler.buff(), b_util::modStr(lastMod));
		printColoArray<T>(elements,20);
		printDevArray<T>(tiler.buff(),"EVE",-1,20);
	}
#endif
#ifdef CuMatrix_Enable_KTS
	T res = reduce( plusBinaryOp<T>(), 0 , stream);
#else
	// monoid-functor form of plus, supplied by the Functory factory
	T res = reduce(Functory<T,plusBinaryOp>::pinch(), 0 , stream);
#endif
	return res;
}
// Host-side compensated (Kahan) summation over this matrix's element buffer.
// Walks the full m*p backing store and folds in only entries whose column
// offset (i % p) lies inside the logical width n, i.e. skips row padding.
// Requires the host copy to be current; throws notSyncedHost() otherwise.
template<typename T> __host__ T CuMatrix<T>::kahanSum() const {
	if(lastMod == mod_device) {
		dthrow(notSyncedHost());
	}
	T total = 0;	// running compensated sum
	T lost = 0;	// accumulated low-order error
	const int cap = m * p;
	for(int idx = 0; idx < cap; idx++) {
		if(idx == cap - 1){
			if(checkDebug(debugRedux)) outln("last idx " << idx << ", (elements + idx) = " << (elements + idx));
		}
		if(idx % p < n) { // only fold in entries inside the logical width
			const T adjusted = elements[idx] - lost;
			const T next = total + adjusted;
			lost = (next - total) - adjusted;
			total = next;
		}
	}
	return total;
}
// Product of every element: a multiplicative reduction over the whole
// matrix, seeded with 1, running on `stream`.
template<typename T> __host__ CUDART_DEVICE T CuMatrix<T>::prod( hipStream_t stream ) const {
#ifdef CuMatrix_Enable_KTS
	T res = reduce( multBinaryOp<T>(), 1.0, stream);
#else
	// monoid-functor form of multiply, supplied by the Functory factory
	T res = reduce( Functory<T,multBinaryOp>::pinch(), 1.0, stream);
#endif
	return res;
}
// Computes the per-feature (per-column) means of this matrix into `means`,
// walking column-direction tiles; vectors short-circuit to sum()/length().
// `lv` is forwarded to featureAvgKernelL (semantics not visible here --
// see that kernel).
template<typename T> void CuMatrix<T>::featureMeans( CuMatrix<T>& means, bool lv) const {
	DMatrix<T> d_Means, d_X;
	int roff, coff;
	int tileM, tileN, tileP;
	tiler.tileDims(tileM, tileN, tileP, tdCols);
	int tileCount = DIV_UP(m,tileM);
	for(int i = 0; i < tileCount; i++) {
		tiler.tileLike(d_X, roff,coff, tileM, tileN, tileP, i, tdCols, true);
		if(checkDebug(debugTiler))prlocf("means tiling");
		means.tiler.tileLike(d_Means, roff,coff, tileM, tileN, tileP, i, tdCols, true);
		if(vectorQ()) {
			// vector case: single mean = total/length
			// NOTE(review): sum() is recomputed on every tile iteration -- could hoist
			means.set(0, sum()/length());
		} else {
			featureAvgKernelL(d_Means, d_X, lv);
		}
		means.tiler.syncTile(d_Means, roff, coff);
	}
	means.invalidateHost();	// device copy of `means` is now authoritative
}
// Per-feature means computed via the transposed-average kernel.
// Tiles the source row-wise and launches featureAvgTxdKernelL per tile.
template<typename T> void CuMatrix<T>::featureMeansTx( CuMatrix<T>& means) const {
DMatrix<T> d_Means, d_X;
int roff,coff;
int tileM, tileN, tileP;
tiler.tileDims(tileM, tileN, tileP, tdRows); // todo check this
// BUG FIX: derive the tile count from the locally computed tileM
// (was DIV_UP(m,_tileM), referencing a name not declared in this scope;
// sibling featureMeans() uses DIV_UP(m,tileM)).
int tileCount = DIV_UP(m,tileM);
for(int i = 0; i < tileCount; i++) {
tiler.tileLike(d_X, roff,coff, tileM, tileN, tileP, i, tdRows, true);
means.tiler.tileLike(d_Means, roff,coff, tileM, tileN, tileP, i, tdRows, true);
featureAvgTxdKernelL(d_Means, d_X);
means.tiler.syncTile(d_Means, roff, coff);
}
// device side was written; mark the host copy stale
means.invalidateHost();
}
// Per-feature (column) means computed tile by tile, delegating each tile to
// the multi-stream feature-average launcher with 'nstreams' streams.
template<typename T> void CuMatrix<T>::featureMeansStreams( CuMatrix<T>& means, bool lv,int nstreams) const {
	DMatrix<T> d_Means, d_X;
	int roff, coff;
	const int tileTotal = tiler.getTileCount();
	int tileM, tileN, tileP;
	tiler.tileDims(tileM, tileN, tileP, tdCols); // todo check this
	for(int t = 0; t < tileTotal; t++) {
		tiler.tileLike(d_X, roff,coff, tileM, tileN, tileP, t, tdCols, true);
		means.tiler.tileLike(d_Means, roff,coff, tileM, tileN, tileP, t, tdCols, true);
		featureAvgMultiStreamL(d_Means, d_X, lv, nstreams);
		means.tiler.syncTile(d_Means, roff, coff);
	}
	// device side was written; mark the host copy stale
	means.invalidateHost();
}
// Convenience overload: allocates an n x 1 zero column, fills it with the
// per-feature means, and returns it by value.
template<typename T> CuMatrix<T> CuMatrix<T>::featureMeans(bool lv) const {
	CuMatrix<T> result = zeros(n, 1);
	featureMeans(result, lv);
	return result;
}
// matrix is transposed and average calced by doing sum-reductions of each row
// one thread (in x dir) for each row
// Each thread owns one row of the (already transposed) source; under CDP it
// launches a device-side plus-reduction of that row and stores the result in
// sums.elements[rowIdx]. Without CuMatrix_Enable_Cdp this kernel asserts.
template<typename T> __global__ void rowSumKernel2(DMatrix<T> sums, const DMatrix<T> xTxd) {
uint rowIdx = blockIdx.x * blockDim.x + threadIdx.x; // index into column of un-txd source matrix
if(checkDebug(debugRedux) && rowIdx == 0) {
util<T>::printRow(xTxd, rowIdx);
}
__syncthreads();
#ifdef CuMatrix_Enable_Cdp
cherr(hipPeekAtLastError());
#endif
// T* sdata = SharedMemory<T>();
// view of this thread's row as a 1 x xTxd.n matrix (pointer arithmetic only;
// no dereference happens until the in-bounds branch below)
DMatrix<T> row(xTxd.elements + xTxd.p * rowIdx, 1, xTxd.n);
__syncthreads();
#ifdef CuMatrix_Enable_Cdp
cherr(hipPeekAtLastError());
#endif
if(checkDebug(debugRedux) && rowIdx == xTxd.m - 1) {
prlocf("last row as ");
util<T>::printRow(row, 0);
}
if(rowIdx < xTxd.m) {
if(checkDebug(debugRedux)) {
flprintf("reducing row %d\n",rowIdx);
}
T sum = 0;
#ifdef CuMatrix_Enable_Cdp
// dynamic-parallelism child reduction over this row
sum = CuMatrix<T>::reduce(row, Functory<T,plusBinaryOp>::pinch(),0);
// flprintf("sum %g\n", sum);
#else
// no device-callable reduce without CDP support
prlocf("not implemented for non-cdp\n");
assert(false);
#endif
if(checkDebug(debugRedux)) {
flprintf("row %d sum %f\n",rowIdx, sum);
}
sums.elements[rowIdx]= sum;
}
__syncthreads();
#ifdef CuMatrix_Enable_Cdp
cherr(hipPeekAtLastError());
#endif
}
// sum reduces each row
// Launches rowSumKernel2 with one thread per row (capped at 256 threads per
// block) and blocks until completion so callers may read d_rowSums.
template<typename T> __host__ CUDART_DEVICE void CuMatrix<T>::rowSum(DMatrix<T>& d_rowSums, const DMatrix<T>& d_x, hipStream_t stream) {
	cherr(hipPeekAtLastError());
	uint threadsPerBlock = MIN(256, d_x.m);
	uint blockCount = threadsPerBlock >= d_x.m ? 1 : DIV_UP(d_x.m,threadsPerBlock);
	if(checkDebug(debugRedux)){
		prlocf("rowSum on ");
		util<T>::prdm(d_x);
		flprintf(" with gridX %d and blockX %d\n",blockCount,threadsPerBlock);
	}
	//b_util::pFuncPtrAtts((T*)rowSumKernel2<T>);
	bool valid = b_util::validLaunchQ((void*)rowSumKernel2<T>,dim3(blockCount), dim3(threadsPerBlock));
	if(checkDebug(debugRedux))flprintf("valid %s\n", tOrF(valid));
	hipLaunchKernelGGL(( rowSumKernel2), dim3(blockCount), dim3(threadsPerBlock), 0, stream, d_rowSums, d_x);
#ifdef __CUDA_ARCH__
	__syncthreads();
#else
	cherr(hipDeviceSynchronize());
#endif
}
//template<typename T> __global__ smallestMutalFactor(uint* factor, )
/*
* works but bad shuffln
template<typename T, int StateDim> __global__ void rowReductionKernelNlte64(DMatrix<T> resVec, MonoidF bop, const DMatrix<T> x, uint slice, uint slices) {
assert(x.n < 65);
// shared mem to relay partial sums of rows that span warps
int col = threadIdx.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
if(row < x.m && col < x.n) {
// test for elements processed by by cols with laneid > WARP_SIZE
int2 laneid = b_util::laneId(threadIdx, blockIdx, blockDim);
ulong soffset = slice * x.m * x.p/slices;
uint toffset = slice * x.m * resVec.p/slices;
int currLen = x.n - 1;
T* xrow = x.elements + soffset + row * x.p;
// first reduction fills warp
T s = col < x.n ? xrow[col] : bop.identity;
if( col + WARP_SIZE < x.n )
s = bop(s,xrow[col + WARP_SIZE]);
__syncthreads();
while(currLen > 0) {
int dLane = col == 0 ? currLen : 0;
if(WARP_SIZE - laneid.y < x.n - col) {
flnfprintf("row %u laneid %u.%u y + x.n-col %d > ws\n", row, laneid.x,laneid.y, x.n-col);
// on a spanrow, so explicitly load elems of spanrow seg in 2nd warp
if(col + dLane < x.n) {
flnfprintf("loading lane %u.%u from next warps for row %u col %u s was %f\n", laneid.x + 1 , WARP_SIZE - (laneid.y + dLane), row,col,s);
s = bop(s, xrow[col+dLane]);
flnfprintf("s is %f\n",s);
} else {
flnfprintf("ignoring lane %u from next warps for row %u col %u\n" , (laneid.y + dLane - WARP_SIZE), row,col);
}
} else {
T os = shflDown<T>(s, dLane );
// (laneid<x.n && laneid > col) skips elems of segment of spanrow in 2nd warp
s = col == 0 ? bop(s, os) : s;
if(checkDebug(debugRedux)) flnfprintf("x.n %u row %u col %u lane id %u.%u dLane %d os %f s %f\n", x.n, row, col, laneid.x,laneid.y, dLane, os,s);
}
currLen--;
}
if(col == 0) {
resVec.elements[row + toffset] = s;
}
}
}
*/
/*
* todo version for well-formed matrices
* needs smem T for each spanrow that holds partial reduction of 1st part of row
*/
// Row reduction for narrow matrices (n <= 64): thread (col,row) covers one
// element; each row is folded with 'bop' via warp shuffles (or a serial loop
// when the row straddles a warp boundary) and lane/col 0 writes the result to
// resVec.elements[row + toffset] for this slice.
#ifdef CuMatrix_Enable_KTS
template<typename T, template <typename> class BinaryOpF> __global__
void rowReductionKernelNlte64(DMatrix<T> resVec, BinaryOpF<T> bop, const DMatrix<T> x, uint slice, uint slices)
#else
template<typename T, int StateDim> __global__
void rowReductionKernelNlte64(DMatrix<T> resVec, MonoidF<T,StateDim> bop, const DMatrix<T> x, uint slice, uint slices)
#endif
{
assert(x.n < 65);
// shared mem to relay partial sums of rows that span warps
int col = threadIdx.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
// thread in mat bounds
// NOTE(review): the __syncthreads() calls below sit inside this divergent
// bounds check -- out-of-range threads skip the barrier; confirm the launch
// config always keeps whole blocks in bounds or that the barrier is benign here.
if(row < x.m && col < x.n) {
uint laneid;
b_util::laneid(laneid);
// element/result offsets of this slice within the full matrix
ulong soffset = slice * x.m * x.p;
uint toffset = slice * x.m * resVec.p;
if(checkDebug(debugRedux) && col == 0 && row == 0) {
flprintf("slice %u soffset %lu toffset %u\n", slice, soffset, toffset);
}
// initial shuffle distance: half the next power of two covering the row width
int currLen = MIN(WARP_SIZE/2, b_util::nextPowerOf2(x.n)/2);
T* xrow = x.elements + soffset + row * x.p;
// first reduction fills warp local ses
T s = col < x.n ? xrow[col] : bop.identity_ro();
if( col + WARP_SIZE < x.n )
s = bop(s,xrow[col + WARP_SIZE]);
__syncthreads();
if(col == 0 && x.n > WARP_SIZE - laneid && x.n < WARP_SIZE) {
// row spans warps, so don't shuffle
for(int i =1; i < x.n; i++) {
s = bop(s, xrow[col + i]);
}
} else {
// shuffle-tree fold: halve the distance each pass
while(currLen > 0) {
int dLane = currLen; // col == 0 ? currLen : 0;
// check for beginning of a warp spanrow
T os = shflDown<T>(s, dLane);
// (laneid<x.n && laneid > col) skips elems of segment of spanrow in 2nd warp
if(col > 0 && col > laneid ) {
// todo retrieve partial redux of 1st part of span row and reduce rest of spanrow with that
//if(checkDebug(debugRedux)) flnfprintf("skipn x.n %u row %u col %u lane id %u dLane %d os %f s %f\n", x.n, row, col, laneid, dLane, os,s);
} else if(col + dLane < x.n){
s = bop(s, os);
//if(checkDebug(debugRedux)) flnfprintf("bopn x.n %u row %u col %u lane id %u dLane %d os %f s %f\n", x.n, row, col, laneid, dLane, os,s);
}
currLen >>= 1;
}
}
// col 0 holds the fully folded row value
if(col == 0) {
resVec.elements[row + toffset] = s;
}
}
}
//
// 'slices' are row-wise
// stripes of 64 cols are col-wise
// Two-phase row reduction for wide matrices: phase 1 folds each 64-column
// stripe of a row into resVec[row*p + stripe]; phase 2 folds those per-stripe
// partials into a single value at resVec[row + toffset].
// NOTE(review): unlike rowReductionKernelNlte64 there is no row < x.m guard --
// confirm the launcher never over-provisions rows, else xrow reads run past
// the matrix.
#ifdef CuMatrix_Enable_KTS
template<typename T, template <typename> class BinaryOpF> __global__ void rowReductionKernel(DMatrix<T> resVec, BinaryOpF<T> bop, const DMatrix<T> x, int slice, int slices, int stripes)
#else
template<typename T, int StateDim> __global__ void rowReductionKernel(DMatrix<T> resVec, MonoidF<T,StateDim> bop, const DMatrix<T> x, int slice, int slices, int stripes)
#endif
{
int col = threadIdx.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
ulong soffset = slice * x.m * x.p;
int toffset = slice * x.m * resVec.p;
// phase 1: reduce each 64-wide stripe of the row
for(int stripe = 0; stripe < stripes; stripe++) {
int currLen = MIN(WARP_SIZE/2, b_util::nextPowerOf2(x.n)/2);
T* xrow = x.elements + soffset + row * x.p + stripe * 64;
// first reduction fills warp
T s = col < x.n ? xrow[col] : bop.identity_ro();
if( col + WARP_SIZE < x.n )
s = bop(s,xrow[col + WARP_SIZE]);
__syncthreads();
while(currLen > 0) {
int lane = col + currLen;
s = bop(s, shfl<T>(s, lane));
currLen >>= 1;
}
if(threadIdx.x == 0) {
resVec.elements[row * resVec.p + stripe + toffset] = s;
}
}
__syncthreads();
// phase 2: fold the per-stripe partials (at most 64 of them) for this row
assert(stripes < 65);
int currLen = MIN(WARP_SIZE/2, b_util::nextPowerOf2(stripes)/2);
T* resVecRow = resVec.elements + toffset + row * resVec.p;
// first reduction fills warp
T s = col < stripes ? resVecRow[col] : bop.identity_ro();
// NOTE(review): this bound compares against x.n, but phase 2 operates over
// 'stripes' partials -- 'col + WARP_SIZE < stripes' looks intended; verify.
if( col + WARP_SIZE < x.n )
s = bop(s,resVecRow[col + WARP_SIZE]);
__syncthreads();
while(currLen > 0) {
int lane = col + currLen;
s = bop(s, shfl<T>(s, lane));
currLen >>= 1;
}
if(threadIdx.x == 0) {
resVec.elements[row + toffset] = s;
}
}
// Host/device launcher for rowReductionKernelNlte64 (matrices with n <= 64):
// sizes a (blockW x blockH) block, and splits the row dimension into 'slices'
// when the required grid.y exceeds the device's maxGrid.y, launching the
// kernel once per slice on 'stream', then synchronizing.
#ifdef CuMatrix_Enable_KTS
template <typename T> template <template <typename> class BinaryOp> __host__ CUDART_DEVICE void CuMatrix<T>::reduceRowsNlte64(DMatrix<T>& resVec, const DMatrix<T>& d_x, BinaryOp<T> op, hipStream_t stream )
#else
template <typename T> template <int StateDim> __host__ CUDART_DEVICE void CuMatrix<T>::reduceRowsNlte64(DMatrix<T>& resVec, const DMatrix<T>& d_x, MonoidF<T,StateDim> op, hipStream_t stream )
#endif
{
assert(d_x.n <= WARP_SIZE*2);
// block: one warp-width (or narrower) x as many rows as occupancy allows
ExecCaps * pcaps = ExecCaps::currCaps();
uint blockW= MIN(WARP_SIZE, d_x.n);
uint blockH = MIN(d_x.m, maxH<T>(*pcaps,blockW));
if(checkDebug(debugRedux))flprintf("for d_x %ux%ux%u first %p last %p\n", d_x.m,d_x.n, d_x.p, d_x.elements, d_x.elements + (d_x.m -1)*d_x.p + d_x.n -1);
if(checkDebug(debugRedux))flprintf("blockH %d maxH<T>(*pcaps,blockW) %d\n",blockH, maxH<T>(*pcaps,blockW));
int gridY = DIV_UP(d_x.m, blockH);
dim3 grid(1, gridY);
// in case grid y is too big
int slices = DIV_UP(grid.y, pcaps->maxGrid.y);
if(checkDebug(debugRedux))flprintf("slices %d\n",slices);
dim3 block(blockW,blockH);
int sliceGridY = grid.y/ slices;
// d_slice aliases d_x's storage; only its row count changes per slice
DMatrix<T> d_slice(d_x);
d_slice.m = sliceGridY * blockH;
if(checkDebug(debugRedux))flprintf("init sliceGridY %d d_slice.m %d\n",sliceGridY, d_slice.m);
int offset;
for(int currSlice =0; currSlice < slices; currSlice++) {
offset = currSlice * d_slice.m * d_x.p ;
// final slice takes whatever rows remain
if(currSlice == slices - 1) {
if(checkDebug(debugRedux))prlocf("last fill slice");
d_slice.m = d_x.m - (slices - 1 ) * d_slice.m;
sliceGridY = DIV_UP(d_slice.m, blockH);
}
grid.y = sliceGridY;
if(checkDebug(debugRedux)){
flprintf("sliceGridY %d\n",sliceGridY);
flprintf("slice %d on mat offset %d %dX%d(X%d) (d_slice.elements 1st %p last %p)\n",
currSlice, offset, d_slice.m, d_slice.n, d_slice.p, d_slice.elements+ offset,d_slice.elements+ offset + (d_slice.m-1)* d_slice.p + d_slice.n -1);
b_util::prd3(grid, " grid of ");
b_util::prd3(block, "block of");
}
hipLaunchKernelGGL(( rowReductionKernelNlte64), dim3(grid), dim3(block), 0, stream, resVec, op, d_slice, currSlice, slices);
}
// block until all slice launches have completed
cherr(hipDeviceSynchronize());
}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceRows<plusBinaryOp>(DMatrix<float>&, DMatrix<float> const&, plusBinaryOp<float>, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceRows<plusBinaryOp>(DMatrix<double>&, DMatrix<double> const&, plusBinaryOp<double>, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<long>::reduceRows<plusBinaryOp>(DMatrix<long>&, DMatrix<long> const&, plusBinaryOp<long>, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<ulong>::reduceRows<plusBinaryOp>(DMatrix<ulong>&, DMatrix<ulong> const&, plusBinaryOp<ulong>, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceRows<plusBinaryOp>(DMatrix<int>&, DMatrix<int> const&, plusBinaryOp<int>, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<unsigned int>::reduceRows<plusBinaryOp>(DMatrix<unsigned int>&, DMatrix<unsigned int> const&, plusBinaryOp<unsigned int>, ihipStream_t*);
#else
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceRows(DMatrix<float>&, DMatrix<float> const&, MonoidF<float,1>, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceRows(DMatrix<double>&, DMatrix<double> const&, MonoidF<double,1>, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<long>::reduceRows(DMatrix<long>&, DMatrix<long> const&, MonoidF<long,1>, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<ulong>::reduceRows(DMatrix<ulong>&, DMatrix<ulong> const&, MonoidF<ulong,1>, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceRows(DMatrix<int>&, DMatrix<int> const&, MonoidF<int,1>, ihipStream_t*);
template __host__ CUDART_DEVICE void CuMatrix<uint>::reduceRows(DMatrix<uint>&, DMatrix<uint> const&, MonoidF<uint,1>, ihipStream_t*);
#endif
// Row-wise reduction dispatcher: delegates to the <=64-column launcher.
// NOTE(review): matrices with n >= 65 are silently ignored here (no reduction
// runs and resVec is left untouched); the striped rowReductionKernel above is
// never dispatched -- confirm whether wide inputs can reach this path.
#ifdef CuMatrix_Enable_KTS
template <typename T> template <template <typename> class BinaryOp> __host__ CUDART_DEVICE void CuMatrix<T>::reduceRows(DMatrix<T>& resVec, const DMatrix<T>& d_x, BinaryOp<T> op, hipStream_t stream )
#else
template <typename T> template <int StateDim> __host__ CUDART_DEVICE void CuMatrix<T>::reduceRows(DMatrix<T>& resVec, const DMatrix<T>& d_x, MonoidF<T,StateDim> op, hipStream_t stream )
#endif
{
if(d_x.n < 65) {
reduceRowsNlte64(resVec,d_x,op, stream);
}
}
#include "CuMatrixInster.cu"
| 72b0a9fa14e3a706299a1c990a4e63568187fb28.cu | /*
* CuMatrixReductions.cu
*
* basic reduction kernels, exec ctx
* Author: reid
*/
#include "CuMatrix.h"
#include "Kernels.h"
#include "caps.h"
#include "util.h"
#include "CuDefs.h"
// Full reduction of device matrix d_M with binary op 'op', seeded with
// 'start'. Allocates a temporary per-block result buffer, delegates to
// reduceLauncher, synchronizes, and returns the scalar result.
// Single-element matrices short-circuit to a plain copy.
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BinaryOp> __host__ CUDART_DEVICE
T CuMatrix<T>::reduce(const DMatrix<T>& d_M, BinaryOp<T> op, T start, cudaStream_t stream )
#else
template<typename T> template<int StateDim> __host__ CUDART_DEVICE
T CuMatrix<T>::reduce(const DMatrix<T>& d_M, MonoidF<T,StateDim> op, T start, cudaStream_t stream )
#endif
{
long nP = d_M.m * d_M.n;
// trivial case: one element, just copy it back (no kernel launch)
if(nP == 1) {
T result;
CuTimer timer;
timer.start();
#ifndef __CUDA_ARCH__
cherr(cudaMemcpy(&result, d_M.elements, sizeof(T), cudaMemcpyDeviceToHost));
//CuMatrix<T>::incDhCopy("CuMatrix<T>::reduce(long l)", sizeof(T),timer.stop());
#else
memcpy(&result, d_M.elements, sizeof(T));
#endif
return result;
}
// pick grid/block for a tree reduction over nP elements
int threads;
int blocks;
::getReductionExecContext(blocks, threads, nP);
if(checkDebug(debugRedux )) flprintf("CuMatrix<T>::reduce blocks %d threads %d np %d\n", blocks,threads, nP);
#ifdef CuMatrix_Enable_Cdp
cherr(cudaPeekAtLastError());
#endif
//CuMatrix<T> res(blocks, 1, false, true);
cherr(cudaPeekAtLastError());
if(checkDebug(debugRedux)) {
prlocf("res ");
//res.printShortString();
}
// scratch buffer: one partial result per block, freed before returning
DMatrix<T> d_Res(blocks,1);
cherr(cudaMalloc( &(d_Res.elements), blocks*sizeof(T)));
//res.tile0(d_Res, false);
if(checkDebug(debugRedux)) {
prlocf("after res.tile0(..)\n");
}
#ifndef __CUDA_ARCH__
checkCudaError(cudaDeviceSynchronize());
if(checkDebug(debugRedux)) prlocf("after tile0");
if(checkDebug(debugRedux)) prlocf("host \n");
#else
if(checkDebug(debugRedux)) prlocf("dev \n");
#endif
T total = 0;
if(checkDebug(debugRedux)) flprintf("&total %p\n",&total);
if(checkDebug(debugRedux)) flprintf("curr dev %d device of d_m.elems %d device of d_Res.elems %d\n",ExecCaps::currDev(),
b_util::getDevice(d_M.elements), b_util::getDevice(d_Res.elements));
if(checkDebug(debugRedux)) flprintf("d_M.m,d_M.n %d d_M.p %d stride %d\n",d_M.m, d_M.n, d_M.p ,d_M.n != d_M.p ? d_M.p : 1);
// launch the actual reduction; result lands in 'total'
reduceLauncher(&total, d_Res, nP, d_M, op, start, 0, stream);
// synchronize so 'total' is valid before use
#ifndef __CUDA_ARCH__
cherr(cudaPeekAtLastError());
cherr(cudaStreamSynchronize(stream));
#else
#ifdef CuMatrix_Enable_Cdp
cherr(cudaPeekAtLastError());
#endif
__syncthreads();
#endif
if(checkDebug(debugRedux))flprintf("total now %f\n",total);
cherr(cudaFree( d_Res.elements));
return total;
}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE float CuMatrix<float>::reduce<maxBinaryOp>(DMatrix<float> const&, maxBinaryOp<float>, float, CUstream_st*);
template __host__ CUDART_DEVICE float CuMatrix<float>::reduce<minBinaryOp>(DMatrix<float> const&, minBinaryOp<float>, float, CUstream_st*);
template __host__ CUDART_DEVICE double CuMatrix<double>::reduce<maxBinaryOp>(DMatrix<double> const&, maxBinaryOp<double>, double, CUstream_st*);
template __host__ CUDART_DEVICE double CuMatrix<double>::reduce<minBinaryOp>(DMatrix<double> const&, minBinaryOp<double>, double, CUstream_st*);
template __host__ CUDART_DEVICE ulong CuMatrix<ulong>::reduce<minBinaryOp>(DMatrix<ulong> const&, minBinaryOp<ulong>, ulong, CUstream_st*);
template __host__ CUDART_DEVICE ulong CuMatrix<ulong>::reduce<maxBinaryOp>(DMatrix<ulong> const&, maxBinaryOp<ulong>, ulong, CUstream_st*);
template __host__ CUDART_DEVICE int CuMatrix<int>::reduce<maxBinaryOp>(DMatrix<int> const&, maxBinaryOp<int>, int, CUstream_st*);
template __host__ CUDART_DEVICE int CuMatrix<int>::reduce<minBinaryOp>(DMatrix<int> const&, minBinaryOp<int>, int, CUstream_st*);
template __host__ CUDART_DEVICE unsigned int CuMatrix<unsigned int>::reduce<maxBinaryOp>(DMatrix<unsigned int> const&, maxBinaryOp<unsigned int>, unsigned int, CUstream_st*);
template __host__ CUDART_DEVICE unsigned int CuMatrix<unsigned int>::reduce<minBinaryOp>(DMatrix<unsigned int> const&, minBinaryOp<unsigned int>, unsigned int, CUstream_st*);
template __host__ CUDART_DEVICE float CuMatrix<float>::reduce<andBinaryOp>(DMatrix<float> const&, andBinaryOp<float>, float, CUstream_st*);
template __host__ CUDART_DEVICE double CuMatrix<double>::reduce<andBinaryOp>(DMatrix<double> const&, andBinaryOp<double>, double, CUstream_st*);
template __host__ CUDART_DEVICE int CuMatrix<int>::reduce<andBinaryOp>(DMatrix<int> const&, andBinaryOp<int>, int, CUstream_st*);
template __host__ CUDART_DEVICE unsigned int CuMatrix<unsigned int>::reduce<andBinaryOp>(DMatrix<unsigned int> const&, andBinaryOp<unsigned int>, unsigned int, CUstream_st*);
template __host__ CUDART_DEVICE long CuMatrix<long>::reduce<andBinaryOp>(DMatrix<long> const&, andBinaryOp<long>, long, CUstream_st*);
template __host__ CUDART_DEVICE unsigned long CuMatrix<unsigned long>::reduce<andBinaryOp>(DMatrix<unsigned long> const&, andBinaryOp<unsigned long>, unsigned long, CUstream_st*);
template __host__ CUDART_DEVICE float CuMatrix<float>::reduce<orBinaryOp>(DMatrix<float> const&, orBinaryOp<float>, float, CUstream_st*);
template __host__ CUDART_DEVICE double CuMatrix<double>::reduce<orBinaryOp>(DMatrix<double> const&, orBinaryOp<double>, double, CUstream_st*);
template __host__ CUDART_DEVICE int CuMatrix<int>::reduce<orBinaryOp>(DMatrix<int> const&, orBinaryOp<int>, int, CUstream_st*);
template __host__ CUDART_DEVICE unsigned int CuMatrix<unsigned int>::reduce<orBinaryOp>(DMatrix<unsigned int> const&, orBinaryOp<unsigned int>, unsigned int, CUstream_st*);
template __host__ CUDART_DEVICE long CuMatrix<long>::reduce<orBinaryOp>(DMatrix<long> const&, orBinaryOp<long>, long, CUstream_st*);
template __host__ CUDART_DEVICE unsigned long CuMatrix<unsigned long>::reduce<orBinaryOp>(DMatrix<unsigned long> const&, orBinaryOp<unsigned long>, unsigned long, CUstream_st*);
template __host__ CUDART_DEVICE float CuMatrix<float>::reduce<plusBinaryOp>(DMatrix<float> const&, plusBinaryOp<float>, float, CUstream_st*);
template __host__ CUDART_DEVICE double CuMatrix<double>::reduce<plusBinaryOp>(DMatrix<double> const&, plusBinaryOp<double>, double, CUstream_st*);
template __host__ CUDART_DEVICE ulong CuMatrix<ulong>::reduce<plusBinaryOp>(DMatrix<ulong> const&, plusBinaryOp<ulong>, ulong, CUstream_st*);
template __host__ CUDART_DEVICE int CuMatrix<int>::reduce<plusBinaryOp>(DMatrix<int> const&, plusBinaryOp<int>, int, CUstream_st*);
template __host__ CUDART_DEVICE unsigned int CuMatrix<unsigned int>::reduce<plusBinaryOp>(DMatrix<unsigned int> const&, plusBinaryOp<unsigned int>, unsigned int, CUstream_st*);
template __host__ CUDART_DEVICE long CuMatrix<long>::reduce<plusBinaryOp>(DMatrix<long> const&, plusBinaryOp<long>, long, CUstream_st*);
#else
template __host__ CUDART_DEVICE float CuMatrix<float>::reduce(DMatrix<float> const&, MonoidF<float,1>, float, CUstream_st*);
template __host__ CUDART_DEVICE double CuMatrix<double>::reduce(DMatrix<double> const&, MonoidF<double,1>, double, CUstream_st*);
template __host__ CUDART_DEVICE long CuMatrix<long>::reduce(DMatrix<long> const&, MonoidF<long,1>, long, CUstream_st*);
template __host__ CUDART_DEVICE ulong CuMatrix<ulong>::reduce(DMatrix<ulong> const&, MonoidF<ulong,1>, ulong, CUstream_st*);
template __host__ CUDART_DEVICE int CuMatrix<int>::reduce(DMatrix<int> const&, MonoidF<int,1>, int, CUstream_st*);
template __host__ CUDART_DEVICE uint CuMatrix<uint>::reduce(DMatrix<uint> const&, MonoidF<uint,1>, uint, CUstream_st*);
#endif
// Reduces column 'col' of d_M (d_M.m elements) with 'op', seeded by 'start';
// the result is written through 'total' by reduceLauncher.
// NOTE(review): unlike reduce(), there is no stream sync after the launch --
// callers presumably must synchronize 'stream' before reading *total; confirm.
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BinaryOp> __host__ CUDART_DEVICE
void CuMatrix<T>::reduceColumn(T* total, const DMatrix<T>& d_M, BinaryOp<T> op, T start, int col, cudaStream_t stream )
#else
template<typename T> template<int StateDim> __host__ CUDART_DEVICE
void CuMatrix<T>::reduceColumn(T* total, const DMatrix<T>& d_M, MonoidF<T,StateDim> op, T start, int col, cudaStream_t stream )
#endif
{
// one value per row of the column
long nP = d_M.m;
int threads;
int blocks;
::getReductionExecContext(blocks, threads, nP);
if(checkDebug(debugRedux))flprintf("CuMatrix<T>::reduceColumn blocks %d threads %d np %d\n", blocks,threads, nP);
#ifdef CuMatrix_Enable_Cdp
cherr(cudaPeekAtLastError());
#endif
// per-block partials buffer, managed by a CuMatrix temporary
CuMatrix<T> res(blocks, 1, false, true);
if(checkDebug(debugRedux)) {
prlocf("res ");
res.printShortString();
}
DMatrix<T> d_Res;
res.tile0(d_Res, false);
if(checkDebug(debugRedux)) {
prlocf("after res.tile0(..)\n");
}
#ifndef __CUDA_ARCH__
checkCudaError(cudaDeviceSynchronize());
if(checkDebug(debugRedux)){ prlocf("host ");}
#else
if(checkDebug(debugRedux)){ prlocf("dev ");}
#endif
if(checkDebug(debugRedux)){ flprintf("&total %p\n",&total); }
reduceLauncher(total, d_Res, nP, d_M, op, start, col, stream);
}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceColumn<plusBinaryOp>(float*, DMatrix<float> const&, plusBinaryOp<float>, float, int, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceColumn<plusBinaryOp>(int*, DMatrix<int> const&, plusBinaryOp<int>, int, int, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<long>::reduceColumn<plusBinaryOp>(long*, DMatrix<long> const&, plusBinaryOp<long>, long, int, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<unsigned int>::reduceColumn<plusBinaryOp>(unsigned int*, DMatrix<unsigned int> const&, plusBinaryOp<unsigned int>, unsigned int, int, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceColumn<plusBinaryOp>(double*, DMatrix<double> const&, plusBinaryOp<double>, double, int, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<unsigned long>::reduceColumn<plusBinaryOp>(unsigned long*, DMatrix<unsigned long> const&, plusBinaryOp<unsigned long>, unsigned long, int, CUstream_st*);
#else
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceColumn(float*, DMatrix<float> const&, MonoidF<float, 1>, float, int, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceColumn(double*, DMatrix<double> const&, MonoidF<double, 1>, double, int, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceColumn(int*, DMatrix<int> const&, MonoidF<int, 1>, int, int, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<uint>::reduceColumn(uint*, DMatrix<uint> const&, MonoidF<uint, 1>, uint, int, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<long>::reduceColumn(long*, DMatrix<long> const&, MonoidF<long, 1>, long, int, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<ulong>::reduceColumn(ulong*, DMatrix<ulong> const&, MonoidF<ulong, 1>, ulong, int, CUstream_st*);
#endif
// Reduction of all of d_M into *result on 'stream'. Despite the name it
// synchronizes the stream (host) / block (device) before returning, so
// *result is valid on return.
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BinaryOp> __host__ CUDART_DEVICE void CuMatrix<T>::reduceAsync(
T* result, const DMatrix<T>& d_M, BinaryOp<T> op, T start, cudaStream_t stream )
#else
template<typename T> template<int StateDim> __host__ CUDART_DEVICE void CuMatrix<T>::reduceAsync(
T* result, const DMatrix<T>& d_M, MonoidF<T,StateDim> op, T start, cudaStream_t stream )
#endif
{
long nP = d_M.m * d_M.n;
int threads;
int blocks;
::getReductionExecContext(blocks, threads, nP);
if(checkDebug(debugExec)) flprintf("reduceAsync blocks %d\n", blocks);
// per-block partials buffer, managed by a CuMatrix temporary
CuMatrix<T> res(blocks, 1, true, true);
DMatrix<T> d_Res;
res.tile0(d_Res, false);
reduceLauncher(result, d_Res, nP, d_M, op, start, 0, stream);
#ifndef __CUDA_ARCH__
checkCudaError(cudaStreamSynchronize(stream));
#else
__syncthreads();
#endif
}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceAsync<maxBinaryOp>(float*,DMatrix<float> const&, maxBinaryOp<float>, float, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceAsync<minBinaryOp>(float*,DMatrix<float> const&, minBinaryOp<float>, float, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceAsync<maxBinaryOp>(double*,DMatrix<double> const&, maxBinaryOp<double>, double, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceAsync<minBinaryOp>(double*,DMatrix<double> const&, minBinaryOp<double>, double, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<ulong>::reduceAsync<maxBinaryOp>(ulong*,DMatrix<ulong> const&, maxBinaryOp<ulong>, ulong, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<ulong>::reduceAsync<minBinaryOp>(ulong*,DMatrix<ulong> const&, minBinaryOp<ulong>, ulong, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceAsync<minBinaryOp>(int*, DMatrix<int> const&, minBinaryOp<int>, int, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceAsync<maxBinaryOp>(int*, DMatrix<int> const&, maxBinaryOp<int>, int, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<uint>::reduceAsync<minBinaryOp>(uint*, DMatrix<uint> const&, minBinaryOp<uint>, uint, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<uint>::reduceAsync<maxBinaryOp>(uint*, DMatrix<uint> const&, maxBinaryOp<uint>, uint, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<long>::reduceAsync<minBinaryOp>(long*, DMatrix<long> const&, minBinaryOp<long>, long, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<long>::reduceAsync<maxBinaryOp>(long*, DMatrix<long> const&, maxBinaryOp<long>, long, CUstream_st*);
#else
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceAsync<1>(float*,DMatrix<float> const&, MonoidF<float,1>, float, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceAsync<1>(double*,DMatrix<double> const&, MonoidF<double,1>, double, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceAsync<1>(int*,DMatrix<int> const&, MonoidF<int,1>, int, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<uint>::reduceAsync<1>(uint*,DMatrix<uint> const&, MonoidF<uint,1>, uint, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<long>::reduceAsync<1>(long*,DMatrix<long> const&, MonoidF<long,1>, long, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<ulong>::reduceAsync<1>(ulong*,DMatrix<ulong> const&, MonoidF<ulong,1>, ulong, CUstream_st*);
#endif
// Like reduceAsync, but the caller supplies the per-block partials buffer
// ('buffer') and the precomputed launch shape, avoiding a temporary
// allocation. 'blocks' and 'threads' are accepted for interface symmetry;
// the launch shape is derived inside reduceLauncher.
// Synchronizes before returning, so *result is valid on return.
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BinaryOp> __host__ CUDART_DEVICE void CuMatrix<T>::reduceAsyncBuffer(
T* result, DMatrix<T>& buffer, int blocks, int threads, long nP, const DMatrix<T>& d_M, BinaryOp<T> op, T start, cudaStream_t stream )
#else
template<typename T> template<int StateDim> __host__ CUDART_DEVICE void CuMatrix<T>::reduceAsyncBuffer(
T* result, DMatrix<T>& buffer, int blocks, int threads, long nP, const DMatrix<T>& d_M, MonoidF<T,StateDim> op, T start, cudaStream_t stream )
#endif
{
if(checkDebug(debugExec)) flprintf("reduceAsyncBuffer blocks %d\n", blocks);
reduceLauncher(result, buffer, nP, d_M, op, start, 0, stream);
#ifndef __CUDA_ARCH__
checkCudaError(cudaStreamSynchronize(stream));
#else
__syncthreads();
#endif
}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceAsyncBuffer<maxBinaryOp>( float*,DMatrix<float>&,int, int, long, DMatrix<float> const&, maxBinaryOp<float>, float, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceAsyncBuffer<minBinaryOp>(float*,DMatrix<float>&,int, int, long, DMatrix<float> const&, minBinaryOp<float>, float, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceAsyncBuffer<maxBinaryOp>(double*,DMatrix<double>&,int, int, long, DMatrix<double> const&, maxBinaryOp<double>, double, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceAsyncBuffer<minBinaryOp>(double*,DMatrix<double>&,int, int, long, DMatrix<double> const&, minBinaryOp<double>, double, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<ulong>::reduceAsyncBuffer<maxBinaryOp>(ulong*,DMatrix<ulong>&,int, int, long, DMatrix<ulong> const&, maxBinaryOp<ulong>, ulong, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<ulong>::reduceAsyncBuffer<minBinaryOp>(ulong*,DMatrix<ulong>&,int, int, long, DMatrix<ulong> const&, minBinaryOp<ulong>, ulong, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceAsyncBuffer<minBinaryOp>(int*, DMatrix<int>&, int, int, long, DMatrix<int> const&, minBinaryOp<int>, int, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceAsyncBuffer<maxBinaryOp>(int*, DMatrix<int>&, int, int, long, DMatrix<int> const&, maxBinaryOp<int>, int, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<unsigned int>::reduceAsyncBuffer<minBinaryOp>(unsigned int*, DMatrix<unsigned int>&, int, int, long, DMatrix<unsigned int> const&, minBinaryOp<unsigned int>, unsigned int, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<unsigned int>::reduceAsyncBuffer<maxBinaryOp>(unsigned int*, DMatrix<unsigned int>&, int, int, long, DMatrix<unsigned int> const&, maxBinaryOp<unsigned int>, unsigned int, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<long>::reduceAsyncBuffer<minBinaryOp>(long*, DMatrix<long>&, int, int, long, DMatrix<long> const&, minBinaryOp<long>, long, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<long>::reduceAsyncBuffer<maxBinaryOp>(long*, DMatrix<long>&, int, int, long, DMatrix<long> const&, maxBinaryOp<long>, long, CUstream_st*);
#else
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceAsyncBuffer(float*, DMatrix<float>&, int, int, long, DMatrix<float> const&, MonoidF<float, 1>, float, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceAsyncBuffer(double*, DMatrix<double>&, int, int, long, DMatrix<double> const&, MonoidF<double, 1>, double, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceAsyncBuffer(int*, DMatrix<int>&, int, int, long, DMatrix<int> const&, MonoidF<int, 1>, int, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<uint>::reduceAsyncBuffer(uint*, DMatrix<uint>&, int, int, long, DMatrix<uint> const&, MonoidF<uint, 1>, uint, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<long>::reduceAsyncBuffer(long*, DMatrix<long>&, int, int, long, DMatrix<long> const&, MonoidF<long, 1>, long, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<ulong>::reduceAsyncBuffer(ulong*, DMatrix<ulong>&, int, int, long, DMatrix<ulong> const&, MonoidF<ulong, 1>, ulong, CUstream_st*);
#endif
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BinaryOp> __host__ CUDART_DEVICE T CuMatrix<T>::reduce(BinaryOp<T> op, T start, cudaStream_t stream ) const
#else
template<typename T> template<int StateDim> __host__ CUDART_DEVICE T CuMatrix<T>::reduce(MonoidF<T,StateDim> op, T start, cudaStream_t stream ) const
#endif
{
    // Reduces the whole matrix to a single value with binary operator 'op',
    // seeded with 'start'.  The matrix is walked tile by tile (one non-blocking
    // stream per gpu when the tiler spans several devices), each tile is
    // reduced on-device, and the per-tile partials are reduced in a final pass.
    //assert(lastMod != mod_host);
    DMatrix<T> d_A;
    if(checkDebug(debugRedux) ) flprintf("tiler.m_m %u, tiler.m_n %u, tiler.m_p %u m_size %lu tileSize %ld\n",
            tiler.m_m,tiler.m_n,tiler.m_p, tiler.m_size, tiler.tileSize);
    int roff=0, coff=0, tileM = 0, tileN = 0, tileP=0;
    int tileCount = tiler.getTileCount();
    //tiler.tileDims(tileM,tileN,tdRows);
    //tileCount = MAX(tileCount, DIV_UP(m,tileM));
    T* resA;  // per-tile partial results (pinned on host, heap on device)
    T res;
#ifndef __CUDA_ARCH__
    cherr(cudaHostAlloc(&resA,tileCount*sizeof(T),0));
#else
    resA = (T*) malloc(tileCount*sizeof(T));
#endif
    int lastGpu = -1;
    int orgDevice = ExecCaps::currDev();
    int gpuCount = tiler.countGpus();
    if(checkDebug(debugRedux) ) flprintf("orgDev %d gpuCount %d\n",orgDevice, gpuCount);
    cudaStream_t* streams = null;
    if(gpuCount > 1) {
        assert(!stream);
        // fix: assign the outer 'streams' pointer; a previous local
        // redeclaration shadowed it, leaving the outer pointer null so the
        // multi-gpu path dereferenced null in the tile loop and at cleanup
        streams = (cudaStream_t* ) malloc(gpuCount * sizeof(cudaStream_t));
        for(int i =0 ; i < gpuCount; i++) {
            lastGpu = tiler.nextGpu(lastGpu);
            if(gpuCount> 1)
                ExecCaps_setDevice(lastGpu);
            //cherr(cudaSetDevice(lastGpu));
            cherr(cudaStreamCreateWithFlags(&streams[i],cudaStreamNonBlocking));
        }
    }
    lastGpu = -1;
    if(checkDebug(debugRedux) ) flprintf("m %d, n %d, p %d, tileP %d, tiler.getTileCount() %d tileDir %s\n",m, n, p, tileP, tileCount, b_util::tileDir(tiler.tileD));
    for(int tile = 0; tile < tileCount; tile++) {
        if(gpuCount> 1)
            ExecCaps_setDevice(lastGpu);
        // fix: streams[] holds gpuCount entries but was indexed by the tile
        // number; wrap the index so tileCount > gpuCount cannot run off the end
        cudaStream_t tileStream = gpuCount > 1 ? streams[tile % gpuCount] : stream;
        tiler.tile1D( d_A,roff,coff,tileM, tileN, tileP, tile, tdRows,lastMod == mod_host, lastGpu, tileStream);
        if(checkDebug(debugRedux) ) util<T>::prdm("d_A", d_A);
        resA[tile] = reduce(d_A, op, start, tileStream);
    }
    if(gpuCount > 1) {
        for(int i =0 ; i < gpuCount; i++) {
            cherr(cudaStreamDestroy(streams[i]));
        }
        free(streams);
    }
    if(tileCount > 1) {
        if(checkDebug(debugRedux) ) flprintf("reduce tileCount %d\n",tileCount);
        // reduce across tile reductions
        T* dres = null;
#ifndef __CUDA_ARCH__
        cherr(cudaMalloc(&dres, tileCount *sizeof(T)));
        cherr(cudaMemcpy(dres, resA, tileCount*sizeof(T), cudaMemcpyHostToDevice));
#else
        // in device code resA is already device-visible; reduce it in place
        dres = resA;
#endif
        d_A.elements = dres;
        d_A.m = tileCount;
        d_A.n = 1; d_A.p = 1;
        res = reduce(d_A, op, start, stream);
#ifndef __CUDA_ARCH__
        cudaFree(dres);
#endif
    } else {
        if(checkDebug(debugRedux) ) flprintf("single tile reduction -> %f\n", (float) resA[0]);
        res = resA[0];
    }
#ifndef __CUDA_ARCH__
    if(checkDebug(debugDestr))flprintf("freeing host resA %p\n", resA);
    cherr(cudaFreeHost(resA));
#else
    free(resA);
#endif
    return res;
}
// Explicit instantiations of CuMatrix<T>::reduce for the supported element
// types (functor-template form under CuMatrix_Enable_KTS, monoid form otherwise).
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE float CuMatrix<float>::reduce<maxBinaryOp>(maxBinaryOp<float>, float, CUstream_st*) const;
template __host__ CUDART_DEVICE float CuMatrix<float>::reduce<minBinaryOp>(minBinaryOp<float>, float, CUstream_st*) const;
template __host__ CUDART_DEVICE double CuMatrix<double>::reduce<maxBinaryOp>(maxBinaryOp<double>, double, CUstream_st*) const;
template __host__ CUDART_DEVICE double CuMatrix<double>::reduce<minBinaryOp>(minBinaryOp<double>, double, CUstream_st*) const;
template __host__ CUDART_DEVICE int CuMatrix<int>::reduce<maxBinaryOp>(maxBinaryOp<int>, int, CUstream_st*) const;
template __host__ CUDART_DEVICE int CuMatrix<int>::reduce<minBinaryOp>(minBinaryOp<int>, int, CUstream_st*) const;
template __host__ CUDART_DEVICE long CuMatrix<long>::reduce<maxBinaryOp>(maxBinaryOp<long>, long, CUstream_st*) const;
template __host__ CUDART_DEVICE long CuMatrix<long>::reduce<minBinaryOp>(minBinaryOp<long>, long, CUstream_st*) const;
template __host__ CUDART_DEVICE unsigned int CuMatrix<unsigned int>::reduce<maxBinaryOp>(maxBinaryOp<unsigned int>, unsigned int, CUstream_st*) const;
template __host__ CUDART_DEVICE unsigned int CuMatrix<unsigned int>::reduce<minBinaryOp>(minBinaryOp<unsigned int>, unsigned int, CUstream_st*) const;
template __host__ CUDART_DEVICE unsigned long CuMatrix<unsigned long>::reduce<maxBinaryOp>(maxBinaryOp<unsigned long>, unsigned long, CUstream_st*) const;
template __host__ CUDART_DEVICE unsigned long CuMatrix<unsigned long>::reduce<minBinaryOp>(minBinaryOp<unsigned long>, unsigned long, CUstream_st*) const;
#else
template __host__ CUDART_DEVICE float CuMatrix<float>::reduce(MonoidF<float,1>, float, CUstream_st*) const;
template __host__ CUDART_DEVICE double CuMatrix<double>::reduce(MonoidF<double,1>, double, CUstream_st*) const;
template __host__ CUDART_DEVICE int CuMatrix<int>::reduce(MonoidF<int,1>, int, CUstream_st*) const;
template __host__ CUDART_DEVICE uint CuMatrix<uint>::reduce(MonoidF<uint,1>, uint, CUstream_st*) const;
template __host__ CUDART_DEVICE ulong CuMatrix<ulong>::reduce(MonoidF<ulong,1>, ulong, CUstream_st*) const;
#endif
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BinaryOp> __host__ CUDART_DEVICE T CuMatrix<T>::reduceColumn(BinaryOp<T> op, T start, int col, cudaStream_t stream ) const
#else
template<typename T> template<int StateDim> __host__ CUDART_DEVICE T CuMatrix<T>::reduceColumn(MonoidF<T,StateDim> op, T start, int col, cudaStream_t stream ) const
#endif
{
    // Reduces column 'col' of this matrix with 'op', seeded with 'start':
    // the matrix is tiled row-wise, each tile's column segment is reduced,
    // and the per-tile partials are reduced to the final value.
    // fix: the opening brace and the d_A declaration were inside the #else
    // branch, which left the CuMatrix_Enable_KTS build with no function body;
    // they now follow the #endif so both signatures share one body.
    DMatrix<T> d_A;
    T* resA;  // per-tile partial results (pinned on host, heap on device)
    T res;
    int lastGpu = 0;
    int orgDevice = ExecCaps::currDev();
    int gpuCount = tiler.countGpus();
    int tileCount = tiler.getTileCount();
    cudaStream_t* streams = null;
    if(gpuCount > 1) {
        assert(!stream);
        streams = (cudaStream_t* ) malloc(gpuCount * sizeof(cudaStream_t));
        for(int i =0 ; i < gpuCount; i++) {
            lastGpu = tiler.nextGpu(lastGpu);
#ifndef __CUDA_ARCH__
            cherr(cudaSetDevice(lastGpu));
#endif
            cherr(cudaStreamCreateWithFlags(&streams[i],cudaStreamNonBlocking));
        }
    }
    lastGpu = tiler.nextGpu(0);
    int roff, coff, tileM = 0, tileN = 0, tileP = 0;
#ifndef __CUDA_ARCH__
    cherr(cudaHostAlloc(&resA,tileCount*sizeof(T),0));
#else
    resA = (T*) malloc(tileCount*sizeof(T));
#endif
    for(int tile = 0; tile < tileCount; tile++) {
        // tile1D(DMatrix<T>& dm, int& roff, int& coff,int& tileM, int& tileN, int& tileP, int t, TileDirection tileD = tdRows, bool copy = true, int lastGpu = -1, cudaStream_t stream = 0)
        tiler.tile1D(d_A, roff, coff, tileM, tileN, tileP, tile, tdRows, true);
        reduceColumn(resA + tile, d_A, op, start, col, stream);
    }
    // fix: the per-gpu streams were created above but never destroyed or freed
    if(gpuCount > 1) {
        for(int i =0 ; i < gpuCount; i++) {
            cherr(cudaStreamDestroy(streams[i]));
        }
        free(streams);
    }
    if(tileCount > 1) {
        // fix: format string had two %d conversions but only one argument
        if(checkDebug(debugRedux) ) flprintf("reduce across %d tile reductions\n",tileCount);
        // reduce across tile reductions
        T* dres = null;
#ifndef __CUDA_ARCH__
        cherr(cudaMalloc(&dres, tileCount*sizeof(T)));
        cherr(cudaMemcpy(dres, resA, tileCount*sizeof(T), cudaMemcpyHostToDevice));
#else
        // in device code resA is already device-visible; reduce it in place
        dres = resA;
#endif
        d_A.elements = dres;
        d_A.m = tileCount;
        d_A.n = 1; d_A.p = 1;
        res = reduce(d_A, op, start, stream);
#ifndef __CUDA_ARCH__
        cudaFree(dres);
#endif
    } else {
        if(checkDebug(debugRedux) ) flprintf("single tile reduction -> %f\n", (float) resA[0]);
        res = resA[0];
    }
#ifndef __CUDA_ARCH__
    if(checkDebug(debugDestr))flprintf("freeing host resA %p\n", resA);
    cherr(cudaFreeHost(resA));
#else
    free(resA);
#endif
    return res;
}
// Explicit instantiations of CuMatrix<T>::reduceColumn for the supported
// element types (functor form under CuMatrix_Enable_KTS, monoid form otherwise).
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE float CuMatrix<float>::reduceColumn<plusBinaryOp>(plusBinaryOp<float>, float, int, CUstream_st*) const;
template __host__ CUDART_DEVICE double CuMatrix<double>::reduceColumn<plusBinaryOp>(plusBinaryOp<double>, double, int, CUstream_st*) const;
template __host__ CUDART_DEVICE unsigned long CuMatrix<unsigned long>::reduceColumn<plusBinaryOp>(plusBinaryOp<unsigned long>, unsigned long, int, CUstream_st*) const;
#else
template __host__ CUDART_DEVICE float CuMatrix<float>::reduceColumn<1>(MonoidF<float,1>, float, int, CUstream_st*) const;
template __host__ CUDART_DEVICE double CuMatrix<double>::reduceColumn<1>(MonoidF<double,1>, double, int, CUstream_st*) const;
template __host__ CUDART_DEVICE int CuMatrix<int>::reduceColumn<1>(MonoidF<int,1>, int, int, CUstream_st*) const;
template __host__ CUDART_DEVICE uint CuMatrix<uint>::reduceColumn<1>(MonoidF<uint,1>, uint, int, CUstream_st*) const;
template __host__ CUDART_DEVICE ulong CuMatrix<ulong>::reduceColumn<1>(MonoidF<ulong,1>, ulong, int, CUstream_st*) const;
#endif
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BinaryOp> __host__ CUDART_DEVICE void CuMatrix<T>::reduceAsync(T* result, BinaryOp<T> op, T start, cudaStream_t stream ) const
#else
template<typename T> template<int StateDim> __host__ CUDART_DEVICE void CuMatrix<T>::reduceAsync(T* result, MonoidF<T,StateDim> op, T start, cudaStream_t stream ) const
#endif
{
    // Reduces this matrix with 'op' (seeded with 'start') on 'stream' and
    // stores the value through 'result'.
    // fix: the reduction was computed but never written to 'result', leaving
    // the caller's output untouched.
    *result = reduce(op, start, stream);
}
// Explicit instantiations of CuMatrix<T>::reduceAsync (monoid form only;
// no KTS instantiations are emitted for this member).
#ifdef CuMatrix_Enable_KTS
#else
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceAsync<1>(float*, MonoidF<float, 1>, float, CUstream_st*) const;
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceAsync<1>(double*, MonoidF<double, 1>, double, CUstream_st*) const;
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceAsync<1>(int*, MonoidF<int, 1>, int, CUstream_st*) const;
template __host__ CUDART_DEVICE void CuMatrix<uint>::reduceAsync<1>(uint*, MonoidF<uint, 1>, uint, CUstream_st*) const;
template __host__ CUDART_DEVICE void CuMatrix<ulong>::reduceAsync<1>(ulong*, MonoidF<ulong, 1>, ulong, CUstream_st*) const;
#endif
// reduction with addition (Σ)
template<typename T> __host__ CUDART_DEVICE T CuMatrix<T>::sum(cudaStream_t stream ) const {
    // Sum of every element: a plus-reduction seeded with zero.
#ifndef __CUDA_ARCH__
    if(checkDebug(debugRedux)){
        flprintf("this %p %dx%dx%d elems %p dev %p lastMod %s\n", this, m,n,p,elements, tiler.buff(), b_util::modStr(lastMod));
        printColoArray<T>(elements,20);
        printDevArray<T>(tiler.buff(),"EVE",-1,20);
    }
#endif
#ifdef CuMatrix_Enable_KTS
    return reduce( plusBinaryOp<T>(), 0 , stream);
#else
    return reduce(Functory<T,plusBinaryOp>::pinch(), 0 , stream);
#endif
}
template<typename T> __host__ T CuMatrix<T>::kahanSum() const {
    // Compensated (Kahan) summation over the host-side elements; the data
    // must already be synced to the host.
    if(lastMod == mod_device) {
        dthrow(notSyncedHost());
    }
    T total = 0;
    T compensation = 0;
    const int span = m * p;
    for(int idx = 0; idx < span; idx++) {
        if(idx == span - 1){
            if(checkDebug(debugRedux)) outln("last idx " << idx << ", (elements + idx) = " << (elements + idx));
        }
        // only positions inside the logical n columns of each pitched row count
        if(idx % p < n) {
            T adjusted = elements[idx] - compensation;
            T partial = total + adjusted;
            compensation = (partial - total) - adjusted;
            total = partial;
        }
    }
    return total;
}
// reduction with multiplication (Π)
template<typename T> __host__ CUDART_DEVICE T CuMatrix<T>::prod( cudaStream_t stream ) const {
    // Product of every element: a multiply-reduction seeded with one.
#ifdef CuMatrix_Enable_KTS
    return reduce( multBinaryOp<T>(), 1.0, stream);
#else
    return reduce( Functory<T,multBinaryOp>::pinch(), 1.0, stream);
#endif
}
template<typename T> void CuMatrix<T>::featureMeans( CuMatrix<T>& means, bool lv) const {
    // Computes the per-column (feature) means of this matrix into 'means',
    // tiling column-wise and syncing each tile of the result back.
    // 'lv' is forwarded to featureAvgKernelL; its semantics live in that kernel.
    DMatrix<T> d_Means, d_X;
    int roff, coff;
    int tileM, tileN, tileP;
    tiler.tileDims(tileM, tileN, tileP, tdCols);
    int tileCount = DIV_UP(m,tileM);
    for(int i = 0; i < tileCount; i++) {
        tiler.tileLike(d_X, roff,coff, tileM, tileN, tileP, i, tdCols, true);
        if(checkDebug(debugTiler))prlocf("means tiling");
        means.tiler.tileLike(d_Means, roff,coff, tileM, tileN, tileP, i, tdCols, true);
        if(vectorQ()) {
            // vector special case: a single mean, sum over element count
            // NOTE(review): this is recomputed on every tile iteration -- confirm intended
            means.set(0, sum()/length());
        } else {
            featureAvgKernelL(d_Means, d_X, lv);
        }
        means.tiler.syncTile(d_Means, roff, coff);
    }
    // host copy of 'means' is now stale
    means.invalidateHost();
}
template<typename T> void CuMatrix<T>::featureMeansTx( CuMatrix<T>& means) const {
    // Computes per-column means into 'means' via the transposed-average kernel,
    // tiling row-wise.
    DMatrix<T> d_Means, d_X;
    int roff,coff;
    int tileM, tileN, tileP;
    tiler.tileDims(tileM, tileN, tileP, tdRows); // todo check this
    // fix: use the local tileM computed by tileDims above (was '_tileM'),
    // mirroring the tile-count computation in featureMeans
    int tileCount = DIV_UP(m,tileM);
    for(int i = 0; i < tileCount; i++) {
        tiler.tileLike(d_X, roff,coff, tileM, tileN, tileP, i, tdRows, true);
        means.tiler.tileLike(d_Means, roff,coff, tileM, tileN, tileP, i, tdRows, true);
        featureAvgTxdKernelL(d_Means, d_X);
        means.tiler.syncTile(d_Means, roff, coff);
    }
    // host copy of 'means' is now stale
    means.invalidateHost();
}
template<typename T> void CuMatrix<T>::featureMeansStreams( CuMatrix<T>& means, bool lv,int nstreams) const {
    // Computes per-column means into 'means' using the multi-stream average
    // kernel; 'nstreams' streams are used inside featureAvgMultiStreamL.
    DMatrix<T> d_Means, d_X;
    int roff, coff;
    int tileCount = tiler.getTileCount();
    int tileM, tileN, tileP;
    tiler.tileDims(tileM, tileN, tileP, tdCols); // todo check this
    for(int i = 0; i < tileCount; i++) {
        tiler.tileLike(d_X, roff,coff, tileM, tileN, tileP, i, tdCols, true);
        means.tiler.tileLike(d_Means, roff,coff, tileM, tileN, tileP, i, tdCols, true);
        //outln("d_Means " << util<T>::pdm(d_Means));
        featureAvgMultiStreamL(d_Means, d_X, lv, nstreams);
        means.tiler.syncTile(d_Means, roff, coff);
    }
    // host copy of 'means' is now stale
    means.invalidateHost();
}
template<typename T> CuMatrix<T> CuMatrix<T>::featureMeans(bool lv) const {
    // Convenience overload: allocates an n x 1 zero matrix and fills it with
    // the per-column means.
    CuMatrix<T> result = zeros(n, 1);
    featureMeans(result, lv);
    return result;
}
// matrix is transposed and average calced by doing sum-reductions of each row
// one thread (in x dir) for each row
// Requires CuMatrix_Enable_Cdp (dynamic parallelism): each thread launches a
// nested reduce over its row; without CDP the kernel asserts.
template<typename T> __global__ void rowSumKernel2(DMatrix<T> sums, const DMatrix<T> xTxd) {
    uint rowIdx = blockIdx.x * blockDim.x + threadIdx.x; // index into column of un-txd source matrix
    if(checkDebug(debugRedux) && rowIdx == 0) {
        util<T>::printRow(xTxd, rowIdx);
    }
    __syncthreads();
#ifdef CuMatrix_Enable_Cdp
    cherr(cudaPeekAtLastError());
#endif
    // T* sdata = SharedMemory<T>();
    // view of this thread's row as a 1 x n matrix (pointer arithmetic only;
    // no dereference happens until the rowIdx bound check below)
    DMatrix<T> row(xTxd.elements + xTxd.p * rowIdx, 1, xTxd.n);
    __syncthreads();
#ifdef CuMatrix_Enable_Cdp
    cherr(cudaPeekAtLastError());
#endif
    if(checkDebug(debugRedux) && rowIdx == xTxd.m - 1) {
        prlocf("last row as ");
        util<T>::printRow(row, 0);
    }
    if(rowIdx < xTxd.m) {
        if(checkDebug(debugRedux)) {
            flprintf("reducing row %d\n",rowIdx);
        }
        T sum = 0;
#ifdef CuMatrix_Enable_Cdp
        // nested (device-side) plus-reduction of this row
        sum = CuMatrix<T>::reduce(row, Functory<T,plusBinaryOp>::pinch(),0);
        // flprintf("sum %g\n", sum);
#else
        prlocf("not implemented for non-cdp\n");
        assert(false);
#endif
        if(checkDebug(debugRedux)) {
            flprintf("row %d sum %f\n",rowIdx, sum);
        }
        sums.elements[rowIdx]= sum;
    }
    __syncthreads();
#ifdef CuMatrix_Enable_Cdp
    cherr(cudaPeekAtLastError());
#endif
}
// sum reduces each row
// Launches rowSumKernel2 with one thread per row of d_x (blocks of up to 256),
// then synchronizes so d_rowSums is complete on return.
template<typename T> __host__ CUDART_DEVICE void CuMatrix<T>::rowSum(DMatrix<T>& d_rowSums, const DMatrix<T>& d_x, cudaStream_t stream) {
    cherr(cudaPeekAtLastError());
    uint blockX = MIN(256, d_x.m);
    uint gridX = blockX >= d_x.m ? 1 : DIV_UP(d_x.m,blockX);
    if(checkDebug(debugRedux)){
        prlocf("rowSum on ");
        util<T>::prdm(d_x);
        flprintf(" with gridX %d and blockX %d\n",gridX,blockX);
    }
    //b_util::pFuncPtrAtts((T*)rowSumKernel2<T>);
    // sanity-check the launch configuration before launching
    bool valid = b_util::validLaunchQ((void*)rowSumKernel2<T>,dim3(gridX), dim3(blockX));
    if(checkDebug(debugRedux))flprintf("valid %s\n", tOrF(valid));
    rowSumKernel2<<<gridX, blockX, 0, stream>>>(d_rowSums, d_x);
#ifdef __CUDA_ARCH__
    __syncthreads();
#else
    cherr(cudaDeviceSynchronize());
#endif
}
//template<typename T> __global__ smallestMutalFactor(uint* factor, )
/*
* works but bad shuffln
template<typename T, int StateDim> __global__ void rowReductionKernelNlte64(DMatrix<T> resVec, MonoidF bop, const DMatrix<T> x, uint slice, uint slices) {
assert(x.n < 65);
// shared mem to relay partial sums of rows that span warps
int col = threadIdx.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
if(row < x.m && col < x.n) {
// test for elements processed by by cols with laneid > WARP_SIZE
int2 laneid = b_util::laneId(threadIdx, blockIdx, blockDim);
ulong soffset = slice * x.m * x.p/slices;
uint toffset = slice * x.m * resVec.p/slices;
int currLen = x.n - 1;
T* xrow = x.elements + soffset + row * x.p;
// first reduction fills warp
T s = col < x.n ? xrow[col] : bop.identity;
if( col + WARP_SIZE < x.n )
s = bop(s,xrow[col + WARP_SIZE]);
__syncthreads();
while(currLen > 0) {
int dLane = col == 0 ? currLen : 0;
if(WARP_SIZE - laneid.y < x.n - col) {
flnfprintf("row %u laneid %u.%u y + x.n-col %d > ws\n", row, laneid.x,laneid.y, x.n-col);
// on a spanrow, so explicitly load elems of spanrow seg in 2nd warp
if(col + dLane < x.n) {
flnfprintf("loading lane %u.%u from next warps for row %u col %u s was %f\n", laneid.x + 1 , WARP_SIZE - (laneid.y + dLane), row,col,s);
s = bop(s, xrow[col+dLane]);
flnfprintf("s is %f\n",s);
} else {
flnfprintf("ignoring lane %u from next warps for row %u col %u\n" , (laneid.y + dLane - WARP_SIZE), row,col);
}
} else {
T os = shflDown<T>(s, dLane );
// (laneid<x.n && laneid > col) skips elems of segment of spanrow in 2nd warp
s = col == 0 ? bop(s, os) : s;
if(checkDebug(debugRedux)) flnfprintf("x.n %u row %u col %u lane id %u.%u dLane %d os %f s %f\n", x.n, row, col, laneid.x,laneid.y, dLane, os,s);
}
currLen--;
}
if(col == 0) {
resVec.elements[row + toffset] = s;
}
}
}
*/
/*
* todo version for well-formed matrices
* needs smem T for each spanrow that holds partial reduction of 1st part of row
*/
#ifdef CuMatrix_Enable_KTS
template<typename T, template <typename> class BinaryOpF> __global__
void rowReductionKernelNlte64(DMatrix<T> resVec, BinaryOpF<T> bop, const DMatrix<T> x, uint slice, uint slices)
#else
template<typename T, int StateDim> __global__
void rowReductionKernelNlte64(DMatrix<T> resVec, MonoidF<T,StateDim> bop, const DMatrix<T> x, uint slice, uint slices)
#endif
{
    // Row reduction for matrices with n <= 64 columns: one thread per
    // (row, col); each row is folded with 'bop' via warp shuffles and the
    // row result is written to resVec.  'slice'/'slices' select a horizontal
    // band of x when the whole matrix does not fit one grid launch.
    assert(x.n < 65);
    // shared mem to relay partial sums of rows that span warps
    int col = threadIdx.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    // thread in mat bounds
    if(row < x.m && col < x.n) {
        uint laneid;
        b_util::laneid(laneid);
        // element/result offsets of this slice's band
        ulong soffset = slice * x.m * x.p;
        uint toffset = slice * x.m * resVec.p;
        if(checkDebug(debugRedux) && col == 0 && row == 0) {
            flprintf("slice %u soffset %lu toffset %u\n", slice, soffset, toffset);
        }
        // shuffle span starts at half the next power of two >= n (capped at half a warp)
        int currLen = MIN(WARP_SIZE/2, b_util::nextPowerOf2(x.n)/2);
        T* xrow = x.elements + soffset + row * x.p;
        // first reduction fills warp local ses
        T s = col < x.n ? xrow[col] : bop.identity_ro();
        if( col + WARP_SIZE < x.n )
            s = bop(s,xrow[col + WARP_SIZE]);
        __syncthreads();
        if(col == 0 && x.n > WARP_SIZE - laneid && x.n < WARP_SIZE) {
            // row spans warps, so don't shuffle
            // fall back to a serial fold of the whole row by its first thread
            for(int i =1; i < x.n; i++) {
                s = bop(s, xrow[col + i]);
            }
        } else {
            // binary shuffle-down reduction within the warp
            while(currLen > 0) {
                int dLane = currLen; // col == 0 ? currLen : 0;
                // check for beginning of a warp spanrow
                T os = shflDown<T>(s, dLane);
                // (laneid<x.n && laneid > col) skips elems of segment of spanrow in 2nd warp
                if(col > 0 && col > laneid ) {
                    // todo retrieve partial redux of 1st part of span row and reduce rest of spanrow with that
                    //if(checkDebug(debugRedux)) flnfprintf("skipn x.n %u row %u col %u lane id %u dLane %d os %f s %f\n", x.n, row, col, laneid, dLane, os,s);
                } else if(col + dLane < x.n){
                    s = bop(s, os);
                    //if(checkDebug(debugRedux)) flnfprintf("bopn x.n %u row %u col %u lane id %u dLane %d os %f s %f\n", x.n, row, col, laneid, dLane, os,s);
                }
                currLen >>= 1;
            }
        }
        // first column thread of each row publishes the row's result
        if(col == 0) {
            resVec.elements[row + toffset] = s;
        }
    }
}
//
// 'slices' are row-wise
// stripes of 64 cols are col-wise
// Two-phase row reduction for wide matrices: phase 1 reduces each 64-column
// stripe of a row into resVec[row*p + stripe]; phase 2 reduces those stripe
// partials into resVec[row].
// NOTE(review): unlike rowReductionKernelNlte64 there is no 'row < x.m' guard
// here -- confirm callers always launch with exact row coverage.
#ifdef CuMatrix_Enable_KTS
template<typename T, template <typename> class BinaryOpF> __global__ void rowReductionKernel(DMatrix<T> resVec, BinaryOpF<T> bop, const DMatrix<T> x, int slice, int slices, int stripes)
#else
template<typename T, int StateDim> __global__ void rowReductionKernel(DMatrix<T> resVec, MonoidF<T,StateDim> bop, const DMatrix<T> x, int slice, int slices, int stripes)
#endif
{
    int col = threadIdx.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    // element/result offsets of this slice's band
    ulong soffset = slice * x.m * x.p;
    int toffset = slice * x.m * resVec.p;
    // phase 1: per-stripe shuffle reductions
    for(int stripe = 0; stripe < stripes; stripe++) {
        int currLen = MIN(WARP_SIZE/2, b_util::nextPowerOf2(x.n)/2);
        T* xrow = x.elements + soffset + row * x.p + stripe * 64;
        // first reduction fills warp
        T s = col < x.n ? xrow[col] : bop.identity_ro();
        if( col + WARP_SIZE < x.n )
            s = bop(s,xrow[col + WARP_SIZE]);
        __syncthreads();
        while(currLen > 0) {
            int lane = col + currLen;
            s = bop(s, shfl<T>(s, lane));
            currLen >>= 1;
        }
        if(threadIdx.x == 0) {
            resVec.elements[row * resVec.p + stripe + toffset] = s;
        }
    }
    __syncthreads();
    assert(stripes < 65);
    // phase 2: reduce the per-stripe partials of each row
    int currLen = MIN(WARP_SIZE/2, b_util::nextPowerOf2(stripes)/2);
    T* resVecRow = resVec.elements + toffset + row * resVec.p;
    // first reduction fills warp
    T s = col < stripes ? resVecRow[col] : bop.identity_ro();
    // NOTE(review): this bound uses x.n but phase 2 iterates over 'stripes';
    // looks like a copy-paste of the phase-1 condition -- confirm 'stripes' was meant
    if( col + WARP_SIZE < x.n )
        s = bop(s,resVecRow[col + WARP_SIZE]);
    __syncthreads();
    while(currLen > 0) {
        int lane = col + currLen;
        s = bop(s, shfl<T>(s, lane));
        currLen >>= 1;
    }
    if(threadIdx.x == 0) {
        resVec.elements[row + toffset] = s;
    }
}
#ifdef CuMatrix_Enable_KTS
template <typename T> template <template <typename> class BinaryOp> __host__ CUDART_DEVICE void CuMatrix<T>::reduceRowsNlte64(DMatrix<T>& resVec, const DMatrix<T>& d_x, BinaryOp<T> op, cudaStream_t stream )
#else
template <typename T> template <int StateDim> __host__ CUDART_DEVICE void CuMatrix<T>::reduceRowsNlte64(DMatrix<T>& resVec, const DMatrix<T>& d_x, MonoidF<T,StateDim> op, cudaStream_t stream )
#endif
{
    // Host/device launcher for rowReductionKernelNlte64: reduces each row of
    // d_x (n <= 64 columns) into resVec with 'op'.  The matrix is split into
    // horizontal slices when the required grid.y exceeds the device limit.
    assert(d_x.n <= WARP_SIZE*2);
    ExecCaps * pcaps = ExecCaps::currCaps();
    // block: one warp-wide (or narrower) row of threads per matrix row chunk
    uint blockW= MIN(WARP_SIZE, d_x.n);
    uint blockH = MIN(d_x.m, maxH<T>(*pcaps,blockW));
    if(checkDebug(debugRedux))flprintf("for d_x %ux%ux%u first %p last %p\n", d_x.m,d_x.n, d_x.p, d_x.elements, d_x.elements + (d_x.m -1)*d_x.p + d_x.n -1);
    if(checkDebug(debugRedux))flprintf("blockH %d maxH<T>(*pcaps,blockW) %d\n",blockH, maxH<T>(*pcaps,blockW));
    int gridY = DIV_UP(d_x.m, blockH);
    dim3 grid(1, gridY);
    // in case grid y is too big
    int slices = DIV_UP(grid.y, pcaps->maxGrid.y);
    if(checkDebug(debugRedux))flprintf("slices %d\n",slices);
    dim3 block(blockW,blockH);
    int sliceGridY = grid.y/ slices;
    DMatrix<T> d_slice(d_x);
    d_slice.m = sliceGridY * blockH;
    if(checkDebug(debugRedux))flprintf("init sliceGridY %d d_slice.m %d\n",sliceGridY, d_slice.m);
    int offset;
    for(int currSlice =0; currSlice < slices; currSlice++) {
        offset = currSlice * d_slice.m * d_x.p ;
        if(currSlice == slices - 1) {
            // last slice takes whatever rows remain
            if(checkDebug(debugRedux))prlocf("last fill slice");
            d_slice.m = d_x.m - (slices - 1 ) * d_slice.m;
            sliceGridY = DIV_UP(d_slice.m, blockH);
        }
        grid.y = sliceGridY;
        if(checkDebug(debugRedux)){
            flprintf("sliceGridY %d\n",sliceGridY);
            flprintf("slice %d on mat offset %d %dX%d(X%d) (d_slice.elements 1st %p last %p)\n",
                    currSlice, offset, d_slice.m, d_slice.n, d_slice.p, d_slice.elements+ offset,d_slice.elements+ offset + (d_slice.m-1)* d_slice.p + d_slice.n -1);
            b_util::prd3(grid, " grid of ");
            b_util::prd3(block, "block of");
        }
        rowReductionKernelNlte64<<<grid, block, 0, stream>>>(resVec, op, d_slice, currSlice, slices);
    }
    // block until all slices have been reduced
    cherr(cudaDeviceSynchronize());
}
// Explicit instantiations of CuMatrix<T>::reduceRows (defined just below)
// for the supported element types.
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceRows<plusBinaryOp>(DMatrix<float>&, DMatrix<float> const&, plusBinaryOp<float>, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceRows<plusBinaryOp>(DMatrix<double>&, DMatrix<double> const&, plusBinaryOp<double>, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<long>::reduceRows<plusBinaryOp>(DMatrix<long>&, DMatrix<long> const&, plusBinaryOp<long>, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<ulong>::reduceRows<plusBinaryOp>(DMatrix<ulong>&, DMatrix<ulong> const&, plusBinaryOp<ulong>, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceRows<plusBinaryOp>(DMatrix<int>&, DMatrix<int> const&, plusBinaryOp<int>, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<unsigned int>::reduceRows<plusBinaryOp>(DMatrix<unsigned int>&, DMatrix<unsigned int> const&, plusBinaryOp<unsigned int>, CUstream_st*);
#else
template __host__ CUDART_DEVICE void CuMatrix<float>::reduceRows(DMatrix<float>&, DMatrix<float> const&, MonoidF<float,1>, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<double>::reduceRows(DMatrix<double>&, DMatrix<double> const&, MonoidF<double,1>, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<long>::reduceRows(DMatrix<long>&, DMatrix<long> const&, MonoidF<long,1>, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<ulong>::reduceRows(DMatrix<ulong>&, DMatrix<ulong> const&, MonoidF<ulong,1>, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<int>::reduceRows(DMatrix<int>&, DMatrix<int> const&, MonoidF<int,1>, CUstream_st*);
template __host__ CUDART_DEVICE void CuMatrix<uint>::reduceRows(DMatrix<uint>&, DMatrix<uint> const&, MonoidF<uint,1>, CUstream_st*);
#endif
#ifdef CuMatrix_Enable_KTS
template <typename T> template <template <typename> class BinaryOp> __host__ CUDART_DEVICE void CuMatrix<T>::reduceRows(DMatrix<T>& resVec, const DMatrix<T>& d_x, BinaryOp<T> op, cudaStream_t stream )
#else
template <typename T> template <int StateDim> __host__ CUDART_DEVICE void CuMatrix<T>::reduceRows(DMatrix<T>& resVec, const DMatrix<T>& d_x, MonoidF<T,StateDim> op, cudaStream_t stream )
#endif
{
    // Reduces each row of d_x into resVec with 'op'.
    // NOTE(review): only the n < 65 case is implemented; for wider matrices
    // this silently does nothing and resVec is left untouched -- consider an
    // assert or an error path for d_x.n >= 65.
    if(d_x.n < 65) {
        reduceRowsNlte64(resVec,d_x,op, stream);
    }
}
#include "CuMatrixInster.cu"
|
fbc4eab2647503ae0c91c69283f620c583e0ae57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/arithmetic/relation.h"
#include "ppl/common/types.h"
#include <hip/hip_fp16.h>
// Selector for the element-wise comparison performed by the relation kernels.
enum RelationOpType {
    Relation_Unknown = 0,
    Relation_Equal,
    Relation_Greater,
    Relation_Less,
    Relation_OpNum,                 // number of real ops (bookkeeping)
    Relation_ForceWord = INT_MAX,   // forces the enum to int width
};
// Eight packed half values -- the vectorized element type used by the fp16
// relation kernels.
struct half8_ {
    half x0;
    half y0;
    half z0;
    half w0;
    half x1;
    half y1;
    half z1;
    half w1;
};
// Eight packed bool results, mirroring half8_ lane for lane.
struct bool8_ {
    bool x0;
    bool y0;
    bool z0;
    bool w0;
    bool x1;
    bool y1;
    bool z1;
    bool w1;
};
// Scalar relation primitive: one explicit specialization per (op, element type).
template<RelationOpType op_type, typename T>
__device__ inline bool ppl_relation_scalar(T a, T b);
// float equality is approximate: |a - b| < 1e-6 rather than exact bit equality
template<> __device__ inline bool ppl_relation_scalar<Relation_Equal, float>(float a, float b) {
    return fabsf(a - b) < 1e-6;
}
template<> __device__ inline bool ppl_relation_scalar<Relation_Greater, float>(float a, float b) {
    return a > b;
}
template<> __device__ inline bool ppl_relation_scalar<Relation_Less, float>(float a, float b) {
    return a < b;
}
// int64 comparisons are exact
template<> __device__ inline bool ppl_relation_scalar<Relation_Equal, int64_t>(int64_t a, int64_t b) {
    return a == b;
}
template<> __device__ inline bool ppl_relation_scalar<Relation_Greater, int64_t>(int64_t a, int64_t b) {
    return a > b;
}
template<> __device__ inline bool ppl_relation_scalar<Relation_Less, int64_t>(int64_t a, int64_t b) {
    return a < b;
}
// Scalar relation primitive for half precision.  The native half comparison
// intrinsics require sm_60+ and CUDA 9+; on older targets/toolchains the
// comparison is unsupported and the fallback returns false (0).
template<RelationOpType op_type>
__device__ inline bool ppl_relation_scalar_fp16(half a, half b);
template <>
__device__ inline bool ppl_relation_scalar_fp16<Relation_Equal>(half a, half b)
{
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
    return __heq(a, b);
#else
    return 0;
#endif
}
template <>
__device__ inline bool ppl_relation_scalar_fp16<Relation_Greater>(half a, half b)
{
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
    return __hgt(a, b);
#else
    return 0;
#endif
}
template <>
__device__ inline bool ppl_relation_scalar_fp16<Relation_Less>(half a, half b)
{
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
    return __hlt(a, b);
#else
    return 0;
#endif
}
// Scalar overload: trivially forwards to the fp16 scalar primitive.
template <RelationOpType op_type>
static __device__ inline bool ppl_relation_vector_fp16(half a, half b)
{
    bool res;
    res = ppl_relation_scalar_fp16<op_type>(a, b);
    return res;
}
// Vector overload: applies the fp16 scalar primitive lane-by-lane to a
// half8_ pair, producing a bool8_ with matching lanes.
template <RelationOpType op_type>
static __device__ inline bool8_ ppl_relation_vector_fp16(half8_ a, half8_ b)
{
    bool8_ res;
    res.x0 = ppl_relation_scalar_fp16<op_type>(a.x0, b.x0);
    res.y0 = ppl_relation_scalar_fp16<op_type>(a.y0, b.y0);
    res.z0 = ppl_relation_scalar_fp16<op_type>(a.z0, b.z0);
    res.w0 = ppl_relation_scalar_fp16<op_type>(a.w0, b.w0);
    res.x1 = ppl_relation_scalar_fp16<op_type>(a.x1, b.x1);
    res.y1 = ppl_relation_scalar_fp16<op_type>(a.y1, b.y1);
    res.z1 = ppl_relation_scalar_fp16<op_type>(a.z1, b.z1);
    res.w1 = ppl_relation_scalar_fp16<op_type>(a.w1, b.w1);
    return res;
}
// Rank-aligns two shapes for broadcasting: the lower-rank shape is left-padded
// with 1s (numpy-style) so both outputs have the same dim count.  Only the
// smaller-rank output is rewritten; when the ranks are equal the else branch
// runs but is effectively a no-op copy.
static void ppl_pad_tensor_shape(const ppl::nn::TensorShape *tensor_shape0,
                const ppl::nn::TensorShape *tensor_shape1,
                ppl::nn::TensorShape *pad_tensor_shape0,
                ppl::nn::TensorShape *pad_tensor_shape1) {
    int max_dims = std::max(tensor_shape0->GetDimCount(), tensor_shape1->GetDimCount());
    if (pad_tensor_shape0->GetDimCount() < pad_tensor_shape1->GetDimCount()) {
        pad_tensor_shape0->SetDimCount(max_dims);
        // pad 1 to shape_min_pad's higher dim
        int offset = max_dims - tensor_shape0->GetDimCount();
        for (int i = 0; i < offset; i++) {
            pad_tensor_shape0->SetDim(i, 1);
        }
        for (int i = offset; i < max_dims; i++) {
            pad_tensor_shape0->SetDim(i, tensor_shape0->GetDim(i - offset));
        }
    } else {
        pad_tensor_shape1->SetDimCount(max_dims);
        // pad 1 to shape_min_pad's higher dim
        int offset = max_dims - tensor_shape1->GetDimCount();
        for (int i = 0; i < offset; i++) {
            pad_tensor_shape1->SetDim(i, 1);
        }
        for (int i = offset; i < max_dims; i++) {
            pad_tensor_shape1->SetDim(i, tensor_shape1->GetDim(i - offset));
        }
    }
}
// Counts the dimensions along which the two (rank-aligned) shapes differ.
// When exactly one dimension differs, its index is reported through 'aixs';
// otherwise 'aixs' is left untouched.
static int ppl_get_num_broadcast_dims(const ppl::nn::TensorShape *tensor_shape0,
                const ppl::nn::TensorShape *tensor_shape1,
                int &aixs) {
    ppl::nn::TensorShape pad_tensor_shape0 = *tensor_shape0;
    ppl::nn::TensorShape pad_tensor_shape1 = *tensor_shape1;
    ppl_pad_tensor_shape(tensor_shape0, tensor_shape1,
            &pad_tensor_shape0, &pad_tensor_shape1);
    const int dim_count = pad_tensor_shape0.GetDimCount();
    int num_broadcast_dims = 0;
    int last_mismatch = 0;
    // single pass: count mismatches and remember the most recent one
    for (int it = 0; it < dim_count; ++it) {
        if (pad_tensor_shape0.GetDim(it) != pad_tensor_shape1.GetDim(it)) {
            ++num_broadcast_dims;
            last_mismatch = it;
        }
    }
    // preserve original semantics: only publish the axis for a single mismatch
    if (num_broadcast_dims == 1) {
        aixs = last_mismatch;
    }
    return num_broadcast_dims;
}
// Computes per-dimension element strides for the two inputs and the output,
// after rank-aligning the inputs against the output's dim count.  Dimensions
// of size 1 in an input get stride 0 (broadcast).  Dim 1 is treated as the
// channel dim and its extent is divided (rounded up) by 'packed_channel'.
void ppl_relation_prepare_strides(
    const ppl::nn::TensorShape* tensor_shape0,
    const ppl::nn::TensorShape* tensor_shape1,
    const ppl::nn::TensorShape* tensor_shape_out,
    const int packed_channel,
    uint32_t* stride_in0,
    uint32_t* stride_in1,
    uint32_t* stride_out)
{
    ppl::nn::TensorShape pad_tensor_shape0 = *tensor_shape0;
    ppl::nn::TensorShape pad_tensor_shape1 = *tensor_shape1;
    int max_dims = tensor_shape_out->GetDimCount();
    // left-pad the lower-rank input with 1s up to the output rank
    if (pad_tensor_shape0.GetDimCount() < pad_tensor_shape1.GetDimCount()) {
        pad_tensor_shape0.SetDimCount(max_dims);
        // pad 1 to shape_min_pad's higher dim
        int offset = max_dims - tensor_shape0->GetDimCount();
        for (int i = 0; i < offset; i++) {
            pad_tensor_shape0.SetDim(i, 1);
        }
        for (int i = offset; i < max_dims; i++) {
            pad_tensor_shape0.SetDim(i, tensor_shape0->GetDim(i - offset));
        }
    } else {
        pad_tensor_shape1.SetDimCount(max_dims);
        // pad 1 to shape_min_pad's higher dim
        int offset = max_dims - tensor_shape1->GetDimCount();
        for (int i = 0; i < offset; i++) {
            pad_tensor_shape1.SetDim(i, 1);
        }
        for (int i = offset; i < max_dims; i++) {
            pad_tensor_shape1.SetDim(i, tensor_shape1->GetDim(i - offset));
        }
    }
    const int dimCount = tensor_shape_out->GetDimCount();
    uint32_t stride0 = 1;
    uint32_t stride1 = 1;
    uint32_t stride_out0 = 1;
    // accumulate strides from innermost to outermost dim
    for (int i = dimCount - 1; i >= 0; i--) {
        stride_in0[i] = pad_tensor_shape0.GetDim(i) == 1 ? 0 : stride0;
        stride_in1[i] = pad_tensor_shape1.GetDim(i) == 1 ? 0 : stride1;
        stride_out[i] = stride_out0;
        if (i == 1) { // for channel dim, div packed_channel
            stride0 *= (pad_tensor_shape0.GetDim(i) + packed_channel - 1) / packed_channel;
            stride1 *= (pad_tensor_shape1.GetDim(i) + packed_channel - 1) / packed_channel;
            stride_out0 *= (tensor_shape_out->GetDim(i) + packed_channel - 1) / packed_channel;
        } else {
            stride0 *= pad_tensor_shape0.GetDim(i);
            stride1 *= pad_tensor_shape1.GetDim(i);
            stride_out0 *= tensor_shape_out->GetDim(i);
        }
    }
}
// NHWC variant of ppl_relation_prepare_strides: strides are laid out so the
// channel dim (logical dim 1) is innermost.  The stride position is mapped to
// a logical dim via the permutation below (last -> channel, 0 -> batch,
// middle positions shift by one).  Requires at least rank-2 inputs; lower
// ranks return without writing anything.
void ppl_relation_prepare_strides_nhwc(
    const ppl::nn::TensorShape *tensor_shape0,
    const ppl::nn::TensorShape *tensor_shape1,
    const ppl::nn::TensorShape *tensor_shape_out,
    const int packed_channel,
    uint32_t *stride_in0,
    uint32_t *stride_in1,
    uint32_t *stride_out)
{
    // guard: the NHWC permutation below needs a batch dim and a channel dim
    if (tensor_shape0->GetDimCount() < 2 || tensor_shape1->GetDimCount() < 2) return;
    ppl::nn::TensorShape pad_tensor_shape0 = *tensor_shape0;
    ppl::nn::TensorShape pad_tensor_shape1 = *tensor_shape1;
    int max_dims = tensor_shape_out->GetDimCount();
    // left-pad the lower-rank input with 1s up to the output rank
    if (pad_tensor_shape0.GetDimCount() < pad_tensor_shape1.GetDimCount()) {
        pad_tensor_shape0.SetDimCount(max_dims);
        // pad 1 to shape_min_pad's higher dim
        int offset = max_dims - tensor_shape0->GetDimCount();
        for (int i = 0; i < offset; i++) {
            pad_tensor_shape0.SetDim(i, 1);
        }
        for (int i = offset; i < max_dims; i++) {
            pad_tensor_shape0.SetDim(i, tensor_shape0->GetDim(i - offset));
        }
    } else {
        pad_tensor_shape1.SetDimCount(max_dims);
        // pad 1 to shape_min_pad's higher dim
        int offset = max_dims - tensor_shape1->GetDimCount();
        for (int i = 0; i < offset; i++) {
            pad_tensor_shape1.SetDim(i, 1);
        }
        for (int i = offset; i < max_dims; i++) {
            pad_tensor_shape1.SetDim(i, tensor_shape1->GetDim(i - offset));
        }
    }
    const int dimCount = tensor_shape_out->GetDimCount();
    uint32_t stride0 = 1;
    uint32_t stride1 = 1;
    uint32_t stride_out0 = 1;
    for (int stride_pos = dimCount - 1; stride_pos >= 0; stride_pos--) {
        // map the stride slot to its logical (NCHW-order) dim index
        int i = stride_pos;
        if (stride_pos == dimCount - 1) i = 1;      // innermost slot <- channel dim
        else if (stride_pos == 0) i = 0;            // outermost slot <- batch dim
        else i = stride_pos + 1;                    // spatial dims shift by one
        stride_in0[stride_pos] = pad_tensor_shape0.GetDim(i) == 1 ? 0 : stride0;
        stride_in1[stride_pos] = pad_tensor_shape1.GetDim(i) == 1 ? 0 : stride1;
        stride_out[stride_pos] = stride_out0;
        if (i == 1) { // for channel dim, div packed_channel
            stride0 *= (pad_tensor_shape0.GetDim(i) + packed_channel - 1) / packed_channel;
            stride1 *= (pad_tensor_shape1.GetDim(i) + packed_channel - 1) / packed_channel;
            stride_out0 *= (tensor_shape_out->GetDim(i) + packed_channel - 1) / packed_channel;
        } else {
            stride0 *= pad_tensor_shape0.GetDim(i);
            stride1 *= pad_tensor_shape1.GetDim(i);
            stride_out0 *= tensor_shape_out->GetDim(i);
        }
    }
}
// Maximum tensor rank supported by the relation kernels below.
#define MAXDIMENSIONS 7
// Per-dimension element strides, passed to kernels by value. A stride of 0
// in stride_in0/stride_in1 marks a broadcast (size-1) input dimension.
struct RelationParam {
uint32_t stride_in0[MAXDIMENSIONS];
uint32_t stride_in1[MAXDIMENSIONS];
uint32_t stride_out[MAXDIMENSIONS];
};
// Element-wise relation (Equal/Greater/Less) for fp16 data with broadcasting
// driven by precomputed per-dim strides. T1 is the packed input type (half
// or half8_), T2 the matching output type (bool or bool8_); num_elems counts
// PACKED elements. Body is compiled out below SM60 / CUDA 9 (fp16 intrinsics).
template <RelationOpType op_type, typename T1, typename T2>
__global__ void ppl_cukernel_relation_fp16(
    const uint64_t num_elems,
    const int dim_count,
    RelationParam param,
    const T1* input0,
    const T1* input1,
    T2* output)
{
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
    // Widen before multiplying: blockIdx.x * blockDim.x is evaluated in
    // 32-bit unsigned and silently overflows for tensors with > 2^32
    // elements (num_elems is uint64_t).
    uint64_t index = (uint64_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= num_elems)
        return;
    uint64_t out_index = index;
    uint64_t offset0 = 0;
    uint64_t offset1 = 0;
    // Decompose the flat output index into per-dim coordinates, then rebuild
    // each input's offset; a stride of 0 pins a broadcast dimension.
    for (int i = 0; i < dim_count; i++) {
        uint64_t dim_off = index / param.stride_out[i];
        offset0 += dim_off * param.stride_in0[i];
        offset1 += dim_off * param.stride_in1[i];
        index = index % param.stride_out[i];
    }
    output[out_index] = ppl_relation_vector_fp16<op_type>(input0[offset0], input1[offset1]);
#endif
}
// Generic broadcasting relation kernel: decomposes the flat output index
// into per-dim coordinates via param.stride_out, then rebuilds each input's
// offset (stride 0 pins a broadcast dimension).
template<RelationOpType op_type, typename T>
__global__ void ppl_cukernel_relation(
    const uint64_t num_elems,
    const int dim_count,
    RelationParam param,
    const T *input0,
    const T* input1,
    bool *output) {
    // 64-bit multiply: the 32-bit product blockIdx.x * blockDim.x would
    // overflow for tensors with more than 2^32 elements.
    uint64_t index = (uint64_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= num_elems) return;
    uint64_t out_index = index;
    uint64_t offset0 = 0;
    uint64_t offset1 = 0;
    for (int i = 0; i < dim_count; i++) {
        uint64_t dim_off = index / param.stride_out[i];
        offset0 += dim_off * param.stride_in0[i];
        offset1 += dim_off * param.stride_in1[i];
        index = index % param.stride_out[i];
    }
    output[out_index] = ppl_relation_scalar<op_type, T>(input0[offset0], input1[offset1]);
}
// Plain element-wise relation: both inputs have identical shapes, no
// broadcasting, one thread per element.
template<RelationOpType op_type, typename T>
__global__ void ppl_cukernel_relation_naive(
    const uint64_t num_elems,
    const T *input0,
    const T* input1,
    bool *output) {
    // 64-bit multiply to avoid 32-bit overflow of blockIdx.x * blockDim.x
    // on tensors with more than 2^32 elements.
    uint64_t index = (uint64_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= num_elems) return;
    output[index] = ppl_relation_scalar<op_type, T>(input0[index], input1[index]);
}
// Relation where one operand broadcasts down to a single element:
// first_shorter selects which operand is the scalar one (it always reads
// element 0), while the other is indexed element-wise.
template<RelationOpType op_type, typename T>
__global__ void ppl_cukernel_relation_one_scalar(
    const uint64_t num_elems,
    const bool first_shorter,
    const T *input0,
    const T* input1,
    bool *output) {
    // 64-bit multiply to avoid 32-bit overflow of blockIdx.x * blockDim.x
    // on tensors with more than 2^32 elements.
    uint64_t index = (uint64_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= num_elems) return;
    uint64_t offset0 = first_shorter ? 0 : index;
    uint64_t offset1 = first_shorter ? index : 0;
    output[index] = ppl_relation_scalar<op_type, T>(input0[offset0], input1[offset1]);
}
// Relation where exactly one axis broadcasts. The host computes
// inner_dim = product of dims after the broadcast axis and
// outer_stride = inner_dim * (size of the broadcast axis); the smaller
// operand is indexed with that axis collapsed out.
template<RelationOpType op_type, typename T>
__global__ void ppl_cukernel_relation_one_broadcast(
    const uint64_t num_elems,
    const int outer_stride,
    const int inner_dim,
    const bool first_shorter,
    const T *input0,
    const T* input1,
    bool *output) {
    // 64-bit index math throughout: both the blockIdx.x * blockDim.x
    // product and the previous int inner/outer indices could overflow or
    // truncate for tensors with more than 2^31 elements.
    uint64_t index = (uint64_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= num_elems) return;
    uint64_t inner_idx = index % (uint64_t)inner_dim;
    uint64_t outer_idx = index / (uint64_t)outer_stride;
    // Position in the smaller operand with the broadcast axis dropped.
    uint64_t calc_index = outer_idx * inner_dim + inner_idx;
    uint64_t offset0 = first_shorter ? calc_index : index;
    uint64_t offset1 = first_shorter ? index : calc_index;
    output[index] = ppl_relation_scalar<op_type, T>(input0[offset0], input1[offset1]);
}
// Host-side launcher for the fp16 relation kernels. Dispatches on the
// output data format: NDARRAY runs one half per thread; NHWC runs a
// vectorized path (8 packed halves per thread via half8_/bool8_) and
// therefore requires both inputs' channel dim (logical dim 1) to be a
// multiple of 8. Returns RC_UNSUPPORTED for anything else.
template <RelationOpType op_type>
ppl::common::RetCode PPLCUDARelationForwardImpFp16(
hipStream_t stream,
const ppl::nn::TensorShape* input_shape0,
const half* input0,
const ppl::nn::TensorShape* input_shape1,
const half* input1,
const ppl::nn::TensorShape* output_shape,
bool* output)
{
RelationParam param;
uint64_t num_elems = output_shape->GetElementsIncludingPadding();
int dim_count = output_shape->GetDimCount();
int block_size = 256;
// SHIFT/PACKED are the log2 and value of the per-thread packing factor;
// the element count is right-shifted accordingly before sizing the grid.
// (No comments inside the macro body: '//' would swallow the trailing '\'.)
#define SWITCH_CASE(FORMAT, TYPE, TYPE2, SHIFT, PACKED) \
case FORMAT: { \
int channel_shift = SHIFT; \
int packed_channel = PACKED; \
uint64_t grid_size = ((num_elems >> channel_shift) + block_size - 1) / block_size; \
ppl_relation_prepare_strides(input_shape0, input_shape1, output_shape, packed_channel, param.stride_in0, param.stride_in1, param.stride_out); \
hipLaunchKernelGGL(( ppl_cukernel_relation_fp16<op_type, TYPE, TYPE2>), dim3(grid_size), \
block_size, \
0, \
stream, num_elems >> channel_shift, dim_count, param, (const TYPE*)input0, (const TYPE*)input1, (TYPE2*)output); \
return ppl::common::RC_SUCCESS; \
}
switch (output_shape->GetDataFormat()) {
SWITCH_CASE(ppl::common::DATAFORMAT_NDARRAY, half, bool, 0, 1);
case ppl::common::DATAFORMAT_NHWC: {
// Vectorized channels-last path: 8 halves per thread.
bool can_broadcast = (input_shape0->GetDimCount() >= 2) && (input_shape1->GetDimCount() >= 2);
if (!can_broadcast) return ppl::common::RC_UNSUPPORTED;
// Both channel dims must be multiples of 8 for the half8_ packing.
if ((input_shape0->GetDim(1) & 0x7) || (input_shape1->GetDim(1) & 0x7)) return ppl::common::RC_UNSUPPORTED;
int channel_shift = 3;
int packed_channel = 8;
uint64_t grid_size = ((num_elems >> channel_shift) + block_size - 1) / block_size;
ppl_relation_prepare_strides_nhwc(input_shape0, input_shape1, output_shape, packed_channel, param.stride_in0, param.stride_in1, param.stride_out);
hipLaunchKernelGGL(( ppl_cukernel_relation_fp16<op_type, half8_, bool8_>), dim3(grid_size), dim3(block_size), 0, stream, num_elems >> channel_shift, dim_count, param, (const half8_ *)input0, (const half8_ *)input1, (bool8_ *)output);
return ppl::common::RC_SUCCESS;
}
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef SWITCH_CASE
}
// Host-side launcher for float/int64 relation ops. Picks the cheapest
// kernel based on how the two input shapes broadcast against each other:
//   - no broadcast dims         -> element-wise "naive" kernel
//   - every dim broadcasts      -> one operand is a single scalar (one_scalar)
//   - exactly one broadcast dim -> single-axis kernel (one_broadcast)
//   - otherwise                 -> generic stride-decomposition kernel
template<RelationOpType op_type, typename T>
ppl::common::RetCode PPLCUDARelationForwardImp(
hipStream_t stream,
const ppl::nn::TensorShape* input_shape0,
const T *input0,
const ppl::nn::TensorShape* input_shape1,
const T *input1,
const ppl::nn::TensorShape* output_shape,
bool *output) {
RelationParam param;
uint64_t num_elems = output_shape->GetElementsIncludingPadding();
int dim_count = output_shape->GetDimCount();
int block_size = 256;
int axis = 0;
// axis receives the broadcast dim index when exactly one dim broadcasts.
int num_broadcast_dims = ppl_get_num_broadcast_dims(input_shape0, input_shape1, axis);
if (num_broadcast_dims == 0) {
// Identical (rank-padded) shapes: plain element-wise kernel.
uint64_t grid_size = (num_elems + block_size - 1) / block_size;
hipLaunchKernelGGL(( ppl_cukernel_relation_naive<op_type, T>), dim3(grid_size),
dim3(block_size), 0, stream, num_elems,
(const T*)input0, (const T*)input1, (bool*)output);
} else if (num_broadcast_dims == dim_count) {
// Every dim broadcasts: the smaller operand is effectively one scalar.
uint64_t grid_size = (num_elems + block_size - 1) / block_size;
// first_shorter = true when input0 is the smaller (broadcast) operand.
bool first_shorter = false;
if (input_shape0->GetRealDimCount() == input_shape1->GetRealDimCount() &&
input_shape0->GetDim(axis) < input_shape1->GetDim(axis)) {
first_shorter = true;
}
if (input_shape0->GetRealDimCount() < input_shape1->GetRealDimCount()) {
first_shorter = true;
}
hipLaunchKernelGGL(( ppl_cukernel_relation_one_scalar<op_type, T>), dim3(grid_size),
dim3(block_size), 0, stream, num_elems, first_shorter,
(const T*)input0, (const T*)input1, (bool*)output);
} else if (num_broadcast_dims == 1) {
// Single broadcast axis: precompute the collapsed-axis geometry.
// inner_dim = product of output dims after the axis;
// outer_stride additionally spans the broadcast axis itself.
int inner_dim = 1;
uint64_t grid_size = (num_elems + block_size - 1) / block_size;
for(int it = axis + 1; it < dim_count; inner_dim *= output_shape->GetDim(it), ++it);
int outer_stride = inner_dim * output_shape->GetDim(axis);
bool first_shorter = false;
if (input_shape0->GetRealDimCount() == input_shape1->GetRealDimCount() &&
input_shape0->GetDim(axis) < input_shape1->GetDim(axis)) {
first_shorter = true;
}
if (input_shape0->GetRealDimCount() < input_shape1->GetRealDimCount()) {
first_shorter = true;
}
hipLaunchKernelGGL(( ppl_cukernel_relation_one_broadcast<op_type, T>), dim3(grid_size),
dim3(block_size), 0, stream, num_elems, outer_stride, inner_dim, first_shorter,
(const T*)input0, (const T*)input1, (bool*)output);
} else {
// General case: full per-dim stride decomposition.
int packed_channel = 1;
uint64_t grid_size = (num_elems + block_size - 1) / block_size;
ppl_relation_prepare_strides(input_shape0, input_shape1,
output_shape, packed_channel, param.stride_in0, param.stride_in1, param.stride_out);
hipLaunchKernelGGL(( ppl_cukernel_relation<op_type, T>), dim3(grid_size),
dim3(block_size), 0, stream, num_elems, dim_count, param,
(const T*)input0, (const T*)input1, (bool*)output);
}
return ppl::common::RC_SUCCESS;
}
// Generates the public entry points PPLCUDARelation{Equal,Greater,Less}ForwardImp.
// Each dispatches on input_shape0's data type (fp16 / fp32 / int64) and
// forwards to the matching typed implementation; other dtypes are rejected
// with RC_UNSUPPORTED.
// (No comments inside the macro body: '//' would swallow the trailing '\'.)
#define INSTANT(OPTYPE) \
ppl::common::RetCode PPLCUDARelation##OPTYPE##ForwardImp( \
hipStream_t stream, \
const ppl::nn::TensorShape* input_shape0, \
const void *input0, \
const ppl::nn::TensorShape* input_shape1, \
const void *input1, \
const ppl::nn::TensorShape* output_shape, \
bool *output) { \
if (input_shape0->GetDataType() == ppl::common::DATATYPE_FLOAT16) { \
return PPLCUDARelationForwardImpFp16<Relation_##OPTYPE>(stream, \
input_shape0, (const half*)input0, input_shape1, \
(const half*)input1, output_shape, output); \
} else if (input_shape0->GetDataType() == ppl::common::DATATYPE_FLOAT32) { \
return PPLCUDARelationForwardImp<Relation_##OPTYPE, float>(stream, \
input_shape0, (const float*)input0, input_shape1, \
(const float*)input1, output_shape, output); \
} else if (input_shape0->GetDataType() == ppl::common::DATATYPE_INT64) { \
return PPLCUDARelationForwardImp<Relation_##OPTYPE, int64_t>(stream, \
input_shape0, (const int64_t*)input0, input_shape1, \
(const int64_t*)input1, output_shape, output); \
} else { \
return ppl::common::RC_UNSUPPORTED; \
} \
}
INSTANT(Equal);
INSTANT(Greater);
INSTANT(Less);
#undef INSTANT
| fbc4eab2647503ae0c91c69283f620c583e0ae57.cu | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/arithmetic/relation.h"
#include "ppl/common/types.h"
#include <cuda_fp16.h>
// Comparison operator selector shared by all relation kernels below.
enum RelationOpType {
Relation_Unknown = 0,
Relation_Equal,
Relation_Greater,
Relation_Less,
Relation_OpNum,
Relation_ForceWord = INT_MAX, // pin the enum's underlying size to int
};
// Eight packed halves: the per-thread unit of work for the vectorized
// NHWC fp16 path.
struct half8_ {
half x0;
half y0;
half z0;
half w0;
half x1;
half y1;
half z1;
half w1;
};
// Eight packed bools: the matching output unit for half8_ inputs.
struct bool8_ {
bool x0;
bool y0;
bool z0;
bool w0;
bool x1;
bool y1;
bool z1;
bool w1;
};
// Scalar comparison primitives, specialized per operator and element type.
template<RelationOpType op_type, typename T>
__device__ inline bool ppl_relation_scalar(T a, T b);
// NOTE(review): float equality uses an absolute 1e-6 tolerance rather than
// exact comparison -- confirm this matches the Equal semantics callers
// expect (and note 1e-6 is a double literal, promoting the comparison).
template<> __device__ inline bool ppl_relation_scalar<Relation_Equal, float>(float a, float b) {
return fabsf(a - b) < 1e-6;
}
template<> __device__ inline bool ppl_relation_scalar<Relation_Greater, float>(float a, float b) {
return a > b;
}
template<> __device__ inline bool ppl_relation_scalar<Relation_Less, float>(float a, float b) {
return a < b;
}
// Integer comparisons are exact.
template<> __device__ inline bool ppl_relation_scalar<Relation_Equal, int64_t>(int64_t a, int64_t b) {
return a == b;
}
template<> __device__ inline bool ppl_relation_scalar<Relation_Greater, int64_t>(int64_t a, int64_t b) {
return a > b;
}
template<> __device__ inline bool ppl_relation_scalar<Relation_Less, int64_t>(int64_t a, int64_t b) {
return a < b;
}
// fp16 primitives: the __heq/__hgt/__hlt intrinsics require SM60+ and
// CUDA 9+, so older targets compile to a constant false fallback.
template<RelationOpType op_type>
__device__ inline bool ppl_relation_scalar_fp16(half a, half b);
template <>
__device__ inline bool ppl_relation_scalar_fp16<Relation_Equal>(half a, half b)
{
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
return __heq(a, b);
#else
return 0;
#endif
}
template <>
__device__ inline bool ppl_relation_scalar_fp16<Relation_Greater>(half a, half b)
{
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
return __hgt(a, b);
#else
return 0;
#endif
}
template <>
__device__ inline bool ppl_relation_scalar_fp16<Relation_Less>(half a, half b)
{
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
return __hlt(a, b);
#else
return 0;
#endif
}
// "Vector" wrappers used by the fp16 kernel: the scalar overload is a
// pass-through; the half8_ overload applies the comparison lane-by-lane
// to all 8 packed halves.
template <RelationOpType op_type>
static __device__ inline bool ppl_relation_vector_fp16(half a, half b)
{
bool res;
res = ppl_relation_scalar_fp16<op_type>(a, b);
return res;
}
template <RelationOpType op_type>
static __device__ inline bool8_ ppl_relation_vector_fp16(half8_ a, half8_ b)
{
bool8_ res;
res.x0 = ppl_relation_scalar_fp16<op_type>(a.x0, b.x0);
res.y0 = ppl_relation_scalar_fp16<op_type>(a.y0, b.y0);
res.z0 = ppl_relation_scalar_fp16<op_type>(a.z0, b.z0);
res.w0 = ppl_relation_scalar_fp16<op_type>(a.w0, b.w0);
res.x1 = ppl_relation_scalar_fp16<op_type>(a.x1, b.x1);
res.y1 = ppl_relation_scalar_fp16<op_type>(a.y1, b.y1);
res.z1 = ppl_relation_scalar_fp16<op_type>(a.z1, b.z1);
res.w1 = ppl_relation_scalar_fp16<op_type>(a.w1, b.w1);
return res;
}
// Left-pads the lower-rank of the two shapes with 1s so both reach the same
// rank (NumPy-style broadcast alignment). Only the shorter shape is
// rewritten; when the ranks are already equal the else-branch copies
// shape1 onto itself (a no-op).
static void ppl_pad_tensor_shape(const ppl::nn::TensorShape *tensor_shape0,
const ppl::nn::TensorShape *tensor_shape1,
ppl::nn::TensorShape *pad_tensor_shape0,
ppl::nn::TensorShape *pad_tensor_shape1) {
int max_dims = std::max(tensor_shape0->GetDimCount(), tensor_shape1->GetDimCount());
if (pad_tensor_shape0->GetDimCount() < pad_tensor_shape1->GetDimCount()) {
pad_tensor_shape0->SetDimCount(max_dims);
// pad 1 to shape_min_pad's higher dim
int offset = max_dims - tensor_shape0->GetDimCount();
for (int i = 0; i < offset; i++) {
pad_tensor_shape0->SetDim(i, 1);
}
for (int i = offset; i < max_dims; i++) {
pad_tensor_shape0->SetDim(i, tensor_shape0->GetDim(i - offset));
}
} else {
pad_tensor_shape1->SetDimCount(max_dims);
// pad 1 to shape_min_pad's higher dim
int offset = max_dims - tensor_shape1->GetDimCount();
for (int i = 0; i < offset; i++) {
pad_tensor_shape1->SetDim(i, 1);
}
for (int i = offset; i < max_dims; i++) {
pad_tensor_shape1->SetDim(i, tensor_shape1->GetDim(i - offset));
}
}
}
// Counts how many dimensions differ between the two (rank-padded) input
// shapes. When exactly one dimension broadcasts, its index is written to
// `axis` (left untouched otherwise); the launcher uses it to pick a fast
// path. Fixes the parameter-name typo ("aixs") and folds the redundant
// second scan over the dims into the first pass.
static int ppl_get_num_broadcast_dims(const ppl::nn::TensorShape *tensor_shape0,
                const ppl::nn::TensorShape *tensor_shape1,
                int &axis) {
    ppl::nn::TensorShape pad_tensor_shape0 = *tensor_shape0;
    ppl::nn::TensorShape pad_tensor_shape1 = *tensor_shape1;
    ppl_pad_tensor_shape(tensor_shape0, tensor_shape1,
                         &pad_tensor_shape0, &pad_tensor_shape1);
    int dim_count = pad_tensor_shape0.GetDimCount();
    int num_broadcast_dims = 0;
    int last_mismatch = 0;
    // Single pass: count mismatching dims and remember the last one seen
    // (identical to the original's second-loop result when the count is 1).
    for (int it = 0; it < dim_count; ++it) {
        if (pad_tensor_shape0.GetDim(it) != pad_tensor_shape1.GetDim(it)) {
            ++num_broadcast_dims;
            last_mismatch = it;
        }
    }
    if (num_broadcast_dims == 1)
        axis = last_mismatch;
    return num_broadcast_dims;
}
// Compute per-dimension element strides for broadcasting two inputs to the
// output in plain row-major (NDARRAY) layout. Size-1 input dims get stride
// 0 so kernels broadcast them for free. Dim 1 (channel) is ceil-divided by
// packed_channel; for packed_channel == 1 (the non-vectorized paths) that
// division is a no-op.
// NOTE(review): unlike ppl_pad_tensor_shape this pads to the OUTPUT rank,
// and only one of the two shapes is padded -- presumably at least one input
// always matches the output's rank; verify at call sites.
void ppl_relation_prepare_strides(
const ppl::nn::TensorShape* tensor_shape0,
const ppl::nn::TensorShape* tensor_shape1,
const ppl::nn::TensorShape* tensor_shape_out,
const int packed_channel,
uint32_t* stride_in0,
uint32_t* stride_in1,
uint32_t* stride_out)
{
ppl::nn::TensorShape pad_tensor_shape0 = *tensor_shape0;
ppl::nn::TensorShape pad_tensor_shape1 = *tensor_shape1;
int max_dims = tensor_shape_out->GetDimCount();
if (pad_tensor_shape0.GetDimCount() < pad_tensor_shape1.GetDimCount()) {
pad_tensor_shape0.SetDimCount(max_dims);
// pad 1 to shape_min_pad's higher dim
int offset = max_dims - tensor_shape0->GetDimCount();
for (int i = 0; i < offset; i++) {
pad_tensor_shape0.SetDim(i, 1);
}
for (int i = offset; i < max_dims; i++) {
pad_tensor_shape0.SetDim(i, tensor_shape0->GetDim(i - offset));
}
} else {
pad_tensor_shape1.SetDimCount(max_dims);
// pad 1 to shape_min_pad's higher dim
int offset = max_dims - tensor_shape1->GetDimCount();
for (int i = 0; i < offset; i++) {
pad_tensor_shape1.SetDim(i, 1);
}
for (int i = offset; i < max_dims; i++) {
pad_tensor_shape1.SetDim(i, tensor_shape1->GetDim(i - offset));
}
}
const int dimCount = tensor_shape_out->GetDimCount();
uint32_t stride0 = 1;
uint32_t stride1 = 1;
uint32_t stride_out0 = 1;
// Standard innermost-to-outermost stride accumulation; a size-1 input dim
// is marked with stride 0 so the kernels re-read the same element.
for (int i = dimCount - 1; i >= 0; i--) {
stride_in0[i] = pad_tensor_shape0.GetDim(i) == 1 ? 0 : stride0;
stride_in1[i] = pad_tensor_shape1.GetDim(i) == 1 ? 0 : stride1;
stride_out[i] = stride_out0;
if (i == 1) { // for channel dim, div packed_channel
stride0 *= (pad_tensor_shape0.GetDim(i) + packed_channel - 1) / packed_channel;
stride1 *= (pad_tensor_shape1.GetDim(i) + packed_channel - 1) / packed_channel;
stride_out0 *= (tensor_shape_out->GetDim(i) + packed_channel - 1) / packed_channel;
} else {
stride0 *= pad_tensor_shape0.GetDim(i);
stride1 *= pad_tensor_shape1.GetDim(i);
stride_out0 *= tensor_shape_out->GetDim(i);
}
}
}
// Compute per-dimension element strides for broadcasting two inputs to one
// output when data is stored NHWC (channels-last) while shapes are given in
// logical NCHW order. A size-1 input dimension gets stride 0, so the kernels
// re-read the same element (free broadcasting). The channel dimension is
// ceil-divided by packed_channel because the vectorized NHWC path processes
// packed groups (e.g. 8 halves per thread).
void ppl_relation_prepare_strides_nhwc(
const ppl::nn::TensorShape *tensor_shape0,
const ppl::nn::TensorShape *tensor_shape1,
const ppl::nn::TensorShape *tensor_shape_out,
const int packed_channel,
uint32_t *stride_in0,
uint32_t *stride_in1,
uint32_t *stride_out)
{
// NHWC handling needs at least batch + channel dims on both inputs.
if (tensor_shape0->GetDimCount() < 2 || tensor_shape1->GetDimCount() < 2) return;
ppl::nn::TensorShape pad_tensor_shape0 = *tensor_shape0;
ppl::nn::TensorShape pad_tensor_shape1 = *tensor_shape1;
int max_dims = tensor_shape_out->GetDimCount();
// Left-pad the lower-rank shape with 1s up to the output rank.
// NOTE(review): only ONE of the two shapes is padded; if both inputs had
// fewer dims than the output the other would stay short -- presumably
// callers guarantee one input already has the output's rank; verify.
if (pad_tensor_shape0.GetDimCount() < pad_tensor_shape1.GetDimCount()) {
pad_tensor_shape0.SetDimCount(max_dims);
// pad 1 to shape_min_pad's higher dim
int offset = max_dims - tensor_shape0->GetDimCount();
for (int i = 0; i < offset; i++) {
pad_tensor_shape0.SetDim(i, 1);
}
for (int i = offset; i < max_dims; i++) {
pad_tensor_shape0.SetDim(i, tensor_shape0->GetDim(i - offset));
}
} else {
pad_tensor_shape1.SetDimCount(max_dims);
// pad 1 to shape_min_pad's higher dim
int offset = max_dims - tensor_shape1->GetDimCount();
for (int i = 0; i < offset; i++) {
pad_tensor_shape1.SetDim(i, 1);
}
for (int i = offset; i < max_dims; i++) {
pad_tensor_shape1.SetDim(i, tensor_shape1->GetDim(i - offset));
}
}
const int dimCount = tensor_shape_out->GetDimCount();
uint32_t stride0 = 1;
uint32_t stride1 = 1;
uint32_t stride_out0 = 1;
// Walk memory-order positions innermost to outermost, remapping each
// position to the logical (NCHW-ordered) dim it holds under NHWC layout:
// innermost memory position = channel dim (1), outermost = batch (0),
// the positions in between are the spatial dims shifted by one.
for (int stride_pos = dimCount - 1; stride_pos >= 0; stride_pos--) {
int i = stride_pos;
if (stride_pos == dimCount - 1) i = 1;
else if (stride_pos == 0) i = 0;
else i = stride_pos + 1;
stride_in0[stride_pos] = pad_tensor_shape0.GetDim(i) == 1 ? 0 : stride0;
stride_in1[stride_pos] = pad_tensor_shape1.GetDim(i) == 1 ? 0 : stride1;
stride_out[stride_pos] = stride_out0;
if (i == 1) { // for channel dim, div packed_channel
stride0 *= (pad_tensor_shape0.GetDim(i) + packed_channel - 1) / packed_channel;
stride1 *= (pad_tensor_shape1.GetDim(i) + packed_channel - 1) / packed_channel;
stride_out0 *= (tensor_shape_out->GetDim(i) + packed_channel - 1) / packed_channel;
} else {
stride0 *= pad_tensor_shape0.GetDim(i);
stride1 *= pad_tensor_shape1.GetDim(i);
stride_out0 *= tensor_shape_out->GetDim(i);
}
}
}
// Maximum tensor rank supported by the relation kernels below.
#define MAXDIMENSIONS 7
// Per-dimension element strides, passed to kernels by value. A stride of 0
// in stride_in0/stride_in1 marks a broadcast (size-1) input dimension.
struct RelationParam {
uint32_t stride_in0[MAXDIMENSIONS];
uint32_t stride_in1[MAXDIMENSIONS];
uint32_t stride_out[MAXDIMENSIONS];
};
// Element-wise relation (Equal/Greater/Less) for fp16 data with broadcasting
// driven by precomputed per-dim strides. T1 is the packed input type (half
// or half8_), T2 the matching output type (bool or bool8_); num_elems counts
// PACKED elements. Body is compiled out below SM60 / CUDA 9 (fp16 intrinsics).
template <RelationOpType op_type, typename T1, typename T2>
__global__ void ppl_cukernel_relation_fp16(
    const uint64_t num_elems,
    const int dim_count,
    RelationParam param,
    const T1* input0,
    const T1* input1,
    T2* output)
{
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
    // Widen before multiplying: blockIdx.x * blockDim.x is evaluated in
    // 32-bit unsigned and silently overflows for tensors with > 2^32
    // elements (num_elems is uint64_t).
    uint64_t index = (uint64_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= num_elems)
        return;
    uint64_t out_index = index;
    uint64_t offset0 = 0;
    uint64_t offset1 = 0;
    // Decompose the flat output index into per-dim coordinates, then rebuild
    // each input's offset; a stride of 0 pins a broadcast dimension.
    for (int i = 0; i < dim_count; i++) {
        uint64_t dim_off = index / param.stride_out[i];
        offset0 += dim_off * param.stride_in0[i];
        offset1 += dim_off * param.stride_in1[i];
        index = index % param.stride_out[i];
    }
    output[out_index] = ppl_relation_vector_fp16<op_type>(input0[offset0], input1[offset1]);
#endif
}
// Generic broadcasting relation kernel: decomposes the flat output index
// into per-dim coordinates via param.stride_out, then rebuilds each input's
// offset (stride 0 pins a broadcast dimension).
template<RelationOpType op_type, typename T>
__global__ void ppl_cukernel_relation(
    const uint64_t num_elems,
    const int dim_count,
    RelationParam param,
    const T *input0,
    const T* input1,
    bool *output) {
    // 64-bit multiply: the 32-bit product blockIdx.x * blockDim.x would
    // overflow for tensors with more than 2^32 elements.
    uint64_t index = (uint64_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= num_elems) return;
    uint64_t out_index = index;
    uint64_t offset0 = 0;
    uint64_t offset1 = 0;
    for (int i = 0; i < dim_count; i++) {
        uint64_t dim_off = index / param.stride_out[i];
        offset0 += dim_off * param.stride_in0[i];
        offset1 += dim_off * param.stride_in1[i];
        index = index % param.stride_out[i];
    }
    output[out_index] = ppl_relation_scalar<op_type, T>(input0[offset0], input1[offset1]);
}
// Plain element-wise relation: both inputs have identical shapes, no
// broadcasting, one thread per element.
template<RelationOpType op_type, typename T>
__global__ void ppl_cukernel_relation_naive(
    const uint64_t num_elems,
    const T *input0,
    const T* input1,
    bool *output) {
    // 64-bit multiply to avoid 32-bit overflow of blockIdx.x * blockDim.x
    // on tensors with more than 2^32 elements.
    uint64_t index = (uint64_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= num_elems) return;
    output[index] = ppl_relation_scalar<op_type, T>(input0[index], input1[index]);
}
// Relation where one operand broadcasts down to a single element:
// first_shorter selects which operand is the scalar one (it always reads
// element 0), while the other is indexed element-wise.
template<RelationOpType op_type, typename T>
__global__ void ppl_cukernel_relation_one_scalar(
    const uint64_t num_elems,
    const bool first_shorter,
    const T *input0,
    const T* input1,
    bool *output) {
    // 64-bit multiply to avoid 32-bit overflow of blockIdx.x * blockDim.x
    // on tensors with more than 2^32 elements.
    uint64_t index = (uint64_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= num_elems) return;
    uint64_t offset0 = first_shorter ? 0 : index;
    uint64_t offset1 = first_shorter ? index : 0;
    output[index] = ppl_relation_scalar<op_type, T>(input0[offset0], input1[offset1]);
}
// Relation where exactly one axis broadcasts. The host computes
// inner_dim = product of dims after the broadcast axis and
// outer_stride = inner_dim * (size of the broadcast axis); the smaller
// operand is indexed with that axis collapsed out.
template<RelationOpType op_type, typename T>
__global__ void ppl_cukernel_relation_one_broadcast(
    const uint64_t num_elems,
    const int outer_stride,
    const int inner_dim,
    const bool first_shorter,
    const T *input0,
    const T* input1,
    bool *output) {
    // 64-bit index math throughout: both the blockIdx.x * blockDim.x
    // product and the previous int inner/outer indices could overflow or
    // truncate for tensors with more than 2^31 elements.
    uint64_t index = (uint64_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= num_elems) return;
    uint64_t inner_idx = index % (uint64_t)inner_dim;
    uint64_t outer_idx = index / (uint64_t)outer_stride;
    // Position in the smaller operand with the broadcast axis dropped.
    uint64_t calc_index = outer_idx * inner_dim + inner_idx;
    uint64_t offset0 = first_shorter ? calc_index : index;
    uint64_t offset1 = first_shorter ? index : calc_index;
    output[index] = ppl_relation_scalar<op_type, T>(input0[offset0], input1[offset1]);
}
// Host-side launcher for the fp16 relation kernels. Dispatches on the
// output data format: NDARRAY runs one half per thread; NHWC runs a
// vectorized path (8 packed halves per thread via half8_/bool8_) and
// therefore requires both inputs' channel dim (logical dim 1) to be a
// multiple of 8. Returns RC_UNSUPPORTED for anything else.
template <RelationOpType op_type>
ppl::common::RetCode PPLCUDARelationForwardImpFp16(
cudaStream_t stream,
const ppl::nn::TensorShape* input_shape0,
const half* input0,
const ppl::nn::TensorShape* input_shape1,
const half* input1,
const ppl::nn::TensorShape* output_shape,
bool* output)
{
RelationParam param;
uint64_t num_elems = output_shape->GetElementsIncludingPadding();
int dim_count = output_shape->GetDimCount();
int block_size = 256;
// SHIFT/PACKED are the log2 and value of the per-thread packing factor;
// the element count is right-shifted accordingly before sizing the grid.
// (No comments inside the macro body: '//' would swallow the trailing '\'.)
#define SWITCH_CASE(FORMAT, TYPE, TYPE2, SHIFT, PACKED) \
case FORMAT: { \
int channel_shift = SHIFT; \
int packed_channel = PACKED; \
uint64_t grid_size = ((num_elems >> channel_shift) + block_size - 1) / block_size; \
ppl_relation_prepare_strides(input_shape0, input_shape1, output_shape, packed_channel, param.stride_in0, param.stride_in1, param.stride_out); \
ppl_cukernel_relation_fp16<op_type, TYPE, TYPE2><<<grid_size, \
block_size, \
0, \
stream>>>(num_elems >> channel_shift, dim_count, param, (const TYPE*)input0, (const TYPE*)input1, (TYPE2*)output); \
return ppl::common::RC_SUCCESS; \
}
switch (output_shape->GetDataFormat()) {
SWITCH_CASE(ppl::common::DATAFORMAT_NDARRAY, half, bool, 0, 1);
case ppl::common::DATAFORMAT_NHWC: {
// Vectorized channels-last path: 8 halves per thread.
bool can_broadcast = (input_shape0->GetDimCount() >= 2) && (input_shape1->GetDimCount() >= 2);
if (!can_broadcast) return ppl::common::RC_UNSUPPORTED;
// Both channel dims must be multiples of 8 for the half8_ packing.
if ((input_shape0->GetDim(1) & 0x7) || (input_shape1->GetDim(1) & 0x7)) return ppl::common::RC_UNSUPPORTED;
int channel_shift = 3;
int packed_channel = 8;
uint64_t grid_size = ((num_elems >> channel_shift) + block_size - 1) / block_size;
ppl_relation_prepare_strides_nhwc(input_shape0, input_shape1, output_shape, packed_channel, param.stride_in0, param.stride_in1, param.stride_out);
ppl_cukernel_relation_fp16<op_type, half8_, bool8_><<<grid_size, block_size, 0, stream>>>(num_elems >> channel_shift, dim_count, param, (const half8_ *)input0, (const half8_ *)input1, (bool8_ *)output);
return ppl::common::RC_SUCCESS;
}
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef SWITCH_CASE
}
// Host-side launcher for float/int64 relation ops. Picks the cheapest
// kernel based on how the two input shapes broadcast against each other:
//   - no broadcast dims         -> element-wise "naive" kernel
//   - every dim broadcasts      -> one operand is a single scalar (one_scalar)
//   - exactly one broadcast dim -> single-axis kernel (one_broadcast)
//   - otherwise                 -> generic stride-decomposition kernel
template<RelationOpType op_type, typename T>
ppl::common::RetCode PPLCUDARelationForwardImp(
cudaStream_t stream,
const ppl::nn::TensorShape* input_shape0,
const T *input0,
const ppl::nn::TensorShape* input_shape1,
const T *input1,
const ppl::nn::TensorShape* output_shape,
bool *output) {
RelationParam param;
uint64_t num_elems = output_shape->GetElementsIncludingPadding();
int dim_count = output_shape->GetDimCount();
int block_size = 256;
int axis = 0;
// axis receives the broadcast dim index when exactly one dim broadcasts.
int num_broadcast_dims = ppl_get_num_broadcast_dims(input_shape0, input_shape1, axis);
if (num_broadcast_dims == 0) {
// Identical (rank-padded) shapes: plain element-wise kernel.
uint64_t grid_size = (num_elems + block_size - 1) / block_size;
ppl_cukernel_relation_naive<op_type, T><<<grid_size,
block_size, 0, stream>>>(num_elems,
(const T*)input0, (const T*)input1, (bool*)output);
} else if (num_broadcast_dims == dim_count) {
// Every dim broadcasts: the smaller operand is effectively one scalar.
uint64_t grid_size = (num_elems + block_size - 1) / block_size;
// first_shorter = true when input0 is the smaller (broadcast) operand.
bool first_shorter = false;
if (input_shape0->GetRealDimCount() == input_shape1->GetRealDimCount() &&
input_shape0->GetDim(axis) < input_shape1->GetDim(axis)) {
first_shorter = true;
}
if (input_shape0->GetRealDimCount() < input_shape1->GetRealDimCount()) {
first_shorter = true;
}
ppl_cukernel_relation_one_scalar<op_type, T><<<grid_size,
block_size, 0, stream>>>(num_elems, first_shorter,
(const T*)input0, (const T*)input1, (bool*)output);
} else if (num_broadcast_dims == 1) {
// Single broadcast axis: precompute the collapsed-axis geometry.
// inner_dim = product of output dims after the axis;
// outer_stride additionally spans the broadcast axis itself.
int inner_dim = 1;
uint64_t grid_size = (num_elems + block_size - 1) / block_size;
for(int it = axis + 1; it < dim_count; inner_dim *= output_shape->GetDim(it), ++it);
int outer_stride = inner_dim * output_shape->GetDim(axis);
bool first_shorter = false;
if (input_shape0->GetRealDimCount() == input_shape1->GetRealDimCount() &&
input_shape0->GetDim(axis) < input_shape1->GetDim(axis)) {
first_shorter = true;
}
if (input_shape0->GetRealDimCount() < input_shape1->GetRealDimCount()) {
first_shorter = true;
}
ppl_cukernel_relation_one_broadcast<op_type, T><<<grid_size,
block_size, 0, stream>>>(num_elems, outer_stride, inner_dim, first_shorter,
(const T*)input0, (const T*)input1, (bool*)output);
} else {
// General case: full per-dim stride decomposition.
int packed_channel = 1;
uint64_t grid_size = (num_elems + block_size - 1) / block_size;
ppl_relation_prepare_strides(input_shape0, input_shape1,
output_shape, packed_channel, param.stride_in0, param.stride_in1, param.stride_out);
ppl_cukernel_relation<op_type, T><<<grid_size,
block_size, 0, stream>>>(num_elems, dim_count, param,
(const T*)input0, (const T*)input1, (bool*)output);
}
return ppl::common::RC_SUCCESS;
}
// Generates the public entry points PPLCUDARelation{Equal,Greater,Less}ForwardImp.
// Each dispatches on input_shape0's data type (fp16 / fp32 / int64) and
// forwards to the matching typed implementation; other dtypes are rejected
// with RC_UNSUPPORTED.
// (No comments inside the macro body: '//' would swallow the trailing '\'.)
#define INSTANT(OPTYPE) \
ppl::common::RetCode PPLCUDARelation##OPTYPE##ForwardImp( \
cudaStream_t stream, \
const ppl::nn::TensorShape* input_shape0, \
const void *input0, \
const ppl::nn::TensorShape* input_shape1, \
const void *input1, \
const ppl::nn::TensorShape* output_shape, \
bool *output) { \
if (input_shape0->GetDataType() == ppl::common::DATATYPE_FLOAT16) { \
return PPLCUDARelationForwardImpFp16<Relation_##OPTYPE>(stream, \
input_shape0, (const half*)input0, input_shape1, \
(const half*)input1, output_shape, output); \
} else if (input_shape0->GetDataType() == ppl::common::DATATYPE_FLOAT32) { \
return PPLCUDARelationForwardImp<Relation_##OPTYPE, float>(stream, \
input_shape0, (const float*)input0, input_shape1, \
(const float*)input1, output_shape, output); \
} else if (input_shape0->GetDataType() == ppl::common::DATATYPE_INT64) { \
return PPLCUDARelationForwardImp<Relation_##OPTYPE, int64_t>(stream, \
input_shape0, (const int64_t*)input0, input_shape1, \
(const int64_t*)input1, output_shape, output); \
} else { \
return ppl::common::RC_UNSUPPORTED; \
} \
}
INSTANT(Equal);
INSTANT(Greater);
INSTANT(Less);
#undef INSTANT
|
ce7f07085c24c1225f7431c4794b182701b7088f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// nvcc -o cube cube.cu
#include <stdio.h>
// Writes d_out[i] = d_in[i]^3, one thread per element.
// Assumes a single-block launch with exactly one thread per array element.
__global__ void cube(float *d_out, float *d_in) {
    const int i = threadIdx.x;
    const float v = d_in[i];
    d_out[i] = v * v * v;
}
// Demo driver: cubes 96 floats on the GPU and prints the results 4 per row.
// NOTE(review): HIP API return codes are ignored throughout; wrap calls in
// an error-check macro for anything beyond a demo.
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 96;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
hipMalloc((void**) &d_in, ARRAY_BYTES);
hipMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
// launch the kernel: one block, one thread per element
hipLaunchKernelGGL(( cube), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);
// copy back the result array to the CPU
// (the blocking copy also synchronizes with the kernel above)
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
hipFree(d_in);
hipFree(d_out);
return 0;
} | ce7f07085c24c1225f7431c4794b182701b7088f.cu | // nvcc -o cube cube.cu
#include <stdio.h>
// Writes d_out[i] = d_in[i]^3, one thread per element.
// Assumes a single-block launch with exactly one thread per array element.
__global__ void cube(float *d_out, float *d_in) {
    const int i = threadIdx.x;
    const float v = d_in[i];
    d_out[i] = v * v * v;
}
// Demo driver: cubes 96 floats on the GPU and prints the results 4 per row.
// NOTE(review): CUDA API return codes are ignored throughout; wrap calls in
// an error-check macro for anything beyond a demo.
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 96;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
cudaMalloc((void**) &d_in, ARRAY_BYTES);
cudaMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel: one block, one thread per element
cube<<<1, ARRAY_SIZE>>>(d_out, d_in);
// copy back the result array to the CPU
// (the blocking copy also synchronizes with the kernel above)
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
} |
e5eeeb680b4f20ee97b31990f5a58c7887164737.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "deform_conv_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
// Gathers deformable-convolution input patches into column form (im2col
// with learned sampling offsets). Computes the convolved output extent
// from pad/stride/dilation, then launches one thread per
// (channel, out_y, out_x, image-in-batch) column element on the current
// stream. Element types float/double/half are dispatched via
// AT_DISPATCH_FLOATING_TYPES_AND_HALF.
void deformable_im2col(Tensor data_im, Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group,
Tensor data_col) {
// num_axes should be smaller than block size
// todo: check parallel_imgs is correctly passed in
// Standard convolution output-extent formula.
int height_col =
(height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col =
(width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
// One work-item per output column element across the image batch.
int num_kernels = channels * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
hipLaunchKernelGGL(( deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)),
dim3(THREADS_PER_BLOCK), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_kernels, data_im_, data_offset_, height, width, ksize_h,
ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, parallel_imgs, channels,
deformable_group, height_col, width_col, data_col_);
}));
// Surface any kernel-launch error immediately.
AT_CUDA_CHECK(hipGetLastError());
}
// Backward counterpart of deformable_im2col: scatters gradients from the
// column matrix `data_col` back into the input-gradient tensor `grad_im`.
// One GPU thread is launched per column entry (channel x kernel-tap x
// output-pixel x image).
void deformable_col2im(Tensor data_col, Tensor data_offset, const int channels,
                       const int height, const int width, const int ksize_h,
                       const int ksize_w, const int pad_h, const int pad_w,
                       const int stride_h, const int stride_w,
                       const int dilation_h, const int dilation_w,
                       const int parallel_imgs, const int deformable_group,
                       Tensor grad_im) {
  // todo: make sure parallel_imgs is passed in correctly
  // Standard convolution output-size formula (with dilation).
  int height_col =
      (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
  int width_col =
      (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
  // One thread per column-buffer element (includes the kernel-tap axes,
  // unlike the forward launch).
  int num_kernels =
      channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
  int channel_per_deformable_group = channels / deformable_group;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      data_col.scalar_type(), "deformable_col2im_gpu", ([&] {
        const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
        const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
        scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>();
        hipLaunchKernelGGL(( deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)),
            dim3(THREADS_PER_BLOCK), 0,
            at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
            num_kernels, data_col_, data_offset_, channels, height, width,
            ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h,
            dilation_w, channel_per_deformable_group, parallel_imgs,
            deformable_group, height_col, width_col, grad_im_);
      }));
  // Surface launch-configuration errors from the asynchronous launch.
  AT_CUDA_CHECK(hipGetLastError());
}
// Computes the gradient with respect to the learned sampling offsets:
// accumulates contributions from the column-matrix gradient `data_col` and
// the original input `data_im` into `grad_offset`. One GPU thread per
// offset element (2 * kH * kW offset channels per deformable group).
void deformable_col2im_coord(
    Tensor data_col, Tensor data_im, Tensor data_offset, const int channels,
    const int height, const int width, const int ksize_h, const int ksize_w,
    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w, const int parallel_imgs,
    const int deformable_group, Tensor grad_offset) {
  // Standard convolution output-size formula (with dilation).
  int height_col =
      (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
  int width_col =
      (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
  // One thread per offset value: x/y offsets for every kernel tap, group,
  // output pixel and image.
  int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w *
                    deformable_group * parallel_imgs;
  // Number of input-feature columns each deformable group is responsible for.
  int channel_per_deformable_group =
      channels * ksize_h * ksize_w / deformable_group;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] {
        const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
        const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
        const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
        scalar_t *grad_offset_ = grad_offset.data_ptr<scalar_t>();
        hipLaunchKernelGGL(( deformable_col2im_coord_gpu_kernel),
            dim3(GET_BLOCKS(num_kernels)), dim3(THREADS_PER_BLOCK), 0,
            at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
            num_kernels, data_col_, data_im_, data_offset_, channels, height,
            width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w,
            dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs,
            2 * ksize_h * ksize_w * deformable_group, deformable_group,
            height_col, width_col, grad_offset_);
      }));
  // Surface launch-configuration errors from the asynchronous launch.
  AT_CUDA_CHECK(hipGetLastError());
}
// Validates tensor ranks and sizes for deformable convolution before any
// kernel work. `gradOutput` may be NULL (forward pass); when non-NULL its
// channel and spatial sizes are checked against the computed output shape.
// NOTE(review): the "%s"/"%d"/"%ld" placeholders are printf-style, but
// TORCH_CHECK stream-concatenates its arguments instead of interpolating,
// so failures print the literal format string followed by the values --
// harmless for diagnostics, but worth cleaning up.
void deform_conv_shape_check(Tensor input, Tensor offset, Tensor *gradOutput,
                             Tensor weight, int kH, int kW, int dH, int dW,
                             int padH, int padW, int dilationH, int dilationW,
                             int group, int deformable_group) {
  TORCH_CHECK(
      weight.ndimension() == 4,
      "4D weight tensor (nOutputPlane,nInputPlane,kH,kW) expected, but got: %s",
      weight.ndimension());
  TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous");
  TORCH_CHECK(kW > 0 && kH > 0,
              "kernel size should be greater than zero, but got kH: %d kW: %d",
              kH, kW);
  TORCH_CHECK((weight.size(2) == kH && weight.size(3) == kW),
              "kernel size should be consistent with weight, ",
              "but got kH: %d kW: %d weight.size(2): %d, weight.size(3): %d",
              kH, kW, weight.size(2), weight.size(3));
  TORCH_CHECK(dW > 0 && dH > 0,
              "stride should be greater than zero, but got dH: %d dW: %d", dH,
              dW);
  TORCH_CHECK(
      dilationW > 0 && dilationH > 0,
      "dilation should be greater than 0, but got dilationH: %d dilationW: %d",
      dilationH, dilationW);
  // Dimension indices of (channels, height, width); shifted by one when the
  // input carries a leading batch dimension.
  int ndim = input.ndimension();
  int dimf = 0;
  int dimh = 1;
  int dimw = 2;
  if (ndim == 4) {
    dimf++;
    dimh++;
    dimw++;
  }
  TORCH_CHECK(ndim == 3 || ndim == 4,
              "3D or 4D input tensor expected but got: %s", ndim);
  // Input channels are inferred from the (grouped) weight tensor, not from
  // the input itself, and then cross-checked below.
  long nInputPlane = weight.size(1) * group;
  long inputHeight = input.size(dimh);
  long inputWidth = input.size(dimw);
  long nOutputPlane = weight.size(0);
  // Standard convolution output-size formula (with dilation).
  long outputHeight =
      (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1;
  long outputWidth =
      (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1;
  TORCH_CHECK(nInputPlane % deformable_group == 0,
              "input channels must divide deformable group size");
  if (outputWidth < 1 || outputHeight < 1)
    AT_ERROR(
        "Given input size: (%ld x %ld x %ld). "
        "Calculated output size: (%ld x %ld x %ld). Output size is too small",
        nInputPlane, inputHeight, inputWidth, nOutputPlane, outputHeight,
        outputWidth);
  TORCH_CHECK(input.size(1) == nInputPlane,
              "invalid number of input planes, expected: %d, but got: %d",
              nInputPlane, input.size(1));
  TORCH_CHECK((inputHeight >= kH && inputWidth >= kW),
              "input image is smaller than kernel");
  TORCH_CHECK(
      (offset.size(2) == outputHeight && offset.size(3) == outputWidth),
      "invalid spatial size of offset, expected height: %d width: %d, but "
      "got height: %d width: %d",
      outputHeight, outputWidth, offset.size(2), offset.size(3));
  // Offsets carry an (x, y) pair per kernel tap per deformable group.
  TORCH_CHECK((offset.size(1) == deformable_group * 2 * kH * kW),
              "invalid number of channels of offset");
  if (gradOutput != NULL) {
    TORCH_CHECK(
        gradOutput->size(dimf) == nOutputPlane,
        "invalid number of gradOutput planes, expected: %d, but got: %d",
        nOutputPlane, gradOutput->size(dimf));
    TORCH_CHECK(
        (gradOutput->size(dimh) == outputHeight &&
         gradOutput->size(dimw) == outputWidth),
        "invalid size of gradOutput, expected height: %d width: %d , but "
        "got height: %d width: %d",
        outputHeight, outputWidth, gradOutput->size(dimh),
        gradOutput->size(dimw));
  }
}
// Forward pass of deformable convolution: im2col with learned offsets, then
// a grouped GEMM against the weights. Batches are processed `im2col_step`
// images at a time to bound the size of the column buffer; batchSize is
// assumed divisible by im2col_step (see todo below). `output`, `columns`
// and `ones` are caller-provided buffers that are reshaped in place.
void DeformConvForwardCUDAKernelLauncher(Tensor input, Tensor weight,
                                         Tensor offset, Tensor output,
                                         Tensor columns, Tensor ones, int kW,
                                         int kH, int dW, int dH, int padW,
                                         int padH, int dilationW, int dilationH,
                                         int group, int deformable_group,
                                         int im2col_step) {
  // todo: resize columns to include im2col: done
  // todo: add im2col_step as input
  // todo: add new output buffer and transpose it to output (or directly
  // transpose output) todo: possibly change data indexing because of
  // parallel_imgs
  deform_conv_shape_check(input, offset, NULL, weight, kH, kW, dH, dW, padH,
                          padW, dilationH, dilationW, group, deformable_group);
  at::DeviceGuard guard(input.device());
  // batch == 0 records that a leading batch dim was added and must be
  // stripped again before returning.
  int batch = 1;
  if (input.ndimension() == 3) {
    // Force batch
    batch = 0;
    input.unsqueeze_(0);
    offset.unsqueeze_(0);
  }
  // todo: assert batchsize dividable by im2col_step
  long batchSize = input.size(0);
  long nInputPlane = input.size(1);
  long inputHeight = input.size(2);
  long inputWidth = input.size(3);
  long nOutputPlane = weight.size(0);
  // Standard convolution output-size formula (with dilation).
  long outputWidth =
      (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1;
  long outputHeight =
      (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1;
  TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset");
  output = output.view({batchSize / im2col_step, im2col_step, nOutputPlane,
                        outputHeight, outputWidth});
  // Column buffer for one im2col step: (C*kH*kW) x (step*outH*outW).
  columns = at::zeros(
      {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth},
      input.options());
  if (ones.ndimension() != 2 ||
      ones.size(0) * ones.size(1) < outputHeight * outputWidth) {
    ones = at::ones({outputHeight, outputWidth}, input.options());
  }
  // Fold im2col_step into a leading "chunk" dimension so each loop
  // iteration below processes im2col_step images at once.
  input = input.view({batchSize / im2col_step, im2col_step, nInputPlane,
                      inputHeight, inputWidth});
  offset =
      offset.view({batchSize / im2col_step, im2col_step,
                   deformable_group * 2 * kH * kW, outputHeight, outputWidth});
  // Accumulate GEMM results here; transposed into `output` at the end.
  Tensor output_buffer = at::zeros({batchSize / im2col_step, nOutputPlane,
                                    im2col_step * outputHeight, outputWidth},
                                   output.options());
  output_buffer = output_buffer.view(
      {output_buffer.size(0), group, output_buffer.size(1) / group,
       output_buffer.size(2), output_buffer.size(3)});
  for (int elt = 0; elt < batchSize / im2col_step; elt++) {
    deformable_im2col(input[elt], offset[elt], nInputPlane, inputHeight,
                      inputWidth, kH, kW, padH, padW, dH, dW, dilationH,
                      dilationW, im2col_step, deformable_group, columns);
    // Split the column buffer and weights per convolution group, do one
    // GEMM per group, then flatten back for the next im2col call.
    columns = columns.view({group, columns.size(0) / group, columns.size(1)});
    weight = weight.view({group, weight.size(0) / group, weight.size(1),
                          weight.size(2), weight.size(3)});
    for (int g = 0; g < group; g++) {
      output_buffer[elt][g] = output_buffer[elt][g]
                                  .flatten(1)
                                  .addmm_(weight[g].flatten(1), columns[g])
                                  .view_as(output_buffer[elt][g]);
    }
    columns =
        columns.view({columns.size(0) * columns.size(1), columns.size(2)});
  }
  // Undo the group split, then swap (nOutputPlane, im2col_step) so the
  // layout matches `output`'s (chunk, step, plane, H, W) view.
  output_buffer = output_buffer.view(
      {output_buffer.size(0), output_buffer.size(1) * output_buffer.size(2),
       output_buffer.size(3), output_buffer.size(4)});
  output_buffer = output_buffer.view({batchSize / im2col_step, nOutputPlane,
                                      im2col_step, outputHeight, outputWidth});
  output_buffer.transpose_(1, 2);
  output.copy_(output_buffer);
  output = output.view({batchSize, nOutputPlane, outputHeight, outputWidth});
  // Restore the caller-visible shapes of the in-place-reshaped tensors.
  input = input.view({batchSize, nInputPlane, inputHeight, inputWidth});
  offset = offset.view(
      {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth});
  if (batch == 0) {
    output = output.view({nOutputPlane, outputHeight, outputWidth});
    input = input.view({nInputPlane, inputHeight, inputWidth});
    offset = offset.view({offset.size(1), offset.size(2), offset.size(3)});
  }
}
// Backward pass w.r.t. the input and the offsets: reconstructs the column
// gradient via weight^T @ gradOutput per group, then scatters it into
// gradInput (col2im) and gradOffset (col2im_coord). Processes im2col_step
// images per iteration; batchSize is assumed divisible by im2col_step.
void DeformConvBackwardInputCUDAKernelLauncher(
    Tensor input, Tensor offset, Tensor gradOutput, Tensor gradInput,
    Tensor gradOffset, Tensor weight, Tensor columns, int kW, int kH, int dW,
    int dH, int padW, int padH, int dilationW, int dilationH, int group,
    int deformable_group, int im2col_step) {
  deform_conv_shape_check(input, offset, &gradOutput, weight, kH, kW, dH, dW,
                          padH, padW, dilationH, dilationW, group,
                          deformable_group);
  at::DeviceGuard guard(input.device());
  // batch == 0 records that a leading batch dim was added and must be
  // stripped again before returning.
  int batch = 1;
  if (input.ndimension() == 3) {
    // Force batch
    batch = 0;
    input = input.view({1, input.size(0), input.size(1), input.size(2)});
    offset = offset.view({1, offset.size(0), offset.size(1), offset.size(2)});
    gradOutput = gradOutput.view(
        {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)});
  }
  long batchSize = input.size(0);
  long nInputPlane = input.size(1);
  long inputHeight = input.size(2);
  long inputWidth = input.size(3);
  long nOutputPlane = weight.size(0);
  // Standard convolution output-size formula (with dilation).
  long outputWidth =
      (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1;
  long outputHeight =
      (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1;
  // Fix: a stray `3` argument was previously passed to TORCH_CHECK here; it
  // was stream-concatenated into the error message. Removed to match the
  // equivalent checks in the forward/parameter launchers.
  TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset");
  gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth});
  // Column-gradient buffer for one im2col step: (C*kH*kW) x (step*outH*outW).
  columns = at::zeros(
      {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth},
      input.options());
  // change order of grad output: fold im2col_step into a leading chunk dim
  // and move the plane axis in front of the step axis for the per-group GEMM.
  gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step,
                                nOutputPlane, outputHeight, outputWidth});
  gradOutput.transpose_(1, 2);
  gradInput = gradInput.view({batchSize / im2col_step, im2col_step, nInputPlane,
                              inputHeight, inputWidth});
  input = input.view({batchSize / im2col_step, im2col_step, nInputPlane,
                      inputHeight, inputWidth});
  gradOffset = gradOffset.view({batchSize / im2col_step, im2col_step,
                                deformable_group * 2 * kH * kW, outputHeight,
                                outputWidth});
  offset =
      offset.view({batchSize / im2col_step, im2col_step,
                   deformable_group * 2 * kH * kW, outputHeight, outputWidth});
  for (int elt = 0; elt < batchSize / im2col_step; elt++) {
    // divide into groups
    columns = columns.view({group, columns.size(0) / group, columns.size(1)});
    weight = weight.view({group, weight.size(0) / group, weight.size(1),
                          weight.size(2), weight.size(3)});
    gradOutput = gradOutput.view(
        {gradOutput.size(0), group, gradOutput.size(1) / group,
         gradOutput.size(2), gradOutput.size(3), gradOutput.size(4)});
    for (int g = 0; g < group; g++) {
      // beta = 0, alpha = 1: overwrite columns[g] with W^T @ dOut.
      columns[g] = columns[g].addmm_(weight[g].flatten(1).transpose(0, 1),
                                     gradOutput[elt][g].flatten(1), 0.0f, 1.0f);
    }
    columns =
        columns.view({columns.size(0) * columns.size(1), columns.size(2)});
    gradOutput = gradOutput.view(
        {gradOutput.size(0), gradOutput.size(1) * gradOutput.size(2),
         gradOutput.size(3), gradOutput.size(4), gradOutput.size(5)});
    // Scatter the column gradient into the offset and input gradients.
    deformable_col2im_coord(columns, input[elt], offset[elt], nInputPlane,
                            inputHeight, inputWidth, kH, kW, padH, padW, dH, dW,
                            dilationH, dilationW, im2col_step, deformable_group,
                            gradOffset[elt]);
    deformable_col2im(columns, offset[elt], nInputPlane, inputHeight,
                      inputWidth, kH, kW, padH, padW, dH, dW, dilationH,
                      dilationW, im2col_step, deformable_group, gradInput[elt]);
  }
  // Restore the caller-visible shapes of the in-place-reshaped tensors.
  gradOutput.transpose_(1, 2);
  gradOutput =
      gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth});
  gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth});
  input = input.view({batchSize, nInputPlane, inputHeight, inputWidth});
  gradOffset = gradOffset.view(
      {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth});
  offset = offset.view(
      {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth});
  if (batch == 0) {
    gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth});
    input = input.view({nInputPlane, inputHeight, inputWidth});
    gradInput = gradInput.view({nInputPlane, inputHeight, inputWidth});
    offset = offset.view({offset.size(1), offset.size(2), offset.size(3)});
    gradOffset =
        gradOffset.view({offset.size(1), offset.size(2), offset.size(3)});
  }
}
// Backward pass w.r.t. the weights: re-runs deformable im2col on the saved
// input/offsets, then accumulates gradWeight += scale * (dOut @ columns^T)
// per convolution group. Processes im2col_step images per iteration;
// batchSize is assumed divisible by im2col_step.
void DeformConvBackwardParametersCUDAKernelLauncher(
    Tensor input, Tensor offset, Tensor gradOutput, Tensor gradWeight,
    Tensor columns, Tensor ones, int kW, int kH, int dW, int dH, int padW,
    int padH, int dilationW, int dilationH, int group, int deformable_group,
    float scale, int im2col_step) {
  // todo: transpose and reshape outGrad
  // todo: reshape columns
  // todo: add im2col_step as input
  deform_conv_shape_check(input, offset, &gradOutput, gradWeight, kH, kW, dH,
                          dW, padH, padW, dilationH, dilationW, group,
                          deformable_group);
  at::DeviceGuard guard(input.device());
  // batch == 0 records that a leading batch dim was added and must be
  // stripped again before returning.
  int batch = 1;
  if (input.ndimension() == 3) {
    // Force batch
    batch = 0;
    input = input.view(
        at::IntList({1, input.size(0), input.size(1), input.size(2)}));
    gradOutput = gradOutput.view(
        {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)});
  }
  long batchSize = input.size(0);
  long nInputPlane = input.size(1);
  long inputHeight = input.size(2);
  long inputWidth = input.size(3);
  long nOutputPlane = gradWeight.size(0);
  // Standard convolution output-size formula (with dilation).
  long outputWidth =
      (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1;
  long outputHeight =
      (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1;
  TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset");
  // Column buffer for one im2col step: (C*kH*kW) x (step*outH*outW).
  columns = at::zeros(
      {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth},
      input.options());
  gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step,
                                nOutputPlane, outputHeight, outputWidth});
  gradOutput.transpose_(1, 2);
  // Materialize the transposed gradOutput contiguously so it can be viewed
  // as (chunk, plane, step*outH, outW) for the per-group GEMM below.
  Tensor gradOutputBuffer = at::zeros_like(gradOutput);
  gradOutputBuffer =
      gradOutputBuffer.view({batchSize / im2col_step, nOutputPlane, im2col_step,
                             outputHeight, outputWidth});
  gradOutputBuffer = gradOutputBuffer.contiguous();
  gradOutputBuffer.copy_(gradOutput);
  gradOutputBuffer =
      gradOutputBuffer.view({batchSize / im2col_step, nOutputPlane,
                             im2col_step * outputHeight, outputWidth});
  // Undo the transpose on the caller's gradOutput now that it was copied.
  gradOutput.transpose_(1, 2);
  gradOutput =
      gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth});
  // Fold im2col_step into a leading chunk dimension.
  input = input.view({batchSize / im2col_step, im2col_step, nInputPlane,
                      inputHeight, inputWidth});
  offset =
      offset.view({batchSize / im2col_step, im2col_step,
                   deformable_group * 2 * kH * kW, outputHeight, outputWidth});
  for (int elt = 0; elt < batchSize / im2col_step; elt++) {
    deformable_im2col(input[elt], offset[elt], nInputPlane, inputHeight,
                      inputWidth, kH, kW, padH, padW, dH, dW, dilationH,
                      dilationW, im2col_step, deformable_group, columns);
    // divide into group
    gradOutputBuffer = gradOutputBuffer.view(
        {gradOutputBuffer.size(0), group, gradOutputBuffer.size(1) / group,
         gradOutputBuffer.size(2), gradOutputBuffer.size(3)});
    columns = columns.view({group, columns.size(0) / group, columns.size(1)});
    gradWeight =
        gradWeight.view({group, gradWeight.size(0) / group, gradWeight.size(1),
                         gradWeight.size(2), gradWeight.size(3)});
    for (int g = 0; g < group; g++) {
      // beta = 1, alpha = scale: accumulate scaled dOut @ columns^T.
      gradWeight[g] = gradWeight[g]
                          .flatten(1)
                          .addmm_(gradOutputBuffer[elt][g].flatten(1),
                                  columns[g].transpose(1, 0), 1.0, scale)
                          .view_as(gradWeight[g]);
    }
    // Flatten the group splits back for the next iteration.
    gradOutputBuffer = gradOutputBuffer.view(
        {gradOutputBuffer.size(0),
         gradOutputBuffer.size(1) * gradOutputBuffer.size(2),
         gradOutputBuffer.size(3), gradOutputBuffer.size(4)});
    columns =
        columns.view({columns.size(0) * columns.size(1), columns.size(2)});
    gradWeight = gradWeight.view({gradWeight.size(0) * gradWeight.size(1),
                                  gradWeight.size(2), gradWeight.size(3),
                                  gradWeight.size(4)});
  }
  // Restore the caller-visible shapes of the in-place-reshaped tensors.
  input = input.view({batchSize, nInputPlane, inputHeight, inputWidth});
  offset = offset.view(
      {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth});
  if (batch == 0) {
    gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth});
    input = input.view({nInputPlane, inputHeight, inputWidth});
  }
}
| e5eeeb680b4f20ee97b31990f5a58c7887164737.cu | #include "deform_conv_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
// Im2col with learned offsets: unpacks `data_im` for `parallel_imgs` images
// into the column buffer `data_col`, one GPU thread per output element, so
// the convolution can be expressed as a GEMM.
void deformable_im2col(Tensor data_im, Tensor data_offset, const int channels,
                       const int height, const int width, const int ksize_h,
                       const int ksize_w, const int pad_h, const int pad_w,
                       const int stride_h, const int stride_w,
                       const int dilation_h, const int dilation_w,
                       const int parallel_imgs, const int deformable_group,
                       Tensor data_col) {
  // num_axes should be smaller than block size
  // todo: check parallel_imgs is correctly passed in
  // Standard convolution output-size arithmetic (with dilation).
  const int kernel_extent_h = dilation_h * (ksize_h - 1) + 1;
  const int kernel_extent_w = dilation_w * (ksize_w - 1) + 1;
  const int height_col = (height + 2 * pad_h - kernel_extent_h) / stride_h + 1;
  const int width_col = (width + 2 * pad_w - kernel_extent_w) / stride_w + 1;
  // One thread per output element across all images in this step.
  const int num_kernels = parallel_imgs * channels * height_col * width_col;
  const int channel_per_deformable_group = channels / deformable_group;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      data_im.scalar_type(), "deformable_im2col_gpu", ([&] {
        const scalar_t *im_ptr = data_im.data_ptr<scalar_t>();
        const scalar_t *offset_ptr = data_offset.data_ptr<scalar_t>();
        scalar_t *col_ptr = data_col.data_ptr<scalar_t>();
        deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels),
                                       THREADS_PER_BLOCK, 0,
                                       at::cuda::getCurrentCUDAStream()>>>(
            num_kernels, im_ptr, offset_ptr, height, width, ksize_h,
            ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
            channel_per_deformable_group, parallel_imgs, channels,
            deformable_group, height_col, width_col, col_ptr);
      }));
  // Surface launch-configuration errors from the asynchronous launch.
  AT_CUDA_CHECK(cudaGetLastError());
}
// Backward counterpart of deformable_im2col: scatters the column-matrix
// gradient `data_col` back into the input-gradient tensor `grad_im`, one
// GPU thread per column entry.
void deformable_col2im(Tensor data_col, Tensor data_offset, const int channels,
                       const int height, const int width, const int ksize_h,
                       const int ksize_w, const int pad_h, const int pad_w,
                       const int stride_h, const int stride_w,
                       const int dilation_h, const int dilation_w,
                       const int parallel_imgs, const int deformable_group,
                       Tensor grad_im) {
  // todo: make sure parallel_imgs is passed in correctly
  // Standard convolution output-size arithmetic (with dilation).
  const int kernel_extent_h = dilation_h * (ksize_h - 1) + 1;
  const int kernel_extent_w = dilation_w * (ksize_w - 1) + 1;
  const int height_col = (height + 2 * pad_h - kernel_extent_h) / stride_h + 1;
  const int width_col = (width + 2 * pad_w - kernel_extent_w) / stride_w + 1;
  // One thread per column-buffer element (kernel-tap axes included).
  const int num_kernels =
      parallel_imgs * channels * ksize_h * ksize_w * height_col * width_col;
  const int channel_per_deformable_group = channels / deformable_group;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      data_col.scalar_type(), "deformable_col2im_gpu", ([&] {
        const scalar_t *col_ptr = data_col.data_ptr<scalar_t>();
        const scalar_t *offset_ptr = data_offset.data_ptr<scalar_t>();
        scalar_t *grad_im_ptr = grad_im.data_ptr<scalar_t>();
        deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels),
                                       THREADS_PER_BLOCK, 0,
                                       at::cuda::getCurrentCUDAStream()>>>(
            num_kernels, col_ptr, offset_ptr, channels, height, width,
            ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h,
            dilation_w, channel_per_deformable_group, parallel_imgs,
            deformable_group, height_col, width_col, grad_im_ptr);
      }));
  // Surface launch-configuration errors from the asynchronous launch.
  AT_CUDA_CHECK(cudaGetLastError());
}
// Gradient w.r.t. the learned sampling offsets: accumulates contributions
// from the column gradient `data_col` and the saved input `data_im` into
// `grad_offset`, one GPU thread per offset value.
void deformable_col2im_coord(
    Tensor data_col, Tensor data_im, Tensor data_offset, const int channels,
    const int height, const int width, const int ksize_h, const int ksize_w,
    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w, const int parallel_imgs,
    const int deformable_group, Tensor grad_offset) {
  // Standard convolution output-size arithmetic (with dilation).
  const int kernel_extent_h = dilation_h * (ksize_h - 1) + 1;
  const int kernel_extent_w = dilation_w * (ksize_w - 1) + 1;
  const int height_col = (height + 2 * pad_h - kernel_extent_h) / stride_h + 1;
  const int width_col = (width + 2 * pad_w - kernel_extent_w) / stride_w + 1;
  // Each deformable group carries an (x, y) offset pair per kernel tap.
  const int offset_channels = 2 * ksize_h * ksize_w * deformable_group;
  // One thread per offset value, per output pixel, per image.
  const int num_kernels =
      parallel_imgs * offset_channels * height_col * width_col;
  // Input-feature columns each deformable group is responsible for.
  const int channel_per_deformable_group =
      channels * ksize_h * ksize_w / deformable_group;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] {
        const scalar_t *col_ptr = data_col.data_ptr<scalar_t>();
        const scalar_t *im_ptr = data_im.data_ptr<scalar_t>();
        const scalar_t *offset_ptr = data_offset.data_ptr<scalar_t>();
        scalar_t *grad_offset_ptr = grad_offset.data_ptr<scalar_t>();
        deformable_col2im_coord_gpu_kernel<<<
            GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0,
            at::cuda::getCurrentCUDAStream()>>>(
            num_kernels, col_ptr, im_ptr, offset_ptr, channels, height,
            width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w,
            dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs,
            offset_channels, deformable_group,
            height_col, width_col, grad_offset_ptr);
      }));
  // Surface launch-configuration errors from the asynchronous launch.
  AT_CUDA_CHECK(cudaGetLastError());
}
// Validates tensor ranks and sizes for deformable convolution before any
// kernel work. `gradOutput` may be NULL (forward pass); when non-NULL its
// channel and spatial sizes are checked against the computed output shape.
// NOTE(review): the "%s"/"%d"/"%ld" placeholders are printf-style, but
// TORCH_CHECK stream-concatenates its arguments instead of interpolating,
// so failures print the literal format string followed by the values --
// harmless for diagnostics, but worth cleaning up.
void deform_conv_shape_check(Tensor input, Tensor offset, Tensor *gradOutput,
                             Tensor weight, int kH, int kW, int dH, int dW,
                             int padH, int padW, int dilationH, int dilationW,
                             int group, int deformable_group) {
  TORCH_CHECK(
      weight.ndimension() == 4,
      "4D weight tensor (nOutputPlane,nInputPlane,kH,kW) expected, but got: %s",
      weight.ndimension());
  TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous");
  TORCH_CHECK(kW > 0 && kH > 0,
              "kernel size should be greater than zero, but got kH: %d kW: %d",
              kH, kW);
  TORCH_CHECK((weight.size(2) == kH && weight.size(3) == kW),
              "kernel size should be consistent with weight, ",
              "but got kH: %d kW: %d weight.size(2): %d, weight.size(3): %d",
              kH, kW, weight.size(2), weight.size(3));
  TORCH_CHECK(dW > 0 && dH > 0,
              "stride should be greater than zero, but got dH: %d dW: %d", dH,
              dW);
  TORCH_CHECK(
      dilationW > 0 && dilationH > 0,
      "dilation should be greater than 0, but got dilationH: %d dilationW: %d",
      dilationH, dilationW);
  // Dimension indices of (channels, height, width); shifted by one when the
  // input carries a leading batch dimension.
  int ndim = input.ndimension();
  int dimf = 0;
  int dimh = 1;
  int dimw = 2;
  if (ndim == 4) {
    dimf++;
    dimh++;
    dimw++;
  }
  TORCH_CHECK(ndim == 3 || ndim == 4,
              "3D or 4D input tensor expected but got: %s", ndim);
  // Input channels are inferred from the (grouped) weight tensor, not from
  // the input itself, and then cross-checked below.
  long nInputPlane = weight.size(1) * group;
  long inputHeight = input.size(dimh);
  long inputWidth = input.size(dimw);
  long nOutputPlane = weight.size(0);
  // Standard convolution output-size formula (with dilation).
  long outputHeight =
      (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1;
  long outputWidth =
      (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1;
  TORCH_CHECK(nInputPlane % deformable_group == 0,
              "input channels must divide deformable group size");
  if (outputWidth < 1 || outputHeight < 1)
    AT_ERROR(
        "Given input size: (%ld x %ld x %ld). "
        "Calculated output size: (%ld x %ld x %ld). Output size is too small",
        nInputPlane, inputHeight, inputWidth, nOutputPlane, outputHeight,
        outputWidth);
  TORCH_CHECK(input.size(1) == nInputPlane,
              "invalid number of input planes, expected: %d, but got: %d",
              nInputPlane, input.size(1));
  TORCH_CHECK((inputHeight >= kH && inputWidth >= kW),
              "input image is smaller than kernel");
  TORCH_CHECK(
      (offset.size(2) == outputHeight && offset.size(3) == outputWidth),
      "invalid spatial size of offset, expected height: %d width: %d, but "
      "got height: %d width: %d",
      outputHeight, outputWidth, offset.size(2), offset.size(3));
  // Offsets carry an (x, y) pair per kernel tap per deformable group.
  TORCH_CHECK((offset.size(1) == deformable_group * 2 * kH * kW),
              "invalid number of channels of offset");
  if (gradOutput != NULL) {
    TORCH_CHECK(
        gradOutput->size(dimf) == nOutputPlane,
        "invalid number of gradOutput planes, expected: %d, but got: %d",
        nOutputPlane, gradOutput->size(dimf));
    TORCH_CHECK(
        (gradOutput->size(dimh) == outputHeight &&
         gradOutput->size(dimw) == outputWidth),
        "invalid size of gradOutput, expected height: %d width: %d , but "
        "got height: %d width: %d",
        outputHeight, outputWidth, gradOutput->size(dimh),
        gradOutput->size(dimw));
  }
}
// Forward pass of deformable convolution: im2col with learned offsets, then
// a grouped GEMM against the weights. Batches are processed `im2col_step`
// images at a time to bound the size of the column buffer; batchSize is
// assumed divisible by im2col_step (see todo below). `output`, `columns`
// and `ones` are caller-provided buffers that are reshaped in place.
void DeformConvForwardCUDAKernelLauncher(Tensor input, Tensor weight,
                                         Tensor offset, Tensor output,
                                         Tensor columns, Tensor ones, int kW,
                                         int kH, int dW, int dH, int padW,
                                         int padH, int dilationW, int dilationH,
                                         int group, int deformable_group,
                                         int im2col_step) {
  // todo: resize columns to include im2col: done
  // todo: add im2col_step as input
  // todo: add new output buffer and transpose it to output (or directly
  // transpose output) todo: possibly change data indexing because of
  // parallel_imgs
  deform_conv_shape_check(input, offset, NULL, weight, kH, kW, dH, dW, padH,
                          padW, dilationH, dilationW, group, deformable_group);
  at::DeviceGuard guard(input.device());
  // batch == 0 records that a leading batch dim was added and must be
  // stripped again before returning.
  int batch = 1;
  if (input.ndimension() == 3) {
    // Force batch
    batch = 0;
    input.unsqueeze_(0);
    offset.unsqueeze_(0);
  }
  // todo: assert batchsize dividable by im2col_step
  long batchSize = input.size(0);
  long nInputPlane = input.size(1);
  long inputHeight = input.size(2);
  long inputWidth = input.size(3);
  long nOutputPlane = weight.size(0);
  // Standard convolution output-size formula (with dilation).
  long outputWidth =
      (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1;
  long outputHeight =
      (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1;
  TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset");
  output = output.view({batchSize / im2col_step, im2col_step, nOutputPlane,
                        outputHeight, outputWidth});
  // Column buffer for one im2col step: (C*kH*kW) x (step*outH*outW).
  columns = at::zeros(
      {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth},
      input.options());
  if (ones.ndimension() != 2 ||
      ones.size(0) * ones.size(1) < outputHeight * outputWidth) {
    ones = at::ones({outputHeight, outputWidth}, input.options());
  }
  // Fold im2col_step into a leading "chunk" dimension so each loop
  // iteration below processes im2col_step images at once.
  input = input.view({batchSize / im2col_step, im2col_step, nInputPlane,
                      inputHeight, inputWidth});
  offset =
      offset.view({batchSize / im2col_step, im2col_step,
                   deformable_group * 2 * kH * kW, outputHeight, outputWidth});
  // Accumulate GEMM results here; transposed into `output` at the end.
  Tensor output_buffer = at::zeros({batchSize / im2col_step, nOutputPlane,
                                    im2col_step * outputHeight, outputWidth},
                                   output.options());
  output_buffer = output_buffer.view(
      {output_buffer.size(0), group, output_buffer.size(1) / group,
       output_buffer.size(2), output_buffer.size(3)});
  for (int elt = 0; elt < batchSize / im2col_step; elt++) {
    deformable_im2col(input[elt], offset[elt], nInputPlane, inputHeight,
                      inputWidth, kH, kW, padH, padW, dH, dW, dilationH,
                      dilationW, im2col_step, deformable_group, columns);
    // Split the column buffer and weights per convolution group, do one
    // GEMM per group, then flatten back for the next im2col call.
    columns = columns.view({group, columns.size(0) / group, columns.size(1)});
    weight = weight.view({group, weight.size(0) / group, weight.size(1),
                          weight.size(2), weight.size(3)});
    for (int g = 0; g < group; g++) {
      output_buffer[elt][g] = output_buffer[elt][g]
                                  .flatten(1)
                                  .addmm_(weight[g].flatten(1), columns[g])
                                  .view_as(output_buffer[elt][g]);
    }
    columns =
        columns.view({columns.size(0) * columns.size(1), columns.size(2)});
  }
  // Undo the group split, then swap (nOutputPlane, im2col_step) so the
  // layout matches `output`'s (chunk, step, plane, H, W) view.
  output_buffer = output_buffer.view(
      {output_buffer.size(0), output_buffer.size(1) * output_buffer.size(2),
       output_buffer.size(3), output_buffer.size(4)});
  output_buffer = output_buffer.view({batchSize / im2col_step, nOutputPlane,
                                      im2col_step, outputHeight, outputWidth});
  output_buffer.transpose_(1, 2);
  output.copy_(output_buffer);
  output = output.view({batchSize, nOutputPlane, outputHeight, outputWidth});
  // Restore the caller-visible shapes of the in-place-reshaped tensors.
  input = input.view({batchSize, nInputPlane, inputHeight, inputWidth});
  offset = offset.view(
      {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth});
  if (batch == 0) {
    output = output.view({nOutputPlane, outputHeight, outputWidth});
    input = input.view({nInputPlane, inputHeight, inputWidth});
    offset = offset.view({offset.size(1), offset.size(2), offset.size(3)});
  }
}
// Backward pass w.r.t. the input and the offsets: reconstructs the column
// gradient via weight^T @ gradOutput per group, then scatters it into
// gradInput (col2im) and gradOffset (col2im_coord). Processes im2col_step
// images per iteration; batchSize is assumed divisible by im2col_step.
void DeformConvBackwardInputCUDAKernelLauncher(
    Tensor input, Tensor offset, Tensor gradOutput, Tensor gradInput,
    Tensor gradOffset, Tensor weight, Tensor columns, int kW, int kH, int dW,
    int dH, int padW, int padH, int dilationW, int dilationH, int group,
    int deformable_group, int im2col_step) {
  deform_conv_shape_check(input, offset, &gradOutput, weight, kH, kW, dH, dW,
                          padH, padW, dilationH, dilationW, group,
                          deformable_group);
  at::DeviceGuard guard(input.device());
  // batch == 0 records that a leading batch dim was added and must be
  // stripped again before returning.
  int batch = 1;
  if (input.ndimension() == 3) {
    // Force batch
    batch = 0;
    input = input.view({1, input.size(0), input.size(1), input.size(2)});
    offset = offset.view({1, offset.size(0), offset.size(1), offset.size(2)});
    gradOutput = gradOutput.view(
        {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)});
  }
  long batchSize = input.size(0);
  long nInputPlane = input.size(1);
  long inputHeight = input.size(2);
  long inputWidth = input.size(3);
  long nOutputPlane = weight.size(0);
  // Standard convolution output-size formula (with dilation).
  long outputWidth =
      (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1;
  long outputHeight =
      (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1;
  // Fix: a stray `3` argument was previously passed to TORCH_CHECK here; it
  // was stream-concatenated into the error message. Removed to match the
  // equivalent checks in the forward/parameter launchers.
  TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset");
  gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth});
  // Column-gradient buffer for one im2col step: (C*kH*kW) x (step*outH*outW).
  columns = at::zeros(
      {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth},
      input.options());
  // change order of grad output: fold im2col_step into a leading chunk dim
  // and move the plane axis in front of the step axis for the per-group GEMM.
  gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step,
                                nOutputPlane, outputHeight, outputWidth});
  gradOutput.transpose_(1, 2);
  gradInput = gradInput.view({batchSize / im2col_step, im2col_step, nInputPlane,
                              inputHeight, inputWidth});
  input = input.view({batchSize / im2col_step, im2col_step, nInputPlane,
                      inputHeight, inputWidth});
  gradOffset = gradOffset.view({batchSize / im2col_step, im2col_step,
                                deformable_group * 2 * kH * kW, outputHeight,
                                outputWidth});
  offset =
      offset.view({batchSize / im2col_step, im2col_step,
                   deformable_group * 2 * kH * kW, outputHeight, outputWidth});
  for (int elt = 0; elt < batchSize / im2col_step; elt++) {
    // divide into groups
    columns = columns.view({group, columns.size(0) / group, columns.size(1)});
    weight = weight.view({group, weight.size(0) / group, weight.size(1),
                          weight.size(2), weight.size(3)});
    gradOutput = gradOutput.view(
        {gradOutput.size(0), group, gradOutput.size(1) / group,
         gradOutput.size(2), gradOutput.size(3), gradOutput.size(4)});
    for (int g = 0; g < group; g++) {
      // beta = 0, alpha = 1: overwrite columns[g] with W^T @ dOut.
      columns[g] = columns[g].addmm_(weight[g].flatten(1).transpose(0, 1),
                                     gradOutput[elt][g].flatten(1), 0.0f, 1.0f);
    }
    columns =
        columns.view({columns.size(0) * columns.size(1), columns.size(2)});
    gradOutput = gradOutput.view(
        {gradOutput.size(0), gradOutput.size(1) * gradOutput.size(2),
         gradOutput.size(3), gradOutput.size(4), gradOutput.size(5)});
    // Scatter the column gradient into the offset and input gradients.
    deformable_col2im_coord(columns, input[elt], offset[elt], nInputPlane,
                            inputHeight, inputWidth, kH, kW, padH, padW, dH, dW,
                            dilationH, dilationW, im2col_step, deformable_group,
                            gradOffset[elt]);
    deformable_col2im(columns, offset[elt], nInputPlane, inputHeight,
                      inputWidth, kH, kW, padH, padW, dH, dW, dilationH,
                      dilationW, im2col_step, deformable_group, gradInput[elt]);
  }
  // Restore the caller-visible shapes of the in-place-reshaped tensors.
  gradOutput.transpose_(1, 2);
  gradOutput =
      gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth});
  gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth});
  input = input.view({batchSize, nInputPlane, inputHeight, inputWidth});
  gradOffset = gradOffset.view(
      {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth});
  offset = offset.view(
      {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth});
  if (batch == 0) {
    gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth});
    input = input.view({nInputPlane, inputHeight, inputWidth});
    gradInput = gradInput.view({nInputPlane, inputHeight, inputWidth});
    offset = offset.view({offset.size(1), offset.size(2), offset.size(3)});
    gradOffset =
        gradOffset.view({offset.size(1), offset.size(2), offset.size(3)});
  }
}
// Backward pass w.r.t. the filter weights of a deformable convolution.
// Accumulates scale * dL/dW into gradWeight by re-running the im2col
// lowering on chunks of im2col_step images and doing one GEMM per group:
//   gradWeight[g] += scale * gradOutput[g] @ columns[g]^T
// The many view()/transpose_() calls only reshape metadata; `batch == 0`
// tracks whether the caller passed an unbatched 3-D input so the original
// shape can be restored before returning.
void DeformConvBackwardParametersCUDAKernelLauncher(
    Tensor input, Tensor offset, Tensor gradOutput, Tensor gradWeight,
    Tensor columns, Tensor ones, int kW, int kH, int dW, int dH, int padW,
    int padH, int dilationW, int dilationH, int group, int deformable_group,
    float scale, int im2col_step) {
  // todo: transpose and reshape outGrad
  // todo: reshape columns
  // todo: add im2col_step as input
  // Validate all tensor shapes before any reshaping below.
  deform_conv_shape_check(input, offset, &gradOutput, gradWeight, kH, kW, dH,
                          dW, padH, padW, dilationH, dilationW, group,
                          deformable_group);
  // Run every kernel below on the device that owns `input`.
  at::DeviceGuard guard(input.device());
  int batch = 1;
  if (input.ndimension() == 3) {
    // Force batch
    batch = 0;
    input = input.view(
        at::IntList({1, input.size(0), input.size(1), input.size(2)}));
    gradOutput = gradOutput.view(
        {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)});
  }
  long batchSize = input.size(0);
  long nInputPlane = input.size(1);
  long inputHeight = input.size(2);
  long inputWidth = input.size(3);
  long nOutputPlane = gradWeight.size(0);
  // Standard convolution output-size formula.
  long outputWidth =
      (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1;
  long outputHeight =
      (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1;
  TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset");
  // Scratch buffer holding the im2col-lowered patches for one step chunk.
  columns = at::zeros(
      {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth},
      input.options());
  gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step,
                                nOutputPlane, outputHeight, outputWidth});
  gradOutput.transpose_(1, 2);
  // gradOutputBuffer is a contiguous, transposed copy of gradOutput laid out
  // so each im2col chunk can be flattened for the per-group GEMM below.
  Tensor gradOutputBuffer = at::zeros_like(gradOutput);
  gradOutputBuffer =
      gradOutputBuffer.view({batchSize / im2col_step, nOutputPlane, im2col_step,
                             outputHeight, outputWidth});
  gradOutputBuffer = gradOutputBuffer.contiguous();
  gradOutputBuffer.copy_(gradOutput);
  gradOutputBuffer =
      gradOutputBuffer.view({batchSize / im2col_step, nOutputPlane,
                             im2col_step * outputHeight, outputWidth});
  // Undo the transpose on gradOutput itself and restore its flat shape.
  gradOutput.transpose_(1, 2);
  gradOutput =
      gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth});
  input = input.view({batchSize / im2col_step, im2col_step, nInputPlane,
                      inputHeight, inputWidth});
  offset =
      offset.view({batchSize / im2col_step, im2col_step,
                   deformable_group * 2 * kH * kW, outputHeight, outputWidth});
  for (int elt = 0; elt < batchSize / im2col_step; elt++) {
    // Lower the deformed input patches of this chunk into `columns`.
    deformable_im2col(input[elt], offset[elt], nInputPlane, inputHeight,
                      inputWidth, kH, kW, padH, padW, dH, dW, dilationH,
                      dilationW, im2col_step, deformable_group, columns);
    // divide into group
    gradOutputBuffer = gradOutputBuffer.view(
        {gradOutputBuffer.size(0), group, gradOutputBuffer.size(1) / group,
         gradOutputBuffer.size(2), gradOutputBuffer.size(3)});
    columns = columns.view({group, columns.size(0) / group, columns.size(1)});
    gradWeight =
        gradWeight.view({group, gradWeight.size(0) / group, gradWeight.size(1),
                         gradWeight.size(2), gradWeight.size(3)});
    for (int g = 0; g < group; g++) {
      // gradWeight[g] = 1.0 * gradWeight[g] + scale * (gO[elt][g] @ cols[g]^T)
      gradWeight[g] = gradWeight[g]
                          .flatten(1)
                          .addmm_(gradOutputBuffer[elt][g].flatten(1),
                                  columns[g].transpose(1, 0), 1.0, scale)
                          .view_as(gradWeight[g]);
    }
    // Collapse the group dimension again for the next iteration.
    gradOutputBuffer = gradOutputBuffer.view(
        {gradOutputBuffer.size(0),
         gradOutputBuffer.size(1) * gradOutputBuffer.size(2),
         gradOutputBuffer.size(3), gradOutputBuffer.size(4)});
    columns =
        columns.view({columns.size(0) * columns.size(1), columns.size(2)});
    gradWeight = gradWeight.view({gradWeight.size(0) * gradWeight.size(1),
                                  gradWeight.size(2), gradWeight.size(3),
                                  gradWeight.size(4)});
  }
  // Restore caller-visible shapes.
  input = input.view({batchSize, nInputPlane, inputHeight, inputWidth});
  offset = offset.view(
      {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth});
  if (batch == 0) {
    // Caller passed an unbatched 3-D input; drop the synthetic batch dim.
    gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth});
    input = input.view({nInputPlane, inputHeight, inputWidth});
  }
}
|
4835e16554208cfad51de5973ec5a13b2aad987f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "projektcuda.h"
#include "project_comm.h"
//#include "mex.h"
/* Kernel to square elements of the array on the GPU */
// Dot product of in1 and in2 (length N); the scalar result ends up in out[0].
// out must have at least gridDim.x elements (one partial sum per block).
// NOTE(review): the final loop at idx == 0 reads the other blocks' partial
// sums, but __syncthreads() only synchronizes within one block — there is no
// grid-wide barrier, so out[0] is only correct if every other block happens
// to have finished first. Consider a second kernel launch or atomicAdd for
// the cross-block reduction. Also, Cs is float while the inputs are t_ve —
// possible precision loss if t_ve is double (confirm in projektcuda.h).
__global__ void device_dotMul(t_ve* in1, t_ve* in2,t_ve* out, unsigned int N)
{
    //define a Result Vector for each block
    __shared__ float Cs[VECTOR_BLOCK_SIZE];
    // get a thread indentifier
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    //initialise Cs
    Cs[threadIdx.x] = 0;
    // compute scalar product
    if ( idx < N ) {
        Cs[threadIdx.x] = in1[ idx ] * in2[ idx ];
    }
    //blocksum ist thread memory.
    t_ve blocksum = 0;
    //initialize output vector for each block
    if(threadIdx.x==0){
        out[blockIdx.x]=0;
    }
    __syncthreads();
    // sum all thread results of this block
    // (serial sum by thread 0; a tree reduction would be faster)
    if(threadIdx.x==0){
        for ( int i = 0; i < blockDim.x; i++ ) {
            blocksum += Cs[i];
        }
        out[blockIdx.x]=blocksum;
    }
    __syncthreads();
    // sum all block results for the grid (see race NOTE above)
    if ( idx == 0 ) {
        for ( int i = 1; i < gridDim.x; i++ ) {
            out[0] += out[i];
        }
    }
} | 4835e16554208cfad51de5973ec5a13b2aad987f.cu | #include "cuda.h"
#include <stdio.h>
#include "projektcuda.h"
#include "project_comm.h"
//#include "mex.h"
/* Kernel to square elements of the array on the GPU */
// Dot product of in1 and in2 (length N); the scalar result ends up in out[0].
// out must have at least gridDim.x elements (one partial sum per block).
// NOTE(review): the final loop at idx == 0 reads the other blocks' partial
// sums, but __syncthreads() only synchronizes within one block — there is no
// grid-wide barrier, so out[0] is only correct if every other block happens
// to have finished first. Consider a second kernel launch or atomicAdd for
// the cross-block reduction. Also, Cs is float while the inputs are t_ve —
// possible precision loss if t_ve is double (confirm in projektcuda.h).
__global__ void device_dotMul(t_ve* in1, t_ve* in2,t_ve* out, unsigned int N)
{
    //define a Result Vector for each block
    __shared__ float Cs[VECTOR_BLOCK_SIZE];
    // get a thread indentifier
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    //initialise Cs
    Cs[threadIdx.x] = 0;
    // compute scalar product
    if ( idx < N ) {
        Cs[threadIdx.x] = in1[ idx ] * in2[ idx ];
    }
    //blocksum ist thread memory.
    t_ve blocksum = 0;
    //initialize output vector for each block
    if(threadIdx.x==0){
        out[blockIdx.x]=0;
    }
    __syncthreads();
    // sum all thread results of this block
    // (serial sum by thread 0; a tree reduction would be faster)
    if(threadIdx.x==0){
        for ( int i = 0; i < blockDim.x; i++ ) {
            blocksum += Cs[i];
        }
        out[blockIdx.x]=blocksum;
    }
    __syncthreads();
    // sum all block results for the grid (see race NOTE above)
    if ( idx == 0 ) {
        for ( int i = 1; i < gridDim.x; i++ ) {
            out[0] += out[i];
        }
    }
}
5115c063c3d2d8c2315532134a5af27022ed930a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "math.h"
#include "stdio.h"
#include "stdlib.h"
#define RADIUS 3
#define BLOCK_SIZE 512
#define MAX_GRID_WIDTH 65535
// Count the elements in [startElem, endElem) where the GPU result differs
// from the CPU reference by more than a small absolute tolerance.
int checkResults(int startElem, int endElem, float *cudaRes, float *res) {
  const float tolerance = 0.000001f;
  int mismatches = 0;
  int idx = startElem;
  while (idx < endElem) {
    float diff = cudaRes[idx] - res[idx];
    if (fabs(diff) > tolerance) {
      mismatches++;
    }
    idx++;
  }
  return mismatches;
}
// Fill the 7-entry symmetric stencil weight table.
// Hardcoded for RADIUS = 3.
void initializeWeights(float *weights) {
  const float vals[7] = {0.50f, 0.75f, 1.25f, 2.00f, 1.25f, 0.75f, 0.50f};
  for (int i = 0; i < 7; i++) {
    weights[i] = vals[i];
  }
}
// Fill arr with nElements pseudo-random integer values in [-5, 5], stored as
// floats. Seeds the C RNG with the current time on every call.
void initializeArray(float *arr, int nElements) {
  const int lo = -5;
  const int hi = 5;
  const int span = hi - lo + 1;
  srand(time(NULL));
  for (int i = 0; i < nElements; ++i) {
    int value = rand() % span + lo;
    arr[i] = (float)value;
  }
}
// CPU reference: 7-point weighted stencil followed by division by 7, applied
// to out[i] for every i in [sIdx, eIdx). Reads in[i-RADIUS .. i+RADIUS], so
// the caller must keep sIdx >= RADIUS and eIdx <= n - RADIUS.
void applyStencil1D_SEQ(int sIdx, int eIdx, const float *weights, float *in,
                        float *out) {
  for (int i = sIdx; i < eIdx; i++) {
    out[i] = 0.f;
    for (int k = 0; k <= 2 * RADIUS; k++) {
      out[i] += weights[k] * in[i - RADIUS + k];
    }
    out[i] /= 7.f;
  }
}
// GPU stencil v4: one output element per thread, 2-D grid so N can exceed
// the per-dimension block limit. Computes
//   out[i] = (sum_k weights[k] * in[i-3+k]) / 7   for i in [sIdx, eIdx).
// Launched with sIdx = RADIUS and eIdx = N - RADIUS so the +/-3 halo reads
// stay inside the array.
__global__ void applyStencil1D_V4(int sIdx, int eIdx, const float *weights,
                                  float *in, float *out) {
  // Flatten the 2-D grid (x fast, y slow) into one linear element index.
  int i = sIdx + (blockIdx.x * blockDim.x + threadIdx.x) +
          blockDim.x * gridDim.x * blockIdx.y;
  if (i >= eIdx) return;  // tail guard for the last (partial) block
  float result = 0.f;
  result += weights[0] * in[i - 3];
  result += weights[1] * in[i - 2];
  result += weights[2] * in[i - 1];
  result += weights[3] * in[i];
  result += weights[4] * in[i + 1];
  result += weights[5] * in[i + 2];
  result += weights[6] * in[i + 3];
  result /= 7.f;
  out[i] = result;
}
// GPU stencil v5: like v4 but stages in[] through shared memory so each
// element is read from global memory once per block instead of up to 7x.
// Requires blockDim.x == BLOCK_SIZE (the halo indexing hardcodes it) and a
// dynamic shared-memory allocation of (BLOCK_SIZE + 2*RADIUS)*sizeof(float).
__global__ void applyStencil1D_V5(int sIdx, int eIdx, const float *weights,
                                  float *in, float *out) {
  extern __shared__ float sdata[];
  int i = sIdx + (blockIdx.x * blockDim.x + threadIdx.x) +
          blockDim.x * gridDim.x * blockIdx.y;
  // Read into shared memory
  // NOTE(review): these loads execute before the i >= eIdx guard, so threads
  // past the tail of the last block read in[i] / in[i + BLOCK_SIZE] beyond
  // the end of the allocation — confirm the overshoot is acceptable or guard
  // (e.g. clamp the index) before loading.
  sdata[threadIdx.x + RADIUS] = in[i];
  if (threadIdx.x < RADIUS) {
    // left halo and right halo for this block's tile
    sdata[threadIdx.x] = in[i - RADIUS];
    sdata[threadIdx.x + RADIUS + BLOCK_SIZE] = in[i + BLOCK_SIZE];
  }
  // Barrier is before the early return, so every thread reaches it.
  __syncthreads();
  if (i >= eIdx) return;
  // Calculate result
  float result = 0.f;
  result += weights[0] * sdata[threadIdx.x];
  result += weights[1] * sdata[threadIdx.x + 1];
  result += weights[2] * sdata[threadIdx.x + 2];
  result += weights[3] * sdata[threadIdx.x + 3];
  result += weights[4] * sdata[threadIdx.x + 4];
  result += weights[5] * sdata[threadIdx.x + 5];
  result += weights[6] * sdata[threadIdx.x + 6];
  result /= 7.f;
  out[i] = result;
}
// Benchmark driver: repeatedly runs the selected stencil kernel (4: plain,
// 5: shared memory, 6: shared memory + pinned host buffers) until the
// accumulated inclusive GPU time exceeds the requested duration, then times
// the CPU reference and compares the two results.
int main(int argc, char *argv[]) {
  // NOTE(review): version, N and dur_max are read below even when argc != 4
  // (the version != 4 && ... test) while still uninitialized — undefined
  // behavior; they should be zero-initialized.
  int version, N;
  float dur_max;
  if (argc == 4) {
    version = atoi(argv[1]);
    N = atoi(argv[2]);
    dur_max = atof(argv[3]) * 1000;  // seconds -> milliseconds
    if (dur_max == 0.f) dur_max = 1e-30;  // force exactly one timing run
  }
  if (argc != 4 || (version != 4 && version != 5 && version != 6)) {
    printf("Usage: ./p1 <kernel_version> <log10(N)> <time (s)>\n");
    printf("Allowed versions: 4, 5, 6\n");
    printf("Entering a time of zero will produce 1 run\n");
    return 0;
  }
  int wsize = (2 * RADIUS + 1) * sizeof(float);  // stencil weight bytes
  int size = N * sizeof(float);                  // data array bytes
  // Setup timing
  float dur_ex, dur_in, dur_cpu;
  float dur_ex_total = 0.f;
  float dur_in_total = 0.f;
  float dur_cpu_total = 0.f;
  int num_runs_gpu = 0;
  int num_runs_cpu = 0;
  // Allocate host resources (version 6 uses pinned host memory)
  float *weights, *in, *out, *cuda_out;
  if (version == 4 || version == 5) {
    weights = (float *)malloc(wsize);
    in = (float *)malloc(size);
    out = (float *)malloc(size);
    cuda_out = (float *)malloc(size);
  } else if (version == 6) {
    hipHostMalloc(&weights, wsize);
    hipHostMalloc(&in, size);
    hipHostMalloc(&out, size);
    hipHostMalloc(&cuda_out, size);
  }
  // Allocate device resources
  float *d_weights, *d_in, *d_out;
  hipMalloc(&d_weights, wsize);
  hipMalloc(&d_in, size);
  hipMalloc(&d_out, size);
  // Fill weights and array
  initializeWeights(weights);
  initializeArray(in, N);
  // Setup grid: fold excess blocks into grid.y when grid.x would overflow
  dim3 dimBlock, dimGrid;
  dimBlock.x = BLOCK_SIZE;
  int num_grids = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;  // ceil(N / BLOCK_SIZE)
  int shared_size = (BLOCK_SIZE + 2 * RADIUS) * sizeof(float);  // tile + halo
  if (num_grids <= MAX_GRID_WIDTH) {
    dimGrid.x = num_grids;
    dimGrid.y = 1;
  } else {
    dimGrid.x = MAX_GRID_WIDTH;
    dimGrid.y = (num_grids + MAX_GRID_WIDTH - 1) / MAX_GRID_WIDTH;
  }
  // NOTE(review): format specifiers do not match the argument types below
  // ("%u" with int version, "%lu" with int N) — undefined behavior; use %d.
  printf("Version: %u\n", version);
  printf("N: %lu\n", N);
  printf("Block size: %dx%d\n", dimBlock.y, dimBlock.x);
  printf("Grid size: %dx%d\n", dimGrid.y, dimGrid.x);
  // GPU loop: inclusive = copies + kernel, exclusive = kernel only
  while (dur_in_total < dur_max) {
    num_runs_gpu += 1;
    // Setup timing
    // NOTE(review): events are re-created every iteration and never
    // destroyed (hipEventDestroy) — resource leak over long runs.
    hipEvent_t start_ex, end_ex, start_in, end_in;
    hipEventCreate(&start_ex);
    hipEventCreate(&end_ex);
    hipEventCreate(&start_in);
    hipEventCreate(&end_in);
    // Start inclusive timing
    hipEventRecord(start_in, 0);
    // Copy to device
    hipMemcpy(d_weights, weights, wsize, hipMemcpyHostToDevice);
    hipMemcpy(d_in, in, size, hipMemcpyHostToDevice);
    // Start exclusive timing
    hipEventRecord(start_ex, 0);
    // Execute kernel
    if (version == 4)
      hipLaunchKernelGGL(( applyStencil1D_V4) , dim3(dimGrid), dim3(dimBlock), 0, 0,
          RADIUS, N - RADIUS, d_weights, d_in, d_out);
    else if (version == 5 || version == 6)
      hipLaunchKernelGGL(( applyStencil1D_V5) , dim3(dimGrid), dim3(dimBlock), shared_size, 0,
          RADIUS, N - RADIUS, d_weights, d_in, d_out);
    // End exclusive timing
    hipEventRecord(end_ex, 0);
    hipEventSynchronize(end_ex);
    // Copy from device
    hipMemcpy(cuda_out, d_out, size, hipMemcpyDeviceToHost);
    // End inclusive timing
    hipEventRecord(end_in, 0);
    hipEventSynchronize(end_in);
    // Calculate durations
    hipEventElapsedTime(&dur_ex, start_ex, end_ex);
    hipEventElapsedTime(&dur_in, start_in, end_in);
    dur_ex_total += dur_ex;
    dur_in_total += dur_in;
  }
  dur_ex = dur_ex_total / num_runs_gpu;
  dur_in = dur_in_total / num_runs_gpu;
  printf("Num runs GPU: %u\n", num_runs_gpu);
  printf("GPU execution time (exclusive): %15.6f ms\n", dur_ex);
  printf("GPU execution time (inclusive): %15.6f ms\n", dur_in);
  // CPU loop timed with GPU events for consistency with the GPU numbers
  while (dur_cpu_total < dur_max) {
    num_runs_cpu += 1;
    // Setup timing
    hipEvent_t start_cpu, end_cpu;
    hipEventCreate(&start_cpu);
    hipEventCreate(&end_cpu);
    // Run on CPU
    hipEventRecord(start_cpu, 0);
    applyStencil1D_SEQ(RADIUS, N - RADIUS, weights, in, out);
    hipEventRecord(end_cpu, 0);
    hipEventSynchronize(end_cpu);
    hipEventElapsedTime(&dur_cpu, start_cpu, end_cpu);
    dur_cpu_total += dur_cpu;
  }
  dur_cpu = dur_cpu_total / num_runs_cpu;
  printf("Num runs CPU: %u\n", num_runs_cpu);
  printf("CPU execution time: %15.6f ms\n", dur_cpu);
  // Compare GPU result to CPU result
  int nDiffs = checkResults(RADIUS, N - RADIUS, cuda_out, out);
  if (nDiffs == 0)
    printf("Looks good.\n");
  else
    printf("Doesn't look good: %d differences\n", nDiffs);
  printf("\n");
  // Free resources
  // NOTE(review): hipHostMalloc'd memory should be released with
  // hipHostFree, not hipFree.
  if (version == 4 || version == 5) {
    free(weights);
    free(in);
    free(out);
    free(cuda_out);
  } else if (version == 6) {
    hipFree(weights);
    hipFree(in);
    hipFree(out);
    hipFree(cuda_out);
  }
  hipFree(d_weights);
  hipFree(d_in);
  hipFree(d_out);
  return 0;
}
| 5115c063c3d2d8c2315532134a5af27022ed930a.cu | #include "cuda.h"
#include "math.h"
#include "stdio.h"
#include "stdlib.h"
#define RADIUS 3
#define BLOCK_SIZE 512
#define MAX_GRID_WIDTH 65535
// Count the elements in [startElem, endElem) where the GPU result differs
// from the CPU reference by more than a small absolute tolerance.
int checkResults(int startElem, int endElem, float *cudaRes, float *res) {
  const float tolerance = 0.000001f;
  int mismatches = 0;
  int idx = startElem;
  while (idx < endElem) {
    float diff = cudaRes[idx] - res[idx];
    if (fabs(diff) > tolerance) {
      mismatches++;
    }
    idx++;
  }
  return mismatches;
}
// Fill the 7-entry symmetric stencil weight table.
// Hardcoded for RADIUS = 3.
void initializeWeights(float *weights) {
  const float vals[7] = {0.50f, 0.75f, 1.25f, 2.00f, 1.25f, 0.75f, 0.50f};
  for (int i = 0; i < 7; i++) {
    weights[i] = vals[i];
  }
}
// Fill arr with nElements pseudo-random integer values in [-5, 5], stored as
// floats. Seeds the C RNG with the current time on every call.
void initializeArray(float *arr, int nElements) {
  const int lo = -5;
  const int hi = 5;
  const int span = hi - lo + 1;
  srand(time(NULL));
  for (int i = 0; i < nElements; ++i) {
    int value = rand() % span + lo;
    arr[i] = (float)value;
  }
}
// CPU reference: 7-point weighted stencil followed by division by 7, applied
// to out[i] for every i in [sIdx, eIdx). Reads in[i-RADIUS .. i+RADIUS], so
// the caller must keep sIdx >= RADIUS and eIdx <= n - RADIUS.
void applyStencil1D_SEQ(int sIdx, int eIdx, const float *weights, float *in,
                        float *out) {
  for (int i = sIdx; i < eIdx; i++) {
    out[i] = 0.f;
    for (int k = 0; k <= 2 * RADIUS; k++) {
      out[i] += weights[k] * in[i - RADIUS + k];
    }
    out[i] /= 7.f;
  }
}
// GPU stencil v4: one output element per thread, 2-D grid so N can exceed
// the per-dimension block limit. Computes
//   out[i] = (sum_k weights[k] * in[i-3+k]) / 7   for i in [sIdx, eIdx).
// Launched with sIdx = RADIUS and eIdx = N - RADIUS so the +/-3 halo reads
// stay inside the array.
__global__ void applyStencil1D_V4(int sIdx, int eIdx, const float *weights,
                                  float *in, float *out) {
  // Flatten the 2-D grid (x fast, y slow) into one linear element index.
  int i = sIdx + (blockIdx.x * blockDim.x + threadIdx.x) +
          blockDim.x * gridDim.x * blockIdx.y;
  if (i >= eIdx) return;  // tail guard for the last (partial) block
  float result = 0.f;
  result += weights[0] * in[i - 3];
  result += weights[1] * in[i - 2];
  result += weights[2] * in[i - 1];
  result += weights[3] * in[i];
  result += weights[4] * in[i + 1];
  result += weights[5] * in[i + 2];
  result += weights[6] * in[i + 3];
  result /= 7.f;
  out[i] = result;
}
// GPU stencil v5: like v4 but stages in[] through shared memory so each
// element is read from global memory once per block instead of up to 7x.
// Requires blockDim.x == BLOCK_SIZE (the halo indexing hardcodes it) and a
// dynamic shared-memory allocation of (BLOCK_SIZE + 2*RADIUS)*sizeof(float).
__global__ void applyStencil1D_V5(int sIdx, int eIdx, const float *weights,
                                  float *in, float *out) {
  extern __shared__ float sdata[];
  int i = sIdx + (blockIdx.x * blockDim.x + threadIdx.x) +
          blockDim.x * gridDim.x * blockIdx.y;
  // Read into shared memory
  // NOTE(review): these loads execute before the i >= eIdx guard, so threads
  // past the tail of the last block read in[i] / in[i + BLOCK_SIZE] beyond
  // the end of the allocation — confirm the overshoot is acceptable or guard
  // (e.g. clamp the index) before loading.
  sdata[threadIdx.x + RADIUS] = in[i];
  if (threadIdx.x < RADIUS) {
    // left halo and right halo for this block's tile
    sdata[threadIdx.x] = in[i - RADIUS];
    sdata[threadIdx.x + RADIUS + BLOCK_SIZE] = in[i + BLOCK_SIZE];
  }
  // Barrier is before the early return, so every thread reaches it.
  __syncthreads();
  if (i >= eIdx) return;
  // Calculate result
  float result = 0.f;
  result += weights[0] * sdata[threadIdx.x];
  result += weights[1] * sdata[threadIdx.x + 1];
  result += weights[2] * sdata[threadIdx.x + 2];
  result += weights[3] * sdata[threadIdx.x + 3];
  result += weights[4] * sdata[threadIdx.x + 4];
  result += weights[5] * sdata[threadIdx.x + 5];
  result += weights[6] * sdata[threadIdx.x + 6];
  result /= 7.f;
  out[i] = result;
}
// Benchmark driver: repeatedly runs the selected stencil kernel (4: plain,
// 5: shared memory, 6: shared memory + pinned host buffers) until the
// accumulated inclusive GPU time exceeds the requested duration, then times
// the CPU reference and compares the two results.
// Fixes over the previous revision:
//  * version/N/dur_max are zero-initialized — they were read uninitialized
//    (undefined behavior) when argc != 4;
//  * printf format specifiers now match the argument types ("%lu" with an
//    int was undefined behavior);
//  * CUDA events are destroyed after each timing iteration (was a leak);
//  * version 6 releases pinned host memory with cudaFreeHost — cudaFree is
//    only valid for device allocations from cudaMalloc.
int main(int argc, char *argv[]) {
  int version = 0, N = 0;
  float dur_max = 0.f;
  if (argc == 4) {
    version = atoi(argv[1]);
    N = atoi(argv[2]);
    dur_max = atof(argv[3]) * 1000;  // seconds -> milliseconds
    if (dur_max == 0.f) dur_max = 1e-30;  // force exactly one timing run
  }
  if (argc != 4 || (version != 4 && version != 5 && version != 6)) {
    printf("Usage: ./p1 <kernel_version> <log10(N)> <time (s)>\n");
    printf("Allowed versions: 4, 5, 6\n");
    printf("Entering a time of zero will produce 1 run\n");
    return 0;
  }
  int wsize = (2 * RADIUS + 1) * sizeof(float);  // stencil weight bytes
  int size = N * sizeof(float);                  // data array bytes
  // Setup timing
  float dur_ex, dur_in, dur_cpu;
  float dur_ex_total = 0.f;
  float dur_in_total = 0.f;
  float dur_cpu_total = 0.f;
  int num_runs_gpu = 0;
  int num_runs_cpu = 0;
  // Allocate host resources (version 6 uses pinned host memory)
  float *weights, *in, *out, *cuda_out;
  if (version == 4 || version == 5) {
    weights = (float *)malloc(wsize);
    in = (float *)malloc(size);
    out = (float *)malloc(size);
    cuda_out = (float *)malloc(size);
  } else if (version == 6) {
    cudaMallocHost(&weights, wsize);
    cudaMallocHost(&in, size);
    cudaMallocHost(&out, size);
    cudaMallocHost(&cuda_out, size);
  }
  // Allocate device resources
  float *d_weights, *d_in, *d_out;
  cudaMalloc(&d_weights, wsize);
  cudaMalloc(&d_in, size);
  cudaMalloc(&d_out, size);
  // Fill weights and array
  initializeWeights(weights);
  initializeArray(in, N);
  // Setup grid: fold excess blocks into grid.y when grid.x would overflow
  dim3 dimBlock, dimGrid;
  dimBlock.x = BLOCK_SIZE;
  int num_grids = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;  // ceil(N / BLOCK_SIZE)
  int shared_size = (BLOCK_SIZE + 2 * RADIUS) * sizeof(float);  // tile + halo
  if (num_grids <= MAX_GRID_WIDTH) {
    dimGrid.x = num_grids;
    dimGrid.y = 1;
  } else {
    dimGrid.x = MAX_GRID_WIDTH;
    dimGrid.y = (num_grids + MAX_GRID_WIDTH - 1) / MAX_GRID_WIDTH;
  }
  printf("Version: %d\n", version);
  printf("N: %d\n", N);
  printf("Block size: %ux%u\n", dimBlock.y, dimBlock.x);
  printf("Grid size: %ux%u\n", dimGrid.y, dimGrid.x);
  // GPU loop: inclusive = copies + kernel, exclusive = kernel only
  while (dur_in_total < dur_max) {
    num_runs_gpu += 1;
    // Setup timing
    cudaEvent_t start_ex, end_ex, start_in, end_in;
    cudaEventCreate(&start_ex);
    cudaEventCreate(&end_ex);
    cudaEventCreate(&start_in);
    cudaEventCreate(&end_in);
    // Start inclusive timing
    cudaEventRecord(start_in, 0);
    // Copy to device
    cudaMemcpy(d_weights, weights, wsize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);
    // Start exclusive timing
    cudaEventRecord(start_ex, 0);
    // Execute kernel
    if (version == 4)
      applyStencil1D_V4 <<<dimGrid, dimBlock>>>
          (RADIUS, N - RADIUS, d_weights, d_in, d_out);
    else if (version == 5 || version == 6)
      applyStencil1D_V5 <<<dimGrid, dimBlock, shared_size>>>
          (RADIUS, N - RADIUS, d_weights, d_in, d_out);
    // End exclusive timing
    cudaEventRecord(end_ex, 0);
    cudaEventSynchronize(end_ex);
    // Copy from device
    cudaMemcpy(cuda_out, d_out, size, cudaMemcpyDeviceToHost);
    // End inclusive timing
    cudaEventRecord(end_in, 0);
    cudaEventSynchronize(end_in);
    // Calculate durations
    cudaEventElapsedTime(&dur_ex, start_ex, end_ex);
    cudaEventElapsedTime(&dur_in, start_in, end_in);
    dur_ex_total += dur_ex;
    dur_in_total += dur_in;
    // Release the per-iteration events (previously leaked)
    cudaEventDestroy(start_ex);
    cudaEventDestroy(end_ex);
    cudaEventDestroy(start_in);
    cudaEventDestroy(end_in);
  }
  dur_ex = dur_ex_total / num_runs_gpu;
  dur_in = dur_in_total / num_runs_gpu;
  printf("Num runs GPU: %d\n", num_runs_gpu);
  printf("GPU execution time (exclusive): %15.6f ms\n", dur_ex);
  printf("GPU execution time (inclusive): %15.6f ms\n", dur_in);
  // CPU loop timed with GPU events for consistency with the GPU numbers
  while (dur_cpu_total < dur_max) {
    num_runs_cpu += 1;
    // Setup timing
    cudaEvent_t start_cpu, end_cpu;
    cudaEventCreate(&start_cpu);
    cudaEventCreate(&end_cpu);
    // Run on CPU
    cudaEventRecord(start_cpu, 0);
    applyStencil1D_SEQ(RADIUS, N - RADIUS, weights, in, out);
    cudaEventRecord(end_cpu, 0);
    cudaEventSynchronize(end_cpu);
    cudaEventElapsedTime(&dur_cpu, start_cpu, end_cpu);
    dur_cpu_total += dur_cpu;
    cudaEventDestroy(start_cpu);
    cudaEventDestroy(end_cpu);
  }
  dur_cpu = dur_cpu_total / num_runs_cpu;
  printf("Num runs CPU: %d\n", num_runs_cpu);
  printf("CPU execution time: %15.6f ms\n", dur_cpu);
  // Compare GPU result to CPU result
  int nDiffs = checkResults(RADIUS, N - RADIUS, cuda_out, out);
  if (nDiffs == 0)
    printf("Looks good.\n");
  else
    printf("Doesn't look good: %d differences\n", nDiffs);
  printf("\n");
  // Free resources (pinned host memory must be released with cudaFreeHost)
  if (version == 4 || version == 5) {
    free(weights);
    free(in);
    free(out);
    free(cuda_out);
  } else if (version == 6) {
    cudaFreeHost(weights);
    cudaFreeHost(in);
    cudaFreeHost(out);
    cudaFreeHost(cuda_out);
  }
  cudaFree(d_weights);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}
|
bdc8227b817a1e83b68238b614efbedc8fd23dbc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define FAILURE 0
#define SUCCESS !FAILURE
#define USER_NAME "acr18by" //replace with your user name
typedef enum MODE { CPU, OPENMP, CUDA, ALL } MODE;
typedef enum OUTPUT_MODE { PPM_BINARY, PPM_PLAIN_TEXT } OUTPUT_MODE;
void print_help();
int process_command_line(int argc, char *argv[]);
unsigned char ** read_data(const char* fname);
unsigned char * gpu_cal(unsigned char *gpu_data);
unsigned char * gpu_cal_optimised(unsigned char *gpu_data);
void cpu_cal();
void openmp_cal();
int output(char * fname);
// Mosaic cell size (rounded down to a power of two in process_command_line).
int c = 0;
unsigned int width = 0;   // image width in pixels (set by read_header)
unsigned int height = 0;  // image height in pixels (set by read_header)
unsigned char ** data;    // pixel rows: height pointers to width*3 RGB bytes
char *in_file;            // input PPM path (points into argv)
// PPM magic number ("P3" or "P6") plus NUL terminator.
// BUGFIX: was char ftype[2]; read_header does strcpy(ftype, "P3") which
// writes 3 bytes ('P', '3', '\0') and overflowed the 2-byte array.
char ftype[3];
char *out_file;           // output PPM path (points into argv)
int r, g, b;              // average image colour, filled in by *_cal()
MODE execution_mode = CPU;
OUTPUT_MODE output_mode = PPM_BINARY;
// Entry point: parse the command line, load the PPM image, run the mosaic
// filter in the requested mode(s), and report the average image colour and
// timings. Results are communicated through the file-scope globals.
// NOTE(review): only the CUDA and ALL branches call output(); CPU and
// OPENMP modes never write the mosaiced image. Also, free(data) at the end
// releases only the row-pointer array — the per-row buffers allocated in
// read_data leak — and the CUDA events are never destroyed.
int main(int argc, char *argv[])
{
	if (process_command_line(argc, argv) == FAILURE)
		return 1;
	//TODO: read input image file (either binary or plain text PPM)
	printf("Reading data from %s \n", in_file);
	data = read_data(in_file);
	//TODO: execute the mosaic filter based on the mode
	switch (execution_mode) {
	case (CPU): {
		// TODO: starting timing here
		clock_t start = clock(), diff;
		// TODO: calculate the average colour value
		cpu_cal();
		// Output the average colour value for the image
		printf("CPU Average image colour red = %d, green = %d, blue = %d \n", r, g, b);
		// TODO: end timing here
		diff = clock() - start;
		int msec = diff * 1000 / CLOCKS_PER_SEC;
		printf("CPU mode execution time took %d s and %dms\n", msec / 1000, msec % 1000);
		break;
	}
	case (OPENMP): {
		//TODO: starting timing here
		//clock_t start = clock(), diff;
		//TODO: calculate the average colour value
		//double begin, diff;
		//begin = omp_get_wtime();
		openmp_cal();
		// Output the average colour value for the image
		// NOTE(review): the OpenMP timing code is commented out, so this
		// branch reports no execution time.
		printf("OPENMP Average image colour red = %d, green = %d, blue = %d \n", r, g, b);
		////TODO: end timing here
		//diff = omp_get_wtime() - begin;
		//int msec = diff * 1000;
		//printf("OPENMP mode execution time took %d s and %dms\n", msec / 1000, msec % 1000);
		break;
	}
	case (CUDA): {
		hipEvent_t start, stop;
		float milliseconds = 0;
		unsigned char *gpu_data;
		size_t size = height * width * 3 * sizeof(unsigned char);
		gpu_data = (unsigned char *)malloc(size);
		// transfer data from 2d to 1d (flatten rows into one buffer)
		for (int i = 0, i_1d = 0; i < height; i++) {
			for (int j = 0; j < width * 3; j++, i_1d++) {
				*(gpu_data + i_1d) = *(*(data + i) + j);
			}
		}
		hipEventCreate(&start);
		hipEventCreate(&stop);
		hipEventRecord(start);
		// NOTE(review): assumes gpu_cal returns a heap buffer the caller
		// owns (it may or may not be the one passed in) — confirm its
		// ownership contract; if it allocates a new buffer, the original
		// gpu_data allocation leaks here.
		gpu_data = gpu_cal(gpu_data);
		//gpu_data = gpu_cal_optimised(gpu_data);
		hipEventRecord(stop);
		hipEventSynchronize(stop);
		hipEventElapsedTime(&milliseconds, start, stop);
		printf("CUDA Average image colour red = %d, green = %d, blue = %d \n", r, g, b);
		printf("Execution time is %f ms\n", milliseconds);
		//transfer data from 1d to 2d for output
		for (int i = 0, i_1d = 0; i < height; i++) {
			for (int j = 0; j < width * 3; j++, i_1d++) {
				*(*(data + i) + j) = *(gpu_data + i_1d);
			}
		}
		free(gpu_data);
		output(out_file);
		break;
	}
	case (ALL): {
		//TODO
		clock_t start = clock(), diff;
		hipEvent_t c_start, c_stop;
		float milliseconds = 0;
		// CPU MODE
		cpu_cal();
		printf("\nCPU Average image colour red = %d, green = %d, blue = %d \n", r, g, b);
		diff = clock() - start;
		int msec = diff * 1000 / CLOCKS_PER_SEC;
		printf("CPU mode execution time took %d s and %dms\n", msec / 1000, msec % 1000);
		start = clock();
		// OPENMP MODE
		// NOTE(review): clock() measures CPU time summed over threads, so
		// this overstates OpenMP wall-clock time.
		openmp_cal();
		printf("\nOPENMP Average image colour red = %d, green = %d, blue = %d \n", r, g, b);
		diff = clock() - start;
		msec = diff * 1000 / CLOCKS_PER_SEC;
		printf("OPENMP mode execution time took %d s and %dms\n", msec / 1000, msec % 1000);
		// CUDA MODE (operates on `data` already mosaiced by the runs above)
		unsigned char *gpu_data;
		size_t size = height * width * 3 * sizeof(unsigned char);
		gpu_data = (unsigned char *)malloc(size);
		// transfer data from 2d to 1d
		for (int i = 0, i_1d = 0; i < height; i++) {
			for (int j = 0; j < width * 3; j++, i_1d++) {
				*(gpu_data + i_1d) = *(*(data + i) + j);
			}
		}
		// CUDA TIME START HERE
		hipEventCreate(&c_start);
		hipEventCreate(&c_stop);
		hipEventRecord(c_start);
		gpu_data = gpu_cal(gpu_data);
		//gpu_data = gpu_cal_optimised(gpu_data);
		hipEventRecord(c_stop);
		hipEventSynchronize(c_stop);
		hipEventElapsedTime(&milliseconds, c_start, c_stop);
		printf("\nCUDA mode execution time took %d s and %dms\n", (int)milliseconds / 1000, (int)milliseconds % 1000);
		printf("CUDA Average image colour red = %d, green = %d, blue = %d \n", r, g, b);
		//transfer data from 1d to 2d for output
		for (int i = 0, i_1d = 0; i < height; i++) {
			for (int j = 0; j < width * 3; j++, i_1d++) {
				*(*(data + i) + j) = *(gpu_data + i_1d);
			}
		}
		free(gpu_data);
		output(out_file);
		break;
	}
	}
	free(data);
	getchar();
	return 0;
}
// Parse the command line into the file-scope globals:
//   mosaic_<user> C M -i input_file -o output_file [-f ppm_format]
// Returns SUCCESS/FAILURE. c is rounded DOWN to the nearest power of two.
// NOTE(review): argv[3] ("-i"), argv[5] ("-o") and argv[7] ("-f") are never
// checked, so the flags are positional in practice; an unrecognised mode
// string silently leaves execution_mode at its default (CPU).
int process_command_line(int argc, char *argv[]) {
	if (argc < 7) {
		fprintf(stderr, "Error: Missing program arguments. Correct usage is...\n");
		print_help();
		return FAILURE;
	}
	//first argument is always the executable name
	//read in the non optional command line arguments
	c = atoi(argv[1]);
	if (c <= 0) {
		printf("The value of c is invalid.");
		return FAILURE;
	}
	// round c down to a power of two: 2^floor(log2(c))
	c = pow(2.0, (double)(int)log2(c)); // change the value of c to be valid
	if (!strcmp(argv[2], "CPU")) { execution_mode = CPU; };
	if (!strcmp(argv[2], "OPENMP")) { execution_mode = OPENMP; };
	if (!strcmp(argv[2], "CUDA")) { execution_mode = CUDA; };
	if (!strcmp(argv[2], "ALL")) { execution_mode = ALL; };
	//TODO: read in the input image name
	in_file = argv[4];
	//TODO: read in the output image name
	out_file = argv[6];
	//TODO: read in any optional part 3 arguments
	if (argc > 8) {
		if (!strcmp(argv[8], "PPM_BINARY")) { output_mode = PPM_BINARY; };
		if (!strcmp(argv[8], "PPM_PLAIN_TEXT")) { output_mode = PPM_PLAIN_TEXT; };
	}
	return SUCCESS;
}
// Print the command-line usage message to stdout.
void print_help() {
	printf("mosaic_%s C M -i input_file -o output_file [options]\n", USER_NAME);
	printf("where:\n");
	printf("\tC Is the mosaic cell size which should be any positive\n"
		"\t power of 2 number \n");
	printf("\tM Is the mode with a value of either CPU, OPENMP, CUDA or\n"
		"\t ALL. The mode specifies which version of the simulation\n"
		"\t code should execute. ALL should execute each mode in\n"
		"\t turn.\n");
	printf("\t-i input_file Specifies an input image file\n");
	printf("\t-o output_file Specifies an output image file which will be used\n"
		"\t to write the mosaic image\n");
	printf("[options]:\n");
	printf("\t-f ppm_format PPM image output format either PPM_BINARY (default) or \n"
		"\t PPM_PLAIN_TEXT\n ");
}
/* Read header information of the file*/
// Parse the PPM header from fp, filling the globals ftype ("P3"/"P6"),
// width and height, and leave fp positioned at the pixel data (just after
// the "255" max-value line). Returns fp on success.
// NOTE(review): on premature EOF this returns FAILURE (0) cast to FILE*,
// and the caller read_data does not check for it before using the pointer.
// NOTE(review): strcpy below writes 3 bytes ('P','x','\0'), so the global
// ftype buffer must be at least char[3].
// NOTE(review): width/height are parsed from separate lines via atoi; a
// standard header with "width height" on ONE line would leave height unset —
// confirm the expected input format.
FILE *read_header(FILE *fp) {
	char read_line[10];
	while (1) {
		// exit if reading to the end of file
		if (fgets(read_line, sizeof(read_line), fp) == NULL) {
			return FAILURE;
		}
		// exit if reading to the end line of header
		if (strncmp(read_line, "255", 3) == 0) {
			//size = str_cat(size, input);
			break;
		}
		// file format (either P3 or P6)
		if (strncmp(read_line, "P3", 2) == 0) {
			strcpy(ftype, "P3");
		}
		else if (strncmp(read_line, "P6", 2) == 0) {
			strcpy(ftype, "P6");
		}
		// skip if reading to command line
		else if (strncmp(read_line, "#", 1) == 0) {
			continue;
		}
		// first number is file width and sencond one is height
		else {
			//size = str_cat(size, input);
			// width is not assigned
			if (width == 0) {
				width = atoi(read_line);
			}
			else {
				height = atoi(read_line);
			}
		}
	}
	return fp;
}
/** Read data from the file and do pre-processing
Store the pixel data into the array and return the pointer of the array
*/
/** Read data from the file and do pre-processing
	Store the pixel data into the array and return the pointer of the array
	(height row pointers, each to width*3 RGB bytes), or NULL on failure.
	Relies on the globals ftype/width/height set by read_header and the
	mosaic cell size c.
	BUGFIX: the P3 branch used `unsigned char *term = malloc(1)` and then
	fscanf(fp, "%u", &term) — scanning an unsigned int into the ADDRESS OF
	THE POINTER, corrupting it and leaking the allocation; the pixel value
	was then taken from the mangled pointer. It now scans into a plain
	unsigned int. A NULL check after read_header was also added.
*/
unsigned char **read_data(const char *fname) {
	FILE* fp;
	fp = fopen(fname, "rb");
	if (fp == NULL) { perror(fname); return 0; }
	// read header (sets ftype, width, height; returns 0 on a bad header)
	fp = read_header(fp);
	if (fp == NULL) { return 0; }
	if (c > width || c > height) {
		printf("\nThe value of c is invalide");
		exit(0);
	}
	unsigned char **pixel_data = (unsigned char **)malloc(height * sizeof(unsigned char *)); // the memory allocate to store the pixel data
	if (strcmp(ftype, "P3") == 0) {
		// plain-text PPM: one ASCII integer per colour sample
		for (int row = 0; row < height; row++) {
			pixel_data[row] = (unsigned char *)malloc(width * 3 * sizeof(unsigned char));
		}
		unsigned int term;
		int i = 0;
		int row, col;
		while (fscanf(fp, "%u", &term) == 1) {
			row = i / (width * 3);
			col = i % (width * 3);
			(*(pixel_data + row))[col] = (unsigned char)term;
			i++;
		}
		fclose(fp);
	}
	if (strcmp(ftype, "P6") == 0) {
		// binary PPM: raw bytes, read in one go then split into rows
		int column, row, k;
		unsigned char * buf = (unsigned char *)malloc(width*height * 3 * sizeof(unsigned char));
		fread(buf, sizeof(unsigned char), width*height * 3, fp); // read all data from the file
		for (row = 0, k = 0; row < height; row++) {
			pixel_data[row] = (unsigned char *)malloc(width * 3 * sizeof(unsigned char));
			for (column = 0; column < width * 3; column++, k++) {
				*(*(pixel_data + row) + column) = (unsigned int)buf[k];
			}
		}
		free(buf);
		fclose(fp);
	}
	return pixel_data;
}
// Base-2 logarithm helper.
// NOTE(review): this redefines the standard <math.h> log2() — at best a
// shadowing hazard and potentially a compile error in C++ translation
// units; consider renaming (e.g. log2_d) at an interface-safe opportunity.
inline double log2(double n) {
	return log(n) / log(2);
}
// Sequential mosaic filter: for every c x c cell of the global image `data`,
// replace each pixel with the cell's average colour, and store the average
// colour of the whole image in the globals r, g, b.
void cpu_cal() {
	printf("CPU RUNNING\n");
	int i, j, ci, cj; // for index
	int r_ = 0, g_ = 0, b_ = 0; // to calculate the average rgb
	// NOTE(review): r_/g_/b_ accumulate the per-cell averages but are never
	// used for the result — the final averages come from rc/gc/bc.
	int r_acc = 0, g_acc = 0, b_acc = 0; // accumulated rgb for each block
	int rc = 0, gc = 0, bc = 0; // accumulated rgb for whole image
	int i_c = c, j_c = c; // to solve the boundry overflow problem
	int counter;
	for (i = 0; i < height; i += c) { // row in image
		for (j = 0; j < width * 3; j += 3 * c) { // column in image
			// first pass over the cell: sum RGB and count pixels
			// (counter handles partial cells at the right/bottom edges)
			for (ci = i, r_acc = 0, g_acc = 0, b_acc = 0, counter = 0; ci < i + c && ci < height; ci++) { // row in block
				for (cj = j; cj < j + c * 3 && cj < width * 3; cj += 3, counter++) { // column in block
					r_acc += *(*(data + ci) + cj + 0);
					g_acc += *(*(data + ci) + cj + 1);
					b_acc += *(*(data + ci) + cj + 2);
				}
			}
			unsigned int
				r_avg = r_acc / counter,
				g_avg = g_acc / counter,
				b_avg = b_acc / counter;
			rc += r_acc;
			gc += g_acc;
			bc += b_acc;
			// second pass: overwrite every pixel of the cell with the average
			for (ci = i; ci < i + c && ci < height; ci++) { // row in block
				for (cj = j; cj < j + c * 3 && cj < width * 3; cj += 3) { // column in block
					*(*(data + ci) + cj + 0) = r_avg;
					*(*(data + ci) + cj + 1) = g_avg;
					*(*(data + ci) + cj + 2) = b_avg;
				}
			}
			r_ += r_avg;
			g_ += g_avg;
			b_ += b_avg;
		}
	}
	// whole-image average from the exact per-pixel totals
	r = rc / (width * height);
	g = gc / (width * height);
	b = bc / (width * height);
}
// OpenMP variant of cpu_cal(): mosaic the image in c x c cells and store the
// whole-image average colour in the globals r, g, b.
// NOTE(review): correctness concerns to verify —
//  * the inner "#pragma omp parallel for" creates a NESTED parallel region;
//    with nesting disabled (the OpenMP default) it runs serially, and with
//    nesting enabled r_acc/g_acc/b_acc (function scope, shared by the outer
//    team, only privatized by the INNER reduction) are updated from several
//    outer iterations concurrently — a data race;
//  * `counter` is written inside the inner loop without any privatization
//    clause on the inner region.
void openmp_cal() {
	printf("OPENMP RUNNING\n");
	int r_ = 0, g_ = 0, b_ = 0; // to calculate the average rgb
	// NOTE(review): r_/g_/b_ are accumulated but never used for the result;
	// the final averages come from rc/gc/bc.
	int rc = 0, gc = 0, bc = 0; // accumulated rgb for whole image
	int i;
	int r_acc, g_acc, b_acc; // accumulated rgb
#pragma omp parallel for reduction(+: r_ , g_ , b_)
	for (i = 0; i < height; i += c) { // row in image
		int j;
		int ci, cj; // for index
		int counter = 0;
		int r_avg = 0, g_avg = 0, b_avg = 0;
#pragma omp parallel for reduction(+: rc , gc , bc,r_acc, g_acc, b_acc)
		for (j = 0; j < width * 3; j += 3 * c) { // column in image
			// first pass over the cell: sum RGB and count pixels
			for (ci = i, r_acc = 0, g_acc = 0, b_acc = 0, counter = 0; ci < i + c && ci < height; ci++) { // row in block
				for (cj = j; cj < j + c * 3 && cj < width * 3; cj += 3, counter++) { // column in block
					r_acc += *(*(data + ci) + cj + 0);
					g_acc += *(*(data + ci) + cj + 1);
					b_acc += *(*(data + ci) + cj + 2);
				}
			}
			r_avg = r_acc / counter;
			g_avg = g_acc / counter;
			b_avg = b_acc / counter;
			rc += r_acc;
			gc += g_acc;
			bc += b_acc;
			// second pass: overwrite every pixel of the cell with the average
			for (ci = i; ci < i + c && ci < height; ci++) { // row in block
				for (cj = j; cj < j + c * 3 && cj < width * 3; cj += 3) { // column in block
					*(*(data + ci) + cj + 0) = r_avg;
					*(*(data + ci) + cj + 1) = g_avg;
					*(*(data + ci) + cj + 2) = b_avg;
				}
			}
			r_ += r_avg;
			g_ += g_avg;
			b_ += b_avg;
		}
	}
	// whole-image average from the exact per-pixel totals
	r = rc / (width * height);
	g = gc / (width * height);
	b = bc / (width * height);
}
/*Pixcel based add up value, per pixcel per thread*/
// One thread per pixel: atomically accumulates each pixel's RGB into the
// mosaic cell it belongs to (add_up_data: new_width x new_height cells, 3
// counters each), counts the pixels per cell in c_array (needed for partial
// edge cells), and accumulates whole-image RGB totals into rgb_all[0..2].
// data is the flattened height x width x 3 (RGB) image; c is the cell size.
__global__
void add_up(unsigned char *data, int width, int height, int c, int new_width, int new_height, unsigned int * add_up_data, unsigned int * c_array, unsigned long long int * rgb_all) {
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	if (i < width*height) {
		// pixel coordinates, then the coordinates of its c x c mosaic cell
		int loc_row = i / width;
		int loc_col = i % width;
		int loc_row_new = loc_row / c;
		int loc_col_new = loc_col / c;
		atomicAdd((add_up_data + (loc_row_new * new_width + loc_col_new) * 3 + 0), *(data + i * 3 + 0));
		atomicAdd((add_up_data + (loc_row_new * new_width + loc_col_new) * 3 + 1), *(data + i * 3 + 1));
		atomicAdd((add_up_data + (loc_row_new * new_width + loc_col_new) * 3 + 2), *(data + i * 3 + 2));
		atomicAdd((c_array + (loc_row_new * new_width + loc_col_new)), 1); // to count how many pixcel in a mosic block
		atomicAdd((rgb_all + 0), *(data + i * 3 + 0)); // to addup all rgb value
		atomicAdd((rgb_all + 1), *(data + i * 3 + 1)); // to addup all rgb value
		atomicAdd((rgb_all + 2), *(data + i * 3 + 2)); // to addup all rgb value
	}
}
/* Mosaic-based accumulation: one thread block (or several, when c > 32) per
 * mosaic cell. Partial RGB sums are kept in shared memory and flushed to
 * global memory once per block by thread 0, reducing global atomic traffic.
 *
 * Fixes vs. original:
 *  - the shared accumulators were never zeroed (shared memory is
 *    uninitialized), so sums started from garbage;
 *  - the contribution guard was `threadIdx.x < capacity - 1`, dropping the
 *    last participating thread of every block;
 *  - the corner-block capacity was computed and then unconditionally
 *    overwritten (missing else);
 *  - interior blocks were credited c*c pixels even though a block holds at
 *    most 32*32;
 *  - threads mapped outside the image no longer read out of bounds;
 *  - debug printfs and unused locals removed.
 *
 * NOTE(review): the thread->pixel mapping divides threadIdx.x by `c` rather
 * than the 32-wide tile, which looks wrong when per_mosaic_block_num > 1 --
 * confirm against the launch configuration. c_array is still credited with
 * the full block capacity for cells clipped by the image border, as in the
 * original. */
__global__
void add_up_optimised(unsigned char *data, int width, int height, int c, int new_width, int new_height, unsigned int * add_up_data, unsigned int * c_array, unsigned long long int * rgb_all, int per_mosaic_block_num) {
    __shared__ unsigned int r;
    __shared__ unsigned int g;
    __shared__ unsigned int b;
    if (threadIdx.x == 0) {
        r = 0; g = 0; b = 0;  // shared memory starts uninitialized
    }
    __syncthreads();
    // Pixel handled by this thread (same mapping as the original index math).
    int row = blockIdx.y * c + threadIdx.x / c;
    int col = blockIdx.x * c + threadIdx.x % c;
    int i = row * width + col;
    // Mosaic cell this block contributes to.
    int cellid = blockIdx.x / per_mosaic_block_num + (blockIdx.y / per_mosaic_block_num)*(gridDim.x / per_mosaic_block_num);
    // Number of pixels this block is responsible for: a full tile, smaller on
    // the trailing-edge blocks of a cell split across several blocks.
    int tile = c > 32 ? 32 : c;
    int capacity = tile * tile;
    if (per_mosaic_block_num > 1) {
        int edge = c - (per_mosaic_block_num - 1) * 32;  // remainder of the last tile
        int last_x = blockIdx.x % per_mosaic_block_num == per_mosaic_block_num - 1;
        int last_y = blockIdx.y % per_mosaic_block_num == per_mosaic_block_num - 1;
        if (last_x && last_y) {
            capacity = edge * edge;  // corner block
        } else if (last_x || last_y) {
            capacity = edge * 32;    // edge block
        }
    }
    if (threadIdx.x < capacity && row < height && col < width) {
        atomicAdd(&r, *(data + i * 3 + 0));
        atomicAdd(&g, *(data + i * 3 + 1));
        atomicAdd(&b, *(data + i * 3 + 2));
    }
    __syncthreads();  // all partial sums in shared memory before the flush
    if (threadIdx.x == 0) {
        atomicAdd((add_up_data + cellid * 3 + 0), r);
        atomicAdd((add_up_data + cellid * 3 + 1), g);
        atomicAdd((add_up_data + cellid * 3 + 2), b);
        atomicAdd((c_array + cellid), capacity); // pixel count used by avg()
        atomicAdd((rgb_all + 0), r);
        atomicAdd((rgb_all + 1), g);
        atomicAdd((rgb_all + 2), b);
    }
}
/* One thread per pixel: replace the pixel's RGB with the average colour of
 * its mosaic cell (cell sum in add_up_data divided by the cell's pixel count
 * in c_array). Must run after the add-up kernel has filled both arrays. */
__global__
void avg(unsigned char * data, int width, int height, int c, int new_width, int new_height, unsigned int * add_up_data, unsigned int * c_array) {
    const int pixel = blockIdx.x * blockDim.x + threadIdx.x;
    if (pixel >= width * height)
        return;  // padded tail of the grid
    const int cell = ((pixel / width) / c) * new_width + ((pixel % width) / c);
    const unsigned int count = c_array[cell];
    unsigned char *px = data + pixel * 3;
    px[0] = (unsigned char)(add_up_data[cell * 3 + 0] / count);
    px[1] = (unsigned char)(add_up_data[cell * 3 + 1] / count);
    px[2] = (unsigned char)(add_up_data[cell * 3 + 2] / count);
}
unsigned char * gpu_cal(unsigned char *gpu_data) {
    // Mosaic filter on the GPU. `gpu_data` is the image as a flat
    // height*width*3 RGB byte array (host side); it is returned with every
    // mosaic cell replaced by its average colour. The whole-image average
    // colour is stored in the globals r, g, b.
    //
    // Fixes vs. original:
    //  - the device accumulators (add_up_data, c_array, rgb_all) were never
    //    zeroed before the atomicAdd kernels, so results started from garbage;
    //  - the grid size used floor division, leaving up to BLOCK_SIZE-1 pixels
    //    unprocessed when width*height is not a multiple of BLOCK_SIZE;
    //  - removed host scratch buffers that were allocated and freed unused.
    size_t size = height * width * 3 * sizeof(unsigned char);
    const int BLOCK_SIZE = 512;
    int num_pixels = width * height;
    int num_blocks = (num_pixels + BLOCK_SIZE - 1) / BLOCK_SIZE;  // ceil-div: cover every pixel
    int add_up_data_width = width % c == 0 ? width / c : (width / c + 1);
    int add_up_data_height = height % c == 0 ? height / c : (height / c + 1);
    unsigned int *add_up_data_dev;        // per-cell RGB sums
    unsigned int *c_array_dev;            // per-cell pixel counts
    unsigned char *data_1d_dev;           // device copy of the image
    unsigned long long int *rgb_all_dev;  // whole-image RGB sums
    unsigned long long int *rgb_all_host;
    rgb_all_host = (unsigned long long int *)malloc(3 * sizeof(unsigned long long int));
    hipMalloc(&data_1d_dev, size);
    hipMalloc(&add_up_data_dev, add_up_data_width * add_up_data_height * 3 * sizeof(unsigned int));
    hipMalloc(&c_array_dev, add_up_data_width * add_up_data_height * sizeof(unsigned int));
    hipMalloc(&rgb_all_dev, 3 * sizeof(unsigned long long int));
    // Zero the accumulators the kernels atomicAdd into.
    hipMemset(add_up_data_dev, 0, add_up_data_width * add_up_data_height * 3 * sizeof(unsigned int));
    hipMemset(c_array_dev, 0, add_up_data_width * add_up_data_height * sizeof(unsigned int));
    hipMemset(rgb_all_dev, 0, 3 * sizeof(unsigned long long int));
    hipMemcpy(data_1d_dev, gpu_data, size, hipMemcpyHostToDevice);
    // Accumulate per-cell and whole-image sums.
    add_up << < num_blocks, BLOCK_SIZE >> > (data_1d_dev, width, height, c, add_up_data_width, add_up_data_height, add_up_data_dev, c_array_dev, rgb_all_dev);
    // Replace every pixel with its cell average.
    avg << < num_blocks, BLOCK_SIZE >> > (data_1d_dev, width, height, c, add_up_data_width, add_up_data_height, add_up_data_dev, c_array_dev);
    hipMemcpy(rgb_all_host, rgb_all_dev, 3 * sizeof(unsigned long long int), hipMemcpyDeviceToHost);
    hipMemcpy(gpu_data, data_1d_dev, size, hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    r = *(rgb_all_host + 0) / width / height;
    g = *(rgb_all_host + 1) / width / height;
    b = *(rgb_all_host + 2) / width / height;
    hipFree(add_up_data_dev);
    hipFree(c_array_dev);
    hipFree(data_1d_dev);
    hipFree(rgb_all_dev);
    free(rgb_all_host);
    return gpu_data;
}
unsigned char * gpu_cal_optimised(unsigned char *gpu_data) {
    // Shared-memory variant of gpu_cal: add_up_optimised accumulates per
    // mosaic cell with one (or several) thread blocks per cell, then avg()
    // writes the averages back per pixel. Same interface and result as
    // gpu_cal.
    //
    // Fixes vs. original:
    //  - device accumulators are now zeroed before the atomicAdd kernels;
    //  - BLOCK_PER_MOSAIC used `c / (dim + 1) + 1`, which under-counts for
    //    large c (e.g. c = 2048 -> 63 instead of 64); replaced with ceil-div;
    //  - the avg() grid used floor division, leaving trailing pixels
    //    unprocessed; replaced with ceil-div;
    //  - removed host scratch buffers that were written and never read.
    const int BLOCK_SIZE = 512;
    size_t size = height * width * 3 * sizeof(unsigned char);
    int add_up_data_width = width % c == 0 ? width / c : (width / c + 1);
    int add_up_data_height = height % c == 0 ? height / c : (height / c + 1);
    int block_dim = c > 32 ? 32 : c;                         // tile edge, capped at 32
    int BLOCK_PER_MOSAIC = (c + block_dim - 1) / block_dim;  // blocks per cell edge
    dim3 block(block_dim * block_dim, 1, 1);
    dim3 grid(BLOCK_PER_MOSAIC * add_up_data_width, BLOCK_PER_MOSAIC * add_up_data_height, 1);
    int num_pixels = width * height;
    int num_blocks = (num_pixels + BLOCK_SIZE - 1) / BLOCK_SIZE;  // ceil-div for avg()
    unsigned int *add_up_data_dev;        // per-cell RGB sums
    unsigned int *c_array_dev;            // per-cell pixel counts
    unsigned char *data_1d_dev;           // device copy of the image
    unsigned long long int *rgb_all_dev;  // whole-image RGB sums
    unsigned long long int *rgb_all_host = (unsigned long long int *)malloc(3 * sizeof(unsigned long long int));
    hipMalloc(&data_1d_dev, size);
    hipMalloc(&add_up_data_dev, add_up_data_width * add_up_data_height * 3 * sizeof(unsigned int));
    hipMalloc(&c_array_dev, add_up_data_width * add_up_data_height * sizeof(unsigned int));
    hipMalloc(&rgb_all_dev, 3 * sizeof(unsigned long long int));
    hipMemset(add_up_data_dev, 0, add_up_data_width * add_up_data_height * 3 * sizeof(unsigned int));
    hipMemset(c_array_dev, 0, add_up_data_width * add_up_data_height * sizeof(unsigned int));
    hipMemset(rgb_all_dev, 0, 3 * sizeof(unsigned long long int));
    hipMemcpy(data_1d_dev, gpu_data, size, hipMemcpyHostToDevice);
    add_up_optimised << < grid, block >> > (data_1d_dev, width, height, c, add_up_data_width, add_up_data_height, add_up_data_dev, c_array_dev, rgb_all_dev, BLOCK_PER_MOSAIC);
    avg << < num_blocks, BLOCK_SIZE >> > (data_1d_dev, width, height, c, add_up_data_width, add_up_data_height, add_up_data_dev, c_array_dev);
    hipDeviceSynchronize();
    hipMemcpy(rgb_all_host, rgb_all_dev, 3 * sizeof(unsigned long long int), hipMemcpyDeviceToHost);
    hipMemcpy(gpu_data, data_1d_dev, size, hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    r = *(rgb_all_host + 0) / width / height;
    g = *(rgb_all_host + 1) / width / height;
    b = *(rgb_all_host + 2) / width / height;
    hipFree(add_up_data_dev);
    hipFree(c_array_dev);
    hipFree(data_1d_dev);
    hipFree(rgb_all_dev);
    free(rgb_all_host);
    return gpu_data;
}
int output(char * fname) {
    // Write the global image `data` (height rows of width*3 bytes) to `fname`
    // as a PPM file: plain text (P3) or binary (P6), per the global
    // output_mode. Returns SUCCESS on completion, FAILURE if the file cannot
    // be opened.
    //
    // Fixes vs. original: the char_num scratch buffer was leaked, str_buf was
    // only 10 bytes (tight for large dimensions), fopen was unchecked, the
    // unused local `s` is gone, and the final message now prints the fname
    // argument instead of the global out_file (identical in current usage).
    FILE* fp;
    int row, column, p_i, index, i;
    char* all_data;
    unsigned char* bin_data;
    char str_buf[16];                   // holds one header number plus "\n"
    char* char_num = (char*)malloc(4);  // one 0..255 value as text
    printf("\nStart writing---------------\n");
    switch (output_mode) {
    case(PPM_PLAIN_TEXT):
        fp = fopen(fname, "w");
        if (fp == NULL) { perror(fname); free(char_num); return FAILURE; }
        fputs("P3\n", fp);
        fputs("# COM6521 Assignment test output\n", fp);
        sprintf(str_buf, "%d\n", width);
        fputs(str_buf, fp);
        sprintf(str_buf, "%d\n", height);
        fputs(str_buf, fp);
        sprintf(str_buf, "%d\n", 255);
        fputs(str_buf, fp);
        // Format the whole image into one string, then write it in one call.
        all_data = (char *)malloc(width*height * 13 * sizeof(char));
        memset(all_data, '\0', width*height * 13 * sizeof(char));
        for (row = 0, p_i = 0, index = 0; row < height; row++, p_i++, index++) {
            for (column = 0; column < width * 3; column++, i = 0, index++) { // one colour value at a time
                sprintf(char_num, "%d", *(*(data + row) + column));
                for (i = 0; *(char_num + i) != '\0' && i < 3; i++, index++) {
                    *(all_data + index) = *(char_num + i);
                }
                // NOTE(review): p_i only advances once per row, so the '\t'
                // separator fires at most every 3 rows -- confirm intent.
                if (p_i == 3) {
                    *(all_data + index) = '\t';
                    p_i = 0;
                }
                else {
                    *(all_data + index) = ' ';
                }
            }
            *(all_data + index) = '\n';
        }
        fputs(all_data, fp);
        free(all_data);
        fclose(fp);
        break;
    case(PPM_BINARY):
        fp = fopen(fname, "wb");
        if (fp == NULL) { perror(fname); free(char_num); return FAILURE; }
        fputs("P6\n", fp);
        fputs("# COM6521 Assignment test output\n", fp);
        sprintf(str_buf, "%d\n", width);
        fputs(str_buf, fp);
        sprintf(str_buf, "%d\n", height);
        fputs(str_buf, fp);
        sprintf(str_buf, "%d\n", 255);
        fputs(str_buf, fp);
        // Flatten the row-pointer image into one buffer and write it raw.
        bin_data = (unsigned char*)malloc(width*height * 3 * sizeof(unsigned char));
        for (row = 0, index = 0; row < height; row++) {
            for (column = 0; column < width * 3; column++, index++) {
                *(bin_data + index) = (unsigned char)*(*(data + row) + column);
            }
        }
        fwrite(bin_data, sizeof(unsigned char), width*height * 3, fp);
        fclose(fp);
        free(bin_data);
        break;
    }
    free(char_num);  // was leaked in the original
    printf("The file has been saved as %s", fname);
    return SUCCESS;
}
| bdc8227b817a1e83b68238b614efbedc8fd23dbc.cu |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define FAILURE 0
#define SUCCESS !FAILURE
#define USER_NAME "acr18by" //replace with your user name
// Execution back-end selected on the command line.
typedef enum MODE { CPU, OPENMP, CUDA, ALL } MODE;
// PPM flavour written by output(): binary (P6) or plain text (P3).
typedef enum OUTPUT_MODE { PPM_BINARY, PPM_PLAIN_TEXT } OUTPUT_MODE;
void print_help();
int process_command_line(int argc, char *argv[]);
unsigned char ** read_data(const char* fname);
unsigned char * gpu_cal(unsigned char *gpu_data);
unsigned char * gpu_cal_optimised(unsigned char *gpu_data);
void cpu_cal();
void openmp_cal();
int output(char * fname);
int c = 0;               // mosaic cell size (forced to a power of 2 by process_command_line)
unsigned int width = 0;  // image width in pixels (set by read_header)
unsigned int height = 0; // image height in pixels (set by read_header)
unsigned char ** data;   // image pixels: height rows of width*3 RGB bytes
char *in_file;           // input file path (points into argv)
char ftype[2];           // PPM magic ("P3"/"P6"); NOTE(review): too small for the '\0' strcpy writes -- confirm
char *out_file;          // output file path (points into argv)
int r, g, b;             // whole-image average colour, set by the *_cal functions
MODE execution_mode = CPU;
OUTPUT_MODE output_mode = PPM_BINARY;
int main(int argc, char *argv[])
{
    if (process_command_line(argc, argv) == FAILURE)
        return 1;
    // Read the input image (binary or plain-text PPM) into the global `data`,
    // setting the global width/height along the way.
    printf("Reading data from %s \n", in_file);
    data = read_data(in_file);
    // Execute the mosaic filter with the selected back-end.
    switch (execution_mode) {
    case (CPU): {
        // Time the serial implementation with the C clock.
        clock_t start = clock(), diff;
        cpu_cal();
        // Output the average colour value for the image.
        printf("CPU Average image colour red = %d, green = %d, blue = %d \n", r, g, b);
        diff = clock() - start;
        int msec = diff * 1000 / CLOCKS_PER_SEC;
        printf("CPU mode execution time took %d s and %dms\n", msec / 1000, msec % 1000);
        break;
    }
    case (OPENMP): {
        // Timing disabled: omp_get_wtime() calls are commented out below.
        //clock_t start = clock(), diff;
        //double begin, diff;
        //begin = omp_get_wtime();
        openmp_cal();
        // Output the average colour value for the image.
        printf("OPENMP Average image colour red = %d, green = %d, blue = %d \n", r, g, b);
        //diff = omp_get_wtime() - begin;
        //int msec = diff * 1000;
        //printf("OPENMP mode execution time took %d s and %dms\n", msec / 1000, msec % 1000);
        break;
    }
    case (CUDA): {
        cudaEvent_t start, stop;
        float milliseconds = 0;
        unsigned char *gpu_data;
        size_t size = height * width * 3 * sizeof(unsigned char);
        gpu_data = (unsigned char *)malloc(size);
        // Flatten the row-pointer image into one contiguous buffer for the GPU.
        for (int i = 0, i_1d = 0; i < height; i++) {
            for (int j = 0; j < width * 3; j++, i_1d++) {
                *(gpu_data + i_1d) = *(*(data + i) + j);
            }
        }
        // Time the GPU path with CUDA events (includes transfers inside gpu_cal).
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start);
        gpu_data = gpu_cal(gpu_data);
        //gpu_data = gpu_cal_optimised(gpu_data);  // shared-memory variant (disabled)
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&milliseconds, start, stop);
        printf("CUDA Average image colour red = %d, green = %d, blue = %d \n", r, g, b);
        printf("Execution time is %f ms\n", milliseconds);
        // Copy the flat result back into the 2D row-pointer layout for output().
        for (int i = 0, i_1d = 0; i < height; i++) {
            for (int j = 0; j < width * 3; j++, i_1d++) {
                *(*(data + i) + j) = *(gpu_data + i_1d);
            }
        }
        free(gpu_data);
        output(out_file);
        break;
    }
    case (ALL): {
        // Run CPU, OPENMP and CUDA in sequence on the same image.
        // NOTE(review): each stage operates on `data` already mosaicked by the
        // previous stage -- confirm whether that is intended.
        clock_t start = clock(), diff;
        cudaEvent_t c_start, c_stop;
        float milliseconds = 0;
        // CPU MODE
        cpu_cal();
        printf("\nCPU Average image colour red = %d, green = %d, blue = %d \n", r, g, b);
        diff = clock() - start;
        int msec = diff * 1000 / CLOCKS_PER_SEC;
        printf("CPU mode execution time took %d s and %dms\n", msec / 1000, msec % 1000);
        start = clock();
        // OPENMP MODE
        openmp_cal();
        printf("\nOPENMP Average image colour red = %d, green = %d, blue = %d \n", r, g, b);
        diff = clock() - start;
        msec = diff * 1000 / CLOCKS_PER_SEC;
        printf("OPENMP mode execution time took %d s and %dms\n", msec / 1000, msec % 1000);
        // CUDA MODE
        unsigned char *gpu_data;
        size_t size = height * width * 3 * sizeof(unsigned char);
        gpu_data = (unsigned char *)malloc(size);
        // Flatten the row-pointer image into one contiguous buffer for the GPU.
        for (int i = 0, i_1d = 0; i < height; i++) {
            for (int j = 0; j < width * 3; j++, i_1d++) {
                *(gpu_data + i_1d) = *(*(data + i) + j);
            }
        }
        // CUDA timing via events.
        cudaEventCreate(&c_start);
        cudaEventCreate(&c_stop);
        cudaEventRecord(c_start);
        gpu_data = gpu_cal(gpu_data);
        //gpu_data = gpu_cal_optimised(gpu_data);  // shared-memory variant (disabled)
        cudaEventRecord(c_stop);
        cudaEventSynchronize(c_stop);
        cudaEventElapsedTime(&milliseconds, c_start, c_stop);
        printf("\nCUDA mode execution time took %d s and %dms\n", (int)milliseconds / 1000, (int)milliseconds % 1000);
        printf("CUDA Average image colour red = %d, green = %d, blue = %d \n", r, g, b);
        // Copy the flat result back into the 2D row-pointer layout for output().
        for (int i = 0, i_1d = 0; i < height; i++) {
            for (int j = 0; j < width * 3; j++, i_1d++) {
                *(*(data + i) + j) = *(gpu_data + i_1d);
            }
        }
        free(gpu_data);
        output(out_file);
        break;
    }
    }
    free(data);  // NOTE(review): the per-row buffers are not freed -- confirm if it matters before exit
    getchar();
    return 0;
}
int process_command_line(int argc, char *argv[]) {
    // Parse: mosaic_<user> C M -i input_file -o output_file [-f ppm_format].
    // Fills the globals c, execution_mode, in_file, out_file and output_mode.
    // Returns SUCCESS, or FAILURE (after printing usage) on bad arguments.
    if (argc < 7) {
        fprintf(stderr, "Error: Missing program arguments. Correct usage is...\n");
        print_help();
        return FAILURE;
    }
    // Cell size: must be positive; rounded down to the nearest power of two.
    c = atoi(argv[1]);
    if (c <= 0) {
        printf("The value of c is invalid.");
        return FAILURE;
    }
    c = pow(2.0, (double)(int)log2(c));
    // Execution mode (left at the CPU default when nothing matches).
    const char *mode = argv[2];
    if (strcmp(mode, "CPU") == 0) execution_mode = CPU;
    else if (strcmp(mode, "OPENMP") == 0) execution_mode = OPENMP;
    else if (strcmp(mode, "CUDA") == 0) execution_mode = CUDA;
    else if (strcmp(mode, "ALL") == 0) execution_mode = ALL;
    // Positional file arguments: argv[4] follows -i, argv[6] follows -o.
    in_file = argv[4];
    out_file = argv[6];
    // Optional output format flag following -f.
    if (argc > 8) {
        if (strcmp(argv[8], "PPM_BINARY") == 0) output_mode = PPM_BINARY;
        else if (strcmp(argv[8], "PPM_PLAIN_TEXT") == 0) output_mode = PPM_PLAIN_TEXT;
    }
    return SUCCESS;
}
/* Print the command-line usage banner to stdout. */
void print_help() {
    printf("mosaic_%s C M -i input_file -o output_file [options]\n", USER_NAME);
    printf("where:\n");
    printf("\tC Is the mosaic cell size which should be any positive\n"
        "\t power of 2 number \n");
    printf("\tM Is the mode with a value of either CPU, OPENMP, CUDA or\n"
        "\t ALL. The mode specifies which version of the simulation\n"
        "\t code should execute. ALL should execute each mode in\n"
        "\t turn.\n");
    printf("\t-i input_file Specifies an input image file\n");
    printf("\t-o output_file Specifies an output image file which will be used\n"
        "\t to write the mosaic image\n");
    printf("[options]:\n");
    printf("\t-f ppm_format PPM image output format either PPM_BINARY (default) or \n"
        "\t PPM_PLAIN_TEXT\n ");
}
/* Parse the PPM header from fp into the globals ftype, width and height.
 * Stops after consuming the "255" max-value line; returns fp positioned at
 * the start of the pixel data, or NULL if EOF is hit before the header ends.
 *
 * Fix vs. original: PPM allows "width height" on a single line; the original
 * atoi() parse only ever read the first number, leaving height = 0 for such
 * files. Both one- and two-number dimension lines are handled now, and the
 * line buffer is large enough to hold a whole header line (the old 10-byte
 * buffer chopped long comment lines into fragments). */
FILE *read_header(FILE *fp) {
    char read_line[64];
    while (1) {
        // EOF before the end of the header: malformed file.
        if (fgets(read_line, sizeof(read_line), fp) == NULL) {
            return NULL;
        }
        // The "255" max-value line terminates the header.
        if (strncmp(read_line, "255", 3) == 0) {
            break;
        }
        // Magic number: P3 = plain text, P6 = binary.
        // NOTE(review): ftype is declared char[2]; strcpy writes 3 bytes
        // including the '\0' -- the global should be at least char[3].
        if (strncmp(read_line, "P3", 2) == 0) {
            strcpy(ftype, "P3");
        }
        else if (strncmp(read_line, "P6", 2) == 0) {
            strcpy(ftype, "P6");
        }
        // Comment lines are skipped.
        else if (read_line[0] == '#') {
            continue;
        }
        // Dimension line(s): "width height" together, or on two lines.
        else {
            unsigned int first, second;
            int matched = sscanf(read_line, "%u %u", &first, &second);
            if (matched == 2) {
                width = first;
                height = second;
            }
            else if (matched == 1) {
                if (width == 0) width = first;  // width arrives first
                else height = first;
            }
        }
    }
    return fp;
}
/** Read a PPM image (P3 plain text or P6 binary) from fname. Fills the
 * globals width/height/ftype via read_header() and returns a freshly
 * allocated array of `height` row pointers, each row holding width*3 RGB
 * bytes. Exits if the mosaic size c exceeds the image dimensions.
 *
 * Fixes vs. original: the P3 branch scanned "%u" into the *address of a
 * pointer variable* (clobbering the pointer) and then cast the pointer to a
 * byte -- it now scans into a plain unsigned int; the pointless malloc for
 * that value (which also leaked) is gone; a failed header parse is reported
 * instead of dereferencing a NULL FILE*. */
unsigned char **read_data(const char *fname) {
    FILE* fp;
    fp = fopen(fname, "rb");
    if (fp == NULL) { perror(fname); return 0; }
    // Parse the header; NULL means EOF before the header completed.
    fp = read_header(fp);
    if (fp == NULL) {
        printf("\nInvalid PPM header in %s", fname);
        exit(0);
    }
    if (c > width || c > height) {
        printf("\nThe value of c is invalide");
        exit(0);
    }
    unsigned char **pixel_data = (unsigned char **)malloc(height * sizeof(unsigned char *)); // row pointers
    if (strcmp(ftype, "P3") == 0) {
        int row, col;
        for (row = 0; row < height; row++) {
            pixel_data[row] = (unsigned char *)malloc(width * 3 * sizeof(unsigned char));
        }
        unsigned int term;  // one plain-text colour value (0..255)
        int i = 0;
        while (fscanf(fp, "%u", &term) == 1) {
            row = i / (width * 3);
            col = i % (width * 3);
            (*(pixel_data + row))[col] = (unsigned char)term;
            i++;
        }
        fclose(fp);
    }
    if (strcmp(ftype, "P6") == 0) {
        int column, row, k;
        unsigned char * buf = (unsigned char *)malloc(width*height * 3 * sizeof(unsigned char));
        fread(buf, sizeof(unsigned char), width*height * 3, fp); // raw pixel payload
        for (row = 0, k = 0; row < height; row++) {
            pixel_data[row] = (unsigned char *)malloc(width * 3 * sizeof(unsigned char));
            for (column = 0; column < width * 3; column++, k++) {
                *(*(pixel_data + row) + column) = buf[k];
            }
        }
        free(buf);
        fclose(fp);
    }
    return pixel_data;
}
/* Base-2 logarithm via the change-of-base identity: log2(n) = ln(n) / ln(2). */
inline double log2(double n) {
    const double ln_two = log(2.0);
    return log(n) / ln_two;
}
void cpu_cal() {
    // Serial mosaic filter over the global image `data` (height rows of
    // width*3 RGB bytes) with cell size `c`: every cell is replaced by its
    // average colour, and the whole-image average colour is stored in the
    // globals r, g, b. The boundary checks (`ci < height`, `cj < width * 3`)
    // clip cells that overhang the image edge; `counter` holds the true pixel
    // count of the clipped cell, so edge averages stay correct.
    //
    // Change vs. original: removed the unused locals i_c and j_c.
    printf("CPU RUNNING\n");
    int i, j, ci, cj;                    // cell origin / in-cell indices
    int r_ = 0, g_ = 0, b_ = 0;          // sums of per-cell averages (not used for the final result)
    int r_acc = 0, g_acc = 0, b_acc = 0; // accumulated rgb for one cell
    int rc = 0, gc = 0, bc = 0;          // accumulated rgb for the whole image
    int counter;                         // pixels inside the current cell
    for (i = 0; i < height; i += c) {            // row of mosaic cells
        for (j = 0; j < width * 3; j += 3 * c) { // column of mosaic cells
            for (ci = i, r_acc = 0, g_acc = 0, b_acc = 0, counter = 0; ci < i + c && ci < height; ci++) {
                for (cj = j; cj < j + c * 3 && cj < width * 3; cj += 3, counter++) {
                    r_acc += *(*(data + ci) + cj + 0);
                    g_acc += *(*(data + ci) + cj + 1);
                    b_acc += *(*(data + ci) + cj + 2);
                }
            }
            unsigned int
                r_avg = r_acc / counter,
                g_avg = g_acc / counter,
                b_avg = b_acc / counter;
            rc += r_acc;
            gc += g_acc;
            bc += b_acc;
            // Overwrite every pixel of the cell with the cell average.
            for (ci = i; ci < i + c && ci < height; ci++) {
                for (cj = j; cj < j + c * 3 && cj < width * 3; cj += 3) {
                    *(*(data + ci) + cj + 0) = r_avg;
                    *(*(data + ci) + cj + 1) = g_avg;
                    *(*(data + ci) + cj + 2) = b_avg;
                }
            }
            r_ += r_avg;
            g_ += g_avg;
            b_ += b_avg;
        }
    }
    r = rc / (width * height);
    g = gc / (width * height);
    b = bc / (width * height);
}
void openmp_cal() {
    // Mosaic filter over the global image `data` (height rows of width*3 RGB
    // bytes) with cell size `c`, parallelised with OpenMP. Also stores the
    // whole-image average colour in the globals r, g, b.
    //
    // Fix vs. original: r_acc/g_acc/b_acc were declared outside the outer
    // `parallel for` and only reduced in a *nested* parallel region, so the
    // outer threads raced on them (and on rc/gc/bc, whose reduction lived on
    // the inner loop only). All per-cell scratch is now private to each outer
    // iteration and rc/gc/bc are reduced on the single outer loop; the nested
    // region (usually inert anyway) is removed.
    printf("OPENMP RUNNING\n");
    int r_ = 0, g_ = 0, b_ = 0;  // sums of per-cell averages (kept for parity with cpu_cal)
    int rc = 0, gc = 0, bc = 0;  // accumulated rgb for the whole image
    int i;
#pragma omp parallel for reduction(+: r_, g_, b_, rc, gc, bc)
    for (i = 0; i < height; i += c) {            // row of mosaic cells
        int j, ci, cj;
        for (j = 0; j < width * 3; j += 3 * c) { // column of mosaic cells
            int r_acc = 0, g_acc = 0, b_acc = 0; // per-cell sums (private)
            int counter = 0;                     // pixels actually inside this cell
            for (ci = i; ci < i + c && ci < height; ci++) {                       // row in block
                for (cj = j; cj < j + c * 3 && cj < width * 3; cj += 3, counter++) { // column in block
                    r_acc += *(*(data + ci) + cj + 0);
                    g_acc += *(*(data + ci) + cj + 1);
                    b_acc += *(*(data + ci) + cj + 2);
                }
            }
            int r_avg = r_acc / counter;
            int g_avg = g_acc / counter;
            int b_avg = b_acc / counter;
            rc += r_acc;
            gc += g_acc;
            bc += b_acc;
            // Overwrite every pixel of the cell with the cell average.
            for (ci = i; ci < i + c && ci < height; ci++) {
                for (cj = j; cj < j + c * 3 && cj < width * 3; cj += 3) {
                    *(*(data + ci) + cj + 0) = r_avg;
                    *(*(data + ci) + cj + 1) = g_avg;
                    *(*(data + ci) + cj + 2) = b_avg;
                }
            }
            r_ += r_avg;
            g_ += g_avg;
            b_ += b_avg;
        }
    }
    r = rc / (width * height);
    g = gc / (width * height);
    b = bc / (width * height);
}
/* Pixel-based accumulation: one thread per pixel. Each thread folds its
 * pixel's RGB values into the sums for the mosaic cell it belongs to
 * (add_up_data / c_array, laid out new_width x new_height) and into the
 * whole-image totals (rgb_all). Accumulators must be zeroed before launch. */
__global__
void add_up(unsigned char *data, int width, int height, int c, int new_width, int new_height, unsigned int * add_up_data, unsigned int * c_array, unsigned long long int * rgb_all) {
    const int pixel = blockIdx.x * blockDim.x + threadIdx.x;
    if (pixel >= width * height)
        return;  // grid may be padded past the last pixel
    const int row = pixel / width;
    const int col = pixel % width;
    const int cell = (row / c) * new_width + (col / c);  // mosaic cell index
    const unsigned char *px = data + pixel * 3;
    for (int ch = 0; ch < 3; ch++) {
        atomicAdd(add_up_data + cell * 3 + ch, (unsigned int)px[ch]);
        atomicAdd(rgb_all + ch, (unsigned long long int)px[ch]);
    }
    atomicAdd(c_array + cell, 1u);  // count of pixels contributing to this cell
}
/* Mosaic-based accumulation: one thread block (or several, when c > 32) per
 * mosaic cell. Partial RGB sums are kept in shared memory and flushed to
 * global memory once per block by thread 0, reducing global atomic traffic.
 *
 * Fixes vs. original:
 *  - the shared accumulators were never zeroed (shared memory is
 *    uninitialized), so sums started from garbage;
 *  - the contribution guard was `threadIdx.x < capacity - 1`, dropping the
 *    last participating thread of every block;
 *  - the corner-block capacity was computed and then unconditionally
 *    overwritten (missing else);
 *  - interior blocks were credited c*c pixels even though a block holds at
 *    most 32*32;
 *  - threads mapped outside the image no longer read out of bounds;
 *  - debug printfs and unused locals removed.
 *
 * NOTE(review): the thread->pixel mapping divides threadIdx.x by `c` rather
 * than the 32-wide tile, which looks wrong when per_mosaic_block_num > 1 --
 * confirm against the launch configuration. c_array is still credited with
 * the full block capacity for cells clipped by the image border, as in the
 * original. */
__global__
void add_up_optimised(unsigned char *data, int width, int height, int c, int new_width, int new_height, unsigned int * add_up_data, unsigned int * c_array, unsigned long long int * rgb_all, int per_mosaic_block_num) {
    __shared__ unsigned int r;
    __shared__ unsigned int g;
    __shared__ unsigned int b;
    if (threadIdx.x == 0) {
        r = 0; g = 0; b = 0;  // shared memory starts uninitialized
    }
    __syncthreads();
    // Pixel handled by this thread (same mapping as the original index math).
    int row = blockIdx.y * c + threadIdx.x / c;
    int col = blockIdx.x * c + threadIdx.x % c;
    int i = row * width + col;
    // Mosaic cell this block contributes to.
    int cellid = blockIdx.x / per_mosaic_block_num + (blockIdx.y / per_mosaic_block_num)*(gridDim.x / per_mosaic_block_num);
    // Number of pixels this block is responsible for: a full tile, smaller on
    // the trailing-edge blocks of a cell split across several blocks.
    int tile = c > 32 ? 32 : c;
    int capacity = tile * tile;
    if (per_mosaic_block_num > 1) {
        int edge = c - (per_mosaic_block_num - 1) * 32;  // remainder of the last tile
        int last_x = blockIdx.x % per_mosaic_block_num == per_mosaic_block_num - 1;
        int last_y = blockIdx.y % per_mosaic_block_num == per_mosaic_block_num - 1;
        if (last_x && last_y) {
            capacity = edge * edge;  // corner block
        } else if (last_x || last_y) {
            capacity = edge * 32;    // edge block
        }
    }
    if (threadIdx.x < capacity && row < height && col < width) {
        atomicAdd(&r, *(data + i * 3 + 0));
        atomicAdd(&g, *(data + i * 3 + 1));
        atomicAdd(&b, *(data + i * 3 + 2));
    }
    __syncthreads();  // all partial sums in shared memory before the flush
    if (threadIdx.x == 0) {
        atomicAdd((add_up_data + cellid * 3 + 0), r);
        atomicAdd((add_up_data + cellid * 3 + 1), g);
        atomicAdd((add_up_data + cellid * 3 + 2), b);
        atomicAdd((c_array + cellid), capacity); // pixel count used by avg()
        atomicAdd((rgb_all + 0), r);
        atomicAdd((rgb_all + 1), g);
        atomicAdd((rgb_all + 2), b);
    }
}
/* One thread per pixel: replace the pixel's RGB with the average colour of
 * its mosaic cell (cell sum in add_up_data divided by the cell's pixel count
 * in c_array). Must run after the add-up kernel has filled both arrays. */
__global__
void avg(unsigned char * data, int width, int height, int c, int new_width, int new_height, unsigned int * add_up_data, unsigned int * c_array) {
    const int pixel = blockIdx.x * blockDim.x + threadIdx.x;
    if (pixel >= width * height)
        return;  // padded tail of the grid
    const int cell = ((pixel / width) / c) * new_width + ((pixel % width) / c);
    const unsigned int count = c_array[cell];
    unsigned char *px = data + pixel * 3;
    px[0] = (unsigned char)(add_up_data[cell * 3 + 0] / count);
    px[1] = (unsigned char)(add_up_data[cell * 3 + 1] / count);
    px[2] = (unsigned char)(add_up_data[cell * 3 + 2] / count);
}
unsigned char * gpu_cal(unsigned char *gpu_data) {
    // Mosaic filter on the GPU. `gpu_data` is the image as a flat
    // height*width*3 RGB byte array (host side); it is returned with every
    // mosaic cell replaced by its average colour. The whole-image average
    // colour is stored in the globals r, g, b.
    //
    // Fixes vs. original:
    //  - the device accumulators (add_up_data, c_array, rgb_all) were never
    //    zeroed before the atomicAdd kernels, so results started from garbage;
    //  - the grid size used floor division, leaving up to BLOCK_SIZE-1 pixels
    //    unprocessed when width*height is not a multiple of BLOCK_SIZE;
    //  - removed host scratch buffers that were allocated and freed unused.
    size_t size = height * width * 3 * sizeof(unsigned char);
    const int BLOCK_SIZE = 512;
    int num_pixels = width * height;
    int num_blocks = (num_pixels + BLOCK_SIZE - 1) / BLOCK_SIZE;  // ceil-div: cover every pixel
    int add_up_data_width = width % c == 0 ? width / c : (width / c + 1);
    int add_up_data_height = height % c == 0 ? height / c : (height / c + 1);
    unsigned int *add_up_data_dev;        // per-cell RGB sums
    unsigned int *c_array_dev;            // per-cell pixel counts
    unsigned char *data_1d_dev;           // device copy of the image
    unsigned long long int *rgb_all_dev;  // whole-image RGB sums
    unsigned long long int *rgb_all_host;
    rgb_all_host = (unsigned long long int *)malloc(3 * sizeof(unsigned long long int));
    cudaMalloc(&data_1d_dev, size);
    cudaMalloc(&add_up_data_dev, add_up_data_width * add_up_data_height * 3 * sizeof(unsigned int));
    cudaMalloc(&c_array_dev, add_up_data_width * add_up_data_height * sizeof(unsigned int));
    cudaMalloc(&rgb_all_dev, 3 * sizeof(unsigned long long int));
    // Zero the accumulators the kernels atomicAdd into.
    cudaMemset(add_up_data_dev, 0, add_up_data_width * add_up_data_height * 3 * sizeof(unsigned int));
    cudaMemset(c_array_dev, 0, add_up_data_width * add_up_data_height * sizeof(unsigned int));
    cudaMemset(rgb_all_dev, 0, 3 * sizeof(unsigned long long int));
    cudaMemcpy(data_1d_dev, gpu_data, size, cudaMemcpyHostToDevice);
    // Accumulate per-cell and whole-image sums.
    add_up << < num_blocks, BLOCK_SIZE >> > (data_1d_dev, width, height, c, add_up_data_width, add_up_data_height, add_up_data_dev, c_array_dev, rgb_all_dev);
    // Replace every pixel with its cell average.
    avg << < num_blocks, BLOCK_SIZE >> > (data_1d_dev, width, height, c, add_up_data_width, add_up_data_height, add_up_data_dev, c_array_dev);
    cudaMemcpy(rgb_all_host, rgb_all_dev, 3 * sizeof(unsigned long long int), cudaMemcpyDeviceToHost);
    cudaMemcpy(gpu_data, data_1d_dev, size, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    r = *(rgb_all_host + 0) / width / height;
    g = *(rgb_all_host + 1) / width / height;
    b = *(rgb_all_host + 2) / width / height;
    cudaFree(add_up_data_dev);
    cudaFree(c_array_dev);
    cudaFree(data_1d_dev);
    cudaFree(rgb_all_dev);
    free(rgb_all_host);
    return gpu_data;
}
unsigned char * gpu_cal_optimised(unsigned char *gpu_data) {
    // Shared-memory variant of gpu_cal: add_up_optimised accumulates per
    // mosaic cell with one (or several) thread blocks per cell, then avg()
    // writes the averages back per pixel. Same interface and result as
    // gpu_cal.
    //
    // Fixes vs. original:
    //  - device accumulators are now zeroed before the atomicAdd kernels;
    //  - BLOCK_PER_MOSAIC used `c / (dim + 1) + 1`, which under-counts for
    //    large c (e.g. c = 2048 -> 63 instead of 64); replaced with ceil-div;
    //  - the avg() grid used floor division, leaving trailing pixels
    //    unprocessed; replaced with ceil-div;
    //  - removed host scratch buffers that were written and never read.
    const int BLOCK_SIZE = 512;
    size_t size = height * width * 3 * sizeof(unsigned char);
    int add_up_data_width = width % c == 0 ? width / c : (width / c + 1);
    int add_up_data_height = height % c == 0 ? height / c : (height / c + 1);
    int block_dim = c > 32 ? 32 : c;                         // tile edge, capped at 32
    int BLOCK_PER_MOSAIC = (c + block_dim - 1) / block_dim;  // blocks per cell edge
    dim3 block(block_dim * block_dim, 1, 1);
    dim3 grid(BLOCK_PER_MOSAIC * add_up_data_width, BLOCK_PER_MOSAIC * add_up_data_height, 1);
    int num_pixels = width * height;
    int num_blocks = (num_pixels + BLOCK_SIZE - 1) / BLOCK_SIZE;  // ceil-div for avg()
    unsigned int *add_up_data_dev;        // per-cell RGB sums
    unsigned int *c_array_dev;            // per-cell pixel counts
    unsigned char *data_1d_dev;           // device copy of the image
    unsigned long long int *rgb_all_dev;  // whole-image RGB sums
    unsigned long long int *rgb_all_host = (unsigned long long int *)malloc(3 * sizeof(unsigned long long int));
    cudaMalloc(&data_1d_dev, size);
    cudaMalloc(&add_up_data_dev, add_up_data_width * add_up_data_height * 3 * sizeof(unsigned int));
    cudaMalloc(&c_array_dev, add_up_data_width * add_up_data_height * sizeof(unsigned int));
    cudaMalloc(&rgb_all_dev, 3 * sizeof(unsigned long long int));
    cudaMemset(add_up_data_dev, 0, add_up_data_width * add_up_data_height * 3 * sizeof(unsigned int));
    cudaMemset(c_array_dev, 0, add_up_data_width * add_up_data_height * sizeof(unsigned int));
    cudaMemset(rgb_all_dev, 0, 3 * sizeof(unsigned long long int));
    cudaMemcpy(data_1d_dev, gpu_data, size, cudaMemcpyHostToDevice);
    add_up_optimised << < grid, block >> > (data_1d_dev, width, height, c, add_up_data_width, add_up_data_height, add_up_data_dev, c_array_dev, rgb_all_dev, BLOCK_PER_MOSAIC);
    avg << < num_blocks, BLOCK_SIZE >> > (data_1d_dev, width, height, c, add_up_data_width, add_up_data_height, add_up_data_dev, c_array_dev);
    cudaDeviceSynchronize();
    cudaMemcpy(rgb_all_host, rgb_all_dev, 3 * sizeof(unsigned long long int), cudaMemcpyDeviceToHost);
    cudaMemcpy(gpu_data, data_1d_dev, size, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    r = *(rgb_all_host + 0) / width / height;
    g = *(rgb_all_host + 1) / width / height;
    b = *(rgb_all_host + 2) / width / height;
    cudaFree(add_up_data_dev);
    cudaFree(c_array_dev);
    cudaFree(data_1d_dev);
    cudaFree(rgb_all_dev);
    free(rgb_all_host);
    return gpu_data;
}
int output(char * fname) {
    // Write the global image `data` (height rows of width*3 bytes) to `fname`
    // as a PPM file: plain text (P3) or binary (P6), per the global
    // output_mode. Returns SUCCESS on completion, FAILURE if the file cannot
    // be opened.
    //
    // Fixes vs. original: the char_num scratch buffer was leaked, str_buf was
    // only 10 bytes (tight for large dimensions), fopen was unchecked, the
    // unused local `s` is gone, and the final message now prints the fname
    // argument instead of the global out_file (identical in current usage).
    FILE* fp;
    int row, column, p_i, index, i;
    char* all_data;
    unsigned char* bin_data;
    char str_buf[16];                   // holds one header number plus "\n"
    char* char_num = (char*)malloc(4);  // one 0..255 value as text
    printf("\nStart writing---------------\n");
    switch (output_mode) {
    case(PPM_PLAIN_TEXT):
        fp = fopen(fname, "w");
        if (fp == NULL) { perror(fname); free(char_num); return FAILURE; }
        fputs("P3\n", fp);
        fputs("# COM6521 Assignment test output\n", fp);
        sprintf(str_buf, "%d\n", width);
        fputs(str_buf, fp);
        sprintf(str_buf, "%d\n", height);
        fputs(str_buf, fp);
        sprintf(str_buf, "%d\n", 255);
        fputs(str_buf, fp);
        // Format the whole image into one string, then write it in one call.
        all_data = (char *)malloc(width*height * 13 * sizeof(char));
        memset(all_data, '\0', width*height * 13 * sizeof(char));
        for (row = 0, p_i = 0, index = 0; row < height; row++, p_i++, index++) {
            for (column = 0; column < width * 3; column++, i = 0, index++) { // one colour value at a time
                sprintf(char_num, "%d", *(*(data + row) + column));
                for (i = 0; *(char_num + i) != '\0' && i < 3; i++, index++) {
                    *(all_data + index) = *(char_num + i);
                }
                // NOTE(review): p_i only advances once per row, so the '\t'
                // separator fires at most every 3 rows -- confirm intent.
                if (p_i == 3) {
                    *(all_data + index) = '\t';
                    p_i = 0;
                }
                else {
                    *(all_data + index) = ' ';
                }
            }
            *(all_data + index) = '\n';
        }
        fputs(all_data, fp);
        free(all_data);
        fclose(fp);
        break;
    case(PPM_BINARY):
        fp = fopen(fname, "wb");
        if (fp == NULL) { perror(fname); free(char_num); return FAILURE; }
        fputs("P6\n", fp);
        fputs("# COM6521 Assignment test output\n", fp);
        sprintf(str_buf, "%d\n", width);
        fputs(str_buf, fp);
        sprintf(str_buf, "%d\n", height);
        fputs(str_buf, fp);
        sprintf(str_buf, "%d\n", 255);
        fputs(str_buf, fp);
        // Flatten the row-pointer image into one buffer and write it raw.
        bin_data = (unsigned char*)malloc(width*height * 3 * sizeof(unsigned char));
        for (row = 0, index = 0; row < height; row++) {
            for (column = 0; column < width * 3; column++, index++) {
                *(bin_data + index) = (unsigned char)*(*(data + row) + column);
            }
        }
        fwrite(bin_data, sizeof(unsigned char), width*height * 3, fp);
        fclose(fp);
        free(bin_data);
        break;
    }
    free(char_num);  // was leaked in the original
    printf("The file has been saved as %s", fname);
    return SUCCESS;
}
|
ab0f02b1dfdb151ac5b0f96b36973020e0354c7f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "VectorAdd.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *VecA = NULL;
hipMalloc(&VecA, XSIZE*YSIZE);
float *VecB = NULL;
hipMalloc(&VecB, XSIZE*YSIZE);
float *VecC = NULL;
hipMalloc(&VecC, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
VectorAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, VecA,VecB,VecC,size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
VectorAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, VecA,VecB,VecC,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
VectorAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, VecA,VecB,VecC,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ab0f02b1dfdb151ac5b0f96b36973020e0354c7f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "VectorAdd.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *VecA = NULL;
cudaMalloc(&VecA, XSIZE*YSIZE);
float *VecB = NULL;
cudaMalloc(&VecB, XSIZE*YSIZE);
float *VecC = NULL;
cudaMalloc(&VecC, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
VectorAdd<<<gridBlock,threadBlock>>>(VecA,VecB,VecC,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
VectorAdd<<<gridBlock,threadBlock>>>(VecA,VecB,VecC,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
VectorAdd<<<gridBlock,threadBlock>>>(VecA,VecB,VecC,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
4f09cf2bf080f4d39ea0269f4a9dc217baf3ea2b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/device_functions.h>
#include <stdio.h>
#include<cuda.h>
#include<iostream>
#include<cmath>
#include<time.h>
#include<iomanip>
using namespace std;
float duration_kernel;
const int BLOCK_SIZE = 32;
// Reference CPU matrix multiply: p = a * b, where a is r1 x w, b is w x c2,
// and all matrices are stored row-major in flat float arrays.
__host__
void matrix_mul_seq(float* a, float* b, float* p, int r1, int w, int c2)
{
    for (int row = 0; row < r1; ++row) {
        const float* a_row = a + row * w;
        float* p_row = p + row * c2;
        for (int col = 0; col < c2; ++col) {
            float acc = 0.0f;
            // dot product of row `row` of a with column `col` of b
            for (int k = 0; k < w; ++k)
                acc += a_row[k] * b[k * c2 + col];
            p_row[col] = acc;
        }
    }
}
// Naive GEMM kernel: each thread computes one element of P = M x N, where
// M is r1 x Width, N is Width x c2 and P is r1 x c2, all row-major.
// Launch with a 2D grid/block whose x dimension covers columns and whose
// y dimension covers rows; out-of-range threads exit via the bounds check.
__global__
void MatrixMulKernel(float* M, float* N,
float* P, int Width, int r1, int c2)
{
// Calculate the row index of the P element and M
int Row = blockIdx.y * blockDim.y + threadIdx.y;
// Calculate the column index of P and N
int Col = blockIdx.x * blockDim.x + threadIdx.x;
float Pvalue = 0;
// Each thread computes one element of the block sub-matrix
if (Row < r1 && Col < c2)
{
// Dot product of row `Row` of M with column `Col` of N.
for (int k = 0; k < Width; k++) {
Pvalue += M[Row * Width + k] * N[k * c2 + Col];
}
P[Row * c2 + Col] = Pvalue;
}
}
/*
 * GPU matrix multiply: uploads h_a (r1 x w) and h_b (w x c2), launches
 * MatrixMulKernel over a BLOCK_SIZE x BLOCK_SIZE thread-block grid, and
 * downloads the r1 x c2 product into h_p.  The kernel-only elapsed time in
 * milliseconds is stored in the file-scope global `duration_kernel`.
 * Terminates the process on any HIP API failure.
 */
void matrix_mul_parallel(float* h_a, float* h_b, float* h_p, int r1, int w, int c2)
{
    int size_a = r1 * w * sizeof(float);
    int size_b = w * c2 * sizeof(float);
    int size_p = r1 * c2 * sizeof(float);
    float* d_a, *d_b, *d_p;
    /* Allocate and upload the two input matrices. */
    hipError_t err = hipMalloc((void**)&d_a, size_a);
    if (err != hipSuccess) {
        printf("%s in %s at line %d\n",
            hipGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    err = hipMemcpy(d_a, h_a, size_a, hipMemcpyHostToDevice);
    if (err != hipSuccess) {
        printf("%s in %s at line %d\n",
            hipGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    err = hipMalloc((void**)&d_b, size_b);
    if (err != hipSuccess) {
        printf("%s in %s at line %d\n",
            hipGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    err = hipMemcpy(d_b, h_b, size_b, hipMemcpyHostToDevice);
    if (err != hipSuccess) {
        printf("%s in %s at line %d\n",
            hipGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    err = hipMalloc((void**)&d_p, size_p);
    if (err != hipSuccess) {
        printf("%s in %s at line %d\n",
            hipGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    /* Time only the kernel using HIP events. */
    hipEvent_t star, end;
    hipEventCreate(&star);
    hipEventRecord(star, 0);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
    /* Fix: grid.x must cover the c2 columns and grid.y the r1 rows
     * (the kernel maps blockIdx.x->Col, blockIdx.y->Row); the previous
     * order only worked for square matrices. */
    dim3 dimGrid(ceil(c2 / (float)BLOCK_SIZE), ceil(r1 / (float)BLOCK_SIZE), 1);
    MatrixMulKernel << <dimGrid, dimBlock >> > (d_a, d_b, d_p, w, r1, c2);
    hipEventCreate(&end);
    hipEventRecord(end, 0);
    hipEventSynchronize(end);
    hipEventElapsedTime(&duration_kernel, star, end);
    /* Fix: the events were previously leaked; release them once read. */
    hipEventDestroy(star);
    hipEventDestroy(end);
    cout << "Time spent by the Kernel: " << duration_kernel/1000 << " s " << endl;
    err = hipMemcpy(h_p, d_p, size_p, hipMemcpyDeviceToHost);
    if (err != hipSuccess) {
        printf("%s in %s at line %d\n",
            hipGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    hipFree(d_a); hipFree(d_b); hipFree(d_p);
}
int main()
{
srand(time(NULL));
int r1, w, c2;
cout << "Enter rows for first matrix: ";
cin >> r1;
cout << "Enter columns of first matrix which is the same as rows for second matrix: ";
cin >> w;
cout << "Enter columns for second matrix: ";
cin >> c2;
int size_a = r1 * w;
int size_b = w * c2;
int size_p = r1 * c2;
float* a = new float[size_a];
float* b = new float[size_b];
float* p = new float[size_p];
float* d_p = new float[size_p];
// initializing elements of first matrix.
for (int i = 0; i < r1; i++)
for (int j = 0; j < w; j++)
{
//a[i * w + j] = double(rand()) / (double(RAND_MAX / LLONG_MAX) + 1.0);
a[i * w + j] =i*j;
}
// initializing elements of second matrix.
for (int i = 0; i < w; i++)
for (int j = 0; j < c2; j++)
{
//b[i * c2 + j] = double(rand()) / (double(RAND_MAX / LLONG_MAX) + 1.0);
b[i * c2 + j] = i*j;
}
// Initializing elements of matrix p to 0.
for (int i = 0; i < r1; i++)
for (int j = 0; j < c2; j++)
{
p[i * c2 + j] = 0;
}
//calling the sequential function
clock_t start = clock();
matrix_mul_seq(a, b, p, r1, w, c2);
clock_t stop = clock();
double duration_cpu = (double)(stop - start) / CLOCKS_PER_SEC;
cout << " time spent by cpu in seconds : " << duration_cpu << endl;
// calling the parallel function
start = clock();
matrix_mul_parallel(a, b, d_p, r1, w, c2);
stop = clock();
double duration_device = (double)(stop - start) / CLOCKS_PER_SEC;
cout << "For Block size: " << BLOCK_SIZE << endl;
cout << " time spent by the device in seconds : " << duration_device << endl;
cout << " the speedup/slowdown (timing the kernel only ) is " << duration_cpu / (duration_kernel / 1000) << endl;
cout << " the speedup/slowdown (timing the device and the memory allocation overheads ) is " << duration_cpu / duration_device << endl;
unsigned long long int operations_count = ((r1 * w * c2) + (r1 * c2 * (w - 1)));
cout << " The Performance in GFLOPS = " << operations_count / duration_device << endl;
return 0;
} | 4f09cf2bf080f4d39ea0269f4a9dc217baf3ea2b.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
#include <stdio.h>
#include<cuda.h>
#include<iostream>
#include<cmath>
#include<time.h>
#include<iomanip>
using namespace std;
float duration_kernel;
const int BLOCK_SIZE = 32;
// Sequential reference implementation of p = a * b with a: r1 x w,
// b: w x c2, p: r1 x c2; every matrix is a flat row-major float array.
__host__
void matrix_mul_seq(float* a, float* b, float* p, int r1, int w, int c2)
{
    for (int i = 0; i < r1; ++i) {
        for (int j = 0; j < c2; ++j) {
            float dot = 0.0f;
            // accumulate a[i,:] . b[:,j]
            for (int k = 0; k < w; ++k)
                dot += a[i * w + k] * b[k * c2 + j];
            p[i * c2 + j] = dot;
        }
    }
}
// Naive GEMM kernel: each thread computes one element of P = M x N, where
// M is r1 x Width, N is Width x c2 and P is r1 x c2, all row-major.
// Launch with a 2D grid/block whose x dimension covers columns and whose
// y dimension covers rows; out-of-range threads exit via the bounds check.
__global__
void MatrixMulKernel(float* M, float* N,
float* P, int Width, int r1, int c2)
{
// Calculate the row index of the P element and M
int Row = blockIdx.y * blockDim.y + threadIdx.y;
// Calculate the column index of P and N
int Col = blockIdx.x * blockDim.x + threadIdx.x;
float Pvalue = 0;
// Each thread computes one element of the block sub-matrix
if (Row < r1 && Col < c2)
{
// Dot product of row `Row` of M with column `Col` of N.
for (int k = 0; k < Width; k++) {
Pvalue += M[Row * Width + k] * N[k * c2 + Col];
}
P[Row * c2 + Col] = Pvalue;
}
}
/*
 * GPU matrix multiply: uploads h_a (r1 x w) and h_b (w x c2), launches
 * MatrixMulKernel over a BLOCK_SIZE x BLOCK_SIZE thread-block grid, and
 * downloads the r1 x c2 product into h_p.  The kernel-only elapsed time in
 * milliseconds is stored in the file-scope global `duration_kernel`.
 * Terminates the process on any CUDA API failure.
 */
void matrix_mul_parallel(float* h_a, float* h_b, float* h_p, int r1, int w, int c2)
{
    int size_a = r1 * w * sizeof(float);
    int size_b = w * c2 * sizeof(float);
    int size_p = r1 * c2 * sizeof(float);
    float* d_a, *d_b, *d_p;
    /* Allocate and upload the two input matrices. */
    cudaError_t err = cudaMalloc((void**)&d_a, size_a);
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n",
            cudaGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_a, h_a, size_a, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n",
            cudaGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((void**)&d_b, size_b);
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n",
            cudaGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_b, h_b, size_b, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n",
            cudaGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((void**)&d_p, size_p);
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n",
            cudaGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    /* Time only the kernel using CUDA events. */
    cudaEvent_t star, end;
    cudaEventCreate(&star);
    cudaEventRecord(star, 0);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
    /* Fix: grid.x must cover the c2 columns and grid.y the r1 rows
     * (the kernel maps blockIdx.x->Col, blockIdx.y->Row); the previous
     * order only worked for square matrices. */
    dim3 dimGrid(ceil(c2 / (float)BLOCK_SIZE), ceil(r1 / (float)BLOCK_SIZE), 1);
    MatrixMulKernel << <dimGrid, dimBlock >> > (d_a, d_b, d_p, w, r1, c2);
    cudaEventCreate(&end);
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&duration_kernel, star, end);
    /* Fix: the events were previously leaked; release them once read. */
    cudaEventDestroy(star);
    cudaEventDestroy(end);
    cout << "Time spent by the Kernel: " << duration_kernel/1000 << " s " << endl;
    err = cudaMemcpy(h_p, d_p, size_p, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n",
            cudaGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_p);
}
int main()
{
srand(time(NULL));
int r1, w, c2;
cout << "Enter rows for first matrix: ";
cin >> r1;
cout << "Enter columns of first matrix which is the same as rows for second matrix: ";
cin >> w;
cout << "Enter columns for second matrix: ";
cin >> c2;
int size_a = r1 * w;
int size_b = w * c2;
int size_p = r1 * c2;
float* a = new float[size_a];
float* b = new float[size_b];
float* p = new float[size_p];
float* d_p = new float[size_p];
// initializing elements of first matrix.
for (int i = 0; i < r1; i++)
for (int j = 0; j < w; j++)
{
//a[i * w + j] = double(rand()) / (double(RAND_MAX / LLONG_MAX) + 1.0);
a[i * w + j] =i*j;
}
// initializing elements of second matrix.
for (int i = 0; i < w; i++)
for (int j = 0; j < c2; j++)
{
//b[i * c2 + j] = double(rand()) / (double(RAND_MAX / LLONG_MAX) + 1.0);
b[i * c2 + j] = i*j;
}
// Initializing elements of matrix p to 0.
for (int i = 0; i < r1; i++)
for (int j = 0; j < c2; j++)
{
p[i * c2 + j] = 0;
}
//calling the sequential function
clock_t start = clock();
matrix_mul_seq(a, b, p, r1, w, c2);
clock_t stop = clock();
double duration_cpu = (double)(stop - start) / CLOCKS_PER_SEC;
cout << " time spent by cpu in seconds : " << duration_cpu << endl;
// calling the parallel function
start = clock();
matrix_mul_parallel(a, b, d_p, r1, w, c2);
stop = clock();
double duration_device = (double)(stop - start) / CLOCKS_PER_SEC;
cout << "For Block size: " << BLOCK_SIZE << endl;
cout << " time spent by the device in seconds : " << duration_device << endl;
cout << " the speedup/slowdown (timing the kernel only ) is " << duration_cpu / (duration_kernel / 1000) << endl;
cout << " the speedup/slowdown (timing the device and the memory allocation overheads ) is " << duration_cpu / duration_device << endl;
unsigned long long int operations_count = ((r1 * w * c2) + (r1 * c2 * (w - 1)));
cout << " The Performance in GFLOPS = " << operations_count / duration_device << endl;
return 0;
} |
fb39451b2f57c10e9d5f4966cdc3e9bb216d9d82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <windows.h>
#include <iostream>
#include <vector>
#include <cstring>
#include <device_launch_parameters.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
using namespace std;
// CPU nearest-neighbour resize of a 3-channel 8-bit image: _dst is
// (re)allocated to size `s` (zero-initialised) and each destination pixel
// takes the value of the rounded-back source pixel, when in range.
void resizeImage(const Mat &_src, Mat &_dst, const Size &s)
{
    _dst = Mat::zeros(s, CV_8UC3);
    const double scaleY = s.height / (float)_src.rows;
    const double scaleX = s.width / (float)_src.cols;
    for (int y = 0; y < _dst.rows; ++y) {
        for (int x = 0; x < _dst.cols; ++x) {
            // map destination (y, x) back to its nearest source pixel
            const int srcY = cvRound(y / (double)scaleY);
            const int srcX = cvRound(x / (double)scaleX);
            const bool inRange =
                srcY >= 0 && srcY < _src.rows && srcX >= 0 && srcX < _src.cols;
            if (inRange) {
                // copy all three interleaved channels at once
                _dst.at<Vec3b>(y, x) = _src.at<Vec3b>(srcY, srcX);
            }
        }
    }
}
/*
 * Select the first device with compute capability >= 1.x and make it the
 * current device.  Returns false (with a message on stderr) when no usable
 * device is found.
 */
bool initCUDA()
{
    /* Fix: initialise count -- if hipGetDeviceCount fails it may leave the
     * output untouched, and the old code then read an indeterminate value. */
    int count = 0;
    hipGetDeviceCount(&count);
    if (count == 0) {
        fprintf(stderr, "There is no device.\n");
        return false;
    }
    /* Scan for the first device reporting a usable compute capability. */
    int i;
    for (i = 0; i < count; i++) {
        hipDeviceProp_t prop;
        if (hipGetDeviceProperties(&prop, i) == hipSuccess) {
            if (prop.major >= 1) {
                break;
            }
        }
    }
    if (i == count) {
        fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
        return false;
    }
    hipSetDevice(i);
    return true;
}
// Nearest-neighbour resize kernel for interleaved 3-byte pixels.
// One block per destination pixel: blockIdx.x is the destination row,
// blockIdx.y the destination column; the *_step arguments are the byte
// strides of one source/destination row.
// NOTE(review): threadIdx is never used, so every thread of the 32x32
// block redundantly writes the same pixel -- confirm whether a <<<grid,1>>>
// launch was intended.
__global__ void kernel(uchar* _src_dev, uchar * _dst_dev, int _src_step, int _dst_step,
int _src_rows, int _src_cols, int _dst_rows, int _dst_cols)
{
auto i = blockIdx.x;
auto j = blockIdx.y;
// destination-to-source scale factors for each axis
double fRows = _dst_rows / (float)_src_rows;
double fCols = _dst_cols / (float)_src_cols;
auto pX = 0;
auto pY = 0;
// map destination (i, j) back to the nearest source pixel (pX, pY)
pX = (int)(i / fRows);
pY = (int)(j / fCols);
if (pX < _src_rows && pX >= 0 && pY < _src_cols && pY >= 0) {
// copy the three interleaved channels
*(_dst_dev + i * _dst_step + 3 * j + 0) = *(_src_dev + pX * _src_step + 3 * pY);
*(_dst_dev + i * _dst_step + 3 * j + 1) = *(_src_dev + pX * _src_step + 3 * pY + 1);
*(_dst_dev + i * _dst_step + 3 * j + 2) = *(_src_dev + pX * _src_step + 3 * pY + 2);
}
}
/*
 * GPU nearest-neighbour resize of a 3-channel 8-bit image.  Uploads _src,
 * launches one block per destination pixel (see kernel()), and downloads
 * the result into _dst, which is (re)allocated to size `s`.
 * NOTE(review): the device buffers are sized 3*w*h while the kernel indexes
 * with Mat::step; if OpenCV pads rows these disagree -- confirm row
 * continuity of the Mats used here.
 */
void resizeImageGpu(const Mat &_src, Mat &_dst, const Size &s)
{
    _dst = Mat(s, CV_8UC3);
    uchar *src_data = _src.data;
    auto width = _src.cols;
    auto height = _src.rows;
    uchar *src_dev, *dst_dev;
    hipMalloc((void**)&src_dev, 3 * width*height * sizeof(uchar));
    hipMalloc((void**)&dst_dev, 3 * s.width * s.height * sizeof(uchar));
    hipMemcpy(src_dev, src_data, 3 * width*height * sizeof(uchar), hipMemcpyHostToDevice);
    auto src_step = _src.step;
    auto dst_step = _dst.step;
    // one block per destination pixel; 32x32 threads per block
    dim3 grid(s.height, s.width);
    dim3 block(32, 32);
    hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block) , 0, 0, src_dev, dst_dev, src_step, dst_step, height, width, s.height, s.width);
    hipMemcpy(_dst.data, dst_dev, 3 * s.width * s.height * sizeof(uchar), hipMemcpyDeviceToHost);
    /* Fix: the two device buffers were previously leaked on every call. */
    hipFree(src_dev);
    hipFree(dst_dev);
}
int main()
{
Mat src = cv::imread("1.jpg", 1);
Mat dst_cpu;
imshow("Origin", src);
double start = GetTickCount();
resizeImage(src, dst_cpu, Size(src.cols * 2, src.rows * 2));
double end = GetTickCount();
cout << "cpu cost time" << end - start << "\n";
initCUDA();
Mat dst_gpu;
start = GetTickCount();
resizeImageGpu(src, dst_gpu, Size(src.cols * 2, src.rows * 2));
end = GetTickCount();
cout << "gpu cost time" << end - start << "\n";
cv::imshow("Zoom", dst_cpu);
waitKey(0);
return 0;
} | fb39451b2f57c10e9d5f4966cdc3e9bb216d9d82.cu | #include "cuda_runtime.h"
#include <windows.h>
#include <iostream>
#include <vector>
#include <cstring>
#include <device_launch_parameters.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
using namespace std;
// CPU nearest-neighbour resize of a 3-channel 8-bit image: _dst is
// (re)allocated to size `s` (zero-initialised) and each destination pixel
// takes the value of the rounded-back source pixel, when in range.
void resizeImage(const Mat &_src, Mat &_dst, const Size &s)
{
    _dst = Mat::zeros(s, CV_8UC3);
    const double rowScale = s.height / (float)_src.rows;
    const double colScale = s.width / (float)_src.cols;
    for (int r = 0; r < _dst.rows; ++r) {
        for (int c = 0; c < _dst.cols; ++c) {
            // nearest source coordinates for destination (r, c)
            const int sr = cvRound(r / (double)rowScale);
            const int sc = cvRound(c / (double)colScale);
            if (sr >= 0 && sr < _src.rows && sc >= 0 && sc < _src.cols) {
                // whole-pixel copy of the three interleaved channels
                _dst.at<Vec3b>(r, c) = _src.at<Vec3b>(sr, sc);
            }
        }
    }
}
/*
 * Select the first device with compute capability >= 1.x and make it the
 * current device.  Returns false (with a message on stderr) when no usable
 * device is found.
 */
bool initCUDA()
{
    /* Fix: initialise count -- cudaGetDeviceCount may leave the output
     * untouched on failure, and the old code then read an indeterminate
     * value. */
    int count = 0;
    cudaGetDeviceCount(&count);
    if (count == 0) {
        fprintf(stderr, "There is no device.\n");
        return false;
    }
    /* Scan for the first device reporting a usable compute capability. */
    int i;
    for (i = 0; i < count; i++) {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
            if (prop.major >= 1) {
                break;
            }
        }
    }
    if (i == count) {
        fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
        return false;
    }
    cudaSetDevice(i);
    return true;
}
// Nearest-neighbour resize kernel for interleaved 3-byte pixels.
// One block per destination pixel: blockIdx.x is the destination row,
// blockIdx.y the destination column; the *_step arguments are the byte
// strides of one source/destination row.
// NOTE(review): threadIdx is never used, so every thread of the 32x32
// block redundantly writes the same pixel -- confirm whether a <<<grid,1>>>
// launch was intended.
__global__ void kernel(uchar* _src_dev, uchar * _dst_dev, int _src_step, int _dst_step,
int _src_rows, int _src_cols, int _dst_rows, int _dst_cols)
{
auto i = blockIdx.x;
auto j = blockIdx.y;
// destination-to-source scale factors for each axis
double fRows = _dst_rows / (float)_src_rows;
double fCols = _dst_cols / (float)_src_cols;
auto pX = 0;
auto pY = 0;
// map destination (i, j) back to the nearest source pixel (pX, pY)
pX = (int)(i / fRows);
pY = (int)(j / fCols);
if (pX < _src_rows && pX >= 0 && pY < _src_cols && pY >= 0) {
// copy the three interleaved channels
*(_dst_dev + i * _dst_step + 3 * j + 0) = *(_src_dev + pX * _src_step + 3 * pY);
*(_dst_dev + i * _dst_step + 3 * j + 1) = *(_src_dev + pX * _src_step + 3 * pY + 1);
*(_dst_dev + i * _dst_step + 3 * j + 2) = *(_src_dev + pX * _src_step + 3 * pY + 2);
}
}
/*
 * GPU nearest-neighbour resize of a 3-channel 8-bit image.  Uploads _src,
 * launches one block per destination pixel (see kernel()), and downloads
 * the result into _dst, which is (re)allocated to size `s`.
 * NOTE(review): the device buffers are sized 3*w*h while the kernel indexes
 * with Mat::step; if OpenCV pads rows these disagree -- confirm row
 * continuity of the Mats used here.
 */
void resizeImageGpu(const Mat &_src, Mat &_dst, const Size &s)
{
    _dst = Mat(s, CV_8UC3);
    uchar *src_data = _src.data;
    auto width = _src.cols;
    auto height = _src.rows;
    uchar *src_dev, *dst_dev;
    cudaMalloc((void**)&src_dev, 3 * width*height * sizeof(uchar));
    cudaMalloc((void**)&dst_dev, 3 * s.width * s.height * sizeof(uchar));
    cudaMemcpy(src_dev, src_data, 3 * width*height * sizeof(uchar), cudaMemcpyHostToDevice);
    auto src_step = _src.step;
    auto dst_step = _dst.step;
    // one block per destination pixel; 32x32 threads per block
    dim3 grid(s.height, s.width);
    dim3 block(32, 32);
    kernel<<< grid, block >>> (src_dev, dst_dev, src_step, dst_step, height, width, s.height, s.width);
    cudaMemcpy(_dst.data, dst_dev, 3 * s.width * s.height * sizeof(uchar), cudaMemcpyDeviceToHost);
    /* Fix: the two device buffers were previously leaked on every call. */
    cudaFree(src_dev);
    cudaFree(dst_dev);
}
}
int main()
{
Mat src = cv::imread("1.jpg", 1);
Mat dst_cpu;
imshow("Origin", src);
double start = GetTickCount();
resizeImage(src, dst_cpu, Size(src.cols * 2, src.rows * 2));
double end = GetTickCount();
cout << "cpu cost time£º" << end - start << "\n";
initCUDA();
Mat dst_gpu;
start = GetTickCount();
resizeImageGpu(src, dst_gpu, Size(src.cols * 2, src.rows * 2));
end = GetTickCount();
cout << "gpu cost time£º" << end - start << "\n";
cv::imshow("Zoom", dst_cpu);
waitKey(0);
return 0;
} |
b987c6d808ad70f916ce4bba09a688e0840bdf67.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <exception>
#include <locale.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/for_each.h>
#include <thrust/extrema.h>
#include <thrust/sort.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "nvstrings/NVStrings.h"
#include "./NVStringsImpl.h"
#include "../custring_view.cuh"
#include "../custring.cuh"
#include "../unicode/unicode_flags.h"
#include "../unicode/charcases.h"
#include "../util.h"
//
// Report a HIP error on stderr as "<prefix>: <name>(<code>):<description>".
// A hipSuccess code is silently ignored so callers may invoke this
// unconditionally after any API call.
void printCudaError( hipError_t err, const char* prefix )
{
    if( err != hipSuccess )
        fprintf(stderr,"%s: %s(%d):%s\n",prefix,hipGetErrorName(err),(int)err,hipGetErrorString(err));
}
//
// Convert a UTF-8 encoded, null-terminated host string into a newly
// allocated, zero-terminated UTF-32 (char32_t) array.  The caller owns the
// returned buffer and must delete[] it.
char32_t* to_char32( const char* ca )
{
unsigned int size = (unsigned int)strlen(ca);
// number of code points in the UTF-8 sequence (custring_view helper)
unsigned int count = custring_view::chars_in_string(ca,size);
char32_t* rtn = new char32_t[count+1];
char32_t* optr = rtn;
const char* iptr = ca;
for( unsigned int i=0; i < size; ++i )
{
Char oc = 0;
// decode one (possibly multi-byte) character; cw is its byte width
unsigned int cw = custring_view::char_to_Char(iptr,oc);
iptr += cw;
i += cw - 1; // skip the continuation bytes just consumed
*optr++ = oc;
}
rtn[count] = 0; // UTF-32 terminator
return rtn;
}
//
// Device-side copy of the 64KB unicode character-flags lookup table,
// lazily uploaded on first use and intentionally never freed.
// NOTE(review): the lazy initialisation is not thread-safe and the
// hipMalloc/hipMemcpy results are unchecked -- confirm single-threaded
// first use.
static unsigned char* d_unicode_flags = nullptr;
unsigned char* get_unicode_flags()
{
if( !d_unicode_flags )
{
// leave this out of RMM since it is never freed
hipMalloc(&d_unicode_flags,65536);
hipMemcpy(d_unicode_flags,unicode_flags,65536,hipMemcpyHostToDevice);
}
return d_unicode_flags;
}
// Device-side copy of the 65536-entry upper/lower case-mapping table,
// lazily uploaded on first use and intentionally never freed.
// NOTE(review): same caveats as get_unicode_flags() -- lazy init is not
// thread-safe and API results are unchecked.
static unsigned short* d_charcases = nullptr;
unsigned short* get_charcases()
{
if( !d_charcases )
{
// leave this out of RMM since it is never freed
hipMalloc(&d_charcases,65536*sizeof(unsigned short));
hipMemcpy(d_charcases,charcases,65536*sizeof(unsigned short),hipMemcpyHostToDevice);
}
return d_charcases;
}
//
// Construct the implementation with a device vector of `count` null
// custring_view pointers; the flat character buffer is attached later via
// setMemoryBuffer()/createMemoryFor().
NVStringsImpl::NVStringsImpl(unsigned int count)
: bufferSize(0), memoryBuffer(nullptr), bIpcHandle(false), stream_id(0)
{
pList = new rmm::device_vector<custring_view*>(count,nullptr);
}
// Release the flat string buffer: RMM-allocated memory is freed directly,
// while a buffer adopted from another process via IPC is closed instead of
// freed.  The custring_view pointer vector is always deleted.
NVStringsImpl::~NVStringsImpl()
{
if( memoryBuffer && !bIpcHandle )
RMM_FREE(memoryBuffer,0);
if( bIpcHandle )
hipIpcCloseMemHandle(memoryBuffer);
memoryBuffer = nullptr;
delete pList;
pList = nullptr;
bufferSize = 0;
}
// Sum the per-string byte sizes in d_lengths (device memory, one entry per
// string in pList) and allocate one flat device buffer big enough for all
// of them.  Returns nullptr when every length is zero, which is a valid
// (all-null/empty) state rather than an error.
char* NVStringsImpl::createMemoryFor( size_t* d_lengths )
{
unsigned int count = (unsigned int)pList->size();
auto execpol = rmm::exec_policy(stream_id);
bufferSize = thrust::reduce(execpol->on(stream_id), d_lengths, d_lengths+count);
if( bufferSize==0 )
return 0; // this is valid; all sizes are zero
memoryBuffer = device_alloc<char>(bufferSize,stream_id);
return memoryBuffer;
}
//
// Build pImpl's device-side custring_view array from `count` host C-strings.
// Sizes are measured on the host, the raw characters are serialized into a
// single flat host buffer, copied to one device allocation, and then a
// device kernel constructs a custring_view in place for each entry.
// A null entry in `strs` stays a null custring_view.
// Returns 0 (hipSuccess) on success, a hipError_t code or -1 (host OOM)
// on failure.
int NVStrings_init_from_strings(NVStringsImpl* pImpl, const char** strs, unsigned int count )
{
hipError_t err = hipSuccess;
auto execpol = rmm::exec_policy(0);
// first compute the size of each string
size_t nbytes = 0;
thrust::host_vector<size_t> hoffsets(count+1,0);
//hoffsets[0] = 0; --already set by this ----^
thrust::host_vector<size_t> hlengths(count,0);
for( unsigned int idx=0; idx < count; ++idx )
{
const char* str = strs[idx];
size_t len = ( str ? (strlen(str)+1) : 0 );
size_t nsz = len; // include null-terminator
if( len > 0 ) // len=0 is null, len=1 is empty string
{
hlengths[idx] = len; // just the string length
int nchars = custring_view::chars_in_string(str,(int)len-1);
nsz = custring_view::alloc_size((int)len-1,nchars);
}
nsz = ALIGN_SIZE(nsz);
nbytes += nsz;
hoffsets[idx+1] = nbytes;
}
// check if they are all null
if( nbytes==0 )
return (int)err;
// Host serialization
size_t cheat = 0;//sizeof(custring_view);
char* h_flatstrs = (char*)malloc(nbytes);
if( !h_flatstrs )
{
fprintf(stderr,"init_from_strings: not enough CPU memory for intermediate buffer of size %ld bytes\n", nbytes);
return -1;
}
// pack every source string at its precomputed aligned offset
for( unsigned int idx = 0; idx < count; ++idx )
memcpy(h_flatstrs + hoffsets[idx] + cheat, strs[idx], hlengths[idx]);
// copy to device memory
char* d_flatstrs = nullptr;
rmmError_t rerr = RMM_ALLOC(&d_flatstrs,nbytes,0);
if( rerr == RMM_SUCCESS )
err = hipMemcpyAsync(d_flatstrs, h_flatstrs, nbytes, hipMemcpyHostToDevice);
free(h_flatstrs); // no longer needed
if( err != hipSuccess )
{
fprintf(stderr,"nvs-sts: alloc/copy %'lu bytes\n",nbytes);
printCudaError(err);
return (int)err;
}
// copy offsets and lengths to device memory
rmm::device_vector<size_t> offsets(hoffsets);
rmm::device_vector<size_t> lengths(hlengths);
size_t* d_offsets = offsets.data().get();
size_t* d_lengths = lengths.data().get();
// initialize custring objects in device memory
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_flatstrs, d_offsets, d_lengths, cheat, d_strings] __device__(unsigned int idx){
size_t len = d_lengths[idx];
if( len < 1 )
return; // null string
size_t offset = d_offsets[idx];
char* ptr = d_flatstrs + offset;
char* str = ptr + cheat;
d_strings[idx] = custring_view::create_from(ptr,str,(int)len-1);
});
//
//err = hipDeviceSynchronize();
//if( err!=hipSuccess )
//{
// fprintf(stderr,"nvs-sts: sync=%d copy %'u strings\n",(int)err,count);
// printCudaError(err);
//}
// hand ownership of the flat device buffer to the implementation
pImpl->setMemoryBuffer(d_flatstrs,nbytes);
return (int)err;
}
// build strings from array of device pointers and sizes
// Build pImpl's custring_view array from (pointer,length) index pairs that
// reference characters already in device memory (or host memory when
// bdevmem is false, in which case the index array is first copied to the
// device).  Device-memory input is pre-validated by touching every byte
// once; the pairs can optionally be sorted (by length and/or content) to
// reduce warp divergence before the flat buffer is built.
// Returns 0 on success or a hipError_t code on failure.
int NVStrings_init_from_indexes( NVStringsImpl* pImpl, std::pair<const char*,size_t>* indexes, unsigned int count, bool bdevmem, NVStrings::sorttype stype )
{
hipError_t err = hipSuccess;
rmmError_t rerr = RMM_SUCCESS;
auto execpol = rmm::exec_policy(0);
thrust::pair<const char*,size_t>* d_indexes = (thrust::pair<const char*,size_t>*)indexes;
if( !bdevmem )
{
// host-side index array: stage a device copy of the pairs
rerr = RMM_ALLOC(&d_indexes,sizeof(std::pair<const char*,size_t>)*count,0);
if( rerr == RMM_SUCCESS )
err = hipMemcpyAsync(d_indexes,indexes,sizeof(std::pair<const char*,size_t>)*count,hipMemcpyHostToDevice);
}
else
{
// Lets check what we got from the caller by reading all the memory once.
// This is wasteful but I cannot keep people from passing bad data:
// https://github.com/rapidsai/custrings/issues/191
// This check cannot be done inline below because libraries like thrust may terminate the process
// when illegal pointers are passed in. Here we do a pre-check, handle the error and return it.
// Do not put any other thrust calls before this line in this method.
try
{
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_indexes] __device__ (unsigned int idx) {
const char* str = d_indexes[idx].first;
size_t bytes = d_indexes[idx].second;
if( str )
custring_view::chars_in_string(str,(unsigned int)bytes);
});
err = hipDeviceSynchronize(); // do not remove this
}
catch( thrust::system_error& exc )
{
err = (hipError_t)exc.code().value();
//printf("exception: %d: %s\n", (int)err, e.what());
}
}
if( err != hipSuccess || rerr != RMM_SUCCESS )
{
printCudaError(err,"nvs-idx: checking parms");
if( !bdevmem )
RMM_FREE(d_indexes,0);
return (int)err;
}
// sort the list - helps reduce divergence
if( stype )
{
thrust::sort(execpol->on(0), d_indexes, d_indexes + count,
[stype] __device__( thrust::pair<const char*,size_t>& lhs, thrust::pair<const char*,size_t>& rhs ) {
if( lhs.first==0 || rhs.first==0 )
return rhs.first!=0; // null < non-null
int diff = 0;
if( stype & NVStrings::length )
diff = (unsigned int)(lhs.second - rhs.second);
if( diff==0 && (stype & NVStrings::name) )
diff = custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second);
return (diff < 0);
});
}
// first get the size we need to store these strings
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_indexes, d_sizes] __device__ (unsigned int idx) {
const char* str = d_indexes[idx].first;
size_t bytes = d_indexes[idx].second;
if( str )
d_sizes[idx] = ALIGN_SIZE(custring_view::alloc_size((char*)str,(int)bytes));
});
// allocate device memory
size_t nbytes = thrust::reduce(execpol->on(0),sizes.begin(),sizes.end());
//printf("nvs-idx: %'lu bytes\n",nbytes);
if( nbytes==0 ) {
if( !bdevmem )
RMM_FREE(d_indexes,0);
return 0; // done, all the strings were null
}
char* d_flatdstrs = nullptr;
rerr = RMM_ALLOC(&d_flatdstrs,nbytes,0);
if( rerr != RMM_SUCCESS )
{
fprintf(stderr,"nvs-idx: RMM_ALLOC(%p,%lu)=%d\n", d_flatdstrs,nbytes,(int)rerr);
//printCudaError(err);
if( !bdevmem )
RMM_FREE(d_indexes,0);
return (int)err;
}
// build offsets array
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// now build the strings vector
custring_view_array d_strings = pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_indexes, d_flatdstrs, d_offsets, d_sizes, d_strings] __device__(unsigned int idx){
// add string to internal vector array
const char* str = d_indexes[idx].first;
size_t bytes = d_indexes[idx].second;
size_t offset = d_offsets[idx];
char* ptr = d_flatdstrs + offset;
custring_view* dstr = 0;
if( str )
dstr = custring_view::create_from(ptr,(char*)str,(int)bytes);
d_strings[idx] = dstr;
d_sizes[idx] = bytes;
});
//
pImpl->setMemoryBuffer(d_flatdstrs,nbytes);
if( !bdevmem )
RMM_FREE(d_indexes,0);
return (int)err;
}
// build strings from pointer and array of offsets
// Build pImpl's custring_view array from an Arrow-style host layout: a
// contiguous character buffer `strs`, `count+1` offsets delimiting each
// string, and an optional validity bitmask (`nulls` = number of nulls).
// The strings are re-packed into one aligned device buffer and a device
// kernel constructs a custring_view per non-null entry.
// Returns 0 on success or a hipError_t code on failure.
// NOTE(review): unlike init_from_strings, the malloc() of h_flatstrs is
// not checked here -- confirm host OOM handling.
int NVStrings_init_from_offsets( NVStringsImpl* pImpl, const char* strs, int count, const int* offsets, const unsigned char* bitmask, int nulls )
{
if( count==nulls )
return 0; // if all are nulls then we are done
hipError_t err = hipSuccess;
auto execpol = rmm::exec_policy(0);
// first compute the size of each string
size_t nbytes = 0;
thrust::host_vector<size_t> hoffsets(count+1,0);
thrust::host_vector<size_t> hlengths(count,0);
for( int idx=0; idx < count; ++idx )
{
int offset = offsets[idx];
int len = offsets[idx+1] - offset;
const char* str = strs + offset;
int nchars = custring_view::chars_in_string(str,len);
int bytes = custring_view::alloc_size(len,nchars);
if( bitmask && ((bitmask[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec
bytes = 0;
hlengths[idx] = len;
nbytes += ALIGN_SIZE(bytes);
hoffsets[idx+1] = nbytes;
}
if( nbytes==0 )
return 0; // should not happen
// serialize host memory into a new buffer
unsigned int cheat = 0;//sizeof(custring_view);
char* h_flatstrs = (char*)malloc(nbytes);
for( int idx = 0; idx < count; ++idx )
memcpy(h_flatstrs + hoffsets[idx] + cheat, strs + offsets[idx], hlengths[idx]);
// copy whole thing to device memory
char* d_flatstrs = nullptr;
rmmError_t rerr = RMM_ALLOC(&d_flatstrs,nbytes,0);
if( rerr == RMM_SUCCESS )
err = hipMemcpyAsync(d_flatstrs, h_flatstrs, nbytes, hipMemcpyHostToDevice);
free(h_flatstrs); // no longer needed
if( err != hipSuccess )
{
fprintf(stderr,"nvs-ofs: alloc/copy %'lu bytes\n",nbytes);
printCudaError(err);
return (int)err;
}
// copy offsets and lengths to device memory
rmm::device_vector<size_t> doffsets(hoffsets);
rmm::device_vector<size_t> dlengths(hlengths);
size_t* d_offsets = doffsets.data().get();
size_t* d_lengths = dlengths.data().get();
// initialize custring objects in device memory
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_flatstrs, d_offsets, d_lengths, cheat, d_strings] __device__(unsigned int idx){
size_t len = d_lengths[idx];
size_t offset = d_offsets[idx];
size_t size = d_offsets[idx+1] - offset;
if( size < 1 )
return; // null string
char* ptr = d_flatstrs + offset;
char* str = ptr + cheat;
d_strings[idx] = custring_view::create_from(ptr,str,len);
});
//
pImpl->setMemoryBuffer(d_flatstrs,nbytes);
return (int)err;
}
// build strings from a device character buffer plus device offsets array
// strs/offsets/bitmask are all device pointers (unlike NVStrings_init_from_offsets,
// which takes host pointers); bitmask follows the arrow spec (bit set = valid).
int NVStrings_init_from_device_offsets( NVStringsImpl* pImpl, const char* strs, int count, const int* offsets, const unsigned char* bitmask, int nulls )
{
    if( count==nulls )
        return 0; // if all are nulls then we are done
    auto execpol = rmm::exec_policy(0);
    // first compute the size of each string
    // (entries for null strings stay 0 so they occupy no buffer space)
    rmm::device_vector<size_t> sizes(count,0);
    size_t* d_sizes = sizes.data().get();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [strs, offsets, bitmask, d_sizes] __device__(unsigned int idx){
            if( bitmask && ((bitmask[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec
                return;
            int offset = offsets[idx];
            int len = offsets[idx+1] - offset;
            const char* str = strs + offset;
            int nchars = custring_view::chars_in_string(str,len);
            int bytes = custring_view::alloc_size(len,nchars);
            d_sizes[idx] = ALIGN_SIZE(bytes); // keep each object aligned within the flat buffer
        });
    // allocate one flat device buffer sized from the per-string totals
    char* d_buffer = pImpl->createMemoryFor(d_sizes);
    if( !d_buffer )
        return 0; // nothing to do
    // exclusive-scan the sizes into per-string output offsets
    rmm::device_vector<size_t> out_offsets(count,0);
    thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),out_offsets.begin());
    size_t* d_out_offsets = out_offsets.data().get();
    // initialize custring objects in device memory
    custring_view_array d_strings = pImpl->getStringsPtr();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [strs, offsets, bitmask, d_buffer, d_out_offsets, d_strings] __device__(unsigned int idx){
            if( bitmask && ((bitmask[idx/8] & (1 << (idx % 8)))==0) )
                return; // null string; d_strings[idx] stays nullptr
            int offset = offsets[idx];
            int len = offsets[idx+1] - offset;
            const char* in_str = strs + offset;
            char* out_str = d_buffer + d_out_offsets[idx];
            d_strings[idx] = custring_view::create_from(out_str,in_str,len);
        });
    //
    return 0;
}
// Concatenate the string memory of every instance in strslist into one new
// device buffer owned by pImpl, rebasing each custring_view pointer from its
// source buffer into the new one. Returns the total number of string slots
// in pImpl's list. Assumes pImpl->pList is already sized to hold all entries.
int NVStrings_copy_strings( NVStringsImpl* pImpl, std::vector<NVStringsImpl*>& strslist )
{
    auto execpol = rmm::exec_policy(0);
    auto pList = pImpl->pList;
    unsigned int count = (unsigned int)pList->size();
    size_t nbytes = 0;
    for( auto itr=strslist.begin(); itr!=strslist.end(); itr++ )
        nbytes += (*itr)->getMemorySize();
    custring_view_array d_results = pList->data().get();
    char* d_buffer = device_alloc<char>(nbytes,0);
    size_t ptr_offset = 0;    // running index into the combined pointer list
    size_t buffer_offset = 0; // running byte offset into the combined character buffer
    for( auto itr=strslist.begin(); itr!=strslist.end(); itr++ )
    {
        NVStringsImpl* strs = *itr;
        unsigned int size = strs->getCount();
        size_t buffer_size = strs->getMemorySize();
        if( size==0 )
            continue;
        rmm::device_vector<custring_view*> strings(size,nullptr);
        custring_view** d_strings = strings.data().get();
        // copy the pointers (temporary; used only to compute rebase offsets below)
        CUDA_TRY( hipMemcpyAsync( d_strings, strs->getStringsPtr(), size*sizeof(custring_view*), hipMemcpyDeviceToDevice));
        if( buffer_size )
        {
            // copy string memory
            char* baseaddr = strs->getMemoryPtr();
            char* buffer = d_buffer + buffer_offset;
            CUDA_TRY( hipMemcpyAsync(buffer, baseaddr, buffer_size, hipMemcpyDeviceToDevice) );
            // adjust pointers: each view keeps its offset within its source
            // buffer, so new address = new base + (old address - old base).
            // Null pointers are skipped; d_results entries stay nullptr.
            custring_view_array results = d_results + ptr_offset;
            thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), size,
                [buffer, baseaddr, d_strings, results] __device__(unsigned int idx){
                    char* dstr = (char*)d_strings[idx];
                    if( !dstr )
                        return;
                    size_t diff = dstr - baseaddr;
                    char* newaddr = buffer + diff;
                    results[idx] = (custring_view*)newaddr;
            });
        }
        ptr_offset += size;
        buffer_offset += buffer_size;
    }
    //
    pImpl->setMemoryBuffer(d_buffer,nbytes);
    return count;
}
// Rebase every custring_view pointer from an original base address (baseaddr,
// valid in the process that exported the memory) to this instance's buffer.
// Used after receiving string memory across an IPC boundary. Returns the
// number of string slots processed.
int NVStrings_fixup_pointers( NVStringsImpl* pImpl, char* baseaddr )
{
    auto execpol = rmm::exec_policy(0);
    auto pList = pImpl->pList;
    unsigned int count = (unsigned int)pList->size();
    custring_view_array d_strings = pImpl->getStringsPtr();
    //---- the following can be used to find the base-address of the original memory ----
    //---- instead of passing it across the ipc boundary; leaving it here for now    ----
    //custring_view** first = thrust::min_element(execpol->on(0),d_strings,d_strings+count,
    //    [] __device__ (custring_view* lhs, custring_view* rhs) {
    //        return (lhs && rhs) ? (lhs < rhs) : rhs==0;
    //    });
    //hipError_t err = hipMemcpy(&baseaddr,first,sizeof(custring_view*),hipMemcpyDeviceToHost);
    //if( err!=hipSuccess )
    //    fprintf(stderr, "fixup: hipMemcpy(%p,%p,%d)=%d\n",&baseaddr,first,(int)sizeof(custring_view*),(int)err);
    //
    char* buffer = pImpl->getMemoryPtr();
    // new address = local buffer + (old address - old base); nulls untouched
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [buffer, baseaddr, d_strings] __device__(unsigned int idx){
            char* dstr = (char*)d_strings[idx];
            if( !dstr )
                return;
            size_t diff = dstr - baseaddr;
            char* newaddr = buffer + diff;
            d_strings[idx] = (custring_view*)newaddr;
        });
    //hipError_t err = hipDeviceSynchronize();
    //if( err!=hipSuccess )
    //    printCudaError(err,"nvs-fixup");
    return count;
}
| b987c6d808ad70f916ce4bba09a688e0840bdf67.cu | /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <exception>
#include <locale.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/for_each.h>
#include <thrust/extrema.h>
#include <thrust/sort.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "nvstrings/NVStrings.h"
#include "./NVStringsImpl.h"
#include "../custring_view.cuh"
#include "../custring.cuh"
#include "../unicode/unicode_flags.h"
#include "../unicode/charcases.h"
#include "../util.h"
//
// Report a CUDA error to stderr with an identifying prefix.
// Does nothing when err is cudaSuccess.
void printCudaError( cudaError_t err, const char* prefix )
{
    if( err != cudaSuccess )
        fprintf(stderr,"%s: %s(%d):%s\n",prefix,cudaGetErrorName(err),(int)err,cudaGetErrorString(err));
}
//
// Decode a null-terminated UTF-8 string into a newly allocated,
// zero-terminated char32_t array. Caller owns the result (delete[]).
char32_t* to_char32( const char* ca )
{
    unsigned int nbytes = (unsigned int)strlen(ca);
    unsigned int nchars = custring_view::chars_in_string(ca,nbytes);
    char32_t* result = new char32_t[nchars+1];
    result[nchars] = 0; // terminator
    const char* in_ptr = ca;
    char32_t* out_ptr = result;
    unsigned int consumed = 0;
    while( consumed < nbytes )
    {
        Char chr = 0;
        // decode one code point; returns the number of bytes it occupied
        unsigned int width = custring_view::char_to_Char(in_ptr,chr);
        in_ptr += width;
        consumed += width;
        *out_ptr++ = chr;
    }
    return result;
}
//
// Device copy of the 64KB unicode character-class flags table; allocated once
// on first use and intentionally never freed.
static unsigned char* d_unicode_flags = nullptr;
// Return the device pointer to the unicode flags table, copying it from the
// host table on first call.
// NOTE(review): not thread-safe (unsynchronized lazy init) and the
// cudaMalloc/cudaMemcpy return codes are not checked — confirm callers
// only use this from a single thread after CUDA init.
unsigned char* get_unicode_flags()
{
    if( !d_unicode_flags )
    {
        // leave this out of RMM since it is never freed
        cudaMalloc(&d_unicode_flags,65536);
        cudaMemcpy(d_unicode_flags,unicode_flags,65536,cudaMemcpyHostToDevice);
    }
    return d_unicode_flags;
}
// Device copy of the 64K-entry upper/lower case-mapping table; allocated once
// on first use and intentionally never freed.
static unsigned short* d_charcases = nullptr;
// Return the device pointer to the case-mapping table, copying it from the
// host table on first call.
// NOTE(review): same caveats as get_unicode_flags — unsynchronized lazy
// init and unchecked CUDA return codes.
unsigned short* get_charcases()
{
    if( !d_charcases )
    {
        // leave this out of RMM since it is never freed
        cudaMalloc(&d_charcases,65536*sizeof(unsigned short));
        cudaMemcpy(d_charcases,charcases,65536*sizeof(unsigned short),cudaMemcpyHostToDevice);
    }
    return d_charcases;
}
//
// Construct with `count` string slots: a device vector of custring_view
// pointers, all null. The flat character buffer is attached later via
// setMemoryBuffer() or createMemoryFor().
NVStringsImpl::NVStringsImpl(unsigned int count)
              : bufferSize(0), memoryBuffer(nullptr), bIpcHandle(false), stream_id(0)
{
    pList = new rmm::device_vector<custring_view*>(count,nullptr);
}
// Release device resources. Memory this instance owns is freed through RMM;
// memory imported through CUDA IPC is closed instead (it belongs to another
// process and must never be RMM_FREE'd here).
NVStringsImpl::~NVStringsImpl()
{
    if( memoryBuffer ) // guard: never close/free a null buffer
    {
        if( bIpcHandle )
            cudaIpcCloseMemHandle(memoryBuffer);
        else
            RMM_FREE(memoryBuffer,0);
    }
    memoryBuffer = nullptr;
    delete pList;
    pList = nullptr;
    bufferSize = 0;
}
// Sum the device-resident per-string byte lengths and allocate one flat
// buffer large enough for all of them. Returns the buffer, or null when
// every length is zero (a valid all-empty/all-null case).
char* NVStringsImpl::createMemoryFor( size_t* d_lengths )
{
    unsigned int nstrings = (unsigned int)pList->size();
    auto execpol = rmm::exec_policy(stream_id);
    size_t total = thrust::reduce(execpol->on(stream_id), d_lengths, d_lengths + nstrings);
    bufferSize = total;
    if( total == 0 )
        return 0; // this is valid; all sizes are zero
    memoryBuffer = device_alloc<char>(total,stream_id);
    return memoryBuffer;
}
//
// Build device strings from an array of host C-strings.
// strs[idx]==nullptr produces a null entry; an empty string ("") is distinct
// from null. Layout: all strings are serialized into one flat host buffer,
// copied to device in a single transfer, then custring_view objects are
// constructed in place on the device.
// Returns 0 on success, -1 on host OOM, or a cudaError_t value on failure.
int NVStrings_init_from_strings(NVStringsImpl* pImpl, const char** strs, unsigned int count )
{
    cudaError_t err = cudaSuccess;
    auto execpol = rmm::exec_policy(0);
    // first compute the size of each string
    size_t nbytes = 0;
    thrust::host_vector<size_t> hoffsets(count+1,0);
    //hoffsets[0] = 0; --already set by this ----^
    thrust::host_vector<size_t> hlengths(count,0);
    for( unsigned int idx=0; idx < count; ++idx )
    {
        const char* str = strs[idx];
        size_t len = ( str ? (strlen(str)+1) : 0 );
        size_t nsz = len; // include null-terminator
        if( len > 0 ) // len=0 is null, len=1 is empty string
        {
            hlengths[idx] = len; // just the string length
            int nchars = custring_view::chars_in_string(str,(int)len-1);
            nsz = custring_view::alloc_size((int)len-1,nchars);
        }
        nsz = ALIGN_SIZE(nsz); // keep each custring_view aligned in the flat buffer
        nbytes += nsz;
        hoffsets[idx+1] = nbytes;
    }
    // check if they are all null
    if( nbytes==0 )
        return (int)err;
    // Host serialization
    size_t cheat = 0;//sizeof(custring_view);
    char* h_flatstrs = (char*)malloc(nbytes);
    if( !h_flatstrs )
    {
        fprintf(stderr,"init_from_strings: not enough CPU memory for intermediate buffer of size %ld bytes\n", nbytes);
        return -1;
    }
    // pack each source string at its computed offset (nulls copy 0 bytes)
    for( unsigned int idx = 0; idx < count; ++idx )
        memcpy(h_flatstrs + hoffsets[idx] + cheat, strs[idx], hlengths[idx]);
    // copy to device memory
    char* d_flatstrs = nullptr;
    rmmError_t rerr = RMM_ALLOC(&d_flatstrs,nbytes,0);
    if( rerr == RMM_SUCCESS )
        err = cudaMemcpyAsync(d_flatstrs, h_flatstrs, nbytes, cudaMemcpyHostToDevice);
    free(h_flatstrs); // no longer needed
    if( err != cudaSuccess )
    {
        fprintf(stderr,"nvs-sts: alloc/copy %'lu bytes\n",nbytes);
        printCudaError(err);
        return (int)err;
    }
    // copy offsets and lengths to device memory
    rmm::device_vector<size_t> offsets(hoffsets);
    rmm::device_vector<size_t> lengths(hlengths);
    size_t* d_offsets = offsets.data().get();
    size_t* d_lengths = lengths.data().get();
    // initialize custring objects in device memory
    custring_view_array d_strings = pImpl->getStringsPtr();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_flatstrs, d_offsets, d_lengths, cheat, d_strings] __device__(unsigned int idx){
            size_t len = d_lengths[idx];
            if( len < 1 )
                return; // null string
            size_t offset = d_offsets[idx];
            char* ptr = d_flatstrs + offset;
            char* str = ptr + cheat;
            d_strings[idx] = custring_view::create_from(ptr,str,(int)len-1);
        });
    //
    //err = cudaDeviceSynchronize();
    //if( err!=cudaSuccess )
    //{
    //    fprintf(stderr,"nvs-sts: sync=%d copy %'u strings\n",(int)err,count);
    //    printCudaError(err);
    //}
    pImpl->setMemoryBuffer(d_flatstrs,nbytes);
    return (int)err;
}
// build strings from array of device pointers and sizes
// Build device strings from an array of (pointer,length) pairs.
// indexes may live in host or device memory (bdevmem selects which); the
// character data pointed at must be in device memory. A null pointer in a
// pair produces a null string. stype optionally sorts the pairs first to
// reduce warp divergence during construction.
int NVStrings_init_from_indexes( NVStringsImpl* pImpl, std::pair<const char*,size_t>* indexes, unsigned int count, bool bdevmem, NVStrings::sorttype stype )
{
    cudaError_t err = cudaSuccess;
    rmmError_t rerr = RMM_SUCCESS;
    auto execpol = rmm::exec_policy(0);
    thrust::pair<const char*,size_t>* d_indexes = (thrust::pair<const char*,size_t>*)indexes;
    if( !bdevmem )
    {
        // host pairs: stage them into device memory first
        rerr = RMM_ALLOC(&d_indexes,sizeof(std::pair<const char*,size_t>)*count,0);
        if( rerr == RMM_SUCCESS )
            err = cudaMemcpyAsync(d_indexes,indexes,sizeof(std::pair<const char*,size_t>)*count,cudaMemcpyHostToDevice);
    }
    else
    {
        // Lets check what we got from the caller by reading all the memory once.
        // This is wasteful but I cannot keep people from passing bad data:
        // https://github.com/rapidsai/custrings/issues/191
        // This check cannot be done inline below because libraries like thrust may terminate the process
        // when illegal pointers are passed in. Here we do a pre-check, handle the error and return it.
        // Do not put any other thrust calls before this line in this method.
        try
        {
            thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
                [d_indexes] __device__ (unsigned int idx) {
                    const char* str = d_indexes[idx].first;
                    size_t bytes = d_indexes[idx].second;
                    if( str )
                        custring_view::chars_in_string(str,(unsigned int)bytes);
                });
            err = cudaDeviceSynchronize(); // do not remove this
        }
        catch( thrust::system_error& exc )
        {
            err = (cudaError_t)exc.code().value();
            //printf("exception: %d: %s\n", (int)err, e.what());
        }
    }
    if( err != cudaSuccess || rerr != RMM_SUCCESS )
    {
        printCudaError(err,"nvs-idx: checking parms");
        if( !bdevmem )
            RMM_FREE(d_indexes,0);
        return (int)err;
    }
    // sort the list - helps reduce divergence
    if( stype )
    {
        // nulls sort first; then by length and/or content per stype flags
        thrust::sort(execpol->on(0), d_indexes, d_indexes + count,
            [stype] __device__( thrust::pair<const char*,size_t>& lhs, thrust::pair<const char*,size_t>& rhs ) {
                if( lhs.first==0 || rhs.first==0 )
                    return rhs.first!=0; // null < non-null
                int diff = 0;
                if( stype & NVStrings::length )
                    // NOTE(review): size_t difference truncated through unsigned->int;
                    // sign is only reliable for lengths < 2^31 — confirm intended
                    diff = (unsigned int)(lhs.second - rhs.second);
                if( diff==0 && (stype & NVStrings::name) )
                    diff = custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second);
                return (diff < 0);
            });
    }
    // first get the size we need to store these strings
    rmm::device_vector<size_t> sizes(count,0);
    size_t* d_sizes = sizes.data().get();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_indexes, d_sizes] __device__ (unsigned int idx) {
            const char* str = d_indexes[idx].first;
            size_t bytes = d_indexes[idx].second;
            if( str )
                d_sizes[idx] = ALIGN_SIZE(custring_view::alloc_size((char*)str,(int)bytes));
        });
    // allocate device memory
    size_t nbytes = thrust::reduce(execpol->on(0),sizes.begin(),sizes.end());
    //printf("nvs-idx: %'lu bytes\n",nbytes);
    if( nbytes==0 ) {
        if( !bdevmem )
            RMM_FREE(d_indexes,0);
        return 0; // done, all the strings were null
    }
    char* d_flatdstrs = nullptr;
    rerr = RMM_ALLOC(&d_flatdstrs,nbytes,0);
    if( rerr != RMM_SUCCESS )
    {
        fprintf(stderr,"nvs-idx: RMM_ALLOC(%p,%lu)=%d\n", d_flatdstrs,nbytes,(int)rerr);
        //printCudaError(err);
        if( !bdevmem )
            RMM_FREE(d_indexes,0);
        return (int)err;
    }
    // build offsets array (exclusive scan of aligned sizes)
    rmm::device_vector<size_t> offsets(count,0);
    thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
    // now build the strings vector
    custring_view_array d_strings = pImpl->getStringsPtr();
    size_t* d_offsets = offsets.data().get();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_indexes, d_flatdstrs, d_offsets, d_sizes, d_strings] __device__(unsigned int idx){
            // add string to internal vector array
            const char* str = d_indexes[idx].first;
            size_t bytes = d_indexes[idx].second;
            size_t offset = d_offsets[idx];
            char* ptr = d_flatdstrs + offset;
            custring_view* dstr = 0;
            if( str )
                dstr = custring_view::create_from(ptr,(char*)str,(int)bytes);
            d_strings[idx] = dstr;
            d_sizes[idx] = bytes; // repurpose sizes to hold raw byte counts
        });
    //
    pImpl->setMemoryBuffer(d_flatdstrs,nbytes);
    if( !bdevmem )
        RMM_FREE(d_indexes,0);
    return (int)err;
}
// build strings from pointer and array of offsets
// Build strings from a single host character buffer and an array of offsets.
// strs:    host pointer to contiguous UTF-8 bytes
// offsets: host array of count+1 byte offsets into strs
// bitmask: optional arrow-spec validity bitmask (bit set = valid); may be null
// nulls:   number of null entries indicated by the bitmask
// Returns 0 on success, non-zero (cudaError_t value or -1) on failure.
int NVStrings_init_from_offsets( NVStringsImpl* pImpl, const char* strs, int count, const int* offsets, const unsigned char* bitmask, int nulls )
{
    if( count==nulls )
        return 0; // if all are nulls then we are done
    cudaError_t err = cudaSuccess;
    auto execpol = rmm::exec_policy(0);
    // first compute the size of each string
    size_t nbytes = 0;
    thrust::host_vector<size_t> hoffsets(count+1,0);
    thrust::host_vector<size_t> hlengths(count,0);
    for( int idx=0; idx < count; ++idx )
    {
        int offset = offsets[idx];
        int len = offsets[idx+1] - offset;
        const char* str = strs + offset;
        int nchars = custring_view::chars_in_string(str,len);
        int bytes = custring_view::alloc_size(len,nchars);
        if( bitmask && ((bitmask[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec
            bytes = 0; // null entry occupies no space in the flat buffer
        hlengths[idx] = len;
        nbytes += ALIGN_SIZE(bytes);
        hoffsets[idx+1] = nbytes;
    }
    if( nbytes==0 )
        return 0; // should not happen
    // serialize host memory into a new buffer
    unsigned int cheat = 0;//sizeof(custring_view);
    char* h_flatstrs = (char*)malloc(nbytes);
    if( !h_flatstrs ) // malloc can fail for large columns; never dereference null
    {
        fprintf(stderr,"nvs-ofs: not enough CPU memory for intermediate buffer of size %ld bytes\n", (long)nbytes);
        return -1;
    }
    for( int idx = 0; idx < count; ++idx )
        memcpy(h_flatstrs + hoffsets[idx] + cheat, strs + offsets[idx], hlengths[idx]);
    // copy whole thing to device memory
    char* d_flatstrs = nullptr;
    rmmError_t rerr = RMM_ALLOC(&d_flatstrs,nbytes,0);
    if( rerr == RMM_SUCCESS )
        err = cudaMemcpyAsync(d_flatstrs, h_flatstrs, nbytes, cudaMemcpyHostToDevice);
    else
        err = cudaErrorMemoryAllocation; // surface device alloc failure; do not continue with a null buffer
    free(h_flatstrs); // no longer needed
    if( err != cudaSuccess )
    {
        fprintf(stderr,"nvs-ofs: alloc/copy %'lu bytes\n",nbytes);
        printCudaError(err);
        return (int)err;
    }
    // copy offsets and lengths to device memory
    rmm::device_vector<size_t> doffsets(hoffsets);
    rmm::device_vector<size_t> dlengths(hlengths);
    size_t* d_offsets = doffsets.data().get();
    size_t* d_lengths = dlengths.data().get();
    // initialize custring objects in device memory
    custring_view_array d_strings = pImpl->getStringsPtr();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_flatstrs, d_offsets, d_lengths, cheat, d_strings] __device__(unsigned int idx){
            size_t len = d_lengths[idx];
            size_t offset = d_offsets[idx];
            size_t size = d_offsets[idx+1] - offset;
            if( size < 1 )
                return; // null string
            char* ptr = d_flatstrs + offset;
            char* str = ptr + cheat;
            d_strings[idx] = custring_view::create_from(ptr,str,len);
        });
    //
    pImpl->setMemoryBuffer(d_flatstrs,nbytes);
    return (int)err;
}
// build strings from array of device pointers and sizes
int NVStrings_init_from_device_offsets( NVStringsImpl* pImpl, const char* strs, int count, const int* offsets, const unsigned char* bitmask, int nulls )
{
if( count==nulls )
return 0; // if all are nulls then we are done
auto execpol = rmm::exec_policy(0);
// first compute the size of each string
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[strs, offsets, bitmask, d_sizes] __device__(unsigned int idx){
if( bitmask && ((bitmask[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec
return;
int offset = offsets[idx];
int len = offsets[idx+1] - offset;
const char* str = strs + offset;
int nchars = custring_view::chars_in_string(str,len);
int bytes = custring_view::alloc_size(len,nchars);
d_sizes[idx] = ALIGN_SIZE(bytes);
});
// copy whole thing to device memory
char* d_buffer = pImpl->createMemoryFor(d_sizes);
if( !d_buffer )
return 0; // nothing to do
// copy offsets and lengths to device memory
rmm::device_vector<size_t> out_offsets(count,0);
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),out_offsets.begin());
size_t* d_out_offsets = out_offsets.data().get();
// initialize custring objects in device memory
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[strs, offsets, bitmask, d_buffer, d_out_offsets, d_strings] __device__(unsigned int idx){
if( bitmask && ((bitmask[idx/8] & (1 << (idx % 8)))==0) )
return; // null string
int offset = offsets[idx];
int len = offsets[idx+1] - offset;
const char* in_str = strs + offset;
char* out_str = d_buffer + d_out_offsets[idx];
d_strings[idx] = custring_view::create_from(out_str,in_str,len);
});
//
return 0;
}
// Concatenate the string memory of every instance in strslist into one new
// device buffer owned by pImpl, rebasing each custring_view pointer from its
// source buffer into the new one. Returns the total number of string slots
// in pImpl's list. Assumes pImpl->pList is already sized to hold all entries.
int NVStrings_copy_strings( NVStringsImpl* pImpl, std::vector<NVStringsImpl*>& strslist )
{
    auto execpol = rmm::exec_policy(0);
    auto pList = pImpl->pList;
    unsigned int count = (unsigned int)pList->size();
    size_t nbytes = 0;
    for( auto itr=strslist.begin(); itr!=strslist.end(); itr++ )
        nbytes += (*itr)->getMemorySize();
    custring_view_array d_results = pList->data().get();
    char* d_buffer = device_alloc<char>(nbytes,0);
    size_t ptr_offset = 0;    // running index into the combined pointer list
    size_t buffer_offset = 0; // running byte offset into the combined character buffer
    for( auto itr=strslist.begin(); itr!=strslist.end(); itr++ )
    {
        NVStringsImpl* strs = *itr;
        unsigned int size = strs->getCount();
        size_t buffer_size = strs->getMemorySize();
        if( size==0 )
            continue;
        rmm::device_vector<custring_view*> strings(size,nullptr);
        custring_view** d_strings = strings.data().get();
        // copy the pointers (temporary; used only to compute rebase offsets below)
        CUDA_TRY( cudaMemcpyAsync( d_strings, strs->getStringsPtr(), size*sizeof(custring_view*), cudaMemcpyDeviceToDevice));
        if( buffer_size )
        {
            // copy string memory
            char* baseaddr = strs->getMemoryPtr();
            char* buffer = d_buffer + buffer_offset;
            CUDA_TRY( cudaMemcpyAsync(buffer, baseaddr, buffer_size, cudaMemcpyDeviceToDevice) );
            // adjust pointers: each view keeps its offset within its source
            // buffer, so new address = new base + (old address - old base).
            // Null pointers are skipped; d_results entries stay nullptr.
            custring_view_array results = d_results + ptr_offset;
            thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), size,
                [buffer, baseaddr, d_strings, results] __device__(unsigned int idx){
                    char* dstr = (char*)d_strings[idx];
                    if( !dstr )
                        return;
                    size_t diff = dstr - baseaddr;
                    char* newaddr = buffer + diff;
                    results[idx] = (custring_view*)newaddr;
            });
        }
        ptr_offset += size;
        buffer_offset += buffer_size;
    }
    //
    pImpl->setMemoryBuffer(d_buffer,nbytes);
    return count;
}
// Rebase every custring_view pointer from an original base address (baseaddr,
// valid in the process that exported the memory) to this instance's buffer.
// Used after receiving string memory across an IPC boundary. Returns the
// number of string slots processed.
int NVStrings_fixup_pointers( NVStringsImpl* pImpl, char* baseaddr )
{
    auto execpol = rmm::exec_policy(0);
    auto pList = pImpl->pList;
    unsigned int count = (unsigned int)pList->size();
    custring_view_array d_strings = pImpl->getStringsPtr();
    //---- the following can be used to find the base-address of the original memory ----
    //---- instead of passing it across the ipc boundary; leaving it here for now    ----
    //custring_view** first = thrust::min_element(execpol->on(0),d_strings,d_strings+count,
    //    [] __device__ (custring_view* lhs, custring_view* rhs) {
    //        return (lhs && rhs) ? (lhs < rhs) : rhs==0;
    //    });
    //cudaError_t err = cudaMemcpy(&baseaddr,first,sizeof(custring_view*),cudaMemcpyDeviceToHost);
    //if( err!=cudaSuccess )
    //    fprintf(stderr, "fixup: cudaMemcpy(%p,%p,%d)=%d\n",&baseaddr,first,(int)sizeof(custring_view*),(int)err);
    //
    char* buffer = pImpl->getMemoryPtr();
    // new address = local buffer + (old address - old base); nulls untouched
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [buffer, baseaddr, d_strings] __device__(unsigned int idx){
            char* dstr = (char*)d_strings[idx];
            if( !dstr )
                return;
            size_t diff = dstr - baseaddr;
            char* newaddr = buffer + diff;
            d_strings[idx] = (custring_view*)newaddr;
        });
    //cudaError_t err = cudaDeviceSynchronize();
    //if( err!=cudaSuccess )
    //    printCudaError(err,"nvs-fixup");
    return count;
}
|
7c35c1a745ad1fc40565d494dd3e45579d1bd08d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
ssymv_upper.cu is nearly identical to ssymv_upper.cu, just change names and drop .
ssymv_kernel_U (upper) in ssymv_upper.cu is very similar to
ssymv_kernel_L (lower) in ssymv.cu; diff the two files to compare.
@generated from zhemv_mgpu_upper.cu normal z -> s, Fri Jan 30 19:00:10 2015
@author Mark Gates
*/
#include "common_magma.h"
#include "commonblas_s.h"
#define PRECISION_s
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/*******************************************************************************
Upper case, compute block multiply, work = A*x, for any size n:
[ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ]
The order is different from the lower case, because
the upper case processes a block row from the diagonal to the right, whereas
the lower case processes a block row from the diagonal to the left.
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
********************************************************************/
__global__ void
ssymv_kernel_U_mgpu(
int n,
float const * __restrict__ A, int lda,
float const * __restrict__ x, int incx,
float * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
float psum, psum_t;
float total = MAGMA_S_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ float sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ float sx_blk[NB_X]; // for x[ blk ]
__shared__ float sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag
float rA[4];
float psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( (partial && tx >= partial) ||
(blk == 0 && tx < block_offset) ) {
sx_blk[tx] = MAGMA_S_ZERO;
}
else {
sx_blk[tx] = x[0];
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
if ( blk % ngpu == my_gpu_id ) {
// this GPU owns this diagonal block, so
// move to 32x32 diag block
A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for(int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle
#pragma unroll
for(int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - (tx2) + (partial - 1);
}
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
if ( tx2 >= partial ) {
A = A + (tx2) - (partial - 1);
}
}
else {
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
psum += ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 64x64 block right of diag in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2)
}
// finish switching thread offset
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
int next = blk + (my_gpu_id + ngpu - 1 - blk % ngpu) % ngpu + 1;
A += (next/ngpu)*NB_X*lda; // A is A(blk_ind + tx, next*NB_X + 4*ty)
// Unlike lower case, don't adjust A here for partial # of rows.
// Since block is right of diagonal, it must have all NB rows,
// but can have < NB columns, dealt with when loading below.
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj right of diagonal, in block row blk
for(int jj=next; jj < gridDim.x; jj += ngpu) {
partial = (jj == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// block is right of diagonal, so don't need to worry about offset here
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
else {
sx_jj[tx] = MAGMA_S_ZERO;
}
}
__syncthreads();
for( int k=0; k < 4; k++ ) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
if ( partial ) {
#pragma unroll
for(int j=0; j < 4; j++) {
if ( 4*ty + j + k*quarter_NB_X < partial ) {
rA[j] = A[j*lda];
}
else {
rA[j] = MAGMA_S_ZERO;
}
}
}
else {
#pragma unroll
for(int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply 16x64 block A_{blk,jj} * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for(int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty)
}
// already at next 64x64 block
// A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty)
// store partial row sums of transposed result, y_jj
#pragma unroll
for(int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; //MAGMA_S_MAKE( tx4, blk ); // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; //MAGMA_S_MAKE( tx, blk ); // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end ssymv_kernel_U_mgpu
/**************************************************************
Upper case, sum up partial results per GPU.
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1 + A12*x2 + A13*x3) --- --- ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ]
On output:
[ (A11*x1 + A12*x2 + A13*x3) ]
y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ]
[ (A13^H*x1) + (A23^H*x2) + (A33*x3) ]
Note beta*y is not included here; see magmablas_ssymv_mgpu_sync.
The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks:
[ * ] blk=0 * data for non-transposed row w_blk = A_{blk,1:nblock} * x_{blk:nblock}
work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk}
[ x x * ] blk=2 blanks are not set
[ * ] blk=3
[ x x x x * ] blk=4
[ * ] blk=0
work[gpu=1] = [ x * ] blk=1
[ * ] blk=2
[ x x x * ] blk=3
[ * ] blk=4
On output, rows across are summed up.
Entries right of the diagonal blocks are not accessed.
There are no blank lines; work has been set to 0 if a GPU has no data to contribute.
[ * ]
y[gpu=0] = [ * ]
[ x + x + * ]
[ * ]
[ x + x + x + x + * ]
[ * ]
y[gpu=1] = [ x + * ]
[ * ]
[ x + x + x + * ]
[ * ]
********************************************************************/
__global__ void
ssymv_kernel_U_mgpu_sum(
int n,
float alpha,
int lda,
float * __restrict__ y, int incy,
float const * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
// Don't write outside [block_offset, ..., n+block_offset)
if ( ind >= block_offset && ind < n+block_offset ) {
float Ax = MAGMA_S_ZERO;
work += ind;
// if this GPU owns block-column blk, all blocks j=[0, ..., blk] contain data;
// else only block j=blk contains data.
int first = 0;
if ( blk % ngpu != my_gpu_id ) {
first = blk;
}
for(int j = first; j <= blk; ++j) {
Ax += work[j*lda];
}
y[ind * incy] = alpha*Ax; // see magmablas_ssymv_sync for beta*y
}
}
// end ssymv_kernel_L_mgpu_sum
| 7c35c1a745ad1fc40565d494dd3e45579d1bd08d.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
ssymv_upper.cu is nearly identical to ssymv_upper.cu, just change names and drop .
ssymv_kernel_U (upper) in ssymv_upper.cu is very similar to
ssymv_kernel_L (lower) in ssymv.cu; diff the two files to compare.
@generated from zhemv_mgpu_upper.cu normal z -> s, Fri Jan 30 19:00:10 2015
@author Mark Gates
*/
#include "common_magma.h"
#include "commonblas_s.h"
#define PRECISION_s
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/*******************************************************************************
Upper case, compute block multiply, work = A*x, for any size n:
[ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ]
The order is different from the lower case, because
the upper case processes a block row from the diagonal to the right, whereas
the lower case processes a block row from the diagonal to the left.
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
********************************************************************/
__global__ void
ssymv_kernel_U_mgpu(
int n,
float const * __restrict__ A, int lda,
float const * __restrict__ x, int incx,
float * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
float psum, psum_t;
float total = MAGMA_S_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ float sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ float sx_blk[NB_X]; // for x[ blk ]
__shared__ float sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag
float rA[4];
float psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( (partial && tx >= partial) ||
(blk == 0 && tx < block_offset) ) {
sx_blk[tx] = MAGMA_S_ZERO;
}
else {
sx_blk[tx] = x[0];
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
if ( blk % ngpu == my_gpu_id ) {
// this GPU owns this diagonal block, so
// move to 32x32 diag block
A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for(int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle
#pragma unroll
for(int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - (tx2) + (partial - 1);
}
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
if ( tx2 >= partial ) {
A = A + (tx2) - (partial - 1);
}
}
else {
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
psum += ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 64x64 block right of diag in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2)
}
// finish switching thread offset
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
int next = blk + (my_gpu_id + ngpu - 1 - blk % ngpu) % ngpu + 1;
A += (next/ngpu)*NB_X*lda; // A is A(blk_ind + tx, next*NB_X + 4*ty)
// Unlike lower case, don't adjust A here for partial # of rows.
// Since block is right of diagonal, it must have all NB rows,
// but can have < NB columns, dealt with when loading below.
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj right of diagonal, in block row blk
for(int jj=next; jj < gridDim.x; jj += ngpu) {
partial = (jj == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// block is right of diagonal, so don't need to worry about offset here
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
else {
sx_jj[tx] = MAGMA_S_ZERO;
}
}
__syncthreads();
for( int k=0; k < 4; k++ ) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
if ( partial ) {
#pragma unroll
for(int j=0; j < 4; j++) {
if ( 4*ty + j + k*quarter_NB_X < partial ) {
rA[j] = A[j*lda];
}
else {
rA[j] = MAGMA_S_ZERO;
}
}
}
else {
#pragma unroll
for(int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply 16x64 block A_{blk,jj} * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for(int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty)
}
// already at next 64x64 block
// A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty)
// store partial row sums of transposed result, y_jj
#pragma unroll
for(int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; //MAGMA_S_MAKE( tx4, blk ); // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; //MAGMA_S_MAKE( tx, blk ); // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end ssymv_kernel_U_mgpu
/**************************************************************
Upper case, sum up partial results per GPU.
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1 + A12*x2 + A13*x3) --- --- ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ]
On output:
[ (A11*x1 + A12*x2 + A13*x3) ]
y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ]
[ (A13^H*x1) + (A23^H*x2) + (A33*x3) ]
Note beta*y is not included here; see magmablas_ssymv_mgpu_sync.
The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks:
[ * ] blk=0 * data for non-transposed row w_blk = A_{blk,1:nblock} * x_{blk:nblock}
work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk}
[ x x * ] blk=2 blanks are not set
[ * ] blk=3
[ x x x x * ] blk=4
[ * ] blk=0
work[gpu=1] = [ x * ] blk=1
[ * ] blk=2
[ x x x * ] blk=3
[ * ] blk=4
On output, rows across are summed up.
Entries right of the diagonal blocks are not accessed.
There are no blank lines; work has been set to 0 if a GPU has no data to contribute.
[ * ]
y[gpu=0] = [ * ]
[ x + x + * ]
[ * ]
[ x + x + x + x + * ]
[ * ]
y[gpu=1] = [ x + * ]
[ * ]
[ x + x + x + * ]
[ * ]
********************************************************************/
__global__ void
ssymv_kernel_U_mgpu_sum(
int n,
float alpha,
int lda,
float * __restrict__ y, int incy,
float const * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
// Don't write outside [block_offset, ..., n+block_offset)
if ( ind >= block_offset && ind < n+block_offset ) {
float Ax = MAGMA_S_ZERO;
work += ind;
// if this GPU owns block-column blk, all blocks j=[0, ..., blk] contain data;
// else only block j=blk contains data.
int first = 0;
if ( blk % ngpu != my_gpu_id ) {
first = blk;
}
for(int j = first; j <= blk; ++j) {
Ax += work[j*lda];
}
y[ind * incy] = alpha*Ax; // see magmablas_ssymv_sync for beta*y
}
}
// end ssymv_kernel_L_mgpu_sum
|
b22ba4eaf61aececfe6a6f111c0d182cb44efddc.hip | // !!! This is a file automatically generated by hipify!!!
/* include petsc */
#include "petsc.h"
#include "mpi.h"
/* for measuring computation time */
#include "include/timer.h"
#ifdef USE_ROCM
/* some cuda helpers */
#include "include/cuda_stuff.h"
#endif
#define X_SIZE 1e6
#define M 5
#define N_TRIALS 1000
#define PRINT_VECTOR_CONTENT 0
void compute_dots(int n, int ntrials, int m, Vec *Mdots_vec, double *Mdots_val) {
PetscErrorCode ierr;
Timer mytimer;
mytimer.restart();
ierr = PetscPrintf(PETSC_COMM_WORLD,"### %d dot products\n", m); CHKERRV(ierr);
/* compute dot product one after another ("sequentially") */
mytimer.start();
for(int itrial=0;itrial<ntrials;itrial++){
for(int i=0;i<m;i++){
ierr = VecDot( Mdots_vec[0], Mdots_vec[i], &(Mdots_val[i])); CHKERRV(ierr);
}
}
mytimer.stop();
ierr = PetscPrintf(PETSC_COMM_WORLD,"- total time : %f s\n", mytimer.get_value_last()); CHKERRV(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD,"- avg. time : %f s\n", mytimer.get_value_last()/(double)ntrials); CHKERRV(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD,"- results control : " ); CHKERRV(ierr);
for(int i=0;i<m;i++){
ierr = PetscPrintf(PETSC_COMM_WORLD,"%f, ", Mdots_val[i]/(double)n); CHKERRV(ierr);
}
ierr = PetscPrintf(PETSC_COMM_WORLD,"\n\n"); CHKERRV(ierr);
}
void compute_mdot(int n, int ntrials, int m, Vec *Mdots_vec, double *Mdots_val) {
PetscErrorCode ierr;
Timer mytimer;
mytimer.restart();
ierr = PetscPrintf(PETSC_COMM_WORLD,"### multiple %d dot-product\n", m); CHKERRV(ierr);
/* compute multiple dot products ("one shot") */
mytimer.start();
for(int itrial=0;itrial<ntrials;itrial++){
ierr = VecMDot( Mdots_vec[0], m, Mdots_vec, Mdots_val); CHKERRV(ierr);
}
mytimer.stop();
ierr = PetscPrintf(PETSC_COMM_WORLD,"- total time : %f s\n", mytimer.get_value_last()); CHKERRV(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD,"- avg. time : %f s\n", mytimer.get_value_last()/(double)ntrials); CHKERRV(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD,"- results control : " ); CHKERRV(ierr);
for(int i=0;i<m;i++){
ierr = PetscPrintf(PETSC_COMM_WORLD,"%f, ", Mdots_val[i]/(double)n); CHKERRV(ierr);
}
ierr = PetscPrintf(PETSC_COMM_WORLD,"\n\n"); CHKERRV(ierr);
}
int main( int argc, char *argv[] )
{
/* error handling */
PetscErrorCode ierr;
/* initialize Petsc */
PetscInitialize(&argc,&argv,PETSC_NULL,PETSC_NULL);
/* warm up GPU - call empty kernel (see include/cuda_stuff.h) */
#ifdef USE_ROCM
warm_up_cuda();
#endif
/* problem dimensions */
int n = X_SIZE; /* length of vectors */
int ntrials = N_TRIALS; /* number of trials (to provide average time) */
int m = M; /* number of dot-products (I am computing <v1,v1>, <v1,v2>, <v1,v3>, ... <v1,vm>) */
/* print info about benchmark */
ierr = PetscPrintf(PETSC_COMM_WORLD,"This is MDOT test.\n"); CHKERRQ(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD," - n : %d\t\t(length of vectors)\n",n); CHKERRQ(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD," - ntrials : %d\t\t(number of trials)\n",ntrials); CHKERRQ(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD," - m : %d\t\t(number of dot-products)\n",m); CHKERRQ(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD,"-------------------------------\n"); CHKERRQ(ierr);
Timer mytimer;
mytimer.restart();
mytimer.start();
/* create first vector x1 (all other will have the same layout) */
Vec x1;
ierr = VecCreate(PETSC_COMM_WORLD,&x1); CHKERRQ(ierr);
ierr = VecSetSizes(x1,PETSC_DECIDE,n); CHKERRQ(ierr);
#ifdef USE_ROCM
/* if we are using CUDA, it is a good idea to compute on GPU */
ierr = VecSetType(x1, VECMPICUDA); CHKERRQ(ierr);
#else
ierr = VecSetType(x1, VECMPI); CHKERRQ(ierr);
#endif
ierr = VecSetFromOptions(x1); CHKERRQ(ierr);
/* some values (in my case I will try v_i = i) */
ierr = VecSet(x1,1.0); CHKERRQ(ierr);
/* prepare other vectors, i.e. array of vectors (because of m=?) */
PetscScalar Mdots_val[m]; /* arrays of results of dot products */
Vec Mdots_vec[m]; /* array of vectors */
Mdots_vec[0] = x1; /* set first vector */
/* prepare other vectors */
for(int i=1;i<m;i++){
ierr = VecDuplicate(x1, &(Mdots_vec[i])); CHKERRQ(ierr);
ierr = VecSet(Mdots_vec[i],(PetscScalar)(i+1)); CHKERRQ(ierr);
}
mytimer.stop();
ierr = PetscPrintf(PETSC_COMM_WORLD,"- problem prepared in: %f s\n",mytimer.get_value_last()); CHKERRQ(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD,"\n"); CHKERRQ(ierr);
/* maybe print the content of the vectors ? */
if(PRINT_VECTOR_CONTENT){
for(int i=0;i<m;i++){
ierr = VecView(Mdots_vec[i], PETSC_VIEWER_STDOUT_WORLD); CHKERRQ(ierr);
}
}
/* COMPUTE SEQUENTIALLY DOT PRODUCTS */
compute_dots(n, ntrials, m, Mdots_vec, Mdots_val);
/* COMPUTE MULTIPLE DOT PRODUCTS */
compute_mdot(n, ntrials, m, Mdots_vec, Mdots_val);
/* finalize Petsc */
PetscFinalize();
return 0;
}
| b22ba4eaf61aececfe6a6f111c0d182cb44efddc.cu | /* include petsc */
#include "petsc.h"
#include "mpi.h"
/* for measuring computation time */
#include "include/timer.h"
#ifdef USE_CUDA
/* some cuda helpers */
#include "include/cuda_stuff.h"
#endif
#define X_SIZE 1e6
#define M 5
#define N_TRIALS 1000
#define PRINT_VECTOR_CONTENT 0
void compute_dots(int n, int ntrials, int m, Vec *Mdots_vec, double *Mdots_val) {
PetscErrorCode ierr;
Timer mytimer;
mytimer.restart();
ierr = PetscPrintf(PETSC_COMM_WORLD,"### %d dot products\n", m); CHKERRV(ierr);
/* compute dot product one after another ("sequentially") */
mytimer.start();
for(int itrial=0;itrial<ntrials;itrial++){
for(int i=0;i<m;i++){
ierr = VecDot( Mdots_vec[0], Mdots_vec[i], &(Mdots_val[i])); CHKERRV(ierr);
}
}
mytimer.stop();
ierr = PetscPrintf(PETSC_COMM_WORLD,"- total time : %f s\n", mytimer.get_value_last()); CHKERRV(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD,"- avg. time : %f s\n", mytimer.get_value_last()/(double)ntrials); CHKERRV(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD,"- results control : " ); CHKERRV(ierr);
for(int i=0;i<m;i++){
ierr = PetscPrintf(PETSC_COMM_WORLD,"%f, ", Mdots_val[i]/(double)n); CHKERRV(ierr);
}
ierr = PetscPrintf(PETSC_COMM_WORLD,"\n\n"); CHKERRV(ierr);
}
void compute_mdot(int n, int ntrials, int m, Vec *Mdots_vec, double *Mdots_val) {
PetscErrorCode ierr;
Timer mytimer;
mytimer.restart();
ierr = PetscPrintf(PETSC_COMM_WORLD,"### multiple %d dot-product\n", m); CHKERRV(ierr);
/* compute multiple dot products ("one shot") */
mytimer.start();
for(int itrial=0;itrial<ntrials;itrial++){
ierr = VecMDot( Mdots_vec[0], m, Mdots_vec, Mdots_val); CHKERRV(ierr);
}
mytimer.stop();
ierr = PetscPrintf(PETSC_COMM_WORLD,"- total time : %f s\n", mytimer.get_value_last()); CHKERRV(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD,"- avg. time : %f s\n", mytimer.get_value_last()/(double)ntrials); CHKERRV(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD,"- results control : " ); CHKERRV(ierr);
for(int i=0;i<m;i++){
ierr = PetscPrintf(PETSC_COMM_WORLD,"%f, ", Mdots_val[i]/(double)n); CHKERRV(ierr);
}
ierr = PetscPrintf(PETSC_COMM_WORLD,"\n\n"); CHKERRV(ierr);
}
int main( int argc, char *argv[] )
{
/* error handling */
PetscErrorCode ierr;
/* initialize Petsc */
PetscInitialize(&argc,&argv,PETSC_NULL,PETSC_NULL);
/* warm up GPU - call empty kernel (see include/cuda_stuff.h) */
#ifdef USE_CUDA
warm_up_cuda();
#endif
/* problem dimensions */
int n = X_SIZE; /* length of vectors */
int ntrials = N_TRIALS; /* number of trials (to provide average time) */
int m = M; /* number of dot-products (I am computing <v1,v1>, <v1,v2>, <v1,v3>, ... <v1,vm>) */
/* print info about benchmark */
ierr = PetscPrintf(PETSC_COMM_WORLD,"This is MDOT test.\n"); CHKERRQ(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD," - n : %d\t\t(length of vectors)\n",n); CHKERRQ(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD," - ntrials : %d\t\t(number of trials)\n",ntrials); CHKERRQ(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD," - m : %d\t\t(number of dot-products)\n",m); CHKERRQ(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD,"-------------------------------\n"); CHKERRQ(ierr);
Timer mytimer;
mytimer.restart();
mytimer.start();
/* create first vector x1 (all other will have the same layout) */
Vec x1;
ierr = VecCreate(PETSC_COMM_WORLD,&x1); CHKERRQ(ierr);
ierr = VecSetSizes(x1,PETSC_DECIDE,n); CHKERRQ(ierr);
#ifdef USE_CUDA
/* if we are using CUDA, it is a good idea to compute on GPU */
ierr = VecSetType(x1, VECMPICUDA); CHKERRQ(ierr);
#else
ierr = VecSetType(x1, VECMPI); CHKERRQ(ierr);
#endif
ierr = VecSetFromOptions(x1); CHKERRQ(ierr);
/* some values (in my case I will try v_i = i) */
ierr = VecSet(x1,1.0); CHKERRQ(ierr);
/* prepare other vectors, i.e. array of vectors (because of m=?) */
PetscScalar Mdots_val[m]; /* arrays of results of dot products */
Vec Mdots_vec[m]; /* array of vectors */
Mdots_vec[0] = x1; /* set first vector */
/* prepare other vectors */
for(int i=1;i<m;i++){
ierr = VecDuplicate(x1, &(Mdots_vec[i])); CHKERRQ(ierr);
ierr = VecSet(Mdots_vec[i],(PetscScalar)(i+1)); CHKERRQ(ierr);
}
mytimer.stop();
ierr = PetscPrintf(PETSC_COMM_WORLD,"- problem prepared in: %f s\n",mytimer.get_value_last()); CHKERRQ(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD,"\n"); CHKERRQ(ierr);
/* maybe print the content of the vectors ? */
if(PRINT_VECTOR_CONTENT){
for(int i=0;i<m;i++){
ierr = VecView(Mdots_vec[i], PETSC_VIEWER_STDOUT_WORLD); CHKERRQ(ierr);
}
}
/* COMPUTE SEQUENTIALLY DOT PRODUCTS */
compute_dots(n, ntrials, m, Mdots_vec, Mdots_val);
/* COMPUTE MULTIPLE DOT PRODUCTS */
compute_mdot(n, ntrials, m, Mdots_vec, Mdots_val);
/* finalize Petsc */
PetscFinalize();
return 0;
}
|
82baad90b189ba91bef1319caf895533196d6726.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2017-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hip/hip_runtime_api.h>
#include "dali/test/plugins/dummy/dummy.h"
namespace other_ns {
template<>
void Dummy<::dali::GPUBackend>::RunImpl(::dali::Workspace &ws) {
const auto &input = ws.Input<::dali::GPUBackend>(0);
const auto &shape = input.shape();
auto &output = ws.Output<::dali::GPUBackend>(0);
for (int sample_idx = 0; sample_idx < shape.num_samples(); sample_idx++) {
CUDA_CALL(hipMemcpyAsync(
output.raw_mutable_tensor(sample_idx),
input.raw_tensor(sample_idx),
shape[sample_idx].num_elements() * input.type_info().size(),
hipMemcpyDeviceToDevice,
ws.stream()));
}
}
} // namespace other_ns
DALI_REGISTER_OPERATOR(CustomDummy, ::other_ns::Dummy<::dali::GPUBackend>, ::dali::GPU);
| 82baad90b189ba91bef1319caf895533196d6726.cu | // Copyright (c) 2017-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda_runtime_api.h>
#include "dali/test/plugins/dummy/dummy.h"
namespace other_ns {
template<>
void Dummy<::dali::GPUBackend>::RunImpl(::dali::Workspace &ws) {
  // Pass-through operator: clone each input sample to the output,
  // device-to-device, on the workspace's stream.
  const auto &input = ws.Input<::dali::GPUBackend>(0);
  const auto &shape = input.shape();
  auto &output = ws.Output<::dali::GPUBackend>(0);
  const auto num_samples = shape.num_samples();
  for (int s = 0; s < num_samples; s++) {
    const auto sample_bytes = shape[s].num_elements() * input.type_info().size();
    CUDA_CALL(cudaMemcpyAsync(output.raw_mutable_tensor(s),
                              input.raw_tensor(s),
                              sample_bytes,
                              cudaMemcpyDeviceToDevice,
                              ws.stream()));
  }
}
} // namespace other_ns
DALI_REGISTER_OPERATOR(CustomDummy, ::other_ns::Dummy<::dali::GPUBackend>, ::dali::GPU);
|
758451856fb0dc47a83718f0b33e063f326cb20a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
// Fused four-stage 3-D heat stencil, pipelined along the k axis.
// Every stage applies the same update -- centre value plus 0.125f times the
// second difference in each of k, j and i -- to the previous stage's output.
// Stage state stays on chip: the current plane of each stage sits in a 32x32
// shared-memory tile (sh_*) with the planes above/below it in registers
// (reg_*); the "Value rotation" section shifts them each k iteration.
// NOTE: out1/out2/out3 are kept in the signature but never referenced --
// intermediate results never touch global memory.
// Blocks overlap by 8 points in both i and j (blockIdx * (blockDim-8)); each
// of the 4 stages shrinks the valid region by one point per side.
// The final stage writes plane max(k-3,0) of "out": iterations k=1..3 all
// target plane 0 while the pipeline fills.
// Assumes 32x32 thread blocks (the shared tiles are dimensioned [32][32]).
__global__ void heat (double * __restrict__ in, double * __restrict__ out1, double * __restrict__ out2, double * __restrict__ out3, double * __restrict__ out, int L, int M, int N) {
  //Determining the block's indices
  int blockdim_i= (int)(blockDim.x);
  int i0 = (int)(blockIdx.x)*(blockdim_i-8);
  int i = i0 + (int)(threadIdx.x);
  int blockdim_j= (int)(blockDim.y);
  int j0 = (int)(blockIdx.y)*(blockdim_j-8);
  int j = j0 + (int)(threadIdx.y);
  //Declarations: one shared plane plus two register planes per pipeline stage.
  double reg_in_m1=0, __shared__ sh_in_c0[32][32], reg_in_p1=0;
  double reg_out1_m2=0, __shared__ sh_out1_m1[32][32], reg_out1_c0=0;
  double reg_out2_m3=0, __shared__ sh_out2_m2[32][32], reg_out2_m1=0;
  double reg_out3_m4=0, __shared__ sh_out3_m3[32][32], reg_out3_m2=0;
  //Value Initialization: preload plane k=0 (register) and k=1 (shared).
  //Note: '&' is a bitwise AND of the two bool conditions -- equivalent to &&
  //here, but it evaluates both sides unconditionally.
  if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
    reg_in_m1 = in[0 + j*N + i];
    sh_in_c0[j-j0][i-i0] = in[1*M*N + j*N + i];
  }
  //Rest of the computation
  #pragma unroll 2
  for (int k=1; k<=L-2; ++k) {
    //Fetch new plane (k+1): the upper k-neighbour for stage 1.
    if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
      reg_in_p1 = in[(k+1)*M*N + j*N + i];
    }
    __syncthreads ();
    //Stage 1: valid region shrunk by 1 point on each side.
    if (j >= j0+1 & j <= min (j0+blockdim_j-2, M-2) & i >= i0+1 & i <= min (i0+blockdim_i-2, N-2)) {
      reg_out1_c0 = ((((0.125f * ((reg_in_p1 - (2.0f * sh_in_c0[j-j0][i-i0])) + reg_in_m1)) + (0.125f * ((sh_in_c0[j-j0+1][i-i0] - (2.0f * sh_in_c0[j-j0][i-i0])) + sh_in_c0[j-j0-1][i-i0]))) + (0.125f * ((sh_in_c0[j-j0][i-i0+1] - (2.0f * sh_in_c0[j-j0][i-i0])) + sh_in_c0[j-j0][i-i0-1]))) + sh_in_c0[j-j0][i-i0]);
    }
    __syncthreads ();
    //Stage 2: shrunk by 2 points on each side.
    if (j >= j0+2 & j <= min (j0+blockdim_j-3, M-2) & i >= i0+2 & i <= min (i0+blockdim_i-3, N-2)) {
      reg_out2_m1 = ((((0.125f * ((reg_out1_c0 - (2.0f * sh_out1_m1[j-j0][i-i0])) + reg_out1_m2)) + (0.125f * ((sh_out1_m1[j-j0+1][i-i0] - (2.0f * sh_out1_m1[j-j0][i-i0])) + sh_out1_m1[j-j0-1][i-i0]))) + (0.125f * ((sh_out1_m1[j-j0][i-i0+1] - (2.0f * sh_out1_m1[j-j0][i-i0])) + sh_out1_m1[j-j0][i-i0-1]))) + sh_out1_m1[j-j0][i-i0]);
    }
    __syncthreads ();
    //Stage 3: shrunk by 3 points on each side.
    if (j >= j0+3 & j <= min (j0+blockdim_j-4, M-2) & i >= i0+3 & i <= min (i0+blockdim_i-4, N-2)) {
      reg_out3_m2 = ((((0.125f * ((reg_out2_m1 - (2.0f * sh_out2_m2[j-j0][i-i0])) + reg_out2_m3)) + (0.125f * ((sh_out2_m2[j-j0+1][i-i0] - (2.0f * sh_out2_m2[j-j0][i-i0])) + sh_out2_m2[j-j0-1][i-i0]))) + (0.125f * ((sh_out2_m2[j-j0][i-i0+1] - (2.0f * sh_out2_m2[j-j0][i-i0])) + sh_out2_m2[j-j0][i-i0-1]))) + sh_out2_m2[j-j0][i-i0]);
    }
    __syncthreads ();
    //Stage 4: shrunk by 4 points; writes the final result to global memory.
    if (j >= j0+4 & j <= min (j0+blockdim_j-5, M-2) & i >= i0+4 & i <= min (i0+blockdim_i-5, N-2)) {
      out[max(k-3,0)*M*N + j*N + i] = ((((0.125f * ((reg_out3_m2 - (2.0f * sh_out3_m3[j-j0][i-i0])) + reg_out3_m4)) + (0.125f * ((sh_out3_m3[j-j0+1][i-i0] - (2.0f * sh_out3_m3[j-j0][i-i0])) + sh_out3_m3[j-j0-1][i-i0]))) + (0.125f * ((sh_out3_m3[j-j0][i-i0+1] - (2.0f * sh_out3_m3[j-j0][i-i0])) + sh_out3_m3[j-j0][i-i0-1]))) + sh_out3_m3[j-j0][i-i0]);
    }
    __syncthreads ();
    //Value rotation: slide every stage's plane window one step along k.
    reg_in_m1 = sh_in_c0[j-j0][i-i0];
    sh_in_c0[j-j0][i-i0] = reg_in_p1;
    reg_out1_m2 = sh_out1_m1[j-j0][i-i0];
    sh_out1_m1[j-j0][i-i0] = reg_out1_c0;
    reg_out2_m3 = sh_out2_m2[j-j0][i-i0];
    sh_out2_m2[j-j0][i-i0] = reg_out2_m1;
    reg_out3_m4 = sh_out3_m3[j-j0][i-i0];
    sh_out3_m3[j-j0][i-i0] = reg_out3_m2;
  }
}
// Allocate device buffers, run the fused heat stencil once, and copy the
// result back into h_out.  in/out are L*M*N-element volumes; out1..out3 are
// allocated only because the kernel signature requires them (the kernel
// never reads or writes them).
// Fix: the H2D copy, the kernel launch, and the D2H copy were previously
// unchecked; errors there now surface through check_error, consistent with
// the allocation checks below.
extern "C" void host_code (double *h_in, double *h_out, int L, int M, int N) {
  double *in;
  hipMalloc (&in, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for in\n");
  hipMemcpy (in, h_in, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
  check_error ("Failed to copy h_in to the device\n");
  double *out;
  hipMalloc (&out, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for out\n");
  double *out1;
  hipMalloc (&out1, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for out1\n");
  double *out2;
  hipMalloc (&out2, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for out2\n");
  double *out3;
  hipMalloc (&out3, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for out3\n");
  // 32x32 thread blocks; the grid overlaps blocks by 8 points in i and j,
  // matching the halo the kernel consumes.
  dim3 blockconfig_1 (32, 32, 1);
  dim3 gridconfig_1 (ceil(N, blockconfig_1.x-8), ceil(M, blockconfig_1.y-8), 1);
  hipLaunchKernelGGL(( heat) , dim3(gridconfig_1), dim3(blockconfig_1), 0, 0, in, out1, out2, out3, out, L, M, N);
  check_error ("Kernel launch failed for heat\n");
  // hipMemcpy is synchronous, so this also surfaces kernel execution errors.
  hipMemcpy (h_out, out, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
  check_error ("Failed to copy out back to the host\n");
  //Free allocated memory
  hipFree (in);
  hipFree (out);
  hipFree (out1);
  hipFree (out2);
  hipFree (out3);
}
| 758451856fb0dc47a83718f0b33e063f326cb20a.cu | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Abort the program if the most recent CUDA call left an error behind,
// printing the caller-supplied message alongside the runtime's description.
void check_error (const char* message) {
  cudaError_t status = cudaGetLastError ();
  if (status == cudaSuccess)
    return;
  printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
  exit(-1);
}
// Fused four-stage 3-D heat stencil, pipelined along the k axis.
// Every stage applies the same update -- centre value plus 0.125f times the
// second difference in each of k, j and i -- to the previous stage's output.
// Stage state stays on chip: the current plane of each stage sits in a 32x32
// shared-memory tile (sh_*) with the planes above/below it in registers
// (reg_*); the "Value rotation" section shifts them each k iteration.
// NOTE: out1/out2/out3 are kept in the signature but never referenced --
// intermediate results never touch global memory.
// Blocks overlap by 8 points in both i and j (blockIdx * (blockDim-8)); each
// of the 4 stages shrinks the valid region by one point per side.
// The final stage writes plane max(k-3,0) of "out": iterations k=1..3 all
// target plane 0 while the pipeline fills.
// Assumes 32x32 thread blocks (the shared tiles are dimensioned [32][32]).
__global__ void heat (double * __restrict__ in, double * __restrict__ out1, double * __restrict__ out2, double * __restrict__ out3, double * __restrict__ out, int L, int M, int N) {
  //Determining the block's indices
  int blockdim_i= (int)(blockDim.x);
  int i0 = (int)(blockIdx.x)*(blockdim_i-8);
  int i = i0 + (int)(threadIdx.x);
  int blockdim_j= (int)(blockDim.y);
  int j0 = (int)(blockIdx.y)*(blockdim_j-8);
  int j = j0 + (int)(threadIdx.y);
  //Declarations: one shared plane plus two register planes per pipeline stage.
  double reg_in_m1=0, __shared__ sh_in_c0[32][32], reg_in_p1=0;
  double reg_out1_m2=0, __shared__ sh_out1_m1[32][32], reg_out1_c0=0;
  double reg_out2_m3=0, __shared__ sh_out2_m2[32][32], reg_out2_m1=0;
  double reg_out3_m4=0, __shared__ sh_out3_m3[32][32], reg_out3_m2=0;
  //Value Initialization: preload plane k=0 (register) and k=1 (shared).
  //Note: '&' is a bitwise AND of the two bool conditions -- equivalent to &&
  //here, but it evaluates both sides unconditionally.
  if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
    reg_in_m1 = in[0 + j*N + i];
    sh_in_c0[j-j0][i-i0] = in[1*M*N + j*N + i];
  }
  //Rest of the computation
  #pragma unroll 2
  for (int k=1; k<=L-2; ++k) {
    //Fetch new plane (k+1): the upper k-neighbour for stage 1.
    if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
      reg_in_p1 = in[(k+1)*M*N + j*N + i];
    }
    __syncthreads ();
    //Stage 1: valid region shrunk by 1 point on each side.
    if (j >= j0+1 & j <= min (j0+blockdim_j-2, M-2) & i >= i0+1 & i <= min (i0+blockdim_i-2, N-2)) {
      reg_out1_c0 = ((((0.125f * ((reg_in_p1 - (2.0f * sh_in_c0[j-j0][i-i0])) + reg_in_m1)) + (0.125f * ((sh_in_c0[j-j0+1][i-i0] - (2.0f * sh_in_c0[j-j0][i-i0])) + sh_in_c0[j-j0-1][i-i0]))) + (0.125f * ((sh_in_c0[j-j0][i-i0+1] - (2.0f * sh_in_c0[j-j0][i-i0])) + sh_in_c0[j-j0][i-i0-1]))) + sh_in_c0[j-j0][i-i0]);
    }
    __syncthreads ();
    //Stage 2: shrunk by 2 points on each side.
    if (j >= j0+2 & j <= min (j0+blockdim_j-3, M-2) & i >= i0+2 & i <= min (i0+blockdim_i-3, N-2)) {
      reg_out2_m1 = ((((0.125f * ((reg_out1_c0 - (2.0f * sh_out1_m1[j-j0][i-i0])) + reg_out1_m2)) + (0.125f * ((sh_out1_m1[j-j0+1][i-i0] - (2.0f * sh_out1_m1[j-j0][i-i0])) + sh_out1_m1[j-j0-1][i-i0]))) + (0.125f * ((sh_out1_m1[j-j0][i-i0+1] - (2.0f * sh_out1_m1[j-j0][i-i0])) + sh_out1_m1[j-j0][i-i0-1]))) + sh_out1_m1[j-j0][i-i0]);
    }
    __syncthreads ();
    //Stage 3: shrunk by 3 points on each side.
    if (j >= j0+3 & j <= min (j0+blockdim_j-4, M-2) & i >= i0+3 & i <= min (i0+blockdim_i-4, N-2)) {
      reg_out3_m2 = ((((0.125f * ((reg_out2_m1 - (2.0f * sh_out2_m2[j-j0][i-i0])) + reg_out2_m3)) + (0.125f * ((sh_out2_m2[j-j0+1][i-i0] - (2.0f * sh_out2_m2[j-j0][i-i0])) + sh_out2_m2[j-j0-1][i-i0]))) + (0.125f * ((sh_out2_m2[j-j0][i-i0+1] - (2.0f * sh_out2_m2[j-j0][i-i0])) + sh_out2_m2[j-j0][i-i0-1]))) + sh_out2_m2[j-j0][i-i0]);
    }
    __syncthreads ();
    //Stage 4: shrunk by 4 points; writes the final result to global memory.
    if (j >= j0+4 & j <= min (j0+blockdim_j-5, M-2) & i >= i0+4 & i <= min (i0+blockdim_i-5, N-2)) {
      out[max(k-3,0)*M*N + j*N + i] = ((((0.125f * ((reg_out3_m2 - (2.0f * sh_out3_m3[j-j0][i-i0])) + reg_out3_m4)) + (0.125f * ((sh_out3_m3[j-j0+1][i-i0] - (2.0f * sh_out3_m3[j-j0][i-i0])) + sh_out3_m3[j-j0-1][i-i0]))) + (0.125f * ((sh_out3_m3[j-j0][i-i0+1] - (2.0f * sh_out3_m3[j-j0][i-i0])) + sh_out3_m3[j-j0][i-i0-1]))) + sh_out3_m3[j-j0][i-i0]);
    }
    __syncthreads ();
    //Value rotation: slide every stage's plane window one step along k.
    reg_in_m1 = sh_in_c0[j-j0][i-i0];
    sh_in_c0[j-j0][i-i0] = reg_in_p1;
    reg_out1_m2 = sh_out1_m1[j-j0][i-i0];
    sh_out1_m1[j-j0][i-i0] = reg_out1_c0;
    reg_out2_m3 = sh_out2_m2[j-j0][i-i0];
    sh_out2_m2[j-j0][i-i0] = reg_out2_m1;
    reg_out3_m4 = sh_out3_m3[j-j0][i-i0];
    sh_out3_m3[j-j0][i-i0] = reg_out3_m2;
  }
}
// Allocate device buffers, run the fused heat stencil once, and copy the
// result back into h_out.  in/out are L*M*N-element volumes; out1..out3 are
// allocated only because the kernel signature requires them (the kernel
// never reads or writes them).
// Fix: the H2D copy, the kernel launch, and the D2H copy were previously
// unchecked; errors there now surface through check_error, consistent with
// the allocation checks below.
extern "C" void host_code (double *h_in, double *h_out, int L, int M, int N) {
  double *in;
  cudaMalloc (&in, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for in\n");
  cudaMemcpy (in, h_in, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  check_error ("Failed to copy h_in to the device\n");
  double *out;
  cudaMalloc (&out, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for out\n");
  double *out1;
  cudaMalloc (&out1, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for out1\n");
  double *out2;
  cudaMalloc (&out2, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for out2\n");
  double *out3;
  cudaMalloc (&out3, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for out3\n");
  // 32x32 thread blocks; the grid overlaps blocks by 8 points in i and j,
  // matching the halo the kernel consumes.
  dim3 blockconfig_1 (32, 32, 1);
  dim3 gridconfig_1 (ceil(N, blockconfig_1.x-8), ceil(M, blockconfig_1.y-8), 1);
  heat <<<gridconfig_1, blockconfig_1>>> (in, out1, out2, out3, out, L, M, N);
  check_error ("Kernel launch failed for heat\n");
  // cudaMemcpy is synchronous, so this also surfaces kernel execution errors.
  cudaMemcpy (h_out, out, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
  check_error ("Failed to copy out back to the host\n");
  //Free allocated memory
  cudaFree (in);
  cudaFree (out);
  cudaFree (out1);
  cudaFree (out2);
  cudaFree (out3);
}
|
e05389f1421af99815a5b9779f939c038f374c76.hip | // !!! This is a file automatically generated by hipify!!!
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Math.cuh>
namespace at { namespace native {
// ---------------------------------------------------------------------------
// Elementwise unary kernels.  Each host-side function dispatches on the
// TensorIterator's dtype and launches gpu_kernel with a per-element lambda.
// ---------------------------------------------------------------------------

// Bitwise NOT.  Bool tensors use logical negation (!a); integral tensors
// use the bitwise complement (~a).
void bitwise_not_kernel_cuda(TensorIterator& iter) {
  if (iter.dtype() == ScalarType::Bool) {
    gpu_kernel(iter, []GPU_LAMBDA(bool a) {
      return !a;
    });
  } else {
    AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_not_cuda", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        return ~a;
      });
    });
  }
}
// Logical NOT.  Double dispatch: the lambda reads as the input dtype
// (iter.dtype(1)) and casts !a to the output dtype (iter.dtype(0)), since
// logical_not may write to an output tensor of a different type.
void logical_not_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, iter.dtype(1), "logical_not_cuda", [&]() {
    using self_t = scalar_t;
    AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, iter.dtype(0), "logical_not_cuda", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(self_t a) -> scalar_t { return static_cast<scalar_t>(!a); });
    });
  });
}
// The kernels below all share one pattern: dispatch over floating types
// (including Half) and apply the matching device math function elementwise.
void asin_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "asin_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::asin(a);
    });
  });
}
void ceil_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "ceil_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::ceil(a);
    });
  });
}
void expm1_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "expm1_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::expm1(a);
    });
  });
}
void floor_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "floor_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::floor(a);
    });
  });
}
void log_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "log_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::log(a);
    });
  });
}
void log10_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "log10_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::log10(a);
    });
  });
}
void log1p_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "log1p_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::log1p(a);
    });
  });
}
void log2_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "log2_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::log2(a);
    });
  });
}
// Negation covers all types (plus Half), not just floating point.
void neg_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_ALL_TYPES_AND(ScalarType::Half, iter.dtype(), "neg_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return -a;
    });
  });
}
// We manually overload nearbyint because std::nearbyint does not work with ROCm.
// The generic version rounds through float; the double overload below keeps
// full double precision.
template <typename scalar_t>
__host__ __device__ static inline scalar_t nearbyint_wrapper(scalar_t a) {
  return static_cast<scalar_t>(::nearbyintf(static_cast<float>(a)));
}
__host__ __device__ static inline double nearbyint_wrapper(double a) {
  return ::nearbyint(a);
}
// Round, with halfway cases going to the nearest even integer.
void round_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "round_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      // We do not use std::round because we would like to round midway numbers to the nearest even integer.
      return nearbyint_wrapper(a);
    });
  });
}
// We manually overload trunc because std::trunc does not work with ROCm.
template <typename scalar_t>
__host__ __device__ static inline scalar_t trunc_wrapper(scalar_t a) {
  return static_cast<scalar_t>(::truncf(static_cast<float>(a)));
}
__host__ __device__ static inline double trunc_wrapper(double a) {
  return ::trunc(a);
}
// Truncate toward zero.
void trunc_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "trunc_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return trunc_wrapper(a);
    });
  });
}
// Reciprocal square root.
void rsqrt_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "rsqrt_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      // In CUDA, ::rsqrt is overloaded for float and at::Half here is implicitly cast to float.
      return ::rsqrt(a);
    });
  });
}
// Sign: bool tensors pass through unchanged; for numeric types
// (0 < a) - (a < 0) yields -1, 0 or +1.
void sign_kernel_cuda(TensorIterator& iter){
  if (iter.dtype() == ScalarType::Bool) {
    gpu_kernel(iter, []GPU_LAMBDA(bool a){
      return a;
    });
  } else {
    AT_DISPATCH_ALL_TYPES_AND(ScalarType::Half, iter.dtype(), "sign_cuda", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        scalar_t zero = scalar_t(0);
        return (zero < a) - (a < zero);
      });
    });
  }
}
void sin_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "sin_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::sin(a);
    });
  });
}
void sinh_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "sinh_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::sinh(a);
    });
  });
}
void sqrt_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "sqrt_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::sqrt(a);
    });
  });
}
// Logistic sigmoid: 1 / (1 + exp(-a)).
void sigmoid_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "sigmoid_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      scalar_t one = scalar_t(1);
      return one / (one + ::exp(- a));
    });
  });
}
// Inverse error function.
void erfinv_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "erfinv_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::erfinv(a);
    });
  });
}
// Digamma/trigamma delegate to the calc_* helpers from Math.cuh.
void digamma_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "digamma_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return calc_digamma(a);
    });
  });
}
void trigamma_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "trigamma_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return calc_trigamma(a);
    });
  });
}
// Polygamma: only order n=0 (digamma) and n=1 (trigamma) are implemented;
// any other n raises via TORCH_CHECK.
void polygamma_kernel_cuda(TensorIterator& iter, int64_t n) {
  switch (n) {
  case 0: digamma_kernel_cuda(iter); break;
  case 1: trigamma_kernel_cuda(iter); break;
  default: TORCH_CHECK(false, "polygamma(n,x) is not implemented for n>=2, but was ", n);
  }
}
// Log-gamma.
void lgamma_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "lgamma_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::lgamma(a);
    });
  });
}
// Register each kernel above with its per-op dispatch stub.  (trigamma has
// no stub of its own here; it is reached only through polygamma.)
REGISTER_DISPATCH(bitwise_not_stub, &bitwise_not_kernel_cuda);
REGISTER_DISPATCH(logical_not_stub, &logical_not_kernel_cuda);
REGISTER_DISPATCH(asin_stub, &asin_kernel_cuda);
REGISTER_DISPATCH(ceil_stub, &ceil_kernel_cuda);
REGISTER_DISPATCH(expm1_stub, &expm1_kernel_cuda);
REGISTER_DISPATCH(floor_stub, &floor_kernel_cuda);
REGISTER_DISPATCH(log_stub, &log_kernel_cuda);
REGISTER_DISPATCH(log10_stub, &log10_kernel_cuda);
REGISTER_DISPATCH(log2_stub, &log2_kernel_cuda);
REGISTER_DISPATCH(log1p_stub, &log1p_kernel_cuda);
REGISTER_DISPATCH(neg_stub, &neg_kernel_cuda);
REGISTER_DISPATCH(round_stub, &round_kernel_cuda);
REGISTER_DISPATCH(rsqrt_stub, &rsqrt_kernel_cuda);
REGISTER_DISPATCH(sign_stub, &sign_kernel_cuda);
REGISTER_DISPATCH(sin_stub, &sin_kernel_cuda);
REGISTER_DISPATCH(sinh_stub, &sinh_kernel_cuda);
REGISTER_DISPATCH(sqrt_stub, &sqrt_kernel_cuda);
REGISTER_DISPATCH(sigmoid_stub, &sigmoid_kernel_cuda);
REGISTER_DISPATCH(trunc_stub, &trunc_kernel_cuda);
REGISTER_DISPATCH(erfinv_stub, &erfinv_kernel_cuda);
REGISTER_DISPATCH(digamma_stub, &digamma_kernel_cuda);
REGISTER_DISPATCH(polygamma_stub, &polygamma_kernel_cuda);
REGISTER_DISPATCH(lgamma_stub, &lgamma_kernel_cuda);
}} // namespace at::native
| e05389f1421af99815a5b9779f939c038f374c76.cu | #include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Math.cuh>
namespace at { namespace native {
// ---------------------------------------------------------------------------
// Elementwise unary kernels.  Each host-side function dispatches on the
// TensorIterator's dtype and launches gpu_kernel with a per-element lambda.
// ---------------------------------------------------------------------------

// Bitwise NOT.  Bool tensors use logical negation (!a); integral tensors
// use the bitwise complement (~a).
void bitwise_not_kernel_cuda(TensorIterator& iter) {
  if (iter.dtype() == ScalarType::Bool) {
    gpu_kernel(iter, []GPU_LAMBDA(bool a) {
      return !a;
    });
  } else {
    AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_not_cuda", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        return ~a;
      });
    });
  }
}
// Logical NOT.  Double dispatch: the lambda reads as the input dtype
// (iter.dtype(1)) and casts !a to the output dtype (iter.dtype(0)), since
// logical_not may write to an output tensor of a different type.
void logical_not_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, iter.dtype(1), "logical_not_cuda", [&]() {
    using self_t = scalar_t;
    AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, iter.dtype(0), "logical_not_cuda", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(self_t a) -> scalar_t { return static_cast<scalar_t>(!a); });
    });
  });
}
// The kernels below all share one pattern: dispatch over floating types
// (including Half) and apply the matching device math function elementwise.
void asin_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "asin_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::asin(a);
    });
  });
}
void ceil_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "ceil_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return std::ceil(a);
    });
  });
}
void expm1_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "expm1_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::expm1(a);
    });
  });
}
void floor_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "floor_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return std::floor(a);
    });
  });
}
void log_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "log_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return std::log(a);
    });
  });
}
void log10_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "log10_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::log10(a);
    });
  });
}
void log1p_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "log1p_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::log1p(a);
    });
  });
}
void log2_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "log2_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::log2(a);
    });
  });
}
// Negation covers all types (plus Half), not just floating point.
void neg_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_ALL_TYPES_AND(ScalarType::Half, iter.dtype(), "neg_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return -a;
    });
  });
}
// We manually overload nearbyint because std::nearbyint does not work with ROCm.
// The generic version rounds through float; the double overload below keeps
// full double precision.
template <typename scalar_t>
__host__ __device__ static inline scalar_t nearbyint_wrapper(scalar_t a) {
  return static_cast<scalar_t>(::nearbyintf(static_cast<float>(a)));
}
__host__ __device__ static inline double nearbyint_wrapper(double a) {
  return ::nearbyint(a);
}
// Round, with halfway cases going to the nearest even integer.
void round_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "round_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      // We do not use std::round because we would like to round midway numbers to the nearest even integer.
      return nearbyint_wrapper(a);
    });
  });
}
// We manually overload trunc because std::trunc does not work with ROCm.
template <typename scalar_t>
__host__ __device__ static inline scalar_t trunc_wrapper(scalar_t a) {
  return static_cast<scalar_t>(::truncf(static_cast<float>(a)));
}
__host__ __device__ static inline double trunc_wrapper(double a) {
  return ::trunc(a);
}
// Truncate toward zero.
void trunc_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "trunc_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return trunc_wrapper(a);
    });
  });
}
// Reciprocal square root.
void rsqrt_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "rsqrt_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      // In CUDA, ::rsqrt is overloaded for float and at::Half here is implicitly cast to float.
      return ::rsqrt(a);
    });
  });
}
// Sign: bool tensors pass through unchanged; for numeric types
// (0 < a) - (a < 0) yields -1, 0 or +1.
void sign_kernel_cuda(TensorIterator& iter){
  if (iter.dtype() == ScalarType::Bool) {
    gpu_kernel(iter, []GPU_LAMBDA(bool a){
      return a;
    });
  } else {
    AT_DISPATCH_ALL_TYPES_AND(ScalarType::Half, iter.dtype(), "sign_cuda", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        scalar_t zero = scalar_t(0);
        return (zero < a) - (a < zero);
      });
    });
  }
}
void sin_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "sin_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::sin(a);
    });
  });
}
void sinh_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "sinh_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::sinh(a);
    });
  });
}
void sqrt_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "sqrt_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::sqrt(a);
    });
  });
}
// Logistic sigmoid: 1 / (1 + exp(-a)).
void sigmoid_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "sigmoid_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      scalar_t one = scalar_t(1);
      return one / (one + std::exp(- a));
    });
  });
}
// Inverse error function.
void erfinv_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "erfinv_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::erfinv(a);
    });
  });
}
// Digamma/trigamma delegate to the calc_* helpers from Math.cuh.
void digamma_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "digamma_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return calc_digamma(a);
    });
  });
}
void trigamma_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "trigamma_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return calc_trigamma(a);
    });
  });
}
// Polygamma: only order n=0 (digamma) and n=1 (trigamma) are implemented;
// any other n raises via TORCH_CHECK.
void polygamma_kernel_cuda(TensorIterator& iter, int64_t n) {
  switch (n) {
  case 0: digamma_kernel_cuda(iter); break;
  case 1: trigamma_kernel_cuda(iter); break;
  default: TORCH_CHECK(false, "polygamma(n,x) is not implemented for n>=2, but was ", n);
  }
}
// Log-gamma.
void lgamma_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "lgamma_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::lgamma(a);
    });
  });
}
// Register each kernel above with its per-op dispatch stub.  (trigamma has
// no stub of its own here; it is reached only through polygamma.)
REGISTER_DISPATCH(bitwise_not_stub, &bitwise_not_kernel_cuda);
REGISTER_DISPATCH(logical_not_stub, &logical_not_kernel_cuda);
REGISTER_DISPATCH(asin_stub, &asin_kernel_cuda);
REGISTER_DISPATCH(ceil_stub, &ceil_kernel_cuda);
REGISTER_DISPATCH(expm1_stub, &expm1_kernel_cuda);
REGISTER_DISPATCH(floor_stub, &floor_kernel_cuda);
REGISTER_DISPATCH(log_stub, &log_kernel_cuda);
REGISTER_DISPATCH(log10_stub, &log10_kernel_cuda);
REGISTER_DISPATCH(log2_stub, &log2_kernel_cuda);
REGISTER_DISPATCH(log1p_stub, &log1p_kernel_cuda);
REGISTER_DISPATCH(neg_stub, &neg_kernel_cuda);
REGISTER_DISPATCH(round_stub, &round_kernel_cuda);
REGISTER_DISPATCH(rsqrt_stub, &rsqrt_kernel_cuda);
REGISTER_DISPATCH(sign_stub, &sign_kernel_cuda);
REGISTER_DISPATCH(sin_stub, &sin_kernel_cuda);
REGISTER_DISPATCH(sinh_stub, &sinh_kernel_cuda);
REGISTER_DISPATCH(sqrt_stub, &sqrt_kernel_cuda);
REGISTER_DISPATCH(sigmoid_stub, &sigmoid_kernel_cuda);
REGISTER_DISPATCH(trunc_stub, &trunc_kernel_cuda);
REGISTER_DISPATCH(erfinv_stub, &erfinv_kernel_cuda);
REGISTER_DISPATCH(digamma_stub, &digamma_kernel_cuda);
REGISTER_DISPATCH(polygamma_stub, &polygamma_kernel_cuda);
REGISTER_DISPATCH(lgamma_stub, &lgamma_kernel_cuda);
}} // namespace at::native
|
fe2b732238283c64080850ab74d5b41ad68c3cb5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
__global__ void abs_float(int n,int idx,float *dy,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = abs(dy[i]);
}
} | fe2b732238283c64080850ab74d5b41ad68c3cb5.cu | #include "includes.h"
extern "C"
__global__ void abs_float(int n,int idx,float *dy,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = abs(dy[i]);
}
} |
cba2fa8637972cd7bf323c1a713461f3ad4bdffd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
// Single-thread device kernel: store the sum of the two inputs in *c.
__global__ void vector_add(const int *a, const int *b, int *c){
    *c = *a + *b;
}
int main (void) {
const int a = 2, b = 5;
int c = 0;
int *dev_a, *dev_b, *dev_c;
hipMalloc((void **)&dev_a, sizeof(int));
hipMalloc((void **)&dev_b, sizeof(int));
hipMalloc((void **)&dev_c, sizeof(int));
hipMemcpy(dev_a, &a, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, &b, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( vector_add), dim3(1),dim3(1), 0, 0, dev_a, dev_b, dev_c);
hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost);
printf("%d + %d = %d, is that right ?\n", a, b, c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(0);
}
| cba2fa8637972cd7bf323c1a713461f3ad4bdffd.cu | #include<stdio.h>
// Single-thread device kernel: store the sum of the two inputs in *c.
__global__ void vector_add(const int *a, const int *b, int *c){
    *c = *a + *b;
}
// Demo: add two scalars on the device and print the result.
// Fix: every CUDA API result and the kernel launch were previously unchecked,
// so any failure (e.g. no device) silently printed "2 + 5 = 0".  Failures
// now report an error and exit non-zero.
int main (void) {
    const int a = 2, b = 5;
    int c = 0;
    int *dev_a, *dev_b, *dev_c;
    if (cudaMalloc((void **)&dev_a, sizeof(int)) != cudaSuccess ||
        cudaMalloc((void **)&dev_b, sizeof(int)) != cudaSuccess ||
        cudaMalloc((void **)&dev_c, sizeof(int)) != cudaSuccess) {
        fprintf(stderr, "device allocation failed\n");
        return 1;
    }
    if (cudaMemcpy(dev_a, &a, sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(dev_b, &b, sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess) {
        fprintf(stderr, "host-to-device copy failed\n");
        return 1;
    }
    vector_add<<<1,1>>>(dev_a, dev_b, dev_c);
    if (cudaGetLastError() != cudaSuccess) {
        fprintf(stderr, "kernel launch failed\n");
        return 1;
    }
    // cudaMemcpy synchronizes with the kernel before reading the result.
    if (cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess) {
        fprintf(stderr, "device-to-host copy failed\n");
        return 1;
    }
    printf("%d + %d = %d, is that right ?\n", a, b, c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return(0);
}
|
73d1c491f2a419311b28a4779a171b3f7c5721df.hip | // !!! This is a file automatically generated by hipify!!!
#include "alloc.hh"
#include "mode.hh"
#include <stdexcept>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Throw std::runtime_error describing the HIP error and its source location;
// no-op when the call succeeded.
inline void gpuAssert(hipError_t code, const char *file, int line)
{
    if (code == hipSuccess)
        return;
    throw std::runtime_error {"GPUAssert: " + std::string(hipGetErrorString(code)) + " "
                              + std::string(file) + ":" + std::to_string(line)};
}
// Allocate storage for `size` dbl_t elements: a checked device allocation in
// GPU mode, a plain heap array otherwise.  Release with tensor_free.
dbl_t* tensor_alloc(std::size_t size)
{
    if (program_mode() != ProgramMode::GPU)
        return new dbl_t[size];
    dbl_t* res;
    gpuErrchk(hipMalloc(&res, size * sizeof(dbl_t)));
    return res;
}
// Release memory obtained from tensor_alloc, matching the current mode's
// allocator (hipFree in GPU mode, delete[] otherwise).
void tensor_free(dbl_t* ptr)
{
    if (program_mode() != ProgramMode::GPU) {
        delete[] ptr;
        return;
    }
    gpuErrchk(hipFree(ptr));
}
| 73d1c491f2a419311b28a4779a171b3f7c5721df.cu | #include "alloc.hh"
#include "mode.hh"
#include <stdexcept>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Throw std::runtime_error describing the CUDA error and its source location;
// no-op when the call succeeded.
inline void gpuAssert(cudaError_t code, const char *file, int line)
{
    if (code == cudaSuccess)
        return;
    throw std::runtime_error {"GPUAssert: " + std::string(cudaGetErrorString(code)) + " "
                              + std::string(file) + ":" + std::to_string(line)};
}
// Allocate storage for `size` dbl_t elements: a checked device allocation in
// GPU mode, a plain heap array otherwise.  Release with tensor_free.
dbl_t* tensor_alloc(std::size_t size)
{
    if (program_mode() != ProgramMode::GPU)
        return new dbl_t[size];
    dbl_t* res;
    gpuErrchk(cudaMalloc(&res, size * sizeof(dbl_t)));
    return res;
}
// Release memory obtained from tensor_alloc, matching the current mode's
// allocator (cudaFree in GPU mode, delete[] otherwise).
void tensor_free(dbl_t* ptr)
{
    if (program_mode() != ProgramMode::GPU) {
        delete[] ptr;
        return;
    }
    gpuErrchk(cudaFree(ptr));
}
|
fe1088206abb77a1ec2680f13f66a2509e9c6141.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
#include <gptl.h>
#include <gptl_cuda.h>
__global__ void runit (int *, int, int, int, int *); // global routine drives GPU calculations
__device__ int get_shared_idx (int, int, int *, uint);
__device__ int find_shared_idx (int, int);
__device__ void get_mutex (volatile int *);
__device__ void free_mutex (volatile int *);
#define WARPS_PER_SM 4
#define MAX_OVERSUB 1
#define SHARED_LOCS_PER_SM (WARPS_PER_SM * MAX_OVERSUB)
__shared__ int timer[SHARED_LOCS_PER_SM];
__device__ volatile int mutex1[5] = {0,0,0,0,0};
__device__ volatile int mutex2[5] = {0,0,0,0,0};
typedef struct {
int warp;
bool inuse;
} Map;
__shared__ volatile Map map[SHARED_LOCS_PER_SM];
// Driver for the GPU shared-index test: parse -b (blocksize) and -n (niter),
// size the grid, zero the managed global_timer array, launch runit, then
// verify every warp's slot ends up set to 1.
// Fixes: the usage message advertised a -k option that the "b:n:" optstring
// never accepted, and the -b error message was missing a space.
int main (int argc, char **argv)
{
  int ret; // return code
  int c; // for getopt
  int khz, warpsize, devnum, smcount, cores_per_sm, cores_per_gpu;
  float oversub; // oversubscription fraction (diagnostic)
  int blocksize; // blocksize for kernel launched
  int nblocks; // number of blocks on the GPU given blocksize and oversub
  int niter; // total number of iterations: default cores_per_gpu
  int extraiter = 0; // number of extra iterations that don't complete a block
  int nwarps; // total number warps in the computation
  int warps_per_sm;
  int shared_locs_per_sm;
  int *global_timer; // managed memory: one slot per warp, set by runit
  int nbad;
  int *maxidx; // diagnostic: max index used
  // Retrieve information about the GPU and set defaults
  // NOTE(review): ret from GPTLget_gpu_props/GPTLinitialize is never checked.
  ret = GPTLget_gpu_props (&khz, &warpsize, &devnum, &smcount, &cores_per_sm, &cores_per_gpu);
  warps_per_sm = cores_per_sm / warpsize;
  blocksize = cores_per_sm; // default
  niter = cores_per_gpu; // default
  // Check for user-supplied input to override defaults
  while ((c = getopt (argc, argv, "b:n:")) != -1) {
    switch (c) {
    case 'b':
      if ((blocksize = atoi (optarg)) < 1) {
        printf ("blocksize (-b <arg>) must be > 0 %d is invalid\n", blocksize);
        return -1;
      }
      break;
    case 'n':
      if ((niter = atoi (optarg)) < 1) {
        printf ("niter (-n <arg>) must be > 0 %d is invalid\n", niter);
        return -1;
      }
      break;
    default:
      printf ("unknown option %c\n", c);
      printf ("Usage: %s [-b blocksize] [-n niter]\n", argv[0]);
      return 2;
    }
  }
  oversub = (float) niter / cores_per_gpu;
  printf ("oversubscription factor=%f\n", oversub); // diagnostic only
  if (oversub > MAX_OVERSUB)
    printf ("WARNING: oversub=%f MAX_OVERSUB=%d\n", oversub, MAX_OVERSUB);
  shared_locs_per_sm = warps_per_sm * MAX_OVERSUB;
  // Number of warps and blocks needed to cover niter iterations (round up).
  nwarps = niter / warpsize;
  if (niter % warpsize != 0)
    nwarps = niter / warpsize + 1;
  nblocks = niter / blocksize;
  if (niter % blocksize != 0) {
    extraiter = niter - nblocks*blocksize; // diagnostic only
    nblocks = niter / blocksize + 1;
  }
  printf ("%d iters broken into nblocks=%d blocksize=%d\n", niter, nblocks, blocksize);
  if (niter % blocksize != 0)
    printf ("Last iteration will be only %d elements\n", extraiter);
  // Initialize the GPTL library on CPU and GPU. This is mandatory before any start/stop
  ret = GPTLinitialize ();
  (void) (hipMallocManaged ((void **) &global_timer, sizeof (int) * nwarps));
  (void) (hipMallocManaged ((void **) &maxidx, sizeof (int))); // diagnostic
  for (int n = 0; n < nwarps; ++n)
    global_timer[n] = 0;
  hipLaunchKernelGGL(( runit), dim3(nblocks),dim3(blocksize), 0, 0, global_timer, shared_locs_per_sm, warpsize, nwarps, maxidx);
  printf ("main calling hipDeviceSynchronize ()\n");
  hipDeviceSynchronize (); // Ensure the GPU has finished the kernel before returning to CPU
  printf ("main done calling hipDeviceSynchronize ()\n");
  printf ("Final value of global_timer: should be 1 everywhere\n");
  nbad = 0;
  for (int n = 0; n < nwarps; ++n)
    if (global_timer[n] != 1)
      ++nbad;
  printf ("Number of bad values in global_timer=%d\n", nbad);
  printf ("maxidx=%d\n", *maxidx);
  return 0;
}
// Acquire a simple device-side spinlock: spin until we are the thread that
// transitions *mutex from 0 (free) to 1 (held) via atomicCAS.
__device__ void get_mutex (volatile int *mutex)
{
  // atomicCAS returns the old value: 0 means we grabbed the lock,
  // 1 means another thread holds it and we must retry.
  while (atomicCAS ((int *) mutex, 0, 1) != 0) {
    // busy-wait until the holder releases the mutex
  }
}
// Release a spinlock previously acquired with get_mutex() by storing 0.
// NOTE(review): plain volatile store, no __threadfence() before release --
// presumably sufficient for this test's access pattern; TODO confirm that
// critical-section writes are visible to the next lock holder.
__device__ void free_mutex (volatile int *mutex)
{
  *mutex = 0;
}
// This routine will be called at start of GPTLstart_gpu.
// Claim a free slot in the per-SM shared-memory map[] under this SM's
// spinlock (mutex1[smid]).  Returns the claimed slot index, or
// shared_locs_per_sm if every slot is already in use (a diagnostic is
// printed in that case).  *maxidx records the largest slot index ever
// handed out (diagnostic only).
__device__ int get_shared_idx (int mywarp, int shared_locs_per_sm, int *maxidx, uint smid)
{
  int idx;

  // Critical section: only one warp per SM may scan/claim slots at a time
  get_mutex (&mutex1[smid]);
  for (idx = 0; idx < shared_locs_per_sm; ++idx) {
    if ( ! map[idx].inuse) {
      map[idx].inuse = true;
      map[idx].warp = mywarp;
      if (idx > *maxidx)
        *maxidx = idx; // diagnostic: max index used
      break;
    }
  }
  if (idx == shared_locs_per_sm)
    printf ("mywarp %d no spots available!\n", mywarp);
  // Single unlock/return path (original duplicated the unlock in the
  // failure branch); also removed the unused local "tot".
  free_mutex (&mutex1[smid]);
  return idx;
}
// This routine will be called at start of GPTLstop_gpu.
// Locate the map[] slot previously claimed by warp "mywarp".  Returns the
// slot index, or shared_locs_per_sm if no matching in-use slot exists
// (e.g. the slot was reclaimed by another warp).  Read-only scan, so no
// mutex is taken.  (Removed the unused local "n".)
__device__ int find_shared_idx (int mywarp, int shared_locs_per_sm)
{
  int idx;
  for (idx = 0; idx < shared_locs_per_sm; ++idx) {
    if (map[idx].inuse && map[idx].warp == mywarp)
      break;
  }
  return idx;
}
// GPU driver kernel: lane 0 of every warp exercises the GPTL start/stop
// bookkeeping pattern -- claim a shared-memory slot, copy in its counter,
// re-find the slot, sleep, bump the counter, publish it to global_timer[],
// then release the slot.  On success each warp's global_timer entry ends
// up exactly 1 (verified by the host).
// Cleanup vs. original: removed the unused local "isSet" and two dead
// stores to "sleeptime" (0.1 and 0.0001*mywarp were immediately
// overwritten by 0.01).
__global__ void runit (int *global_timer, int shared_locs_per_sm, int warpsize, int nwarps,
                       int *maxidx)
{
  int mywarp, mythread;
  int idx;
  int idxsave;
  int ret;
  float sleeptime;
  uint smid;

  ret = GPTLget_warp_thread (&mywarp, &mythread);  // ret not used further
  // Only the first lane of each warp does the bookkeeping
  if (mythread % warpsize == 0) {
    // Stuff done in GPTLstart_gpu
    asm ("mov.u32 %0, %smid;" : "=r"(smid));  // which SM are we running on
    idx = get_shared_idx (mywarp, shared_locs_per_sm, maxidx, smid); // grab shared mem slot
    if (idx == shared_locs_per_sm) {
      printf ("runit: no slots available\n");
      return;
    }
    printf ("runit mywarp %d smid %d got idx %d\n", mywarp, smid, idx);
    timer[idx] = global_timer[mywarp];
    // Stuff done in GPTLstop_gpu
    idxsave = idx;
    idx = find_shared_idx (mywarp, shared_locs_per_sm);
    if (idx == shared_locs_per_sm) {
      printf ("runit: mywarp %d smid %d couldn't find my slot got inuse by warp %d\n",
              mywarp, smid, map[idxsave].warp);
      return;
    }
    // Serialize the work should force use of all available idx values
    //get_mutex (&mutex2[smid]);
    sleeptime = 0.01;
    GPTLmy_sleep (sleeptime);
    ++timer[idx];
    global_timer[mywarp] = timer[idx];
    // Reset shared memory to zero. Otherwise get random hangs due to shared memory cannot be
    // guaranteed initialized to zero.
    map[idx].inuse = false;
    map[idx].warp = 0;
    if (false)
      printf ("mywarp %d released idx %d\n", mywarp, idx);
    //free_mutex (&mutex2[smid]);
  }
  return;
}
| fe1088206abb77a1ec2680f13f66a2509e9c6141.cu | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <cuda.h>
#include <gptl.h>
#include <gptl_cuda.h>
__global__ void runit (int *, int, int, int, int *); // global routine drives GPU calculations
__device__ int get_shared_idx (int, int, int *, uint);
__device__ int find_shared_idx (int, int);
__device__ void get_mutex (volatile int *);
__device__ void free_mutex (volatile int *);
#define WARPS_PER_SM 4
#define MAX_OVERSUB 1
#define SHARED_LOCS_PER_SM (WARPS_PER_SM * MAX_OVERSUB)
__shared__ int timer[SHARED_LOCS_PER_SM];
__device__ volatile int mutex1[5] = {0,0,0,0,0};
__device__ volatile int mutex2[5] = {0,0,0,0,0};
typedef struct {
int warp;
bool inuse;
} Map;
__shared__ volatile Map map[SHARED_LOCS_PER_SM];
// Host driver: query GPU properties, parse -b/-n overrides, launch the
// runit() kernel, then verify every warp incremented its global_timer
// entry exactly once.
// Options:  -b <blocksize>  threads per block (default: cores per SM)
//           -n <niter>      total iterations  (default: cores per GPU)
int main (int argc, char **argv)
{
  int ret;               // return code
  int c;                 // for getopt
  int khz, warpsize, devnum, smcount, cores_per_sm, cores_per_gpu;
  float oversub;         // oversubscription fraction (diagnostic)
  int blocksize;         // blocksize for kernel launched
  int nblocks;           // number of blocks on the GPU given blocksize and oversub
  int niter;             // total number of iterations: default cores_per_gpu
  int extraiter = 0;     // number of extra iterations that don't complete a block
  int nwarps;            // total number of warps in the computation
  int warps_per_sm;
  int shared_locs_per_sm;
  int *global_timer;
  int nbad;
  int *maxidx;           // diagnostic: max index used

  // Retrieve information about the GPU and set defaults
  ret = GPTLget_gpu_props (&khz, &warpsize, &devnum, &smcount, &cores_per_sm, &cores_per_gpu);
  if (ret != 0)  // presumably 0 == success (GPTL convention) -- TODO confirm
    printf ("WARNING: GPTLget_gpu_props returned %d\n", ret);
  warps_per_sm = cores_per_sm / warpsize;
  blocksize = cores_per_sm; // default
  niter = cores_per_gpu;    // default

  // Check for user-supplied input to override defaults
  while ((c = getopt (argc, argv, "b:n:")) != -1) {
    switch (c) {
    case 'b':
      if ((blocksize = atoi (optarg)) < 1) {
        printf ("blocksize (-b <arg>) must be > 0 %d is invalid\n", blocksize);
        return -1;
      }
      break;
    case 'n':
      if ((niter = atoi (optarg)) < 1) {
        printf ("niter (-n <arg>) must be > 0 %d is invalid\n", niter);
        return -1;
      }
      break;
    default:
      printf ("unknown option %c\n", c);
      // Bug fix: usage text previously advertised a nonexistent -k option
      printf ("Usage: %s [-b blocksize] [-n niter]\n", argv[0]);
      return 2;
    }
  }

  oversub = (float) niter / cores_per_gpu;
  printf ("oversubscription factor=%f\n", oversub); // diagnostic only
  if (oversub > MAX_OVERSUB)
    printf ("WARNING: oversub=%f MAX_OVERSUB=%d\n", oversub, MAX_OVERSUB);
  shared_locs_per_sm = warps_per_sm * MAX_OVERSUB;
  // Bug fix: the device-side timer[]/map[] arrays are statically sized
  // SHARED_LOCS_PER_SM; a larger runtime value would index out of bounds.
  if (shared_locs_per_sm > SHARED_LOCS_PER_SM) {
    printf ("WARNING: clamping shared_locs_per_sm from %d to static limit %d\n",
            shared_locs_per_sm, SHARED_LOCS_PER_SM);
    shared_locs_per_sm = SHARED_LOCS_PER_SM;
  }

  nwarps = (niter + warpsize - 1) / warpsize;  // ceiling divide
  nblocks = niter / blocksize;
  if (niter % blocksize != 0) {
    extraiter = niter - nblocks*blocksize; // diagnostic only
    nblocks = niter / blocksize + 1;
  }
  printf ("%d iters broken into nblocks=%d blocksize=%d\n", niter, nblocks, blocksize);
  if (niter % blocksize != 0)
    printf ("Last iteration will be only %d elements\n", extraiter);

  // Initialize the GPTL library on CPU and GPU. This is mandatory before any start/stop
  ret = GPTLinitialize ();
  if (cudaMallocManaged ((void **) &global_timer, sizeof (int) * nwarps) != cudaSuccess ||
      cudaMallocManaged ((void **) &maxidx, sizeof (int)) != cudaSuccess) {
    printf ("cudaMallocManaged failure\n");
    return 1;
  }
  for (int n = 0; n < nwarps; ++n)
    global_timer[n] = 0;
  *maxidx = 0;  // bug fix: previously read uninitialized by get_shared_idx()

  runit<<<nblocks,blocksize>>> (global_timer, shared_locs_per_sm, warpsize, nwarps, maxidx);
  if (cudaGetLastError () != cudaSuccess)
    printf ("WARNING: runit kernel launch failed\n");
  printf ("main calling cudaDeviceSynchronize ()\n");
  cudaDeviceSynchronize (); // Ensure the GPU has finished the kernel before returning to CPU
  printf ("main done calling cudaDeviceSynchronize ()\n");

  printf ("Final value of global_timer: should be 1 everywhere\n");
  nbad = 0;
  for (int n = 0; n < nwarps; ++n)
    if (global_timer[n] != 1)
      ++nbad;
  printf ("Number of bad values in global_timer=%d\n", nbad);
  printf ("maxidx=%d\n", *maxidx);

  cudaFree (maxidx);
  cudaFree (global_timer);
  return 0;
}
// Acquire a simple device-side spinlock: spin until we are the thread that
// transitions *mutex from 0 (free) to 1 (held) via atomicCAS.
__device__ void get_mutex (volatile int *mutex)
{
  // atomicCAS returns the old value: 0 means we grabbed the lock,
  // 1 means another thread holds it and we must retry.
  while (atomicCAS ((int *) mutex, 0, 1) != 0) {
    // busy-wait until the holder releases the mutex
  }
}
// Release a spinlock previously acquired with get_mutex() by storing 0.
// NOTE(review): plain volatile store, no __threadfence() before release --
// presumably sufficient for this test's access pattern; TODO confirm that
// critical-section writes are visible to the next lock holder.
__device__ void free_mutex (volatile int *mutex)
{
  *mutex = 0;
}
// This routine will be called at start of GPTLstart_gpu.
// Claim a free slot in the per-SM shared-memory map[] under this SM's
// spinlock (mutex1[smid]).  Returns the claimed slot index, or
// shared_locs_per_sm if every slot is already in use (a diagnostic is
// printed in that case).  *maxidx records the largest slot index ever
// handed out (diagnostic only).
__device__ int get_shared_idx (int mywarp, int shared_locs_per_sm, int *maxidx, uint smid)
{
  int idx;

  // Critical section: only one warp per SM may scan/claim slots at a time
  get_mutex (&mutex1[smid]);
  for (idx = 0; idx < shared_locs_per_sm; ++idx) {
    if ( ! map[idx].inuse) {
      map[idx].inuse = true;
      map[idx].warp = mywarp;
      if (idx > *maxidx)
        *maxidx = idx; // diagnostic: max index used
      break;
    }
  }
  if (idx == shared_locs_per_sm)
    printf ("mywarp %d no spots available!\n", mywarp);
  // Single unlock/return path (original duplicated the unlock in the
  // failure branch); also removed the unused local "tot".
  free_mutex (&mutex1[smid]);
  return idx;
}
// This routine will be called at start of GPTLstop_gpu.
// Locate the map[] slot previously claimed by warp "mywarp".  Returns the
// slot index, or shared_locs_per_sm if no matching in-use slot exists
// (e.g. the slot was reclaimed by another warp).  Read-only scan, so no
// mutex is taken.  (Removed the unused local "n".)
__device__ int find_shared_idx (int mywarp, int shared_locs_per_sm)
{
  int idx;
  for (idx = 0; idx < shared_locs_per_sm; ++idx) {
    if (map[idx].inuse && map[idx].warp == mywarp)
      break;
  }
  return idx;
}
// GPU driver kernel: lane 0 of every warp exercises the GPTL start/stop
// bookkeeping pattern -- claim a shared-memory slot, copy in its counter,
// re-find the slot, sleep, bump the counter, publish it to global_timer[],
// then release the slot.  On success each warp's global_timer entry ends
// up exactly 1 (verified by the host).
// Cleanup vs. original: removed the unused local "isSet" and two dead
// stores to "sleeptime" (0.1 and 0.0001*mywarp were immediately
// overwritten by 0.01).
__global__ void runit (int *global_timer, int shared_locs_per_sm, int warpsize, int nwarps,
                       int *maxidx)
{
  int mywarp, mythread;
  int idx;
  int idxsave;
  int ret;
  float sleeptime;
  uint smid;

  ret = GPTLget_warp_thread (&mywarp, &mythread);  // ret not used further
  // Only the first lane of each warp does the bookkeeping
  if (mythread % warpsize == 0) {
    // Stuff done in GPTLstart_gpu
    asm ("mov.u32 %0, %smid;" : "=r"(smid));  // which SM are we running on
    idx = get_shared_idx (mywarp, shared_locs_per_sm, maxidx, smid); // grab shared mem slot
    if (idx == shared_locs_per_sm) {
      printf ("runit: no slots available\n");
      return;
    }
    printf ("runit mywarp %d smid %d got idx %d\n", mywarp, smid, idx);
    timer[idx] = global_timer[mywarp];
    // Stuff done in GPTLstop_gpu
    idxsave = idx;
    idx = find_shared_idx (mywarp, shared_locs_per_sm);
    if (idx == shared_locs_per_sm) {
      printf ("runit: mywarp %d smid %d couldn't find my slot got inuse by warp %d\n",
              mywarp, smid, map[idxsave].warp);
      return;
    }
    // Serialize the work should force use of all available idx values
    //get_mutex (&mutex2[smid]);
    sleeptime = 0.01;
    GPTLmy_sleep (sleeptime);
    ++timer[idx];
    global_timer[mywarp] = timer[idx];
    // Reset shared memory to zero. Otherwise get random hangs due to shared memory cannot be
    // guaranteed initialized to zero.
    map[idx].inuse = false;
    map[idx].warp = 0;
    if (false)
      printf ("mywarp %d released idx %d\n", mywarp, idx);
    //free_mutex (&mutex2[smid]);
  }
  return;
}
|
88c933c7f3f5391c0109375dec3a5f1ef960d990.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include <stdlib.h>
struct results {
float sum;
};
#include "summation_kernel.cu"
// CPU implementation
// Approximate ln(2) with the first n terms of the alternating harmonic
// series 1 - 1/2 + 1/3 - ...  Terms are accumulated from the smallest
// (i = n-1) down to the largest (i = 0) to limit float round-off.
// Each term is formed in double precision, matching the CPU reference
// exactly.
float log2_series(int n) {
    float acc = 0;
    for (int term = n - 1; term >= 0; --term) {
        double contrib = 1.0 / (term + 1.0);       // magnitude of this term
        acc += (term % 2 == 0) ? contrib : -contrib; // even index -> positive
    }
    return acc;
}
// Host driver (HIP build): compute the series on CPU for reference, launch
// summation_kernel to compute per-thread partial sums on the GPU, then
// finish the alternating-sign reduction on the CPU and report timing /
// bandwidth figures.
// Bug fix: the final reduction accumulated into an undeclared variable
// "res" (a compile error); it now accumulates into "sum", which is the
// value printed below.
int main(int argc, char ** argv) {
  int data_size = 1024 * 1024 * 128;

  // Run CPU version
  double start_time = getclock();
  float log2 = log2_series(data_size);
  double end_time = getclock();
  printf("CPU result: %f\n", log2);
  printf(" log(2)=%f\n", log(2.0));
  printf(" time=%fs\n", end_time - start_time);

  // Parameter definition
  int threads_per_block = 4 * 32;
  int blocks_in_grid = 8;
  int num_threads = threads_per_block * blocks_in_grid;

  // Timer initialization
  hipEvent_t start, stop;
  CUDA_SAFE_CALL(hipEventCreate(&start));
  CUDA_SAFE_CALL(hipEventCreate(&stop));
  int results_size = num_threads;
  // As the input is just an int, we don't have to allocate GPU memory for it
  // We'll just pass it as parameter
  // Allocating output data on CPU
  float data_out_cpu[results_size];
  // Allocating output data on GPU
  float * data_out_gpu;
  CUDA_SAFE_CALL(hipMalloc((void **)&data_out_gpu, results_size*sizeof(float)));

  // Start timer
  CUDA_SAFE_CALL(hipEventRecord(start, 0));
  // Launch kernel
  hipLaunchKernelGGL(( summation_kernel), dim3(blocks_in_grid), dim3(threads_per_block), 0, 0, data_size, data_out_gpu);
  // Stop timer
  CUDA_SAFE_CALL(hipEventRecord(stop, 0));
  CUDA_SAFE_CALL(hipEventSynchronize(stop));

  // Get results back
  CUDA_SAFE_CALL(hipMemcpy(data_out_cpu, data_out_gpu, results_size*sizeof(float), hipMemcpyDeviceToHost));

  // Finish reduction: each thread handled a contiguous run of iterations;
  // the run's leading index parity decides the sign of its partial sum.
  float sum = 0.;
  int i, units_per_thread = data_size / num_threads;
  for (i = 0; i < results_size; ++i) {
    if ((i*units_per_thread) % 2 == 0) sum += data_out_cpu[i];
    else                               sum -= data_out_cpu[i];
  }

  // Cleanup
  CUDA_SAFE_CALL(hipFree(data_out_gpu));
  printf("GPU results:\n");
  printf(" Sum: %f\n", sum);

  float elapsedTime;
  CUDA_SAFE_CALL(hipEventElapsedTime(&elapsedTime, start, stop)); // In ms
  double total_time = elapsedTime / 1000.; // s
  double time_per_iter = total_time / (double)data_size;
  double bandwidth = sizeof(float) / time_per_iter; // B/s
  printf(" Total time: %g s,\n Per iteration: %g ns\n Throughput: %g GB/s\n",
         total_time,
         time_per_iter * 1.e9,
         bandwidth / 1.e9);
  CUDA_SAFE_CALL(hipEventDestroy(start));
  CUDA_SAFE_CALL(hipEventDestroy(stop));
  return 0;
} | 88c933c7f3f5391c0109375dec3a5f1ef960d990.cu | #include "utils.h"
#include <stdlib.h>
struct results {
float sum;
};
#include "summation_kernel.cu"
// CPU implementation
float log2_series(int n) {
int i;
float res = 0;
for (i = n - 1; i >= 0; --i)
{
if(i%2==0) res += 1.0/(i+1.0);
else res -= 1.0/(i+1.0);
}
return res;
}
// Host driver (CUDA build): compute the series on CPU for reference,
// launch summation_kernel to compute per-thread partial sums on the GPU,
// then finish the alternating-sign reduction on the CPU and report timing
// and bandwidth figures.
// Bug fix: the final reduction accumulated into an undeclared variable
// "res" (a compile error); it now accumulates into "sum", which is the
// value printed below.  API calls are also routed through CUDA_SAFE_CALL.
int main(int argc, char ** argv) {
  int data_size = 1024 * 1024 * 128;

  // Run CPU version
  double start_time = getclock();
  float log2 = log2_series(data_size);
  double end_time = getclock();
  printf("CPU result: %f\n", log2);
  printf(" log(2)=%f\n", log(2.0));
  printf(" time=%fs\n", end_time - start_time);

  // Parameter definition
  int threads_per_block = 4 * 32;
  int blocks_in_grid = 8;
  int num_threads = threads_per_block * blocks_in_grid;

  // Timer initialization
  cudaEvent_t start, stop;
  CUDA_SAFE_CALL(cudaEventCreate(&start));
  CUDA_SAFE_CALL(cudaEventCreate(&stop));
  int results_size = num_threads;
  // As the input is just an int, we don't have to allocate GPU memory for it
  // We'll just pass it as parameter
  // Allocating output data on CPU
  float data_out_cpu[results_size];
  // Allocating output data on GPU
  float * data_out_gpu;
  CUDA_SAFE_CALL(cudaMalloc((void **)&data_out_gpu, results_size*sizeof(float)));

  // Start timer
  CUDA_SAFE_CALL(cudaEventRecord(start, 0));
  // Launch kernel
  summation_kernel<<<blocks_in_grid, threads_per_block>>>(data_size, data_out_gpu);
  // Stop timer
  CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
  CUDA_SAFE_CALL(cudaEventSynchronize(stop));

  // Get results back
  CUDA_SAFE_CALL(cudaMemcpy(data_out_cpu, data_out_gpu, results_size*sizeof(float), cudaMemcpyDeviceToHost));

  // Finish reduction: each thread handled a contiguous run of iterations;
  // the run's leading index parity decides the sign of its partial sum.
  float sum = 0.;
  int i, units_per_thread = data_size / num_threads;
  for (i = 0; i < results_size; ++i) {
    if ((i*units_per_thread) % 2 == 0) sum += data_out_cpu[i];
    else                               sum -= data_out_cpu[i];
  }

  // Cleanup
  CUDA_SAFE_CALL(cudaFree(data_out_gpu));
  printf("GPU results:\n");
  printf(" Sum: %f\n", sum);

  float elapsedTime;
  CUDA_SAFE_CALL(cudaEventElapsedTime(&elapsedTime, start, stop)); // In ms
  double total_time = elapsedTime / 1000.; // s
  double time_per_iter = total_time / (double)data_size;
  double bandwidth = sizeof(float) / time_per_iter; // B/s
  printf(" Total time: %g s,\n Per iteration: %g ns\n Throughput: %g GB/s\n",
         total_time,
         time_per_iter * 1.e9,
         bandwidth / 1.e9);
  CUDA_SAFE_CALL(cudaEventDestroy(start));
  CUDA_SAFE_CALL(cudaEventDestroy(stop));
  return 0;
}
62127a321e1c248b2459a5ab37b9188335963d4e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/ClassNLLCriterion.cu"
#else
// Forward pass of the class-NLL criterion (generic over `real`; HIP build).
// input: non-empty 1-D (C) or 2-D (N x C) tensor -- presumably
// log-probabilities per the standard NLLLoss contract (not checked here);
// target: 1-D class indices; weights: optional per-class rescaling of
// length n_classes; ignore_index: target value to skip.
// reduction == Reduction::None with 2-D input -> one loss per sample in
// `output`; otherwise `output` and `total_weight` become 1-element tensors
// filled by a single-block reducing kernel.
void THNN_(ClassNLLCriterion_updateOutput)(
           THCState *state,
           THCTensor *input,
           THCIndexTensor *target,
           THCTensor *output,
           int64_t reduction,
           THCTensor *weights,
           THCTensor *total_weight,
           int64_t ignore_index) {
  if (THCIndexTensor_(nDimensionLegacyNoScalars)(state, target) > 1) {
    THError("multi-target not supported");
  }
  int n_dims = THCTensor_(nDimensionLegacyNoScalars)(state, input);
  int n_classes = THCTensor_(size)(state, input, n_dims - 1);
  // Shift user-facing index base (Lua used 1-based) down to 0-based
  ignore_index -= TH_INDEX_BASE;
  if (weights) {
    THCUNN_assertSameGPU(
      state, 5, input, target, weights, output, total_weight
    );
  } else {
    THCUNN_assertSameGPU(
      state, 4, input, target, output, total_weight
    );
  }
  THArgCheck(!input->is_empty() && (n_dims <= 2 && n_dims > 0), 2, "non-empty vector or matrix expected");
  int64_t batch_size = n_dims == 1 ? 1 : THCTensor_(size)(state, input, 0);
  int64_t num_targets = THCudaLongTensor_size(state, target, 0);
  THArgCheck(batch_size == num_targets,
      2, "mismatch between the batch size of input (%ld) and that of target (%ld)",
      batch_size, num_targets);
  if (weights && THCTensor_(nElement)(state, weights) != n_classes) {
    THCDescBuff s1 = THCTensor_(sizeDesc)(state, weights);
    THError("weight tensor should be defined either for all %d classes or no classes"
            " but got weight tensor of shape: %s", n_classes, s1.str);
  }
  // Unreduced 2-D case: one thread per sample writes its own loss.
  if (reduction == Reduction::None && n_dims == 2) {
    THCTensor_(resize1d)(state, output, batch_size);
    if (weights) {
      weights = THCTensor_(newContiguous)(state, weights);
    }
    hipLaunchKernelGGL(( ClassNLLCriterion_updateOutput_no_reduce_kernel<real>)
      , dim3(GET_BLOCKS(batch_size)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
        batch_size,
        toDeviceTensor<real, 2>(state, input),
        toDeviceTensor<THCIndex_t, 1>(state, target),
        toDeviceTensor<real, 1>(state, output),
        weights ? THCTensor_(data)(state, weights) : NULL,
        n_classes,
        ignore_index);
    THCudaCheck(hipGetLastError());
    if (weights) {
      THCTensor_(free)(state, weights);
    }
    return;
  }
  // Reduced path: scalar output plus the total weight used for averaging.
  THCTensor_(resize1d)(state, output, 1);
  THCTensor_(resize1d)(state, total_weight, 1);
  input = THCTensor_(newContiguous)(state, input);
  weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
  target = THCIndexTensor_(newContiguous)(state, target);
  real *input_data = THCTensor_(data)(state, input);
  real *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
  THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
  real *output_data = THCTensor_(data)(state, output);
  real *total_weight_data = THCTensor_(data)(state, total_weight);
  if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 1) {
    // Single-sample case: trivial 1-thread kernel.
    hipLaunchKernelGGL(( cunn_ClassNLLCriterion_updateOutput_kernel1<real>)
      , dim3(1), dim3(1), 0, THCState_getCurrentStream(state),
        output_data,
        total_weight_data,
        input_data,
        target_data,
        weights_data,
        reduction == Reduction::ElementwiseMean,
        n_classes,
        ignore_index
    );
  } else if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 2) {
    // Batched case: one block of NTHREADS threads reduces the whole batch.
    hipLaunchKernelGGL(( cunn_ClassNLLCriterion_updateOutput_kernel<real, accreal>)
      , dim3(1), dim3(NTHREADS), 0, THCState_getCurrentStream(state),
        output_data,
        total_weight_data,
        input_data,
        target_data,
        weights_data,
        reduction == Reduction::ElementwiseMean,
        THCTensor_(size)(state, input, 0),
        THCTensor_(size)(state, input, 1),
        n_classes,
        ignore_index
    );
  }
  THCudaCheck(hipGetLastError());
  // Release the contiguous copies taken above.
  if (weights) {
    THCTensor_(free)(state, weights);
  }
  THCIndexTensor_(free)(state, target);
  THCTensor_(free)(state, input);
}
// Backward pass of the class-NLL criterion (HIP build): fills gradInput
// (resized/zeroed to match input) from gradOutput, target, optional
// per-class weights, and the total_weight computed in the forward pass.
// NOTE(review): unlike updateOutput, `ignore_index -= TH_INDEX_BASE` runs
// AFTER the Reduction::None early-return branch, so that branch receives
// the unadjusted value -- a no-op only when TH_INDEX_BASE == 0; TODO
// confirm intended.
void THNN_(ClassNLLCriterion_updateGradInput)(
           THCState *state,
           THCTensor *input,
           THCIndexTensor *target,
           THCTensor *gradOutput,
           THCTensor *gradInput,
           int64_t reduction,
           THCTensor *weights,
           THCTensor *total_weight,
           int64_t ignore_index) {
  if (THCIndexTensor_(nDimensionLegacyNoScalars)(state, target) > 1) {
    THError("multi-target not supported");
  }
  int n_dims = THCTensor_(nDimensionLegacyNoScalars)(state, input);
  int n_classes = THCTensor_(size)(state, input, n_dims - 1);
  THCTensor_(resizeAs)(state, gradInput, input);
  THCTensor_(zero)(state, gradInput);
  THArgCheck(THCTensor_(isContiguous)(state, gradInput), 4, "gradInput must be contiguous");
  if (weights) {
    THCUNN_assertSameGPU(
      state, 5, weights, input, target, gradInput, total_weight
    );
  }
  else {
    THCUNN_assertSameGPU(
      state, 4, input, target, gradInput, total_weight
    );
  }
  THArgCheck(!input->is_empty() && (n_dims <= 2 && n_dims > 0), 2, "non-empty vector or matrix expected");
  int64_t batch_size = n_dims == 1 ? 1 : THCTensor_(size)(state, input, 0);
  int64_t num_targets = THCudaLongTensor_size(state, target, 0);
  THArgCheck(batch_size == num_targets,
      2, "mismatch between the batch size of input (%ld) and that of target (%ld)",
      batch_size, num_targets);
  if (weights && THCTensor_(nElement)(state, weights) != n_classes) {
    THError("weight tensor should be defined either for all or no classes");
  }
  // Unreduced 2-D case: gradOutput holds one gradient per sample.
  if (reduction == Reduction::None && n_dims == 2) {
    THCUNN_check_dim_size(state, gradOutput, 1, 0, batch_size);
    if (weights) {
      weights = THCTensor_(newContiguous)(state, weights);
    }
    hipLaunchKernelGGL(( ClassNLLCriterion_updateGradInput_no_reduce_kernel<real>)
      , dim3(GET_BLOCKS(batch_size)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
        batch_size,
        toDeviceTensor<THCIndex_t, 1>(state, target),
        toDeviceTensor<real, 1>(state, gradOutput),
        toDeviceTensor<real, 2>(state, gradInput),
        weights ? THCTensor_(data)(state, weights) : NULL,
        n_classes,
        ignore_index);
    THCudaCheck(hipGetLastError());
    if (weights) {
      THCTensor_(free)(state, weights);
    }
    return;
  }
  // Shift user-facing index base down to 0-based (reduced path only).
  ignore_index -= TH_INDEX_BASE;
  weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
  target = THCIndexTensor_(newContiguous)(state, target);
  // Reduced path: gradOutput must be a single element.
  THCUNN_check_dim_size(state, gradOutput, 1, 0, 1);
  real *gradOutput_data = THCTensor_(data)(state, gradOutput);
  real *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
  real *gradInput_data = THCTensor_(data)(state, gradInput);
  THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
  real *total_weight_data = THCTensor_(data)(state, total_weight);
  if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 1) {
    // Single-sample case: trivial 1-thread kernel.
    hipLaunchKernelGGL(( cunn_ClassNLLCriterion_updateGradInput_kernel1<real>)
      , dim3(1), dim3(1), 0, THCState_getCurrentStream(state),
        gradInput_data,
        gradOutput_data,
        weights_data,
        target_data,
        total_weight_data,
        reduction == Reduction::ElementwiseMean,
        n_classes,
        ignore_index
    );
  } else {
    // Batched case: one block of NTHREADS threads handles the batch.
    hipLaunchKernelGGL(( cunn_ClassNLLCriterion_updateGradInput_kernel<real>)
      , dim3(1), dim3(NTHREADS), 0, THCState_getCurrentStream(state),
        gradInput_data,
        gradOutput_data,
        target_data,
        weights_data,
        total_weight_data,
        reduction == Reduction::ElementwiseMean,
        THCTensor_(size)(state, input, 0),
        THCTensor_(size)(state, input, 1),
        n_classes,
        ignore_index
    );
  }
  THCudaCheck(hipGetLastError());
  // Release the contiguous copies taken above.
  if (weights) {
    THCTensor_(free)(state, weights);
  }
  THCIndexTensor_(free)(state, target);
}
#endif
| 62127a321e1c248b2459a5ab37b9188335963d4e.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/ClassNLLCriterion.cu"
#else
// Forward pass of the class-NLL criterion (generic over `real`; CUDA build).
// input: non-empty 1-D (C) or 2-D (N x C) tensor -- presumably
// log-probabilities per the standard NLLLoss contract (not checked here);
// target: 1-D class indices; weights: optional per-class rescaling of
// length n_classes; ignore_index: target value to skip.
// reduction == Reduction::None with 2-D input -> one loss per sample in
// `output`; otherwise `output` and `total_weight` become 1-element tensors
// filled by a single-block reducing kernel.
void THNN_(ClassNLLCriterion_updateOutput)(
           THCState *state,
           THCTensor *input,
           THCIndexTensor *target,
           THCTensor *output,
           int64_t reduction,
           THCTensor *weights,
           THCTensor *total_weight,
           int64_t ignore_index) {
  if (THCIndexTensor_(nDimensionLegacyNoScalars)(state, target) > 1) {
    THError("multi-target not supported");
  }
  int n_dims = THCTensor_(nDimensionLegacyNoScalars)(state, input);
  int n_classes = THCTensor_(size)(state, input, n_dims - 1);
  // Shift user-facing index base (Lua used 1-based) down to 0-based
  ignore_index -= TH_INDEX_BASE;
  if (weights) {
    THCUNN_assertSameGPU(
      state, 5, input, target, weights, output, total_weight
    );
  } else {
    THCUNN_assertSameGPU(
      state, 4, input, target, output, total_weight
    );
  }
  THArgCheck(!input->is_empty() && (n_dims <= 2 && n_dims > 0), 2, "non-empty vector or matrix expected");
  int64_t batch_size = n_dims == 1 ? 1 : THCTensor_(size)(state, input, 0);
  int64_t num_targets = THCudaLongTensor_size(state, target, 0);
  THArgCheck(batch_size == num_targets,
      2, "mismatch between the batch size of input (%ld) and that of target (%ld)",
      batch_size, num_targets);
  if (weights && THCTensor_(nElement)(state, weights) != n_classes) {
    THCDescBuff s1 = THCTensor_(sizeDesc)(state, weights);
    THError("weight tensor should be defined either for all %d classes or no classes"
            " but got weight tensor of shape: %s", n_classes, s1.str);
  }
  // Unreduced 2-D case: one thread per sample writes its own loss.
  if (reduction == Reduction::None && n_dims == 2) {
    THCTensor_(resize1d)(state, output, batch_size);
    if (weights) {
      weights = THCTensor_(newContiguous)(state, weights);
    }
    ClassNLLCriterion_updateOutput_no_reduce_kernel<real>
      <<<GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
        batch_size,
        toDeviceTensor<real, 2>(state, input),
        toDeviceTensor<THCIndex_t, 1>(state, target),
        toDeviceTensor<real, 1>(state, output),
        weights ? THCTensor_(data)(state, weights) : NULL,
        n_classes,
        ignore_index);
    THCudaCheck(cudaGetLastError());
    if (weights) {
      THCTensor_(free)(state, weights);
    }
    return;
  }
  // Reduced path: scalar output plus the total weight used for averaging.
  THCTensor_(resize1d)(state, output, 1);
  THCTensor_(resize1d)(state, total_weight, 1);
  input = THCTensor_(newContiguous)(state, input);
  weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
  target = THCIndexTensor_(newContiguous)(state, target);
  real *input_data = THCTensor_(data)(state, input);
  real *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
  THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
  real *output_data = THCTensor_(data)(state, output);
  real *total_weight_data = THCTensor_(data)(state, total_weight);
  if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 1) {
    // Single-sample case: trivial 1-thread kernel.
    cunn_ClassNLLCriterion_updateOutput_kernel1<real>
      <<<1, 1, 0, THCState_getCurrentStream(state)>>>(
        output_data,
        total_weight_data,
        input_data,
        target_data,
        weights_data,
        reduction == Reduction::ElementwiseMean,
        n_classes,
        ignore_index
    );
  } else if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 2) {
    // Batched case: one block of NTHREADS threads reduces the whole batch.
    cunn_ClassNLLCriterion_updateOutput_kernel<real, accreal>
      <<<1, NTHREADS, 0, THCState_getCurrentStream(state)>>>(
        output_data,
        total_weight_data,
        input_data,
        target_data,
        weights_data,
        reduction == Reduction::ElementwiseMean,
        THCTensor_(size)(state, input, 0),
        THCTensor_(size)(state, input, 1),
        n_classes,
        ignore_index
    );
  }
  THCudaCheck(cudaGetLastError());
  // Release the contiguous copies taken above.
  if (weights) {
    THCTensor_(free)(state, weights);
  }
  THCIndexTensor_(free)(state, target);
  THCTensor_(free)(state, input);
}
// Backward pass of the class-NLL criterion (CUDA build): fills gradInput
// (resized/zeroed to match input) from gradOutput, target, optional
// per-class weights, and the total_weight computed in the forward pass.
// NOTE(review): unlike updateOutput, `ignore_index -= TH_INDEX_BASE` runs
// AFTER the Reduction::None early-return branch, so that branch receives
// the unadjusted value -- a no-op only when TH_INDEX_BASE == 0; TODO
// confirm intended.
void THNN_(ClassNLLCriterion_updateGradInput)(
           THCState *state,
           THCTensor *input,
           THCIndexTensor *target,
           THCTensor *gradOutput,
           THCTensor *gradInput,
           int64_t reduction,
           THCTensor *weights,
           THCTensor *total_weight,
           int64_t ignore_index) {
  if (THCIndexTensor_(nDimensionLegacyNoScalars)(state, target) > 1) {
    THError("multi-target not supported");
  }
  int n_dims = THCTensor_(nDimensionLegacyNoScalars)(state, input);
  int n_classes = THCTensor_(size)(state, input, n_dims - 1);
  THCTensor_(resizeAs)(state, gradInput, input);
  THCTensor_(zero)(state, gradInput);
  THArgCheck(THCTensor_(isContiguous)(state, gradInput), 4, "gradInput must be contiguous");
  if (weights) {
    THCUNN_assertSameGPU(
      state, 5, weights, input, target, gradInput, total_weight
    );
  }
  else {
    THCUNN_assertSameGPU(
      state, 4, input, target, gradInput, total_weight
    );
  }
  THArgCheck(!input->is_empty() && (n_dims <= 2 && n_dims > 0), 2, "non-empty vector or matrix expected");
  int64_t batch_size = n_dims == 1 ? 1 : THCTensor_(size)(state, input, 0);
  int64_t num_targets = THCudaLongTensor_size(state, target, 0);
  THArgCheck(batch_size == num_targets,
      2, "mismatch between the batch size of input (%ld) and that of target (%ld)",
      batch_size, num_targets);
  if (weights && THCTensor_(nElement)(state, weights) != n_classes) {
    THError("weight tensor should be defined either for all or no classes");
  }
  // Unreduced 2-D case: gradOutput holds one gradient per sample.
  if (reduction == Reduction::None && n_dims == 2) {
    THCUNN_check_dim_size(state, gradOutput, 1, 0, batch_size);
    if (weights) {
      weights = THCTensor_(newContiguous)(state, weights);
    }
    ClassNLLCriterion_updateGradInput_no_reduce_kernel<real>
      <<<GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
        batch_size,
        toDeviceTensor<THCIndex_t, 1>(state, target),
        toDeviceTensor<real, 1>(state, gradOutput),
        toDeviceTensor<real, 2>(state, gradInput),
        weights ? THCTensor_(data)(state, weights) : NULL,
        n_classes,
        ignore_index);
    THCudaCheck(cudaGetLastError());
    if (weights) {
      THCTensor_(free)(state, weights);
    }
    return;
  }
  // Shift user-facing index base down to 0-based (reduced path only).
  ignore_index -= TH_INDEX_BASE;
  weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
  target = THCIndexTensor_(newContiguous)(state, target);
  // Reduced path: gradOutput must be a single element.
  THCUNN_check_dim_size(state, gradOutput, 1, 0, 1);
  real *gradOutput_data = THCTensor_(data)(state, gradOutput);
  real *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
  real *gradInput_data = THCTensor_(data)(state, gradInput);
  THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
  real *total_weight_data = THCTensor_(data)(state, total_weight);
  if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 1) {
    // Single-sample case: trivial 1-thread kernel.
    cunn_ClassNLLCriterion_updateGradInput_kernel1<real>
      <<<1, 1, 0, THCState_getCurrentStream(state)>>>(
        gradInput_data,
        gradOutput_data,
        weights_data,
        target_data,
        total_weight_data,
        reduction == Reduction::ElementwiseMean,
        n_classes,
        ignore_index
    );
  } else {
    // Batched case: one block of NTHREADS threads handles the batch.
    cunn_ClassNLLCriterion_updateGradInput_kernel<real>
      <<<1, NTHREADS, 0, THCState_getCurrentStream(state)>>>(
        gradInput_data,
        gradOutput_data,
        target_data,
        weights_data,
        total_weight_data,
        reduction == Reduction::ElementwiseMean,
        THCTensor_(size)(state, input, 0),
        THCTensor_(size)(state, input, 1),
        n_classes,
        ignore_index
    );
  }
  THCudaCheck(cudaGetLastError());
  // Release the contiguous copies taken above.
  if (weights) {
    THCTensor_(free)(state, weights);
  }
  THCIndexTensor_(free)(state, target);
}
#endif
|
3c6e56582b3dc4b983b1fc392d3e9df888f7259e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* purpose: matrix-matrix multiplication using simple square matrices of
* dimension N x N; this is the shared memory variant where
* a particular thread-block (BS x BS) operates on a sequence of
* pairs of submatrices also dimensioned BS x BS in a
* dot-product-like fashion to form a particular sub-block
* in the result matrix; the block indices of the block-grid
* may serve to navigate through the matrices and identify
* individual submatrices;
* n.b. 3072 % 16 = N % BS = 0
* result: much faster than without using shared memory, ie approx 10x
* compilation: nvcc ./mmm_example_3.cu
* usage: ./a.out
*/
#include <stdio.h>
#define N 4800
#define BS 16
/*
* GPU kernel for straightforward matrix-matrix multiplication
* using shared memory
* n.b. i,j are array/matrix indices starting from 0,1,2...
* at first sight it may be confusing that i goes with threadIdx.y
* while j corresponds to threadIdx.x
*/
// Tiled matrix-matrix multiply C = A * B using shared memory.
// Preconditions: blockDim == (BS, BS), gridDim == (N/BS, N/BS), and
// N % BS == 0 (guaranteed here since N = 4800, BS = 16).
// n.b. i,j are array/matrix indices starting from 0,1,2...
// at first sight it may be confusing that i goes with threadIdx.y
// while j corresponds to threadIdx.x
__global__ void KrnlMMM(float **A, float **B, float **C)
{
  int i, j, k, Blckk;
  __shared__ float BlckA[BS][BS], BlckB[BS][BS], BlckC[BS][BS];
  float tmpC;
  /*
   * target block to compute here starts at element
   * C[blockIdx.y*BS][blockIdx.x*BS] and will involve
   * all combinations of blocks starting with
   * A[blockIdx.y*BS][0BS,1BS,2BS...]
   * and
   * B[0BS,1BS,2BS...][blockIdx.x*BS]
   * where the latter series of A/B blocks will be combined
   * in a dot-product-like fashion, so that eventually we
   * will end up with a number of N / BS such block-products;
   * at first we shall initialize our resulting block, which
   * is easily achieved by letting all threads operate on their
   * target item in parallel;
   */
  i = threadIdx.y;
  j = threadIdx.x;
  BlckC[i][j] = (float) 0;
  for (Blckk = 0; Blckk < (N/BS); Blckk++) {
    /*
     * get sub-matrices into shared memory; this will also be
     * easy since we are working with a threadblock of size
     * BS x BS, hence each thread just needs to copy a single
     * element from global into shared memory
     */
    BlckA[i][j] = A[(blockIdx.y*BS)+i][(Blckk*BS)+j];
    BlckB[i][j] = B[(Blckk*BS)+i][(blockIdx.x*BS)+j];
    __syncthreads();  // tiles fully loaded before any thread reads them
    /*
     * next the currently selected pair of BlckA[][]
     * and BlckB[][] shall be multiplied together such
     * that each individual thread i,j determines its own
     * target element inside BlckC[][]
     */
    tmpC = (float) 0;
    for (k = 0; k<BS; k++) {
      tmpC += BlckA[i][k] * BlckB[k][j];
    }
    BlckC[i][j] += tmpC;
    __syncthreads();  // all reads done before the next iteration overwrites tiles
  }
  /*
   * and the only thing remaining is to copy back the resulting submatrix
   * BlckC[][] into global memory to a particular sector of matrix
   * C[][]; again, this can be done in parallel by all threads
   * each of them copying just a single element
   */
  C[(blockIdx.y*BS)+i][(blockIdx.x*BS)+j] = BlckC[i][j];
  // NOTE(review): this trailing barrier appears unnecessary (each thread
  // only writes its own element) but is harmless.
  __syncthreads();
  return;
}
/*
* host main
*/
// Host driver: build two N x N matrices in CUDA unified memory, multiply
// them with the shared-memory kernel, and spot-check two result elements
// against an explicit CPU dot product.
// NOTE(review): relies on srand/rand/time although only <stdio.h> is
// included directly -- presumably pulled in transitively by nvcc; TODO
// confirm.  CUDA API return codes are unchecked, and each matrix row is a
// separate cudaMallocManaged call (simple but slow for N = 4800).
int main()
{
  int i, j, k, i0, j0;
  dim3 threadsPerBlock, numBlocks;
  float **A, **B, **C, tC;
  /*
   * using CUDA unified memory, first allocate
   * the arrays in convenient 2D format, then
   * initialize with some dummy content
   */
  srand(time(0));
  cudaMallocManaged(&A, N * sizeof(float *));
  cudaMallocManaged(&B, N * sizeof(float *));
  cudaMallocManaged(&C, N * sizeof(float *));
  for (i = 0; i < N; i++) {
    cudaMallocManaged(&A[i], N * sizeof(float));
    cudaMallocManaged(&B[i], N * sizeof(float));
    cudaMallocManaged(&C[i], N * sizeof(float));
    for (j = 0; j < N; j++) {
      A[i][j] = (float) rand() / (float) RAND_MAX;  // uniform in [0,1]
      B[i][j] = (float) 1 / A[i][j];
      C[i][j] = (float) 0;
    }
  }
  /*
   * next we want to call a simple kernel that carries out
   * matrix-matrix multiplication of A x B and stores the results
   * into C; this is an improved version based on shared memory
   */
  threadsPerBlock.x = BS;
  threadsPerBlock.y = BS;
  numBlocks.x = N / threadsPerBlock.x;  // exact: N % BS == 0
  numBlocks.y = N / threadsPerBlock.y;
  KrnlMMM<<<numBlocks, threadsPerBlock>>>(A, B, C);
  cudaDeviceSynchronize();
  /*
   * just pick a random item and compute it explicitly for a check;
   * A entries are < 1, so i0 and j0 fall in [0, N-1]
   */
  i0 = (int) ((float) N * A[0][0]);
  j0 = (int) ((float) N * A[1][1]);
  tC = (float) 0;
  for (k=0; k<N; k++) {
    tC += A[i0][k] * B[k][j0];
  }
  printf("checking C[%d][%d]\n", i0, j0);
  printf("explicit calc %6.3f\n", tC);
  printf("kernel calc %6.3f\n", C[i0][j0]);
  tC = (float) 0;
  for (k=0; k<N; k++) {
    tC += A[0][k] * B[k][0];
  }
  printf("checking C[0][0]\n");
  printf("explicit calc %6.3f\n", tC);
  printf("kernel calc %6.3f\n", C[0][0]);
  // NOTE(review): only the row-pointer arrays are freed; the per-row
  // allocations leak (reclaimed at process exit).
  cudaFree(C);
  cudaFree(B);
  cudaFree(A);
  return(0);
}
| 3c6e56582b3dc4b983b1fc392d3e9df888f7259e.cu | /*
* purpose: matrix-matrix multiplication using simple square matrices of
* dimension N x N; this is the shared memory variant where
* a particular thread-block (BS x BS) operates on a sequence of
* pairs of submatrices also dimensioned BS x BS in a
* dot-product-like fashion to form a particular sub-block
* in the result matrix; the block indices of the block-grid
* may serve to navigate through the matrices and identify
* individual submatrices;
 * n.b. 4800 % 16 = N % BS = 0
* result: much faster than without using shared memory, ie approx 10x
* compilation: nvcc ./mmm_example_3.cu
* usage: ./a.out
*/
#include <stdio.h>
#define N 4800
#define BS 16
/*
* GPU kernel for straightforward matrix-matrix multiplication
* using shared memory
* n.b. i,j are array/matrix indices starting from 0,1,2...
* at first sight it may be confusing that i goes with threadIdx.y
* while j corresponds to threadIdx.x
*/
__global__ void KrnlMMM(float **A, float **B, float **C)
{
    /*
     * Tiled matrix-matrix multiply C = A x B for square N x N matrices.
     * Launch configuration: grid of (N/BS) x (N/BS) blocks, each block
     * BS x BS threads; requires N % BS == 0.
     * Each thread block computes the BS x BS sub-block of C starting at
     * C[blockIdx.y*BS][blockIdx.x*BS] by sweeping over the N/BS pairs of
     * A/B tiles in a dot-product-like fashion.
     * n.b. i goes with threadIdx.y (row) while j goes with threadIdx.x
     * (column), so j varies fastest across a warp and the global loads of
     * each tile row are contiguous.
     */
    __shared__ float BlckA[BS][BS], BlckB[BS][BS];
    const int i = threadIdx.y;
    const int j = threadIdx.x;
    /*
     * Each C element is produced by exactly one thread, so the running sum
     * can live in a register instead of a third shared-memory tile; this
     * saves BS*BS*sizeof(float) bytes of shared memory per block and
     * removes one barrier at the end of the kernel.
     */
    float acc = 0.0f;
    for (int Blckk = 0; Blckk < (N / BS); Blckk++) {
        /*
         * Stage the current pair of sub-matrices in shared memory; the
         * thread block is BS x BS, so each thread copies one element of
         * each tile.
         */
        BlckA[i][j] = A[(blockIdx.y * BS) + i][(Blckk * BS) + j];
        BlckB[i][j] = B[(Blckk * BS) + i][(blockIdx.x * BS) + j];
        __syncthreads();  /* both tiles fully loaded before anyone reads */
        /*
         * Accumulate this tile pair's contribution to element (i,j).
         */
        for (int k = 0; k < BS; k++) {
            acc += BlckA[i][k] * BlckB[k][j];
        }
        __syncthreads();  /* done reading before the tiles are overwritten */
    }
    /*
     * Write the finished element of the C sub-block back to global memory;
     * no barrier is needed here because no thread reads another thread's
     * result.
     */
    C[(blockIdx.y * BS) + i][(blockIdx.x * BS) + j] = acc;
}
/*
* host main
*/
int main()
{
    int i, j, k, i0, j0;
    dim3 threadsPerBlock, numBlocks;
    float **A, **B, **C, tC;
    /*
     * Using CUDA unified memory: first allocate the matrices in a
     * convenient 2D row-pointer format, then fill them with dummy
     * content such that A[i][j] * B[i][j] == 1 elementwise.
     */
    srand(time(0));
    cudaMallocManaged(&A, N * sizeof(float *));
    cudaMallocManaged(&B, N * sizeof(float *));
    cudaMallocManaged(&C, N * sizeof(float *));
    for (i = 0; i < N; i++) {
        cudaMallocManaged(&A[i], N * sizeof(float));
        cudaMallocManaged(&B[i], N * sizeof(float));
        cudaMallocManaged(&C[i], N * sizeof(float));
        for (j = 0; j < N; j++) {
            A[i][j] = (float) rand() / (float) RAND_MAX;
            B[i][j] = (float) 1 / A[i][j];
            C[i][j] = (float) 0;
        }
    }
    /*
     * Launch the shared-memory matrix-matrix multiplication kernel:
     * one BS x BS thread block per BS x BS sub-block of C
     * (N % BS == 0 is assumed).
     */
    threadsPerBlock.x = BS;
    threadsPerBlock.y = BS;
    numBlocks.x = N / threadsPerBlock.x;
    numBlocks.y = N / threadsPerBlock.y;
    KrnlMMM<<<numBlocks, threadsPerBlock>>>(A, B, C);
    cudaDeviceSynchronize();
    /*
     * Pick a pseudo-random element and recompute it on the host as a
     * spot check; since rand()/RAND_MAX may be exactly 1.0, the derived
     * indices must be clamped into [0, N-1] to avoid an out-of-bounds
     * read (the original code could index row/column N).
     */
    i0 = (int) ((float) N * A[0][0]);
    j0 = (int) ((float) N * A[1][1]);
    if (i0 >= N) i0 = N - 1;
    if (j0 >= N) j0 = N - 1;
    tC = (float) 0;
    for (k = 0; k < N; k++) {
        tC += A[i0][k] * B[k][j0];
    }
    printf("checking C[%d][%d]\n", i0, j0);
    printf("explicit calc %6.3f\n", tC);
    printf("kernel   calc %6.3f\n", C[i0][j0]);
    /*
     * second, fixed spot check on the top-left element
     */
    tC = (float) 0;
    for (k = 0; k < N; k++) {
        tC += A[0][k] * B[k][0];
    }
    printf("checking C[0][0]\n");
    printf("explicit calc %6.3f\n", tC);
    printf("kernel   calc %6.3f\n", C[0][0]);
    /*
     * Release all managed allocations: each row first, then the arrays
     * of row pointers.  Freeing only A/B/C (as the original did) leaks
     * every one of the 3*N row allocations.
     */
    for (i = 0; i < N; i++) {
        cudaFree(C[i]);
        cudaFree(B[i]);
        cudaFree(A[i]);
    }
    cudaFree(C);
    cudaFree(B);
    cudaFree(A);
    return(0);
}
|
bcecec5c1fc4dc38529d355c74ab87f0db8c2958.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/partition.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
// #define SORT_MATERIALS
#define CACHE_FIRST_BOUNCE
#define RAY_TERMINATION
#define ANTIALIASING
// #define PERF_RAY_TERMINATION
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
// Report the most recent HIP runtime error, if any, and abort the process.
// The whole body is compiled out unless _DEBUG is set, so release builds
// skip the (expensive) implicit device synchronization entirely.
//   msg  - caller-supplied context string included in the report
//   file - source file of the call site (NULL omits the location)
//   line - source line of the call site
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if _DEBUG
    // Synchronize first so asynchronous kernel faults surface here rather
    // than at some unrelated later call.
    hipDeviceSynchronize();
    hipError_t err = hipGetLastError();
    if (hipSuccess == err) {
        return;
    }

    fprintf(stderr, "CUDA error");
    if (file) {
        fprintf(stderr, " (%s:%d)", file, line);
    }
    fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
# ifdef _WIN32
    // Keep the console window open on Windows so the message can be read.
    getchar();
# endif
    exit(EXIT_FAILURE);
#endif
}
// Build a per-(iteration, pixel-index, bounce-depth) seed so every path
// segment gets an independent, reproducible RNG stream.
// n.b. the top bit must be produced with an unsigned literal: (1 << 31)
// shifts into the sign bit of a signed int, which is undefined behavior
// in C++ (pre-C++20); (1u << 31) yields the same bit pattern without UB.
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
    int h = utilhash((int) ((1u << 31) | (depth << 22) | iter)) ^ utilhash(index);
    return thrust::default_random_engine(h);
}
// Kernel that writes the image to the OpenGL PBO directly.
// Launched on a 2D grid covering the full resolution; each thread converts
// one pixel of the accumulated radiance buffer to 8-bit RGBA.
//   iter - number of completed iterations; `image` holds a running SUM over
//          iterations, so dividing by iter yields the average.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
        int iter, glm::vec3* image) {
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;

    if (x < resolution.x && y < resolution.y) {
        int index = x + (y * resolution.x);
        glm::vec3 pix = image[index];

        glm::ivec3 color;
        // Average over iterations, scale to [0,255], clamp overshoot.
        color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
        color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
        color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);

        // Each thread writes one pixel location in the texture (textel)
        pbo[index].w = 0;
        pbo[index].x = color.x;
        pbo[index].y = color.y;
        pbo[index].z = color.z;
    }
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static Triangle* dev_triangles = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
static ShadeableIntersection * dev_intersections_cache = NULL;
// static utilityCore::PerformanceTimer timer;
// TODO: static variables for device memory, any extra info you need, etc
// ...
// Allocate and initialize all device-side buffers for rendering `scene`.
// Sizes are derived from the camera resolution (one path / intersection
// slot per pixel) and from the scene's geometry/material/triangle counts.
// Must be paired with pathtraceFree(); n.b. individual allocations are not
// checked — only a single checkCUDAError at the end (active in _DEBUG).
void pathtraceInit(Scene *scene) {
    hst_scene = scene;
    const Camera &cam = hst_scene->state.camera;
    const int pixelcount = cam.resolution.x * cam.resolution.y;

    // accumulated radiance image, zeroed
    hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
    hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));

    // one path segment per pixel (contents written by generateRayFromCamera)
    hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment));

    // scene geometry, materials, and mesh triangles copied to the device
    hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
    hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);

    hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
    hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice);

    // per-bounce intersection results, plus a copy used to cache the first
    // bounce across iterations (CACHE_FIRST_BOUNCE)
    hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
    hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));

    hipMalloc(&dev_intersections_cache, pixelcount * sizeof(ShadeableIntersection));
    hipMemset(dev_intersections_cache, 0, pixelcount * sizeof(ShadeableIntersection));

    int triSize = scene->triangles.size() * sizeof(Triangle);
    hipMalloc(&dev_triangles, triSize);
    hipMemcpy(dev_triangles, scene->triangles.data(), triSize, hipMemcpyHostToDevice);

    checkCUDAError("pathtraceInit");
}
// Release every device allocation made in pathtraceInit().
// Each pointer is reset to NULL after freeing so a repeated call to
// pathtraceFree() (or a free before any init) stays a harmless no-op —
// hipFree(NULL) is a no-op, whereas freeing the same live pointer twice
// is a runtime error.
void pathtraceFree() {
    hipFree(dev_image);  // no-op if dev_image is null
    dev_image = NULL;
    hipFree(dev_paths);
    dev_paths = NULL;
    hipFree(dev_geoms);
    dev_geoms = NULL;
    hipFree(dev_materials);
    dev_materials = NULL;
    hipFree(dev_intersections);
    dev_intersections = NULL;
    hipFree(dev_intersections_cache);
    dev_intersections_cache = NULL;
    hipFree(dev_triangles);
    dev_triangles = NULL;
    checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
// One thread per pixel on a 2D launch: initialize the pixel's PathSegment
// with a camera ray, white throughput, and traceDepth remaining bounces.
// Optional effects, in order: sub-pixel jitter (ANTIALIASING), motion blur
// (lerp of view direction along cam.motion), thin-lens depth of field.
// n.b. the RNG draws happen in a fixed order, so the enabled feature set
// changes which random values each effect consumes.
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    // float copies of the pixel coordinates so jitter can be sub-pixel
    float px = (float)x;
    float py = (float)y;

    if (x < cam.resolution.x && y < cam.resolution.y) {
        int index = x + (y * cam.resolution.x);
        thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
        thrust::uniform_real_distribution<float> u01(0, 1);
        PathSegment & segment = pathSegments[index];

        segment.ray.origin = cam.position;
        // multiplicative identity: throughput starts at white
        segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
#ifdef ANTIALIASING
        // jitter the sample point uniformly within the pixel
        px += u01(rng);
        py += u01(rng);
#endif // ANTIALIASING

        // Motion blur: sample a time offset from a half-normal distribution
        // and blend the view direction toward (view + motion).
        thrust::normal_distribution<float> n01(0, 1);
        float t = abs(n01(rng));
        glm::vec3 view = cam.view * (1 - t) + (cam.view + cam.motion) * t;
        segment.ray.direction = glm::normalize(view
            - cam.right * cam.pixelLength.x * (px - (float)cam.resolution.x * 0.5f)
            - cam.up * cam.pixelLength.y * (py - (float)cam.resolution.y * 0.5f)
        );

        if (cam.depth_of_field) {
            // sample point on lens
            float r = u01(rng) * cam.lens_radius;
            float theta = u01(rng) * 2 * PI;
            glm::vec3 p_lens(r * cos(theta), r * sin(theta), 0.0f);
            // compute point on plane of focus
            float ft = cam.focal_distance / glm::abs(segment.ray.direction.z);
            glm::vec3 p_focus = segment.ray.origin + ft * segment.ray.direction;
            // update ray for effect of lens: originate on the lens, aim
            // through the in-focus point so only that plane stays sharp
            segment.ray.origin += p_lens;
            segment.ray.direction = glm::normalize(p_focus - segment.ray.origin);
        }

        segment.pixelIndex = index;
        segment.remainingBounces = traceDepth;
    }
}
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
// One thread per active path segment: find the nearest geometry hit along
// the segment's ray and record (t, materialId, surfaceNormal) into
// intersections[path_index].  A miss is recorded as t = -1.0.
// Intersection tests brute-force every geom; meshes additionally consult
// the shared `meshes` triangle array.
__global__ void computeIntersections(
    int depth
    , int num_paths
    , PathSegment * pathSegments
    , Geom * geoms
    , Triangle *meshes
    , int geoms_size
    , ShadeableIntersection * intersections
    )
{
    int path_index = blockIdx.x * blockDim.x + threadIdx.x;

    if (path_index < num_paths)
    {
        PathSegment pathSegment = pathSegments[path_index];
#ifndef RAY_TERMINATION
        // Without stream compaction, dead paths remain in the array; skip them.
        if (pathSegment.remainingBounces <= 0) {
            return;
        }
#endif
        float t;
        glm::vec3 intersect_point;
        glm::vec3 normal;
        float t_min = FLT_MAX;
        int hit_geom_index = -1;
        bool outside = true;

        glm::vec3 tmp_intersect;
        glm::vec3 tmp_normal;

        // naive parse through global geoms
        for (int i = 0; i < geoms_size; i++)
        {
            Geom & geom = geoms[i];

            // Reset per geometry: previously `t` kept its prior (or
            // indeterminate, on the first pass) value when geom.type
            // matched no branch, which could register a bogus hit.
            t = -1.0f;

            if (geom.type == CUBE)
            {
                t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
            }
            else if (geom.type == SPHERE)
            {
                t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
            } else if (geom.type == MESH) {
                t = meshIntersectionTest(geom, meshes, pathSegment.ray, tmp_intersect, tmp_normal, outside);
            }

            // Keep the closest positive hit seen so far.
            if (t > 0.0f && t_min > t)
            {
                t_min = t;
                hit_geom_index = i;
                intersect_point = tmp_intersect;
                normal = tmp_normal;
            }
        }

        if (hit_geom_index == -1)
        {
            intersections[path_index].t = -1.0f;
        }
        else
        {
            // The ray hits something: record nearest-hit data for shading.
            intersections[path_index].t = t_min;
            intersections[path_index].materialId = geoms[hit_geom_index].materialid;
            intersections[path_index].surfaceNormal = normal;
        }
    }
}
// Shade each path segment based on its intersection result:
//   miss            -> color = black, path terminated
//   light hit       -> throughput *= emitted radiance, path terminated
//   surface hit     -> scatterRay() attenuates color and spawns next bounce
// Termination is signalled via remainingBounces = -1 (anything <= 0 is
// treated as dead by the partition predicate / the RAY_TERMINATION-off path).
__global__ void shadeRealMaterial(
    int iter,
    int depth
    , int num_paths
    , ShadeableIntersection* shadeableIntersections
    , PathSegment* pathSegments
    , Material* materials
    )
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < num_paths)
    {
        ShadeableIntersection intersection = shadeableIntersections[idx];
#ifndef RAY_TERMINATION
        // Without stream compaction, dead paths remain in the array; skip them.
        if (pathSegments[idx].remainingBounces <= 0) {
            return;
        }
#endif
        if (intersection.t > 0.0f) { // if the intersection exists...
            // Set up the RNG
            // LOOK: this is how you use thrust's RNG! Please look at
            // makeSeededRandomEngine as well.
            thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth);
            thrust::uniform_real_distribution<float> u01(0, 1);

            Material material = materials[intersection.materialId];
            glm::vec3 materialColor = material.color;

            // If the material indicates that the object was a light, "light" the ray
            if (material.emittance > 0.0f) {
                pathSegments[idx].color *= (materialColor * material.emittance);
                pathSegments[idx].remainingBounces = -1;
            }
            // Otherwise, do some pseudo-lighting computation. This is actually more
            // like what you would expect from shading in a rasterizer like OpenGL.
            else {
                // reconstruct the hit point from the parametric distance t
                glm::vec3 intersect = pathSegments[idx].ray.origin + intersection.t * pathSegments[idx].ray.direction;
                scatterRay(pathSegments[idx], intersect, intersection.surfaceNormal, material, rng);
            }
            // If there was no intersection, color the ray black.
            // Lots of renderers use 4 channel color, RGBA, where A = alpha, often
            // used for opacity, in which case they can indicate "no opacity".
            // This can be useful for post-processing and image compositing.
        }
        else {
            pathSegments[idx].color = glm::vec3(0.0f);
            pathSegments[idx].remainingBounces = -1;
        }
    }
}
// Accumulate this iteration's per-path radiance into the running image.
// One thread per path; after compaction a path can live anywhere in the
// array, so the write is routed through its stored pixelIndex.
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
    int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid >= nPaths) {
        return;  // guard the grid tail
    }
    const PathSegment &seg = iterationPaths[tid];
    image[seg.pixelIndex] += seg.color;
}
// Predicate used with thrust::partition to move still-active paths to the
// front of the array.  NOTE(review): despite the name, this returns true
// for paths that are NOT yet terminated (remainingBounces > 0); consider
// renaming (e.g. isPathAlive) if callers can be updated.
struct isPathTerminated {
    __device__ bool operator()(const PathSegment &path) {
        return path.remainingBounces > 0;
    }
};
// Ordering on intersections by material id, used with thrust::sort_by_key
// under SORT_MATERIALS so that neighboring shading threads handle the same
// material (presumably to reduce warp divergence — enabled via the macro).
struct materialIdCmp {
    __device__ bool operator()(const ShadeableIntersection& s1, const ShadeableIntersection &s2) {
        return s1.materialId < s2.materialId;
    }
};
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
/**
 * Wrapper for the __global__ call that sets up the kernel calls and does a ton
 * of memory management.
 *
 * Per call (one Monte-Carlo iteration): generate camera rays, then loop
 * bounce-by-bounce (intersect -> optional material sort -> shade -> compact)
 * until every path terminates, then gather results into dev_image and push
 * them to the PBO and the host image.
 * n.b. `frame` is accepted but unused in this body.
 */
void pathtrace(uchar4 *pbo, int frame, int iter) {
    const int traceDepth = hst_scene->state.traceDepth;
    const Camera &cam = hst_scene->state.camera;
    const int pixelcount = cam.resolution.x * cam.resolution.y;

    // 2D block for generating ray from camera
    const dim3 blockSize2d(8, 8);
    const dim3 blocksPerGrid2d(
        (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
        (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);

    // 1D block for path tracing
    const int blockSize1d = 128;

    ///////////////////////////////////////////////////////////////////////////
    // Recap:
    // * Initialize array of path rays (using rays that come out of the camera)
    //   * You can pass the Camera object to that kernel.
    //   * Each path ray must carry at minimum a (ray, color) pair,
    //   * where color starts as the multiplicative identity, white = (1, 1, 1).
    //   * This has already been done for you.
    // * For each depth:
    //   * Compute an intersection in the scene for each path ray.
    //     A very naive version of this has been implemented for you, but feel
    //     free to add more primitives and/or a better algorithm.
    //     Currently, intersection distance is recorded as a parametric distance,
    //     t, or a "distance along the ray." t = -1.0 indicates no intersection.
    //     * Color is attenuated (multiplied) by reflections off of any object
    //   * Note that you can't really use a 2D kernel launch any more - switch
    //     to 1D.
    // * Finally, add this iteration's results to the image. This has been done
    //   for you.

    hipLaunchKernelGGL(( generateRayFromCamera) , dim3(blocksPerGrid2d), dim3(blockSize2d) , 0, 0, cam, iter, traceDepth, dev_paths);
    checkCUDAError("generate camera ray");

    int depth = 0;
    PathSegment* dev_path_end = dev_paths + pixelcount;
    int num_paths = dev_path_end - dev_paths;

#ifdef PERF_RAY_TERMINATION
    // instrumentation: per-bounce live-path counts for this iteration
    std::vector<int> numPathRecord;
#endif

    // --- PathSegment Tracing Stage ---
    // Shoot ray into scene, bounce between objects, push shading chunks
    bool iterationComplete = false;
    while (!iterationComplete) {

        // clean shading chunks
        hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
#ifdef PERF_RAY_TERMINATION
        numPathRecord.push_back(num_paths);
#endif // PERF_RAY_TERMINATION
        // tracing
        dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;

        // First-bounce caching: on iteration 1 the depth-0 intersections are
        // saved; later iterations reuse them.  n.b. ANTIALIASING is defined
        // above, so this cache path is currently compiled OUT (jittered rays
        // make the first bounce differ per iteration) and the #else branch
        // is the active one.
#if defined(CACHE_FIRST_BOUNCE) && !defined(ANTIALIASING)
        if (depth == 0) {
            if (iter == 1) {
                hipLaunchKernelGGL(( computeIntersections), dim3(numblocksPathSegmentTracing), dim3(blockSize1d) , 0, 0,
                    depth
                    , num_paths
                    , dev_paths
                    , dev_geoms
                    , dev_triangles
                    , hst_scene->geoms.size()
                    , dev_intersections
                    );
                hipMemcpy(dev_intersections_cache, dev_intersections, pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice);
            }
            else {
                hipMemcpy(dev_intersections, dev_intersections_cache, pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice);
            }
        }
        else {
            hipLaunchKernelGGL(( computeIntersections) , dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0,
                depth
                , num_paths
                , dev_paths
                , dev_geoms
                , dev_triangles
                , hst_scene->geoms.size()
                , dev_intersections
                );
        }
#else
        computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
            depth
            , num_paths
            , dev_paths
            , dev_geoms
            , dev_triangles
            , hst_scene->geoms.size()
            , dev_intersections
            );
#endif
        checkCUDAError("trace one bounce");
        hipDeviceSynchronize();
        // incremented BEFORE shading, so shadeRealMaterial sees a 1-based depth
        depth++;

#ifdef SORT_MATERIALS
        // group paths by material id so shading threads diverge less
        thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, materialIdCmp());
#endif // SORT_MATERIALS

        // TODO: compare between directly shading the path segments and shading
        // path segments that have been reshuffled to be contiguous in memory.
        hipLaunchKernelGGL(( shadeRealMaterial), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0,
            iter,
            depth,
            num_paths,
            dev_intersections,
            dev_paths,
            dev_materials
            );
        hipDeviceSynchronize();

        // Stream compaction: drop terminated paths (remainingBounces <= 0);
        // partition keeps them in the array past new_end, so their final
        // colors survive for finalGather below.
#ifdef RAY_TERMINATION
        PathSegment *new_end = thrust::partition(thrust::device, dev_paths, dev_paths + num_paths, isPathTerminated());
        num_paths = new_end - dev_paths;
        iterationComplete = num_paths <= 0;
#else
        iterationComplete = depth >= traceDepth;
#endif
    }

    // Assemble this iteration and apply it to the image
    dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
    hipLaunchKernelGGL(( finalGather), dim3(numBlocksPixels), dim3(blockSize1d), 0, 0, pixelcount, dev_image, dev_paths);

#ifdef PERF_RAY_TERMINATION
    std::cout << "Num path in iteration" << iter << " is " << std::endl;
    for (int c : numPathRecord) {
        std::cout << c << ",";
    }
    std::cout << std::endl;
#endif

    ///////////////////////////////////////////////////////////////////////////

    // Send results to OpenGL buffer for rendering
    hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_image);

    // Retrieve image from GPU (blocking copy doubles as the final sync)
    hipMemcpy(hst_scene->state.image.data(), dev_image,
            pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);

    checkCUDAError("pathtrace");
}
| bcecec5c1fc4dc38529d355c74ab87f0db8c2958.cu | #include <cstdio>
#include <cuda.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/partition.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
// #define SORT_MATERIALS
#define CACHE_FIRST_BOUNCE
#define RAY_TERMINATION
#define ANTIALIASING
// #define PERF_RAY_TERMINATION
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
// Report the most recent CUDA runtime error, if any, and abort the process.
// The whole body is compiled out unless _DEBUG is set, so release builds
// skip the (expensive) implicit device synchronization entirely.
//   msg  - caller-supplied context string included in the report
//   file - source file of the call site (NULL omits the location)
//   line - source line of the call site
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if _DEBUG
    // Synchronize first so asynchronous kernel faults surface here rather
    // than at some unrelated later call.
    cudaDeviceSynchronize();
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess == err) {
        return;
    }

    fprintf(stderr, "CUDA error");
    if (file) {
        fprintf(stderr, " (%s:%d)", file, line);
    }
    fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
# ifdef _WIN32
    // Keep the console window open on Windows so the message can be read.
    getchar();
# endif
    exit(EXIT_FAILURE);
#endif
}
// Build a per-(iteration, pixel-index, bounce-depth) seed so every path
// segment gets an independent, reproducible RNG stream.
// n.b. the top bit must be produced with an unsigned literal: (1 << 31)
// shifts into the sign bit of a signed int, which is undefined behavior
// in C++ (pre-C++20); (1u << 31) yields the same bit pattern without UB.
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
    int h = utilhash((int) ((1u << 31) | (depth << 22) | iter)) ^ utilhash(index);
    return thrust::default_random_engine(h);
}
// Kernel that writes the image to the OpenGL PBO directly.
// Launched on a 2D grid covering the full resolution; each thread converts
// one pixel of the accumulated radiance buffer to 8-bit RGBA.
//   iter - number of completed iterations; `image` holds a running SUM over
//          iterations, so dividing by iter yields the average.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
        int iter, glm::vec3* image) {
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;

    if (x < resolution.x && y < resolution.y) {
        int index = x + (y * resolution.x);
        glm::vec3 pix = image[index];

        glm::ivec3 color;
        // Average over iterations, scale to [0,255], clamp overshoot.
        color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
        color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
        color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);

        // Each thread writes one pixel location in the texture (textel)
        pbo[index].w = 0;
        pbo[index].x = color.x;
        pbo[index].y = color.y;
        pbo[index].z = color.z;
    }
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static Triangle* dev_triangles = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
static ShadeableIntersection * dev_intersections_cache = NULL;
// static utilityCore::PerformanceTimer timer;
// TODO: static variables for device memory, any extra info you need, etc
// ...
// Allocate and initialize all device-side buffers for rendering `scene`.
// Sizes are derived from the camera resolution (one path / intersection
// slot per pixel) and from the scene's geometry/material/triangle counts.
// Must be paired with pathtraceFree(); n.b. individual allocations are not
// checked — only a single checkCUDAError at the end (active in _DEBUG).
void pathtraceInit(Scene *scene) {
    hst_scene = scene;
    const Camera &cam = hst_scene->state.camera;
    const int pixelcount = cam.resolution.x * cam.resolution.y;

    // accumulated radiance image, zeroed
    cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
    cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));

    // one path segment per pixel (contents written by generateRayFromCamera)
    cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment));

    // scene geometry, materials, and mesh triangles copied to the device
    cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
    cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);

    cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
    cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice);

    // per-bounce intersection results, plus a copy used to cache the first
    // bounce across iterations (CACHE_FIRST_BOUNCE)
    cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
    cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));

    cudaMalloc(&dev_intersections_cache, pixelcount * sizeof(ShadeableIntersection));
    cudaMemset(dev_intersections_cache, 0, pixelcount * sizeof(ShadeableIntersection));

    int triSize = scene->triangles.size() * sizeof(Triangle);
    cudaMalloc(&dev_triangles, triSize);
    cudaMemcpy(dev_triangles, scene->triangles.data(), triSize, cudaMemcpyHostToDevice);

    checkCUDAError("pathtraceInit");
}
// Release every device allocation made in pathtraceInit().
// Each pointer is reset to NULL after freeing so a repeated call to
// pathtraceFree() (or a free before any init) stays a harmless no-op —
// cudaFree(NULL) is a no-op, whereas freeing the same live pointer twice
// is a runtime error.
void pathtraceFree() {
    cudaFree(dev_image);  // no-op if dev_image is null
    dev_image = NULL;
    cudaFree(dev_paths);
    dev_paths = NULL;
    cudaFree(dev_geoms);
    dev_geoms = NULL;
    cudaFree(dev_materials);
    dev_materials = NULL;
    cudaFree(dev_intersections);
    dev_intersections = NULL;
    cudaFree(dev_intersections_cache);
    dev_intersections_cache = NULL;
    cudaFree(dev_triangles);
    dev_triangles = NULL;
    checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
// One thread per pixel on a 2D launch: initialize the pixel's PathSegment
// with a camera ray, white throughput, and traceDepth remaining bounces.
// Optional effects, in order: sub-pixel jitter (ANTIALIASING), motion blur
// (lerp of view direction along cam.motion), thin-lens depth of field.
// n.b. the RNG draws happen in a fixed order, so the enabled feature set
// changes which random values each effect consumes.
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    // float copies of the pixel coordinates so jitter can be sub-pixel
    float px = (float)x;
    float py = (float)y;

    if (x < cam.resolution.x && y < cam.resolution.y) {
        int index = x + (y * cam.resolution.x);
        thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
        thrust::uniform_real_distribution<float> u01(0, 1);
        PathSegment & segment = pathSegments[index];

        segment.ray.origin = cam.position;
        // multiplicative identity: throughput starts at white
        segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
#ifdef ANTIALIASING
        // jitter the sample point uniformly within the pixel
        px += u01(rng);
        py += u01(rng);
#endif // ANTIALIASING

        // Motion blur: sample a time offset from a half-normal distribution
        // and blend the view direction toward (view + motion).
        thrust::normal_distribution<float> n01(0, 1);
        float t = abs(n01(rng));
        glm::vec3 view = cam.view * (1 - t) + (cam.view + cam.motion) * t;
        segment.ray.direction = glm::normalize(view
            - cam.right * cam.pixelLength.x * (px - (float)cam.resolution.x * 0.5f)
            - cam.up * cam.pixelLength.y * (py - (float)cam.resolution.y * 0.5f)
        );

        if (cam.depth_of_field) {
            // sample point on lens
            float r = u01(rng) * cam.lens_radius;
            float theta = u01(rng) * 2 * PI;
            glm::vec3 p_lens(r * cos(theta), r * sin(theta), 0.0f);
            // compute point on plane of focus
            float ft = cam.focal_distance / glm::abs(segment.ray.direction.z);
            glm::vec3 p_focus = segment.ray.origin + ft * segment.ray.direction;
            // update ray for effect of lens: originate on the lens, aim
            // through the in-focus point so only that plane stays sharp
            segment.ray.origin += p_lens;
            segment.ray.direction = glm::normalize(p_focus - segment.ray.origin);
        }

        segment.pixelIndex = index;
        segment.remainingBounces = traceDepth;
    }
}
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
// One thread per active path segment: find the nearest geometry hit along
// the segment's ray and record (t, materialId, surfaceNormal) into
// intersections[path_index].  A miss is recorded as t = -1.0.
// Intersection tests brute-force every geom; meshes additionally consult
// the shared `meshes` triangle array.
__global__ void computeIntersections(
    int depth
    , int num_paths
    , PathSegment * pathSegments
    , Geom * geoms
    , Triangle *meshes
    , int geoms_size
    , ShadeableIntersection * intersections
    )
{
    int path_index = blockIdx.x * blockDim.x + threadIdx.x;

    if (path_index < num_paths)
    {
        PathSegment pathSegment = pathSegments[path_index];
#ifndef RAY_TERMINATION
        // Without stream compaction, dead paths remain in the array; skip them.
        if (pathSegment.remainingBounces <= 0) {
            return;
        }
#endif
        float t;
        glm::vec3 intersect_point;
        glm::vec3 normal;
        float t_min = FLT_MAX;
        int hit_geom_index = -1;
        bool outside = true;

        glm::vec3 tmp_intersect;
        glm::vec3 tmp_normal;

        // naive parse through global geoms
        for (int i = 0; i < geoms_size; i++)
        {
            Geom & geom = geoms[i];

            // Reset per geometry: previously `t` kept its prior (or
            // indeterminate, on the first pass) value when geom.type
            // matched no branch, which could register a bogus hit.
            t = -1.0f;

            if (geom.type == CUBE)
            {
                t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
            }
            else if (geom.type == SPHERE)
            {
                t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
            } else if (geom.type == MESH) {
                t = meshIntersectionTest(geom, meshes, pathSegment.ray, tmp_intersect, tmp_normal, outside);
            }

            // Keep the closest positive hit seen so far.
            if (t > 0.0f && t_min > t)
            {
                t_min = t;
                hit_geom_index = i;
                intersect_point = tmp_intersect;
                normal = tmp_normal;
            }
        }

        if (hit_geom_index == -1)
        {
            intersections[path_index].t = -1.0f;
        }
        else
        {
            // The ray hits something: record nearest-hit data for shading.
            intersections[path_index].t = t_min;
            intersections[path_index].materialId = geoms[hit_geom_index].materialid;
            intersections[path_index].surfaceNormal = normal;
        }
    }
}
// Shade each path segment based on its intersection result:
//   miss            -> color = black, path terminated
//   light hit       -> throughput *= emitted radiance, path terminated
//   surface hit     -> scatterRay() attenuates color and spawns next bounce
// Termination is signalled via remainingBounces = -1 (anything <= 0 is
// treated as dead by the partition predicate / the RAY_TERMINATION-off path).
__global__ void shadeRealMaterial(
    int iter,
    int depth
    , int num_paths
    , ShadeableIntersection* shadeableIntersections
    , PathSegment* pathSegments
    , Material* materials
    )
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < num_paths)
    {
        ShadeableIntersection intersection = shadeableIntersections[idx];
#ifndef RAY_TERMINATION
        // Without stream compaction, dead paths remain in the array; skip them.
        if (pathSegments[idx].remainingBounces <= 0) {
            return;
        }
#endif
        if (intersection.t > 0.0f) { // if the intersection exists...
            // Set up the RNG
            // LOOK: this is how you use thrust's RNG! Please look at
            // makeSeededRandomEngine as well.
            thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth);
            thrust::uniform_real_distribution<float> u01(0, 1);

            Material material = materials[intersection.materialId];
            glm::vec3 materialColor = material.color;

            // If the material indicates that the object was a light, "light" the ray
            if (material.emittance > 0.0f) {
                pathSegments[idx].color *= (materialColor * material.emittance);
                pathSegments[idx].remainingBounces = -1;
            }
            // Otherwise, do some pseudo-lighting computation. This is actually more
            // like what you would expect from shading in a rasterizer like OpenGL.
            else {
                // reconstruct the hit point from the parametric distance t
                glm::vec3 intersect = pathSegments[idx].ray.origin + intersection.t * pathSegments[idx].ray.direction;
                scatterRay(pathSegments[idx], intersect, intersection.surfaceNormal, material, rng);
            }
            // If there was no intersection, color the ray black.
            // Lots of renderers use 4 channel color, RGBA, where A = alpha, often
            // used for opacity, in which case they can indicate "no opacity".
            // This can be useful for post-processing and image compositing.
        }
        else {
            pathSegments[idx].color = glm::vec3(0.0f);
            pathSegments[idx].remainingBounces = -1;
        }
    }
}
// Accumulate this iteration's per-path radiance into the running image.
// One thread per path; after compaction a path can live anywhere in the
// array, so the write is routed through its stored pixelIndex.
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
    int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid >= nPaths) {
        return;  // guard the grid tail
    }
    const PathSegment &seg = iterationPaths[tid];
    image[seg.pixelIndex] += seg.color;
}
// Predicate used with thrust::partition to move still-active paths to the
// front of the array.  NOTE(review): despite the name, this returns true
// for paths that are NOT yet terminated (remainingBounces > 0); consider
// renaming (e.g. isPathAlive) if callers can be updated.
struct isPathTerminated {
    __device__ bool operator()(const PathSegment &path) {
        return path.remainingBounces > 0;
    }
};
// Ordering on intersections by material id, used with thrust::sort_by_key
// under SORT_MATERIALS so that neighboring shading threads handle the same
// material (presumably to reduce warp divergence — enabled via the macro).
struct materialIdCmp {
    __device__ bool operator()(const ShadeableIntersection& s1, const ShadeableIntersection &s2) {
        return s1.materialId < s2.materialId;
    }
};
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
// Runs one Monte-Carlo iteration of the path tracer and accumulates the
// result into dev_image / hst_scene->state.image.
// pbo.... OpenGL pixel buffer that receives the tonemapped running average
// frame.. unused here (kept for interface compatibility with callers)
// iter... 1-based iteration counter; also seeds the RNG and drives the
//         first-bounce cache (cache is filled on iter == 1).
// Relies on module-level device buffers (dev_paths, dev_intersections, ...)
// allocated elsewhere in this file.
void pathtrace(uchar4 *pbo, int frame, int iter) {
    const int traceDepth = hst_scene->state.traceDepth;
    const Camera &cam = hst_scene->state.camera;
    const int pixelcount = cam.resolution.x * cam.resolution.y;
    // 2D block for generating ray from camera
    const dim3 blockSize2d(8, 8);
    const dim3 blocksPerGrid2d(
        (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
        (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
    // 1D block for path tracing
    const int blockSize1d = 128;
    ///////////////////////////////////////////////////////////////////////////
    // Recap:
    // * Initialize array of path rays (using rays that come out of the camera)
    //   * You can pass the Camera object to that kernel.
    //   * Each path ray must carry at minimum a (ray, color) pair,
    //   * where color starts as the multiplicative identity, white = (1, 1, 1).
    //   * This has already been done for you.
    // * For each depth:
    //   * Compute an intersection in the scene for each path ray.
    //     A very naive version of this has been implemented for you, but feel
    //     free to add more primitives and/or a better algorithm.
    //     Currently, intersection distance is recorded as a parametric distance,
    //     t, or a "distance along the ray." t = -1.0 indicates no intersection.
    //     * Color is attenuated (multiplied) by reflections off of any object
    //   * Note that you can't really use a 2D kernel launch any more - switch
    //     to 1D.
    // * Finally, add this iteration's results to the image. This has been done
    //   for you.
    // One path per pixel; fills dev_paths with camera rays and white colors.
    generateRayFromCamera <<<blocksPerGrid2d, blockSize2d >>>(cam, iter, traceDepth, dev_paths);
    checkCUDAError("generate camera ray");
    int depth = 0;
    PathSegment* dev_path_end = dev_paths + pixelcount;
    int num_paths = dev_path_end - dev_paths;
#ifdef PERF_RAY_TERMINATION
    // Records the live-path count per bounce for instrumentation output below.
    std::vector<int> numPathRecord;
#endif
    // --- PathSegment Tracing Stage ---
    // Shoot ray into scene, bounce between objects, push shading chunks
    bool iterationComplete = false;
    while (!iterationComplete) {
        // clean shading chunks
        cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
#ifdef PERF_RAY_TERMINATION
        numPathRecord.push_back(num_paths);
#endif // PERF_RAY_TERMINATION
        // tracing
        dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
#if defined(CACHE_FIRST_BOUNCE) && !defined(ANTIALIASING)
        // First-bounce intersections are identical across iterations when no
        // antialiasing jitter is applied, so compute them once (iter == 1)
        // and replay from the cache afterwards.
        if (depth == 0) {
            if (iter == 1) {
                computeIntersections<<<numblocksPathSegmentTracing, blockSize1d >>> (
                    depth
                    , num_paths
                    , dev_paths
                    , dev_geoms
                    , dev_triangles
                    , hst_scene->geoms.size()
                    , dev_intersections
                    );
                cudaMemcpy(dev_intersections_cache, dev_intersections, pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice);
            }
            else {
                cudaMemcpy(dev_intersections, dev_intersections_cache, pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice);
            }
        }
        else {
            computeIntersections <<<numblocksPathSegmentTracing, blockSize1d>>>(
                depth
                , num_paths
                , dev_paths
                , dev_geoms
                , dev_triangles
                , hst_scene->geoms.size()
                , dev_intersections
                );
        }
#else
        computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
            depth
            , num_paths
            , dev_paths
            , dev_geoms
            , dev_triangles
            , hst_scene->geoms.size()
            , dev_intersections
            );
#endif
        checkCUDAError("trace one bounce");
        cudaDeviceSynchronize();
        // NOTE: depth is bumped BEFORE shading, so shadeRealMaterial sees
        // depth starting at 1 for the first bounce.
        depth++;
#ifdef SORT_MATERIALS
        // Group paths by material so shading warps diverge less.
        thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, materialIdCmp());
#endif // SORT_MATERIALS
        // TODO: compare between directly shading the path segments and shading
        // path segments that have been reshuffled to be contiguous in memory.
        shadeRealMaterial<<<numblocksPathSegmentTracing, blockSize1d>>> (
            iter,
            depth,
            num_paths,
            dev_intersections,
            dev_paths,
            dev_materials
            );
        cudaDeviceSynchronize();
        // Stream compaction
#ifdef RAY_TERMINATION
        // Partition keeps ALL pixelcount segments in dev_paths (just
        // reordered), so finalGather over pixelcount below stays valid.
        PathSegment *new_end = thrust::partition(thrust::device, dev_paths, dev_paths + num_paths, isPathTerminated());
        num_paths = new_end - dev_paths;
        iterationComplete = num_paths <= 0;
#else
        iterationComplete = depth >= traceDepth;
#endif
    }
    // Assemble this iteration and apply it to the image
    dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
    finalGather<<<numBlocksPixels, blockSize1d>>>(pixelcount, dev_image, dev_paths);
#ifdef PERF_RAY_TERMINATION
    std::cout << "Num path in iteration" << iter << " is " << std::endl;
    for (int c : numPathRecord) {
        std::cout << c << ",";
    }
    std::cout << std::endl;
#endif
    ///////////////////////////////////////////////////////////////////////////
    // Send results to OpenGL buffer for rendering
    sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_image);
    // Retrieve image from GPU
    // (blocking copy — also synchronizes the preceding kernel launches)
    cudaMemcpy(hst_scene->state.image.data(), dev_image,
        pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
    checkCUDAError("pathtrace");
}
|
a9d365e21eade59dfc89d7d7229e4f8d412028f7.hip | // !!! This is a file automatically generated by hipify!!!
#include <cuwrap/kernels/add.hpp>
#include <initializer_list>
#include <tuple>
namespace cuwrap {
// Element-wise vector addition: out[i] = lhs[i] + rhs[i] for i in [0, maxn).
// Grid-stride loop, so any launch configuration covers the whole input.
// Index arithmetic is done in std::size_t: the original computed
// blockDim.x * blockIdx.x in (signed) int, which overflows for very large
// grids and truncates against a 64-bit maxn.
template <typename T>
__global__ void kadd(T* lhs, T* rhs, T* out, std::size_t maxn)
{
    const std::size_t index = static_cast<std::size_t>(blockDim.x) * blockIdx.x + threadIdx.x; // Thread id in the grid.
    const std::size_t stride = static_cast<std::size_t>(gridDim.x) * blockDim.x;               // Total thread count in the grid.
    for (std::size_t i = index; i < maxn; i += stride)
        out[i] = lhs[i] + rhs[i];
}
// (std::size_t n, const T* lhs, const T* rhs, T* out, const kparam_t& param = kparam_t{})
// Host-side wrapper: copies lhs/rhs to the device, launches kadd, and copies
// the result back into out. All three host pointers must reference at least
// n elements; lhs == rhs is handled by sharing one device buffer.
// NOTE(review): the kernel writes its result into cr (an input buffer) —
// safe for element-wise add, but any non-elementwise kernel reuse would break.
template <typename T>
void add_impl_t<T>::operator()(std::size_t n, T* lhs, T* rhs, T* out, kparam_t param) // But well, there will be a lot of time wasted during each kernel section.
{
    // Derive a launch configuration from n unless the caller supplied one.
    if (param.is_default_initialized())
        param.adapt_amount(n);
    T *cl, *cr;
    CUWRAP_IF_CUDA_ERR(hipMalloc(&cl, n * sizeof(T)));
    if (lhs == rhs)
        cr = cl;
    else
        CUWRAP_IF_CUDA_ERR(hipMalloc(&cr, n * sizeof(T)));
    CUWRAP_IF_CUDA_ERR(hipMemcpy(cl, lhs, n * sizeof(T), hipMemcpyHostToDevice));
    if (lhs != rhs)
        CUWRAP_IF_CUDA_ERR(hipMemcpy(cr, rhs, n * sizeof(T), hipMemcpyHostToDevice));
    // NOTE(review): the kernel runs on param.cuda_stream while the blocking
    // hipMemcpy below issues on the null stream — if a non-blocking custom
    // stream is ever passed in, the copy may not wait for the kernel; verify.
    hipLaunchKernelGGL(( kadd), dim3(param.blocks), dim3(param.threads_per_block), param.shared_size, (ihipStream_t*)param.cuda_stream, cl, cr, cr, n);
    CUWRAP_IF_CUDA_ERR(hipMemcpy(out, cr, n * sizeof(T), hipMemcpyDeviceToHost));
    // hipFree return codes are deliberately ignored here (best-effort cleanup).
    hipFree(cl);
    if (lhs != rhs)
        hipFree(cr);
    // int mygpu = hipGetDevice(&mygpu); // TODO: Specify the custom setting for GPU choice.
    // CUWRAP_IF_CUDA_ERR(hipMallocManaged(&lhs, sizeof(T) * n));
    // if (lhs != rhs)
    //     CUWRAP_IF_CUDA_ERR(hipMallocManaged(&rhs, sizeof(T) * n));
    // if (lhs != out && rhs != out)
    //     CUWRAP_IF_CUDA_ERR(hipMallocManaged(&out, sizeof(T) * n));
    // CUWRAP_IF_CUDA_ERR(hipMemPrefetchAsync(lhs, sizeof(T) * n, mygpu)); // => GPU
    // CUWRAP_IF_CUDA_ERR(hipMemPrefetchAsync(rhs, sizeof(T) * n, mygpu)); // => GPU
    // CUWRAP_IF_CUDA_ERR(hipMemPrefetchAsync(out, sizeof(T) * n, mygpu)); // => GPU
    //hipLaunchKernelGGL(( kadd), dim3(param.blocks), dim3(param.threads_per_block), param.shared_size, (ihipStream_t*)param.cuda_stream, lhs, rhs, out, n);
    // CUWRAP_IF_CUDA_ERR(hipDeviceSynchronize());
    // CUWRAP_IF_CUDA_ERR(hipMemPrefetchAsync(out, sizeof(T) * n, hipCpuDeviceId)); // => CPU
    // hipFree(lhs);
    // if (lhs != rhs)
    //     hipFree(rhs);
    // if (lhs != out && rhs != out)
    //     hipFree(out);
}
// Forces instantiation of add_impl_t<T>::operator() for every T in Ts... by
// invoking each with null pointers and n == 0, so the template definitions
// in this translation unit are emitted for external linkage.
// The initializer_list expansion emulates a C++17 fold expression, which is
// unavailable here (see the inline comment).
template <typename... Ts>
static void force_initialization__()
{
    // (add_impl<Ts>(std::size_t{}, nullptr, nullptr, nullptr), ...); // CUDA: We do not support CXX17 currently.
    std::initializer_list<std::nullptr_t>{ ((add_impl_t<Ts>{})(std::size_t{}, nullptr, nullptr, nullptr, kparam_t{}), nullptr)... };
}
// Public hook that instantiates add_impl_t for every arithmetic type listed
// in the CUWRAP_ARITHMETIC_TS macro.
void force_initialization()
{
    force_initialization__<CUWRAP_ARITHMETIC_TS>();
}
} // namespace cuwrap
// ----------------------------------------------------------------------------------------------------------------
// Here're some notes that you may just ignore:
// CPUs suit coarse-grained parallelism, while GPUs suit fine-grained parallelism:
// a CPU context switch must spill registers to RAM, whereas a GPU simply selects
// another resident context from its register file.
// CPU stall problem: for many tiny tasks most CPU time goes into context
// switches, and time-slice scheduling makes switch overhead grow with the
// thread count, hurting efficiency.
// GPUs stay efficient by keeping a pool of ready work; when the current
// instruction stream has to wait, the SM switches to another stream and
// resumes the blocked one later.
// CUDA calls that implicitly synchronize with the host:
// - hipMalloc
// - hipDeviceSynchronize
// - hipMemcpy
// - Free
// Kernel launches do NOT synchronize,
// so call something with synchronizing semantics before *using* kernel results.
// Unified Memory is a single memory address space accessible from any processor in a system.
// In short: memory shared between multiple processors.
// The important point here is that the Pascal GPU architecture is the first with hardware
// support for virtual memory page faulting and migration, via its Page Migration Engine.
// Older GPUs based on the Kepler and Maxwell architectures also support a more limited form
// of Unified Memory. (1070Ti ~ Pascal)
// On Pascal GPUs, memory from hipMallocManaged() is only physically allocated
// on first access (by GPU/CPU).
// Tips to reduce migration overhead:
// - Init data on GPU.
// - Unified Memory.
// CUBLAS VERSION ==================================================================
// #pragma once
// #include "../utils/util.hpp"
// #include <algorithm>
// #include <rocblas.h>
// #include <hip/hip_runtime.h>
// #include <device_launch_parameters.h>
// #include <vector>
// void test_cublas_vecadd()
// {
// using type = float;
// std::size_t size = 1 << 20;
// std::vector<type> cpu_data(size);
// std::generate(cpu_data.begin(), cpu_data.end(), std::rand);
// type *cuda_a, *cuda_b;
// hipMalloc(&cuda_a, sizeof(type) * size);
// hipMalloc(&cuda_b, sizeof(type) * size);
// // Cublas initialization
// hipblasHandle_t cublas_hander;
// hipblasCreate(&cublas_hander);
// // mem : cpu => gpu
// hipblasSetVector(size, sizeof(float), cpu_data.data(), 1, cuda_a, 1);
// hipblasSetVector(size, sizeof(float), cpu_data.data(), 1, cuda_b, 1);
// // @@
// ganler::timer t; // CuBlas is 5x faster than my code.
// // Now we got: a, b => we want: c = a*scale +b;
// constexpr type scale = 2.0;
// hipblasSaxpy(cublas_hander, size, &scale, cuda_a, 1, cuda_b, 1);
// t.print_milli();
// hipblasDestroy(cublas_hander);
// hipFree(cuda_a);
// hipFree(cuda_b);
// } | a9d365e21eade59dfc89d7d7229e4f8d412028f7.cu | #include <cuwrap/kernels/add.hpp>
#include <initializer_list>
#include <tuple>
namespace cuwrap {
// Element-wise vector addition: out[i] = lhs[i] + rhs[i] for i in [0, maxn).
// Grid-stride loop, so any launch configuration covers the whole input.
// Index arithmetic is done in std::size_t: the original computed
// blockDim.x * blockIdx.x in (signed) int, which overflows for very large
// grids and truncates against a 64-bit maxn.
template <typename T>
__global__ void kadd(T* lhs, T* rhs, T* out, std::size_t maxn)
{
    const std::size_t index = static_cast<std::size_t>(blockDim.x) * blockIdx.x + threadIdx.x; // Thread id in the grid.
    const std::size_t stride = static_cast<std::size_t>(gridDim.x) * blockDim.x;               // Total thread count in the grid.
    for (std::size_t i = index; i < maxn; i += stride)
        out[i] = lhs[i] + rhs[i];
}
// (std::size_t n, const T* lhs, const T* rhs, T* out, const kparam_t& param = kparam_t{})
// Host-side wrapper: copies lhs/rhs to the device, launches kadd, and copies
// the result back into out. All three host pointers must reference at least
// n elements; lhs == rhs is handled by sharing one device buffer.
// NOTE(review): the kernel writes its result into cr (an input buffer) —
// safe for element-wise add, but any non-elementwise kernel reuse would break.
template <typename T>
void add_impl_t<T>::operator()(std::size_t n, T* lhs, T* rhs, T* out, kparam_t param) // But well, there will be a lot of time wasted during each kernel section.
{
    // Derive a launch configuration from n unless the caller supplied one.
    if (param.is_default_initialized())
        param.adapt_amount(n);
    T *cl, *cr;
    CUWRAP_IF_CUDA_ERR(cudaMalloc(&cl, n * sizeof(T)));
    if (lhs == rhs)
        cr = cl;
    else
        CUWRAP_IF_CUDA_ERR(cudaMalloc(&cr, n * sizeof(T)));
    CUWRAP_IF_CUDA_ERR(cudaMemcpy(cl, lhs, n * sizeof(T), cudaMemcpyHostToDevice));
    if (lhs != rhs)
        CUWRAP_IF_CUDA_ERR(cudaMemcpy(cr, rhs, n * sizeof(T), cudaMemcpyHostToDevice));
    // NOTE(review): the kernel runs on param.cuda_stream while the blocking
    // cudaMemcpy below issues on the null stream — if a non-blocking custom
    // stream is ever passed in, the copy may not wait for the kernel; verify.
    kadd<<<param.blocks, param.threads_per_block, param.shared_size, (CUstream_st*)param.cuda_stream>>>(cl, cr, cr, n);
    CUWRAP_IF_CUDA_ERR(cudaMemcpy(out, cr, n * sizeof(T), cudaMemcpyDeviceToHost));
    // cudaFree return codes are deliberately ignored here (best-effort cleanup).
    cudaFree(cl);
    if (lhs != rhs)
        cudaFree(cr);
    // int mygpu = cudaGetDevice(&mygpu); // TODO: Specify the custom setting for GPU choice.
    // CUWRAP_IF_CUDA_ERR(cudaMallocManaged(&lhs, sizeof(T) * n));
    // if (lhs != rhs)
    //     CUWRAP_IF_CUDA_ERR(cudaMallocManaged(&rhs, sizeof(T) * n));
    // if (lhs != out && rhs != out)
    //     CUWRAP_IF_CUDA_ERR(cudaMallocManaged(&out, sizeof(T) * n));
    // CUWRAP_IF_CUDA_ERR(cudaMemPrefetchAsync(lhs, sizeof(T) * n, mygpu)); // => GPU
    // CUWRAP_IF_CUDA_ERR(cudaMemPrefetchAsync(rhs, sizeof(T) * n, mygpu)); // => GPU
    // CUWRAP_IF_CUDA_ERR(cudaMemPrefetchAsync(out, sizeof(T) * n, mygpu)); // => GPU
    // kadd<<<param.blocks, param.threads_per_block, param.shared_size, (CUstream_st*)param.cuda_stream>>>(lhs, rhs, out, n);
    // CUWRAP_IF_CUDA_ERR(cudaDeviceSynchronize());
    // CUWRAP_IF_CUDA_ERR(cudaMemPrefetchAsync(out, sizeof(T) * n, cudaCpuDeviceId)); // => CPU
    // cudaFree(lhs);
    // if (lhs != rhs)
    //     cudaFree(rhs);
    // if (lhs != out && rhs != out)
    //     cudaFree(out);
}
// Forces instantiation of add_impl_t<T>::operator() for every T in Ts... by
// invoking each with null pointers and n == 0, so the template definitions
// in this translation unit are emitted for external linkage.
// The initializer_list expansion emulates a C++17 fold expression, which is
// unavailable here (see the inline comment).
template <typename... Ts>
static void force_initialization__()
{
    // (add_impl<Ts>(std::size_t{}, nullptr, nullptr, nullptr), ...); // CUDA: We do not support CXX17 currently.
    std::initializer_list<std::nullptr_t>{ ((add_impl_t<Ts>{})(std::size_t{}, nullptr, nullptr, nullptr, kparam_t{}), nullptr)... };
}
// Public hook that instantiates add_impl_t for every arithmetic type listed
// in the CUWRAP_ARITHMETIC_TS macro.
void force_initialization()
{
    force_initialization__<CUWRAP_ARITHMETIC_TS>();
}
} // namespace cuwrap
// ----------------------------------------------------------------------------------------------------------------
// Here're some notes that you may just ignore:
// CPU适合粗粒度的并行,而GPU适合细粒度的并行
// 因为CPU在进行上下文切换的时候需要把寄存器的数据丢到RAM,而GPU只需要通过寄存器组调度者选择对应的内容即可
// CPU的失速问题:对于大量小任务,CPU的执行时间大都会浪费在上下文切换上,而且基于时间片的调度策略(将时间平
// 均分片给每个线程)导致当线程多起来的时候上下文切换增加,从而导致效率降低。
// GPU的实现往往更高效,有效的使用`工作池`来保证其一直有事情做,当当前指令一直带进行等待的时候,SM会切换到另
// 外一个指令流,之后再执行被堵塞的指令
// cuda里面自动和host同步的:
// - cudaMalloc
// - cudaDeviceSynchronize
// - cudaMemcpy
// - Free
// 跑kernel是不需要同步的
// 所以在跑完kernel后一定要在·使用前·调用有同步功能的代码。
// Unified Memory is a single memory address space accessible from any processor in a system.
// 简单来说就是多个处理器的共享内存。
// The important point here is that the Pascal GPU architecture is the first with hardware
// support for virtual memory page faulting and migration, via its Page Migration Engine.
// Older GPUs based on the Kepler and Maxwell architectures also support a more limited form
// of Unified Memory. (1070Ti ~ Pascal)
// 对于Pascal GPU,只有当access(by GPU/CPU等) cudaMallocManaged()产生的内存的时候,他才会真正的分配内存。
// 减少migration overhead的tips:
// - Init data on GPU.
// - Unified Memory.
// CUBLAS VERSION ==================================================================
// #pragma once
// #include "../utils/util.hpp"
// #include <algorithm>
// #include <cublas_v2.h>
// #include <cuda_runtime.h>
// #include <device_launch_parameters.h>
// #include <vector>
// void test_cublas_vecadd()
// {
// using type = float;
// std::size_t size = 1 << 20;
// std::vector<type> cpu_data(size);
// std::generate(cpu_data.begin(), cpu_data.end(), std::rand);
// type *cuda_a, *cuda_b;
// cudaMalloc(&cuda_a, sizeof(type) * size);
// cudaMalloc(&cuda_b, sizeof(type) * size);
// // Cublas initialization
// cublasHandle_t cublas_hander;
// cublasCreate_v2(&cublas_hander);
// // mem : cpu => gpu
// cublasSetVector(size, sizeof(float), cpu_data.data(), 1, cuda_a, 1);
// cublasSetVector(size, sizeof(float), cpu_data.data(), 1, cuda_b, 1);
// // @@
// ganler::timer t; // CuBlas is 5x faster than my code.
// // Now we got: a, b => we want: c = a*scale +b;
// constexpr type scale = 2.0;
// cublasSaxpy_v2(cublas_hander, size, &scale, cuda_a, 1, cuda_b, 1);
// t.print_milli();
// cublasDestroy_v2(cublas_hander);
// cudaFree(cuda_a);
// cudaFree(cuda_b);
// } |
3ccc040a03fdebb808dd4ef0fd513103b5cb0257.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Shuffle intrinsics CUDA Sample
// This sample demonstrates the use of the shuffle intrinsic
// First, a simple example of a prefix sum using the shuffle to
// perform a scan operation is provided.
// Secondly, a more involved example of computing an integral image
// using the shuffle intrinsic is provided, where the shuffle
// scan operation and shuffle xor operations are used
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include "shfl_integral_image.cuh"
// Scan using shfl - takes log2(n) steps
// This function demonstrates basic use of the shuffle intrinsic, __shfl_up,
// to perform a scan operation across a block.
// First, it performs a scan (prefix sum in this case) inside a warp
// Then to continue the scan operation across the block,
// each warp's sum is placed into shared memory. A single warp
// then performs a shuffle scan on that shared memory. The results
// are then uniformly added to each warp's threads.
// This pyramid type approach is continued by placing each block's
// final sum in global memory and prefix summing that via another kernel call,
// then uniformly adding across the input data via the uniform_add<<<>>> kernel.
// Block-wide inclusive prefix sum using warp shuffles.
// Launch contract: blockDim.x is a multiple of warpSize, and the launch
// supplies (blockDim.x / warpSize) * sizeof(int) dynamic shared memory.
// `width` is the shuffle segment width (32 == full warp at the call sites).
// If partial_sums != NULL, each block's total is written to
// partial_sums[blockIdx.x] for a follow-up scan pass.
// NOTE(review): data[id] has no bounds guard — the grid must exactly cover
// the array; confirm at each call site.
__global__ void shfl_scan_test(int *data, int width, int *partial_sums = NULL) {
  extern __shared__ int sums[];
  int id = ((blockIdx.x * blockDim.x) + threadIdx.x);
  int lane_id = id % warpSize;
  // determine a warp_id within a block
  int warp_id = threadIdx.x / warpSize;
  // Below is the basic structure of using a shfl instruction
  // for a scan.
  // Record "value" as a variable - we accumulate it along the way
  int value = data[id];
  // Now accumulate in log steps up the chain
  // compute sums, with another thread's value who is
  // distance delta away (i). Note
  // those threads where the thread 'i' away would have
  // been out of bounds of the warp are unaffected. This
  // creates the scan sum.
#pragma unroll
  for (int i = 1; i <= width; i *= 2) {
    unsigned int mask = 0xffffffff;
    int n = __shfl_up_sync(mask, value, i, width);
    if (lane_id >= i) value += n;
  }
  // value now holds the scan value for the individual thread
  // next sum the largest values for each warp
  // write the sum of the warp to smem
  if (threadIdx.x % warpSize == warpSize - 1) {
    sums[warp_id] = value;
  }
  __syncthreads();
  //
  // scan sum the warp sums
  // the same shfl scan operation, but performed on warp sums
  //
  if (warp_id == 0 && lane_id < (blockDim.x / warpSize)) {
    int warp_sum = sums[lane_id];
    // NOTE(review): 1 << 32 is undefined for 1024-thread blocks (32 warps);
    // assumes <= 31 warps per block — the call sites use 256 threads (8 warps).
    int mask = (1 << (blockDim.x / warpSize)) - 1;
    for (int i = 1; i <= (blockDim.x / warpSize); i *= 2) {
      int n = __shfl_up_sync(mask, warp_sum, i, (blockDim.x / warpSize));
      if (lane_id >= i) warp_sum += n;
    }
    sums[lane_id] = warp_sum;
  }
  __syncthreads();
  // perform a uniform add across warps in the block
  // read neighbouring warp's sum and add it to threads value
  int blockSum = 0;
  if (warp_id > 0) {
    blockSum = sums[warp_id - 1];
  }
  value += blockSum;
  // Now write out our result
  data[id] = value;
  // last thread has sum, write write out the block's sum
  if (partial_sums != NULL && threadIdx.x == blockDim.x - 1) {
    partial_sums[blockIdx.x] = value;
  }
}
// Uniform add: add partial sums array
// Uniform add: adds each block's partial sum (from the previous scan pass)
// to every element that block owns, completing the global scan.
// data........ device array updated in place
// partial_sums per-block offsets; entry b is added to all of block b's elements
// len......... number of valid elements in data
// Fixes vs. the original:
//  * the guard was `id > len`, permitting one out-of-bounds access at id == len;
//  * the early `return` sat before __syncthreads(), so a block straddling
//    `len` would reach the barrier with divergent threads (undefined
//    behavior). The guard now wraps only the global-memory update, so every
//    thread reaches the barrier.
__global__ void uniform_add(int *data, int *partial_sums, int len) {
  __shared__ int buf;
  int id = ((blockIdx.x * blockDim.x) + threadIdx.x);
  if (threadIdx.x == 0) {
    buf = partial_sums[blockIdx.x];
  }
  __syncthreads();
  if (id < len) {
    data[id] += buf;
  }
}
// Ceiling division for unsigned ints: the smallest k with k * divisor >= dividend.
// divisor must be non-zero.
static unsigned int iDivUp(unsigned int dividend, unsigned int divisor) {
  unsigned int quotient = dividend / divisor;
  return (dividend % divisor != 0u) ? (quotient + 1u) : quotient;
}
// Verifies the GPU scan against a CPU inclusive prefix sum, then benchmarks
// the naive CPU implementation.
// h_data.... input values; OVERWRITTEN in place with their prefix sum (and
//            further mangled by the benchmark loop below)
// h_result.. GPU scan output to validate
// n_elements number of elements in both arrays
// Returns true when every element matches.
bool CPUverify(int *h_data, int *h_result, int n_elements) {
  // cpu verify
  for (int i = 0; i < n_elements - 1; i++) {
    h_data[i + 1] = h_data[i] + h_data[i + 1];
  }
  // Accumulate ABSOLUTE differences: the original summed signed differences,
  // so errors of opposite sign could cancel and yield a false pass.
  int diff = 0;
  for (int i = 0; i < n_elements; i++) {
    diff += abs(h_data[i] - h_result[i]);
  }
  printf("CPU verify result diff (GPUvsCPU) = %d\n", diff);
  bool bTestResult = (diff == 0);
  // Benchmark the naive CPU scan. This keeps re-summing the already
  // prefix-summed buffer, so only the timing — not the values — is meaningful.
  StopWatchInterface *hTimer = NULL;
  sdkCreateTimer(&hTimer);
  sdkResetTimer(&hTimer);
  sdkStartTimer(&hTimer);
  for (int j = 0; j < 100; j++)
    for (int i = 0; i < n_elements - 1; i++) {
      h_data[i + 1] = h_data[i] + h_data[i + 1];
    }
  sdkStopTimer(&hTimer);
  double cput = sdkGetTimerValue(&hTimer);
  printf("CPU sum (naive) took %f ms\n", cput / 100);
  sdkDeleteTimer(&hTimer);  // release the timer (the original leaked it)
  return bTestResult;
}
// Checks the row-scan output for a synthetic all-ones image: after an
// inclusive row prefix sum, element (j, i) must equal i + 1. Returns the
// accumulated absolute deviation (0 on success).
unsigned int verifyDataRowSums(unsigned int *h_image, int w, int h) {
  unsigned int diff = 0;
  for (int row = 0; row < h; ++row) {
    const unsigned int *line = h_image + (size_t)row * w;
    for (int col = 0; col < w; ++col) {
      const int expected = col + 1;
      diff += abs(expected - static_cast<int>(line[col]));
    }
  }
  return diff;
}
// Runs the simple prefix-sum scan test on 65536 all-ones elements:
// block-level shuffle scan -> scan of per-block partial sums -> uniform add,
// then validates against a CPU reference. Returns true on success.
// Fix vs. the original: the timing events were created but never destroyed
// (leaked per call); they are now released before returning.
bool shuffle_simple_test(int argc, char **argv) {
  int *h_data, *h_partial_sums, *h_result;
  int *d_data, *d_partial_sums;
  const int n_elements = 65536;
  int sz = sizeof(int) * n_elements;
  int cuda_device = 0;
  printf("Starting shfl_scan\n");
  // use command-line specified CUDA device, otherwise use device with highest
  // Gflops/s
  cuda_device = findCudaDevice(argc, (const char **)argv);
  hipDeviceProp_t deviceProp;
  checkCudaErrors(hipGetDevice(&cuda_device));
  checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device));
  printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
         deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
  // __shfl intrinsic needs SM 3.0 or higher
  if (deviceProp.major < 3) {
    printf("> __shfl() intrinsic requires device SM 3.0+\n");
    printf("> Waiving test.\n");
    exit(EXIT_WAIVED);
  }
  // Pinned host buffers for fast transfers.
  checkCudaErrors(hipHostMalloc(reinterpret_cast<void **>(&h_data),
                                sizeof(int) * n_elements));
  checkCudaErrors(hipHostMalloc(reinterpret_cast<void **>(&h_result),
                                sizeof(int) * n_elements));
  // initialize data:
  printf("Computing Simple Sum test\n");
  printf("---------------------------------------------------\n");
  printf("Initialize test data [1, 1, 1...]\n");
  for (int i = 0; i < n_elements; i++) {
    h_data[i] = 1;
  }
  int blockSize = 256;
  int gridSize = n_elements / blockSize;
  int nWarps = blockSize / 32;
  int shmem_sz = nWarps * sizeof(int);  // one int per warp for the warp sums
  int n_partialSums = n_elements / blockSize;
  int partial_sz = n_partialSums * sizeof(int);
  printf("Scan summation for %d elements, %d partial sums\n", n_elements,
         n_elements / blockSize);
  int p_blockSize = min(n_partialSums, blockSize);
  int p_gridSize = iDivUp(n_partialSums, p_blockSize);
  printf("Partial summing %d elements with %d blocks of size %d\n",
         n_partialSums, p_gridSize, p_blockSize);
  // initialize a timer
  hipEvent_t start, stop;
  checkCudaErrors(hipEventCreate(&start));
  checkCudaErrors(hipEventCreate(&stop));
  float et = 0;
  float inc = 0;
  checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_data), sz));
  checkCudaErrors(
      hipMalloc(reinterpret_cast<void **>(&d_partial_sums), partial_sz));
  checkCudaErrors(hipMemset(d_partial_sums, 0, partial_sz));
  checkCudaErrors(
      hipHostMalloc(reinterpret_cast<void **>(&h_partial_sums), partial_sz));
  checkCudaErrors(hipMemcpy(d_data, h_data, sz, hipMemcpyHostToDevice));
  checkCudaErrors(hipEventRecord(start, 0));
  // Pass 1: per-block scan; pass 2: scan of block sums; pass 3: uniform add
  // of the block offsets to every block after the first.
  hipLaunchKernelGGL(( shfl_scan_test), dim3(gridSize), dim3(blockSize), shmem_sz, 0, d_data, 32, d_partial_sums);
  hipLaunchKernelGGL(( shfl_scan_test), dim3(p_gridSize), dim3(p_blockSize), shmem_sz, 0, d_partial_sums, 32);
  hipLaunchKernelGGL(( uniform_add), dim3(gridSize - 1), dim3(blockSize), 0, 0, d_data + blockSize, d_partial_sums,
                                              n_elements);
  checkCudaErrors(hipEventRecord(stop, 0));
  checkCudaErrors(hipEventSynchronize(stop));
  checkCudaErrors(hipEventElapsedTime(&inc, start, stop));
  et += inc;
  checkCudaErrors(hipMemcpy(h_result, d_data, sz, hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(h_partial_sums, d_partial_sums, partial_sz,
                            hipMemcpyDeviceToHost));
  printf("Test Sum: %d\n", h_partial_sums[n_partialSums - 1]);
  printf("Time (ms): %f\n", et);
  printf("%d elements scanned in %f ms -> %f MegaElements/s\n", n_elements, et,
         n_elements / (et / 1000.0f) / 1000000.0f);
  bool bTestResult = CPUverify(h_data, h_result, n_elements);
  checkCudaErrors(hipHostFree(h_data));
  checkCudaErrors(hipHostFree(h_result));
  checkCudaErrors(hipHostFree(h_partial_sums));
  checkCudaErrors(hipFree(d_data));
  checkCudaErrors(hipFree(d_partial_sums));
  checkCudaErrors(hipEventDestroy(start));
  checkCudaErrors(hipEventDestroy(stop));
  return bTestResult;
}
// Computes an integral image of a synthetic 1920x1080 all-ones greyscale
// image: a row prefix-sum kernel followed by a column prefix-sum kernel.
// Returns true when the bottom-right element equals w * h (the total count
// of ones), which is only possible if both passes were correct.
// Fix vs. the original: the timing events were created but never destroyed
// (leaked per call); they are now released before returning.
bool shuffle_integral_image_test() {
  char *d_data;
  unsigned int *h_image;
  unsigned int *d_integral_image;
  int w = 1920;
  int h = 1080;
  int n_elements = w * h;
  int sz = sizeof(unsigned int) * n_elements;
  printf("\nComputing Integral Image Test on size %d x %d synthetic data\n", w,
         h);
  printf("---------------------------------------------------\n");
  checkCudaErrors(hipHostMalloc(reinterpret_cast<void **>(&h_image), sz));
  // fill test "image" with synthetic 1's data
  memset(h_image, 0, sz);
  // each thread handles 16 values, use 1 block/row
  int blockSize = iDivUp(w, 16);
  // launch 1 block / row
  int gridSize = h;
  // Create a synthetic image for testing
  checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_data), sz));
  // NOTE(review): the integral buffer is allocated 4x larger than sz
  // (uint4 accesses in the kernel) but only the first sz bytes are zeroed
  // below — presumably the kernels overwrite the rest; confirm.
  checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_integral_image),
                            n_elements * sizeof(int) * 4));
  checkCudaErrors(hipMemset(d_data, 1, sz));
  checkCudaErrors(hipMemset(d_integral_image, 0, sz));
  hipEvent_t start, stop;
  hipEventCreate(&start);
  hipEventCreate(&stop);
  float et = 0;
  unsigned int err;
  // Execute scan line prefix sum kernel, and time it
  hipEventRecord(start);
  hipLaunchKernelGGL(( shfl_intimage_rows), dim3(gridSize), dim3(blockSize), 0, 0,
      reinterpret_cast<uint4 *>(d_data),
      reinterpret_cast<uint4 *>(d_integral_image));
  hipEventRecord(stop);
  checkCudaErrors(hipEventSynchronize(stop));
  checkCudaErrors(hipEventElapsedTime(&et, start, stop));
  printf("Method: Fast Time (GPU Timer): %f ms ", et);
  // verify the scan line results
  checkCudaErrors(
      hipMemcpy(h_image, d_integral_image, sz, hipMemcpyDeviceToHost));
  err = verifyDataRowSums(h_image, w, h);
  printf("Diff = %d\n", err);
  // Execute column prefix sum kernel and time it
  dim3 blockSz(32, 8);
  dim3 testGrid(w / blockSz.x, 1);
  hipEventRecord(start);
  hipLaunchKernelGGL(( shfl_vertical_shfl), dim3(testGrid), dim3(blockSz), 0, 0, (unsigned int *)d_integral_image, w,
                                              h);
  hipEventRecord(stop);
  checkCudaErrors(hipEventSynchronize(stop));
  checkCudaErrors(hipEventElapsedTime(&et, start, stop));
  printf("Method: Vertical Scan Time (GPU Timer): %f ms ", et);
  // Verify the column results
  checkCudaErrors(
      hipMemcpy(h_image, d_integral_image, sz, hipMemcpyDeviceToHost));
  printf("\n");
  int finalSum = h_image[w * h - 1];
  printf("CheckSum: %d, (expect %dx%d=%d)\n", finalSum, w, h, w * h);
  checkCudaErrors(hipFree(d_data));
  checkCudaErrors(hipFree(d_integral_image));
  checkCudaErrors(hipHostFree(h_image));
  checkCudaErrors(hipEventDestroy(start));
  checkCudaErrors(hipEventDestroy(stop));
  // verify final sum: if the final value in the corner is the same as the size
  // of the buffer (all 1's) then the integral image was generated successfully
  return (finalSum == w * h) ? true : false;
}
// Entry point: checks for SM 3.0+ hardware (the __shfl intrinsics require
// it), then runs both the simple scan test and the integral-image test.
// NOTE(review): this device query/version gate is repeated inside
// shuffle_simple_test(); it could be factored out.
int main(int argc, char *argv[]) {
  // Initialization. The shuffle intrinsic is not available on SM < 3.0
  // so waive the test if the hardware is not present.
  int cuda_device = 0;
  printf("Starting shfl_scan\n");
  // use command-line specified CUDA device, otherwise use device with highest
  // Gflops/s
  cuda_device = findCudaDevice(argc, (const char **)argv);
  hipDeviceProp_t deviceProp;
  checkCudaErrors(hipGetDevice(&cuda_device));
  checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device));
  printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
         deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
  // __shfl intrinsic needs SM 3.0 or higher
  if (deviceProp.major < 3) {
    printf("> __shfl() intrinsic requires device SM 3.0+\n");
    printf("> Waiving test.\n");
    exit(EXIT_WAIVED);
  }
  bool bTestResult = true;
  // Both tests always run so that each reports its result.
  bool simpleTest = shuffle_simple_test(argc, argv);
  bool intTest = shuffle_integral_image_test();
  // NOTE(review): bitwise & on bools works here but && would be clearer.
  bTestResult = simpleTest & intTest;
  exit((bTestResult) ? EXIT_SUCCESS : EXIT_FAILURE);
}
| 3ccc040a03fdebb808dd4ef0fd513103b5cb0257.cu | /* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Shuffle intrinsics CUDA Sample
// This sample demonstrates the use of the shuffle intrinsic
// First, a simple example of a prefix sum using the shuffle to
// perform a scan operation is provided.
// Secondly, a more involved example of computing an integral image
// using the shuffle intrinsic is provided, where the shuffle
// scan operation and shuffle xor operations are used
#include <stdio.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include "shfl_integral_image.cuh"
// Scan using shfl - takes log2(n) steps
// This function demonstrates basic use of the shuffle intrinsic, __shfl_up,
// to perform a scan operation across a block.
// First, it performs a scan (prefix sum in this case) inside a warp
// Then to continue the scan operation across the block,
// each warp's sum is placed into shared memory. A single warp
// then performs a shuffle scan on that shared memory. The results
// are then uniformly added to each warp's threads.
// This pyramid type approach is continued by placing each block's
// final sum in global memory and prefix summing that via another kernel call,
// then uniformly adding across the input data via the uniform_add<<<>>> kernel.
// Block-wide inclusive prefix sum using warp shuffles.
// Launch contract: blockDim.x is a multiple of warpSize, and the launch
// supplies (blockDim.x / warpSize) * sizeof(int) dynamic shared memory.
// `width` is the shuffle segment width (32 == full warp at the call sites).
// If partial_sums != NULL, each block's total is written to
// partial_sums[blockIdx.x] for a follow-up scan pass.
// NOTE(review): data[id] has no bounds guard — the grid must exactly cover
// the array; confirm at each call site.
__global__ void shfl_scan_test(int *data, int width, int *partial_sums = NULL) {
  extern __shared__ int sums[];
  int id = ((blockIdx.x * blockDim.x) + threadIdx.x);
  int lane_id = id % warpSize;
  // determine a warp_id within a block
  int warp_id = threadIdx.x / warpSize;
  // Below is the basic structure of using a shfl instruction
  // for a scan.
  // Record "value" as a variable - we accumulate it along the way
  int value = data[id];
  // Now accumulate in log steps up the chain
  // compute sums, with another thread's value who is
  // distance delta away (i). Note
  // those threads where the thread 'i' away would have
  // been out of bounds of the warp are unaffected. This
  // creates the scan sum.
#pragma unroll
  for (int i = 1; i <= width; i *= 2) {
    unsigned int mask = 0xffffffff;
    int n = __shfl_up_sync(mask, value, i, width);
    if (lane_id >= i) value += n;
  }
  // value now holds the scan value for the individual thread
  // next sum the largest values for each warp
  // write the sum of the warp to smem
  if (threadIdx.x % warpSize == warpSize - 1) {
    sums[warp_id] = value;
  }
  __syncthreads();
  //
  // scan sum the warp sums
  // the same shfl scan operation, but performed on warp sums
  //
  if (warp_id == 0 && lane_id < (blockDim.x / warpSize)) {
    int warp_sum = sums[lane_id];
    // NOTE(review): 1 << 32 is undefined for 1024-thread blocks (32 warps);
    // assumes <= 31 warps per block — the call sites use 256 threads (8 warps).
    int mask = (1 << (blockDim.x / warpSize)) - 1;
    for (int i = 1; i <= (blockDim.x / warpSize); i *= 2) {
      int n = __shfl_up_sync(mask, warp_sum, i, (blockDim.x / warpSize));
      if (lane_id >= i) warp_sum += n;
    }
    sums[lane_id] = warp_sum;
  }
  __syncthreads();
  // perform a uniform add across warps in the block
  // read neighbouring warp's sum and add it to threads value
  int blockSum = 0;
  if (warp_id > 0) {
    blockSum = sums[warp_id - 1];
  }
  value += blockSum;
  // Now write out our result
  data[id] = value;
  // last thread has sum, write write out the block's sum
  if (partial_sums != NULL && threadIdx.x == blockDim.x - 1) {
    partial_sums[blockIdx.x] = value;
  }
}
// Uniform add: adds each block's partial sum (from the previous scan pass)
// to every element that block owns, completing the global scan.
// data........ device array updated in place
// partial_sums per-block offsets; entry b is added to all of block b's elements
// len......... number of valid elements in data
// Fixes vs. the original:
//  * the guard was `id > len`, permitting one out-of-bounds access at id == len;
//  * the early `return` sat before __syncthreads(), so a block straddling
//    `len` would reach the barrier with divergent threads (undefined
//    behavior). The guard now wraps only the global-memory update, so every
//    thread reaches the barrier.
__global__ void uniform_add(int *data, int *partial_sums, int len) {
  __shared__ int buf;
  int id = ((blockIdx.x * blockDim.x) + threadIdx.x);
  if (threadIdx.x == 0) {
    buf = partial_sums[blockIdx.x];
  }
  __syncthreads();
  if (id < len) {
    data[id] += buf;
  }
}
// Ceiling division: smallest number of `divisor`-sized chunks that cover
// `dividend` elements (used to size grid/block launches).
static unsigned int iDivUp(unsigned int dividend, unsigned int divisor) {
  unsigned int quotient = dividend / divisor;
  if (dividend % divisor != 0) {
    ++quotient;  // round up when there is a remainder
  }
  return quotient;
}
// This function verifies the shuffle scan result, for the simple
// prefix sum case.
// Verifies the GPU scan result against a CPU inclusive prefix sum, then
// times 100 runs of the naive CPU scan for comparison.
// NOTE: h_data is scanned IN PLACE, so the caller's original input is
// destroyed; the timing loop afterwards re-scans the already-scanned buffer
// (same operation count, different values).
// NOTE(review): `diff` accumulates SIGNED differences, so opposite-sign
// errors could cancel — an abs() per element would be stricter.
bool CPUverify(int *h_data, int *h_result, int n_elements) {
  // cpu verify
  for (int i = 0; i < n_elements - 1; i++) {
    h_data[i + 1] = h_data[i] + h_data[i + 1];
  }
  int diff = 0;
  for (int i = 0; i < n_elements; i++) {
    diff += h_data[i] - h_result[i];
  }
  printf("CPU verify result diff (GPUvsCPU) = %d\n", diff);
  bool bTestResult = false;
  if (diff == 0) bTestResult = true;
  // Time the naive CPU scan (100 repetitions, average reported).
  StopWatchInterface *hTimer = NULL;
  sdkCreateTimer(&hTimer);
  sdkResetTimer(&hTimer);
  sdkStartTimer(&hTimer);
  for (int j = 0; j < 100; j++)
    for (int i = 0; i < n_elements - 1; i++) {
      h_data[i + 1] = h_data[i] + h_data[i + 1];
    }
  sdkStopTimer(&hTimer);
  double cput = sdkGetTimerValue(&hTimer);
  printf("CPU sum (naive) took %f ms\n", cput / 100);
  return bTestResult;
}
// this verifies the row scan result for synthetic data of all 1's
// Checks the row-scan stage on all-1's input: element (j, i) of a correct
// row prefix sum must equal i + 1. Returns the summed absolute error
// (0 means every row is correct).
unsigned int verifyDataRowSums(unsigned int *h_image, int w, int h) {
  unsigned int totalError = 0;
  for (int row = 0; row < h; row++) {
    const unsigned int *rowData = h_image + row * w;
    for (int col = 0; col < w; col++) {
      int expected = col + 1;
      totalError += abs(expected - static_cast<int>(rowData[col]));
    }
  }
  return totalError;
}
// End-to-end test of the warp-shuffle prefix sum on 65536 ones:
// per-block scan -> scan of the block sums -> uniform add, then CPU
// verification and a naive-CPU timing comparison.
bool shuffle_simple_test(int argc, char **argv) {
  int *h_data, *h_partial_sums, *h_result;
  int *d_data, *d_partial_sums;
  const int n_elements = 65536;
  int sz = sizeof(int) * n_elements;
  int cuda_device = 0;
  printf("Starting shfl_scan\n");
  // use command-line specified CUDA device, otherwise use device with highest
  // Gflops/s
  cuda_device = findCudaDevice(argc, (const char **)argv);
  cudaDeviceProp deviceProp;
  checkCudaErrors(cudaGetDevice(&cuda_device));
  checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device));
  printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
         deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
  // __shfl intrinsic needs SM 3.0 or higher
  if (deviceProp.major < 3) {
    printf("> __shfl() intrinsic requires device SM 3.0+\n");
    printf("> Waiving test.\n");
    exit(EXIT_WAIVED);
  }
  // Pinned host buffers for fast host<->device transfers.
  checkCudaErrors(cudaMallocHost(reinterpret_cast<void **>(&h_data),
                                 sizeof(int) * n_elements));
  checkCudaErrors(cudaMallocHost(reinterpret_cast<void **>(&h_result),
                                 sizeof(int) * n_elements));
  // initialize data:
  printf("Computing Simple Sum test\n");
  printf("---------------------------------------------------\n");
  printf("Initialize test data [1, 1, 1...]\n");
  for (int i = 0; i < n_elements; i++) {
    h_data[i] = 1;
  }
  // One partial (block) sum per 256-thread block; shared memory holds one
  // int per warp for the intra-block scan.
  int blockSize = 256;
  int gridSize = n_elements / blockSize;
  int nWarps = blockSize / 32;
  int shmem_sz = nWarps * sizeof(int);
  int n_partialSums = n_elements / blockSize;
  int partial_sz = n_partialSums * sizeof(int);
  printf("Scan summation for %d elements, %d partial sums\n", n_elements,
         n_elements / blockSize);
  int p_blockSize = min(n_partialSums, blockSize);
  int p_gridSize = iDivUp(n_partialSums, p_blockSize);
  printf("Partial summing %d elements with %d blocks of size %d\n",
         n_partialSums, p_gridSize, p_blockSize);
  // initialize a timer
  // NOTE(review): start/stop events are never destroyed (minor leak).
  cudaEvent_t start, stop;
  checkCudaErrors(cudaEventCreate(&start));
  checkCudaErrors(cudaEventCreate(&stop));
  float et = 0;
  float inc = 0;
  checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_data), sz));
  checkCudaErrors(
      cudaMalloc(reinterpret_cast<void **>(&d_partial_sums), partial_sz));
  checkCudaErrors(cudaMemset(d_partial_sums, 0, partial_sz));
  checkCudaErrors(
      cudaMallocHost(reinterpret_cast<void **>(&h_partial_sums), partial_sz));
  checkCudaErrors(cudaMemcpy(d_data, h_data, sz, cudaMemcpyHostToDevice));
  checkCudaErrors(cudaEventRecord(start, 0));
  // Pass 1: scan each 256-element block, emitting one block sum each.
  shfl_scan_test<<<gridSize, blockSize, shmem_sz>>>(d_data, 32, d_partial_sums);
  // Pass 2: scan the block sums in place.
  shfl_scan_test<<<p_gridSize, p_blockSize, shmem_sz>>>(d_partial_sums, 32);
  // Pass 3: add the scanned block sums to every block after the first,
  // hence gridSize - 1 blocks starting at d_data + blockSize.
  uniform_add<<<gridSize - 1, blockSize>>>(d_data + blockSize, d_partial_sums,
                                           n_elements);
  checkCudaErrors(cudaEventRecord(stop, 0));
  checkCudaErrors(cudaEventSynchronize(stop));
  checkCudaErrors(cudaEventElapsedTime(&inc, start, stop));
  et += inc;
  checkCudaErrors(cudaMemcpy(h_result, d_data, sz, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaMemcpy(h_partial_sums, d_partial_sums, partial_sz,
                             cudaMemcpyDeviceToHost));
  printf("Test Sum: %d\n", h_partial_sums[n_partialSums - 1]);
  printf("Time (ms): %f\n", et);
  printf("%d elements scanned in %f ms -> %f MegaElements/s\n", n_elements, et,
         n_elements / (et / 1000.0f) / 1000000.0f);
  // NOTE: CPUverify scans h_data in place, destroying the original input.
  bool bTestResult = CPUverify(h_data, h_result, n_elements);
  checkCudaErrors(cudaFreeHost(h_data));
  checkCudaErrors(cudaFreeHost(h_result));
  checkCudaErrors(cudaFreeHost(h_partial_sums));
  checkCudaErrors(cudaFree(d_data));
  checkCudaErrors(cudaFree(d_partial_sums));
  return bTestResult;
}
// This function tests creation of an integral image using
// synthetic data, of size 1920x1080 pixels greyscale.
// Builds an integral image of a 1920x1080 all-1's byte image in two passes
// (row-wise prefix sums, then a vertical scan) and verifies that the final
// bottom-right value equals w*h.
bool shuffle_integral_image_test() {
  char *d_data;
  unsigned int *h_image;
  unsigned int *d_integral_image;
  int w = 1920;
  int h = 1080;
  int n_elements = w * h;
  int sz = sizeof(unsigned int) * n_elements;
  printf("\nComputing Integral Image Test on size %d x %d synthetic data\n", w,
         h);
  printf("---------------------------------------------------\n");
  checkCudaErrors(cudaMallocHost(reinterpret_cast<void **>(&h_image), sz));
  // fill test "image" with synthetic 1's data
  memset(h_image, 0, sz);
  // each thread handles 16 values, use 1 block/row
  int blockSize = iDivUp(w, 16);
  // launch 1 block / row
  int gridSize = h;
  // Create a synthetic image for testing
  checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_data), sz));
  checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_integral_image),
                             n_elements * sizeof(int) * 4));
  // cudaMemset is byte-wise: every byte of d_data is set to 1; the row
  // kernel below consumes d_data through a uint4 reinterpretation.
  checkCudaErrors(cudaMemset(d_data, 1, sz));
  // NOTE(review): only sz bytes of the 4*sz-byte d_integral_image
  // allocation are zeroed here — confirm the remainder is scratch the
  // kernels fully overwrite.
  checkCudaErrors(cudaMemset(d_integral_image, 0, sz));
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  float et = 0;
  unsigned int err;
  // Execute scan line prefix sum kernel, and time it
  cudaEventRecord(start);
  shfl_intimage_rows<<<gridSize, blockSize>>>(
      reinterpret_cast<uint4 *>(d_data),
      reinterpret_cast<uint4 *>(d_integral_image));
  cudaEventRecord(stop);
  checkCudaErrors(cudaEventSynchronize(stop));
  checkCudaErrors(cudaEventElapsedTime(&et, start, stop));
  printf("Method: Fast Time (GPU Timer): %f ms ", et);
  // verify the scan line results
  checkCudaErrors(
      cudaMemcpy(h_image, d_integral_image, sz, cudaMemcpyDeviceToHost));
  err = verifyDataRowSums(h_image, w, h);
  printf("Diff = %d\n", err);
  // Execute column prefix sum kernel and time it
  dim3 blockSz(32, 8);
  dim3 testGrid(w / blockSz.x, 1);
  cudaEventRecord(start);
  shfl_vertical_shfl<<<testGrid, blockSz>>>((unsigned int *)d_integral_image, w,
                                            h);
  cudaEventRecord(stop);
  checkCudaErrors(cudaEventSynchronize(stop));
  checkCudaErrors(cudaEventElapsedTime(&et, start, stop));
  printf("Method: Vertical Scan Time (GPU Timer): %f ms ", et);
  // Verify the column results
  checkCudaErrors(
      cudaMemcpy(h_image, d_integral_image, sz, cudaMemcpyDeviceToHost));
  printf("\n");
  int finalSum = h_image[w * h - 1];
  printf("CheckSum: %d, (expect %dx%d=%d)\n", finalSum, w, h, w * h);
  checkCudaErrors(cudaFree(d_data));
  checkCudaErrors(cudaFree(d_integral_image));
  checkCudaErrors(cudaFreeHost(h_image));
  // verify final sum: if the final value in the corner is the same as the size
  // of the buffer (all 1's) then the integral image was generated successfully
  return (finalSum == w * h) ? true : false;
}
// Entry point: waive on pre-SM3.0 hardware (shuffle intrinsics unavailable),
// otherwise run both sub-tests and exit with success only if both pass.
int main(int argc, char *argv[]) {
  // Initialization. The shuffle intrinsic is not available on SM < 3.0
  // so waive the test if the hardware is not present.
  int cuda_device = 0;
  printf("Starting shfl_scan\n");
  // use command-line specified CUDA device, otherwise use device with highest
  // Gflops/s
  cuda_device = findCudaDevice(argc, (const char **)argv);
  cudaDeviceProp deviceProp;
  checkCudaErrors(cudaGetDevice(&cuda_device));
  checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device));
  printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
         deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
  // __shfl intrinsic needs SM 3.0 or higher
  if (deviceProp.major < 3) {
    printf("> __shfl() intrinsic requires device SM 3.0+\n");
    printf("> Waiving test.\n");
    exit(EXIT_WAIVED);
  }
  // Run both sub-tests; both are evaluated before combining, so neither is
  // skipped by short-circuiting.
  bool simpleTest = shuffle_simple_test(argc, argv);
  bool intTest = shuffle_integral_image_test();
  // Fix: combine booleans with logical && rather than bitwise &.
  bool bTestResult = simpleTest && intTest;
  exit((bTestResult) ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
9076697af6ca551427ff0c0b2286e4167248a5d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <chrono>
#include <iostream>
#include <cmath>
#define TPB 256
/*
array size 5000: CPU computing time:1.21e-05 GPU computing time:0.167205 The difference is 0.000000
array size 50000: CPU computing time:0.0001292 GPU computing time:0.158399 The difference is 0.000000
array size 500000: CPU computing time:0.0013506 GPU computing time:0.167526 The difference is 0.000000
array size 5000000: CPU computing time:0.0176518 GPU computing time:0.173352 The difference is 0.000000
array size 50000000: CPU computing time:0.199978 GPU computing time:0.274253 The difference is 0.000000
array size 500000000: CPU computing time:2.09953 GPU computing time:1.18089 The difference is 0.000000
*/
// Element-wise update: y[i] = x[i] * y[i] + a for i in [0, n).
// NOTE(review): despite the name, this is not canonical SAXPY (a*x + y);
// it intentionally matches cpuSaxpy so CPU and GPU results are comparable.
__global__ void saxpyKernel(float *x, float *y, const float a, const int n)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n)
    {
        y[tid] = x[tid] * y[tid] + a;
    }
}
// Host reference: y[i] = x[i] * y[i] + a for every element.
// NOTE(review): not canonical SAXPY (a*x + y); kept identical to the GPU
// kernel so the difference() comparison is meaningful.
void cpuSaxpy(float *x, float *y, const float a, const int n)
{
    size_t count = n;
    for (size_t idx = 0; idx != count; ++idx)
    {
        y[idx] = x[idx] * y[idx] + a;
    }
}
// L1 distance: sum of |a[i] - b[i]| over the first n elements,
// accumulated in order (float accumulation order preserved).
float difference(float *a, float *b, const int n)
{
    float accum = 0.0f;
    size_t i = 0;
    while (i < (size_t)n)
    {
        accum += std::abs(a[i] - b[i]);
        ++i;
    }
    return accum;
}
// Reads the array size from stdin, runs y = x*y + a on both CPU and GPU,
// times each, and prints the summed absolute difference of the results.
int main()
{
    int array_size;
    std::cin >> array_size;
    const float a = 1.0;
    // switch to heap memory to allow for larger array size
    float *x1 = new float[array_size];
    float *y1 = new float[array_size];
    for (size_t i = 0; i < array_size; ++i)
    {
        x1[i] = 3.3 + 1e-10;
        y1[i] = 3.4 + 1e-10;
    }
    auto start_time_cpu = std::chrono::high_resolution_clock::now();
    cpuSaxpy(x1, y1, a, array_size);
    auto end_time_cpu = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> time_cpu = end_time_cpu - start_time_cpu;
    printf("CPU SAXPY completed!\n");
    std::cout << "CPU computing time:" << time_cpu.count() << std::endl;
    float *x2 = new float[array_size];
    float *y2 = new float[array_size];
    for (size_t i = 0; i < array_size; ++i)
    {
        x2[i] = 3.3 + 1e-10;
        y2[i] = 3.4 + 1e-10;
    }
    // GPU timing deliberately includes allocation, transfers, and readback.
    auto start_time_gpu = std::chrono::high_resolution_clock::now();
    float *dx2 = nullptr;
    float *dy2 = nullptr;
    auto byteSize = array_size * sizeof(float);
    hipMalloc(&dx2, byteSize);
    hipMalloc(&dy2, byteSize);
    hipMemcpy(dx2, x2, byteSize, hipMemcpyHostToDevice);
    hipMemcpy(dy2, y2, byteSize, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( saxpyKernel), dim3((array_size+TPB-1)/TPB), dim3(TPB), 0, 0, dx2,dy2,a, array_size);
    hipDeviceSynchronize();
    hipMemcpy(y2, dy2, byteSize, hipMemcpyDeviceToHost);
    hipFree(dx2);
    hipFree(dy2);
    auto end_time_gpu = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> time_gpu = end_time_gpu - start_time_gpu;
    printf("GPU SAXPY completed!\n");
    std::cout << "GPU computing time:" << time_gpu.count() << std::endl;
    float diff = difference(y1, y2, array_size);
    printf("Comparison completed! The difference is %f\n", diff);
    // Fix: the original `delete x1,x2,y1,y2;` is a comma expression that
    // only freed x1 (leaking x2/y1/y2) and used scalar delete on new[]
    // allocations (undefined behaviour); release each array with delete[].
    delete[] x1;
    delete[] x2;
    delete[] y1;
    delete[] y2;
    return 0;
} | 9076697af6ca551427ff0c0b2286e4167248a5d7.cu | #include <stdio.h>
#include <chrono>
#include <iostream>
#include <cmath>
#define TPB 256
/*
array size 5000: CPU computing time:1.21e-05 GPU computing time:0.167205 The difference is 0.000000
array size 50000: CPU computing time:0.0001292 GPU computing time:0.158399 The difference is 0.000000
array size 500000: CPU computing time:0.0013506 GPU computing time:0.167526 The difference is 0.000000
array size 5000000: CPU computing time:0.0176518 GPU computing time:0.173352 The difference is 0.000000
array size 50000000: CPU computing time:0.199978 GPU computing time:0.274253 The difference is 0.000000
array size 500000000: CPU computing time:2.09953 GPU computing time:1.18089 The difference is 0.000000
*/
// Element-wise update: y[i] = x[i] * y[i] + a for i in [0, n).
// NOTE(review): despite the name, this is not canonical SAXPY (a*x + y);
// it intentionally matches cpuSaxpy so CPU and GPU results are comparable.
__global__ void saxpyKernel(float *x, float *y, const float a, const int n)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n)
    {
        y[tid] = x[tid] * y[tid] + a;
    }
}
// Host reference: y[i] = x[i] * y[i] + a for every element.
// NOTE(review): not canonical SAXPY (a*x + y); kept identical to the GPU
// kernel so the difference() comparison is meaningful.
void cpuSaxpy(float *x, float *y, const float a, const int n)
{
    size_t count = n;
    for (size_t idx = 0; idx != count; ++idx)
    {
        y[idx] = x[idx] * y[idx] + a;
    }
}
// L1 distance: sum of |a[i] - b[i]| over the first n elements,
// accumulated in order (float accumulation order preserved).
float difference(float *a, float *b, const int n)
{
    float accum = 0.0f;
    size_t i = 0;
    while (i < (size_t)n)
    {
        accum += std::abs(a[i] - b[i]);
        ++i;
    }
    return accum;
}
// Reads the array size from stdin, runs y = x*y + a on both CPU and GPU,
// times each, and prints the summed absolute difference of the results.
int main()
{
    int array_size;
    std::cin >> array_size;
    const float a = 1.0;
    // switch to heap memory to allow for larger array size
    float *x1 = new float[array_size];
    float *y1 = new float[array_size];
    for (size_t i = 0; i < array_size; ++i)
    {
        x1[i] = 3.3 + 1e-10;
        y1[i] = 3.4 + 1e-10;
    }
    auto start_time_cpu = std::chrono::high_resolution_clock::now();
    cpuSaxpy(x1, y1, a, array_size);
    auto end_time_cpu = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> time_cpu = end_time_cpu - start_time_cpu;
    printf("CPU SAXPY completed!\n");
    std::cout << "CPU computing time:" << time_cpu.count() << std::endl;
    float *x2 = new float[array_size];
    float *y2 = new float[array_size];
    for (size_t i = 0; i < array_size; ++i)
    {
        x2[i] = 3.3 + 1e-10;
        y2[i] = 3.4 + 1e-10;
    }
    // GPU timing deliberately includes allocation, transfers, and readback.
    auto start_time_gpu = std::chrono::high_resolution_clock::now();
    float *dx2 = nullptr;
    float *dy2 = nullptr;
    auto byteSize = array_size * sizeof(float);
    cudaMalloc(&dx2, byteSize);
    cudaMalloc(&dy2, byteSize);
    cudaMemcpy(dx2, x2, byteSize, cudaMemcpyHostToDevice);
    cudaMemcpy(dy2, y2, byteSize, cudaMemcpyHostToDevice);
    saxpyKernel<<<(array_size+TPB-1)/TPB, TPB>>>(dx2,dy2,a, array_size);
    cudaDeviceSynchronize();
    cudaMemcpy(y2, dy2, byteSize, cudaMemcpyDeviceToHost);
    cudaFree(dx2);
    cudaFree(dy2);
    auto end_time_gpu = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> time_gpu = end_time_gpu - start_time_gpu;
    printf("GPU SAXPY completed!\n");
    std::cout << "GPU computing time:" << time_gpu.count() << std::endl;
    float diff = difference(y1, y2, array_size);
    printf("Comparison completed! The difference is %f\n", diff);
    // Fix: the original `delete x1,x2,y1,y2;` is a comma expression that
    // only freed x1 (leaking x2/y1/y2) and used scalar delete on new[]
    // allocations (undefined behaviour); release each array with delete[].
    delete[] x1;
    delete[] x2;
    delete[] y1;
    delete[] y2;
    return 0;
} |
dfada8be82ccea017fd19cb22cddc522d3cce20a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define n 0.0002
#define p 0.5
#define G 0.75
#define SIZE 1024
#define NUMBER_OF_ITERATIONS 100
#define DEBUG 0
// Map a (row, column) pair onto the flat 1D offset of a SIZE x SIZE grid.
__device__ int idx(int i, int j){
    return i * SIZE + j;
}
// Debug-only kernel: each thread prints one grid element of u1
// (block index = row i, thread index = column j). Not invoked anywhere in
// this file's main().
__global__ void foo(double *u1){
int i = blockIdx.x;
int j = threadIdx.x;
printf("u1[%d,%d]: %.3lf \t",i, j, u1[idx(i,j)] );
}
// One finite-difference timestep of the SIZE x SIZE drum-membrane grid.
// u = next state, u1 = current state, u2 = previous state (constants n, p,
// G are #defined at the top of the file).
// Launched as <<<SIZE, 1>>> from main: each block handles row i and its
// single thread loops over every column j of that row.
// NOTE(review): the loop variable j below shadows the threadIdx-derived j
// above, so launches with blockDim.x > 1 would do redundant work — confirm
// the launch configuration before reusing this kernel.
__global__ void updateElement(double *u, double *u1, double *u2)
{
int i = blockIdx.x;
int j = threadIdx.x;
//printf("i: %d j: %d \n", i, j);
for(int j=0; j < SIZE; j++)
//for(int i=0; i < SIZE; i++)
{
//taken care of by other threads
// Interior points only; boundary rows/columns are filled in below.
if(i == 0 || j == 0 || i == SIZE-1 || j == SIZE-1){
continue;
}
// 4-neighbour stencil of the current state plus a term from the
// previous state.
u[idx(i, j)]= p *
(u1[idx(i-1,j)] + u1[idx(i+1,j)]
+u1[idx(i,j-1)] + u1[idx(i,j+1)]
- 4 * u1[idx(i, j)])
+ 2 * u1[idx(i, j)] - (1-n) * u2[idx(i, j)];
// Boundary conditions: an edge cell takes G times its adjacent interior
// value; a corner takes G times its adjacent edge value.
if(j==1){
u[idx(i,0)] = G * u[idx(i, j)];
//top left corner
if(i == 1){
u[idx(0,0)] = G * u[idx(1,0)];
}
//top right corner
if(i == SIZE-2){
u[idx(SIZE-1,0)] = G * u[idx(SIZE-2, 0)];
}
}
if(i==1){
u[idx(0, j)] = G * u[idx(i, j)];
//bottom left corner
if(j==SIZE-2){
u[idx(0,SIZE-1)] = G * u[idx(0, SIZE-2)];
}
}
if(j == SIZE-2){
u[idx(i, SIZE-1)] = G * u[idx(i, j)];
}
if(i == SIZE-2){
u[idx(SIZE-1, j)] = G * u[idx(i, j)];
//bottom right corner
if(j== SIZE-2){
u[idx(SIZE-1, SIZE-1)] = G * u[idx(SIZE-1, SIZE-2)];
}
}
}
}
// Print the SIZE x SIZE grid, tab-separated, one row of values per line
// (leading blank line, three decimal places per value).
void printMatrix(double* u){
    printf("\n");
    for (int cell = 0; cell < SIZE * SIZE; cell++) {
        printf("%.3lf", u[cell]);
        printf("\t");
        // End the output line after every SIZE-th value.
        if ((cell + 1) % SIZE == 0 && cell > 0) {
            printf("\n");
        }
    }
}
// Host driver for the 2D drum-membrane simulation: allocates three
// SIZE*SIZE grids (u = next, u1 = current, u2 = previous), strikes the drum
// at the centre of u1, then runs NUMBER_OF_ITERATIONS timesteps on the GPU,
// rotating the device buffers after each step, and reports the wall time.
int main(){
    double* u = static_cast<double*>(malloc(sizeof(double) * SIZE * SIZE ));
    double* u1 = static_cast<double*>(malloc(sizeof(double) * SIZE * SIZE ));
    double* u2 = static_cast<double*>(malloc(sizeof(double) * SIZE * SIZE ));
    // Initialize all three grids to 0.
    // Fix: u was previously left uninitialized (its zeroing was commented
    // out) and then copied to the device, so the first iteration's u_dev
    // contents were indeterminate.
    for(int i = 0; i < SIZE * SIZE; i++){
        u[i] = 0;
        u1[i] = 0;
        u2[i] = 0;
    }
    //hit that drummmm
    //u1[idx(SIZE/2, SIZE/2)] = 1;
    u1[(SIZE * SIZE/2 + SIZE/2)] = 1;
    //printMatrix(u1);
    clock_t start, end;
    double cpu_time_used;
    double* u_dev, *u2_dev;
    double *u1_dev;
    hipMalloc((void **)&u_dev, SIZE*SIZE *sizeof(double));
    hipMalloc((void **)&u1_dev, SIZE*SIZE *sizeof(double));
    hipMalloc((void **)&u2_dev, SIZE*SIZE *sizeof(double));
    hipMemcpy(u_dev, u, SIZE*SIZE *sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(u1_dev, u1, SIZE*SIZE *sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(u2_dev, u2, SIZE*SIZE *sizeof(double), hipMemcpyHostToDevice);
    // Re-strike the host copy (device copy already holds the impulse).
    u1[(SIZE * SIZE/2 + SIZE/2)] = 1;
    start = clock();
    for(int i = 0; i < NUMBER_OF_ITERATIONS ; i++){
        // One block per row, one thread per block (kernel loops over columns).
        updateElement << <SIZE, 1 >> > (u_dev, u1_dev, u2_dev);
        hipDeviceSynchronize();
        if(DEBUG){
            hipMemcpy(u, u_dev, SIZE*SIZE *sizeof(double), hipMemcpyDeviceToHost);
            //hipMemcpy(u1, u1_dev, SIZE*SIZE *sizeof(double), hipMemcpyDeviceToHost);
            //printMatrix(u);
            printf("\n\n%lf", u[(SIZE * SIZE/2 + SIZE/2)] );
        }
        // Rotate buffers: current -> previous, next -> current.
        hipMemcpy(u2_dev, u1_dev, SIZE*SIZE *sizeof(double), hipMemcpyDeviceToDevice);
        hipMemcpy(u1_dev, u_dev, SIZE*SIZE *sizeof(double), hipMemcpyDeviceToDevice);
    }
    end = clock();
    cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
    printf("\n Part 3_many blocks, time: \t%lf \n", cpu_time_used);
    hipFree(u_dev);
    hipFree(u1_dev);
    hipFree(u2_dev);
    free(u);
    free(u1);
    free(u2);
} | dfada8be82ccea017fd19cb22cddc522d3cce20a.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define n 0.0002
#define p 0.5
#define G 0.75
#define SIZE 1024
#define NUMBER_OF_ITERATIONS 100
#define DEBUG 0
// Map a (row, column) pair onto the flat 1D offset of a SIZE x SIZE grid.
__device__ int idx(int i, int j){
    return i * SIZE + j;
}
// Debug-only kernel: each thread prints one grid element of u1
// (block index = row i, thread index = column j). Not invoked anywhere in
// this file's main().
__global__ void foo(double *u1){
int i = blockIdx.x;
int j = threadIdx.x;
printf("u1[%d,%d]: %.3lf \t",i, j, u1[idx(i,j)] );
}
// One finite-difference timestep of the SIZE x SIZE drum-membrane grid.
// u = next state, u1 = current state, u2 = previous state (constants n, p,
// G are #defined at the top of the file).
// Launched as <<<SIZE, 1>>> from main: each block handles row i and its
// single thread loops over every column j of that row.
// NOTE(review): the loop variable j below shadows the threadIdx-derived j
// above, so launches with blockDim.x > 1 would do redundant work — confirm
// the launch configuration before reusing this kernel.
__global__ void updateElement(double *u, double *u1, double *u2)
{
int i = blockIdx.x;
int j = threadIdx.x;
//printf("i: %d j: %d \n", i, j);
for(int j=0; j < SIZE; j++)
//for(int i=0; i < SIZE; i++)
{
//taken care of by other threads
// Interior points only; boundary rows/columns are filled in below.
if(i == 0 || j == 0 || i == SIZE-1 || j == SIZE-1){
continue;
}
// 4-neighbour stencil of the current state plus a term from the
// previous state.
u[idx(i, j)]= p *
(u1[idx(i-1,j)] + u1[idx(i+1,j)]
+u1[idx(i,j-1)] + u1[idx(i,j+1)]
- 4 * u1[idx(i, j)])
+ 2 * u1[idx(i, j)] - (1-n) * u2[idx(i, j)];
// Boundary conditions: an edge cell takes G times its adjacent interior
// value; a corner takes G times its adjacent edge value.
if(j==1){
u[idx(i,0)] = G * u[idx(i, j)];
//top left corner
if(i == 1){
u[idx(0,0)] = G * u[idx(1,0)];
}
//top right corner
if(i == SIZE-2){
u[idx(SIZE-1,0)] = G * u[idx(SIZE-2, 0)];
}
}
if(i==1){
u[idx(0, j)] = G * u[idx(i, j)];
//bottom left corner
if(j==SIZE-2){
u[idx(0,SIZE-1)] = G * u[idx(0, SIZE-2)];
}
}
if(j == SIZE-2){
u[idx(i, SIZE-1)] = G * u[idx(i, j)];
}
if(i == SIZE-2){
u[idx(SIZE-1, j)] = G * u[idx(i, j)];
//bottom right corner
if(j== SIZE-2){
u[idx(SIZE-1, SIZE-1)] = G * u[idx(SIZE-1, SIZE-2)];
}
}
}
}
// Print the SIZE x SIZE grid, tab-separated, one row of values per line
// (leading blank line, three decimal places per value).
void printMatrix(double* u){
    printf("\n");
    for (int cell = 0; cell < SIZE * SIZE; cell++) {
        printf("%.3lf", u[cell]);
        printf("\t");
        // End the output line after every SIZE-th value.
        if ((cell + 1) % SIZE == 0 && cell > 0) {
            printf("\n");
        }
    }
}
// Host driver for the 2D drum-membrane simulation: allocates three
// SIZE*SIZE grids (u = next, u1 = current, u2 = previous), strikes the drum
// at the centre of u1, then runs NUMBER_OF_ITERATIONS timesteps on the GPU,
// rotating the device buffers after each step, and reports the wall time.
int main(){
    double* u = static_cast<double*>(malloc(sizeof(double) * SIZE * SIZE ));
    double* u1 = static_cast<double*>(malloc(sizeof(double) * SIZE * SIZE ));
    double* u2 = static_cast<double*>(malloc(sizeof(double) * SIZE * SIZE ));
    // Initialize all three grids to 0.
    // Fix: u was previously left uninitialized (its zeroing was commented
    // out) and then copied to the device, so the first iteration's u_dev
    // contents were indeterminate.
    for(int i = 0; i < SIZE * SIZE; i++){
        u[i] = 0;
        u1[i] = 0;
        u2[i] = 0;
    }
    //hit that drummmm
    //u1[idx(SIZE/2, SIZE/2)] = 1;
    u1[(SIZE * SIZE/2 + SIZE/2)] = 1;
    //printMatrix(u1);
    clock_t start, end;
    double cpu_time_used;
    double* u_dev, *u2_dev;
    double *u1_dev;
    cudaMalloc((void **)&u_dev, SIZE*SIZE *sizeof(double));
    cudaMalloc((void **)&u1_dev, SIZE*SIZE *sizeof(double));
    cudaMalloc((void **)&u2_dev, SIZE*SIZE *sizeof(double));
    cudaMemcpy(u_dev, u, SIZE*SIZE *sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(u1_dev, u1, SIZE*SIZE *sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(u2_dev, u2, SIZE*SIZE *sizeof(double), cudaMemcpyHostToDevice);
    // Re-strike the host copy (device copy already holds the impulse).
    u1[(SIZE * SIZE/2 + SIZE/2)] = 1;
    start = clock();
    for(int i = 0; i < NUMBER_OF_ITERATIONS ; i++){
        // One block per row, one thread per block (kernel loops over columns).
        updateElement << <SIZE, 1 >> > (u_dev, u1_dev, u2_dev);
        cudaDeviceSynchronize();
        if(DEBUG){
            cudaMemcpy(u, u_dev, SIZE*SIZE *sizeof(double), cudaMemcpyDeviceToHost);
            //cudaMemcpy(u1, u1_dev, SIZE*SIZE *sizeof(double), cudaMemcpyDeviceToHost);
            //printMatrix(u);
            printf("\n\n%lf", u[(SIZE * SIZE/2 + SIZE/2)] );
        }
        // Rotate buffers: current -> previous, next -> current.
        cudaMemcpy(u2_dev, u1_dev, SIZE*SIZE *sizeof(double), cudaMemcpyDeviceToDevice);
        cudaMemcpy(u1_dev, u_dev, SIZE*SIZE *sizeof(double), cudaMemcpyDeviceToDevice);
    }
    end = clock();
    cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
    printf("\n Part 3_many blocks, time: \t%lf \n", cpu_time_used);
    cudaFree(u_dev);
    cudaFree(u1_dev);
    cudaFree(u2_dev);
    free(u);
    free(u1);
    free(u2);
} |
5866305dcbefb198cb01c5a9841522181645608f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "j2d5pt-512-13-256_kernel.hu"
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }
__global__ void kernel0_13(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 13;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 486;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_10_0;
float __reg_10_1;
float __reg_10_2;
float __reg_11_0;
float __reg_11_1;
float __reg_11_2;
float __reg_12_0;
float __reg_12_1;
float __reg_12_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13);
const AN5D_TYPE __storeValid = __writeValid13;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
// ---------------------------------------------------------------------------
// Machine-generated stencil pipeline helper macros (AN5D/PPCG-style output).
// NOTE(review): these rely on identifiers declared elsewhere in the enclosing
// kernel (__b_sb, __b_sb_double, __blockSize, __tid, __writeValid1..12,
// __storeValid, __c1, __c1Pad2, __halo1, __DEST) and on a per-kernel
// __CALCEXPR that performs the actual stencil arithmetic. Do not hand-edit
// the token sequence; regenerate instead.
// ---------------------------------------------------------------------------

// Flip the shared-memory double buffer: __b_sb alternates between the two
// halves of __b_sb_double so one half can be read while the other is written.
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
// Stage the middle register (b) of the 3-point window into shared memory and
// barrier so neighboring threads can read it. Parameters a and c are unused
// here by construction of the generator (only the center value is shared).
// The __syncthreads() is unconditional, so every __CALCn/__STORE expansion
// below is reached by all threads of the block (no divergent barrier).
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
// __CALCn: pipeline stage n of the temporally-blocked stencil. After staging,
// threads whose result is valid at this stage (__writeValidN) evaluate the
// stencil expression; threads in the halo pass the center value through
// unchanged so deeper stages still receive well-defined inputs.
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
// Final pipeline stage: stage the window, then (for threads that own an
// output element, __storeValid) compute the global row index __c1 from the
// logical row h minus the halo offset and emit the result via __CALCEXPR
// into __DEST. Note the setup/barrier still runs for non-storing threads.
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_12_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_12_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_12_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_12_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_12_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_12_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_12_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_12_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_12_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_12_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_12_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_12_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_12_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(1, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(2, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(3, __reg_12_2, __reg_12_0, __reg_12_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(4, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(5, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(6, __reg_12_2, __reg_12_0, __reg_12_1);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(7, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(8, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(9, __reg_12_2, __reg_12_0, __reg_12_1);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(10, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(11, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 25);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(12, __reg_12_2, __reg_12_0, __reg_12_1);
__LOAD(__reg_0_2, 26);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(13, __reg_12_0, __reg_12_1, __reg_12_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 25);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 26);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(13, __reg_12_0, __reg_12_1, __reg_12_2);
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 27; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2);
__STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0);
__STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 1, __reg_12_1, __reg_12_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1);
__STORE(__h - 1, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h + 0, __reg_12_2, __reg_12_0, __reg_0_1);
}
}
else
{
for (__h = 27; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2);
__h++;
}
}
__global__ void kernel0_12(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 12;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_10_0;
float __reg_10_1;
float __reg_10_2;
float __reg_11_0;
float __reg_11_1;
float __reg_11_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __storeValid = __writeValid12;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_11_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_11_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_11_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_11_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_11_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_11_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_11_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_11_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_11_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_11_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_11_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_11_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(1, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(2, __reg_11_1, __reg_11_2, __reg_11_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(3, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(4, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(5, __reg_11_1, __reg_11_2, __reg_11_0);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(6, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(7, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(8, __reg_11_1, __reg_11_2, __reg_11_0);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(9, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(10, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(11, __reg_11_1, __reg_11_2, __reg_11_0);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(12, __reg_11_2, __reg_11_0, __reg_11_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(12, __reg_11_2, __reg_11_0, __reg_11_1);
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0);
__STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1);
__STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 1, __reg_11_2, __reg_11_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2);
__STORE(__h - 1, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h + 0, __reg_11_0, __reg_11_1, __reg_0_2);
}
}
else
{
for (__h = 25; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1);
__h++;
}
}
__global__ void kernel0_11(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 11;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 490;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_10_0;
float __reg_10_1;
float __reg_10_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __storeValid = __writeValid11;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_10_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_10_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_10_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_10_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_10_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_10_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_10_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_10_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_10_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_10_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_10_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(1, __reg_10_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(2, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(3, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(4, __reg_10_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(5, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(6, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(7, __reg_10_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(8, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(9, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(10, __reg_10_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(11, __reg_10_1, __reg_10_2, __reg_10_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(11, __reg_10_1, __reg_10_2, __reg_10_0);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 23; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1);
__STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2);
__STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 1, __reg_10_0, __reg_10_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0);
__STORE(__h - 1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h + 0, __reg_10_1, __reg_10_2, __reg_0_0);
}
}
else
{
for (__h = 23; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0);
__h++;
}
}
// ---------------------------------------------------------------------------
// kernel0_10: auto-generated (AN5D-style) temporally-blocked 2D stencil sweep.
//
// Fuses __side0Len = 10 consecutive time steps of a 5-point stencil in one
// pass over a (__side1Len x __side2Len) = (256 x 492) tile.  The stencil
// itself is defined by __CALCEXPR: a weighted average of the center point,
// its two c1-neighbors (the three registers __a/__b/__c streamed along c1),
// and its two c2-neighbors (read from the shared-memory row via __SBREF at
// offsets -1/+1).
//
// A holds two time planes of size dimsize*dimsize, selected by (__c0 % 2):
// __LOAD reads from plane (c0 % 2), __STORE/__DEST writes plane ((c0+1) % 2)
// (see the __LOAD and __DEST macros below).
//
// Launch layout (implied by the indexing code): 1D grid, 1D block of
// __side2LenOl = 512 threads; blockIdx.x enumerates (c1-tile, c2-tile) pairs,
// each thread owns one c2 column and streams down c1 one row per __LOAD.
// Tiles are overlapped by __OlLen = __halo * __side0Len = 10 cells per side
// so the 10 fused time steps remain valid at tile edges (redundant compute).
//
// NOTE(review): generated code — statement order and register rotation are
// load-bearing; do not reorder or "simplify" by hand.
// ---------------------------------------------------------------------------
__global__ void kernel0_10(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
    // Iteration-space extents and padding: c0 = time, c1/c2 = the two
    // spatial axes; the physical domain excludes a 1-cell border (halo).
    const AN5D_TYPE __c0Len = (timestep - 0);
    const AN5D_TYPE __c0Pad = (0);
    #define __c0 c0
    const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
    const AN5D_TYPE __c1Pad = (1);
    #define __c1 c1
    const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
    const AN5D_TYPE __c2Pad = (1);
    #define __c2 c2
    const AN5D_TYPE __halo1 = 1;
    const AN5D_TYPE __halo2 = 1;
    const AN5D_TYPE __side0Len = 10;    // time steps fused per kernel launch
    const AN5D_TYPE __side1Len = 256;   // tile height (streamed dimension c1)
    const AN5D_TYPE __side2Len = 492;   // tile width (one thread per c2 column)
    // Overlap ("Ol") widths: each fused time step grows the needed halo by 1.
    const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
    const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
    const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
    const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
    const AN5D_TYPE __blockSize = 1 * __side2LenOl;
    const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;    // ceil-div: tiles along c1
    const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;    // ceil-div: tiles along c2
    const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
    const AN5D_TYPE __local_c2 = __tid;
    const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
    const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
    // Register pipeline: __reg_<t>_<k> is a 3-deep rotating window (k = 0..2)
    // of rows at intermediate time level t.  __CALC<t> consumes three rows of
    // level t-1 and produces one row of level t; level 0 is raw __LOAD input.
    float __reg_0_0;
    float __reg_0_1;
    float __reg_0_2;
    float __reg_1_0;
    float __reg_1_1;
    float __reg_1_2;
    float __reg_2_0;
    float __reg_2_1;
    float __reg_2_2;
    float __reg_3_0;
    float __reg_3_1;
    float __reg_3_2;
    float __reg_4_0;
    float __reg_4_1;
    float __reg_4_2;
    float __reg_5_0;
    float __reg_5_1;
    float __reg_5_2;
    float __reg_6_0;
    float __reg_6_1;
    float __reg_6_2;
    float __reg_7_0;
    float __reg_7_1;
    float __reg_7_2;
    float __reg_8_0;
    float __reg_8_1;
    float __reg_8_2;
    float __reg_9_0;
    float __reg_9_1;
    float __reg_9_2;
    // Double-buffered shared row: each __CALC* publishes its center register
    // so neighboring threads can read the c2 -1/+1 values; __DB_SWITCH flips
    // buffers every stage so the previous row is never overwritten mid-read.
    __shared__ float __b_sb_double[__blockSize * 2];
    float *__b_sb = __b_sb_double;
    // Validity masks: threads in the overlap region load and compute but only
    // progressively narrower bands may write each successive time level
    // (the halo shrinks by __halo2 per fused step).
    const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
    const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
    const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
    const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
    const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
    const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
    const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
    const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
    const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
    const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
    const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
    const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
    const AN5D_TYPE __storeValid = __writeValid10;
    AN5D_TYPE __c1;
    AN5D_TYPE __h;
    const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
    // __LOAD: fetch row h of the current time plane ((c0 % 2) half of A).
    #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
    // __DEST: output cell in the other time plane ((c0+1) % 2 half of A).
    #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
    #define __REGREF(reg, i2) reg
    #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
    // The 5-point stencil body: center row __b comes from both the register
    // (c2 offset 0) and shared memory (c2 offsets -1/+1); __a/__c are the
    // c1-neighbor rows held in registers.
    #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
    #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
    // __CALCSETUP publishes the center row to shared memory and barriers;
    // __syncthreads() here is reached by ALL threads (the validity masks only
    // gate the arithmetic, never the barrier).
    #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
    // __CALC<t>: advance one intermediate time level; threads outside the
    // level's valid band just forward the center value unchanged.
    #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
    #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
    #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
    #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
    #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
    #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
    #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
    #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
    #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
    // __STORE: the 10th (final) time level — written straight to __DEST.
    #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
    // ---- Pipeline prologue ----------------------------------------------
    // First c1-tile: row 0 is the domain's top boundary, so __reg_9_0 holds
    // the loaded boundary row and is fed as the "above" neighbor when each
    // successive time level starts up.
    if (__c1Id == 0)
    {
        __LOAD(__reg_9_0, 0);
        __LOAD(__reg_0_1, 1);
        __LOAD(__reg_0_2, 2);
        __CALC1(__reg_1_1, __reg_9_0, __reg_0_1, __reg_0_2);
        __LOAD(__reg_0_0, 3);
        __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
        __CALC2(__reg_2_1, __reg_9_0, __reg_1_1, __reg_1_2);
        __LOAD(__reg_0_1, 4);
        __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
        __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
        __CALC3(__reg_3_1, __reg_9_0, __reg_2_1, __reg_2_2);
        __LOAD(__reg_0_2, 5);
        __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
        __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
        __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
        __CALC4(__reg_4_1, __reg_9_0, __reg_3_1, __reg_3_2);
        __LOAD(__reg_0_0, 6);
        __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
        __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
        __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
        __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
        __CALC5(__reg_5_1, __reg_9_0, __reg_4_1, __reg_4_2);
        __LOAD(__reg_0_1, 7);
        __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
        __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
        __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
        __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
        __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
        __CALC6(__reg_6_1, __reg_9_0, __reg_5_1, __reg_5_2);
        __LOAD(__reg_0_2, 8);
        __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
        __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
        __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
        __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
        __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
        __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
        __CALC7(__reg_7_1, __reg_9_0, __reg_6_1, __reg_6_2);
        __LOAD(__reg_0_0, 9);
        __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
        __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
        __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
        __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
        __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
        __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
        __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
        __CALC8(__reg_8_1, __reg_9_0, __reg_7_1, __reg_7_2);
        __LOAD(__reg_0_1, 10);
        __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
        __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
        __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
        __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
        __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
        __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
        __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
        __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
        __CALC9(__reg_9_1, __reg_9_0, __reg_8_1, __reg_8_2);
        __LOAD(__reg_0_2, 11);
        __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
        __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
        __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
        __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
        __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
        __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
        __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
        __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
        __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
        __STORE(1, __reg_9_0, __reg_9_1, __reg_9_2);
        __LOAD(__reg_0_0, 12);
        __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
        __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
        __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
        __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
        __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
        __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
        __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
        __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
        __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
        __STORE(2, __reg_9_1, __reg_9_2, __reg_9_0);
        __LOAD(__reg_0_1, 13);
        __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
        __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
        __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
        __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
        __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
        __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
        __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
        __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
        __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
        __STORE(3, __reg_9_2, __reg_9_0, __reg_9_1);
        __LOAD(__reg_0_2, 14);
        __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
        __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
        __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
        __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
        __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
        __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
        __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
        __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
        __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
        __STORE(4, __reg_9_0, __reg_9_1, __reg_9_2);
        __LOAD(__reg_0_0, 15);
        __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
        __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
        __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
        __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
        __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
        __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
        __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
        __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
        __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
        __STORE(5, __reg_9_1, __reg_9_2, __reg_9_0);
        __LOAD(__reg_0_1, 16);
        __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
        __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
        __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
        __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
        __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
        __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
        __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
        __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
        __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
        __STORE(6, __reg_9_2, __reg_9_0, __reg_9_1);
        __LOAD(__reg_0_2, 17);
        __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
        __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
        __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
        __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
        __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
        __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
        __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
        __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
        __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
        __STORE(7, __reg_9_0, __reg_9_1, __reg_9_2);
        __LOAD(__reg_0_0, 18);
        __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
        __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
        __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
        __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
        __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
        __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
        __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
        __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
        __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
        __STORE(8, __reg_9_1, __reg_9_2, __reg_9_0);
        __LOAD(__reg_0_1, 19);
        __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
        __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
        __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
        __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
        __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
        __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
        __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
        __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
        __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
        __STORE(9, __reg_9_2, __reg_9_0, __reg_9_1);
        __LOAD(__reg_0_2, 20);
        __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
        __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
        __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
        __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
        __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
        __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
        __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
        __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
        __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
        __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2);
    }
    // Interior c1-tiles: rows 0..20 come from the overlap with the previous
    // tile, so the pipeline is simply filled (only the final row 10 store is
    // emitted; earlier rows were stored by the neighboring tile's sweep).
    else
    {
        __LOAD(__reg_0_0, 0);
        __LOAD(__reg_0_1, 1);
        __LOAD(__reg_0_2, 2);
        __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
        __LOAD(__reg_0_0, 3);
        __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
        __LOAD(__reg_0_1, 4);
        __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
        __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
        __LOAD(__reg_0_2, 5);
        __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
        __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
        __LOAD(__reg_0_0, 6);
        __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
        __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
        __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
        __LOAD(__reg_0_1, 7);
        __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
        __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
        __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
        __LOAD(__reg_0_2, 8);
        __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
        __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
        __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
        __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
        __LOAD(__reg_0_0, 9);
        __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
        __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
        __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
        __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
        __LOAD(__reg_0_1, 10);
        __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
        __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
        __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
        __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
        __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
        __LOAD(__reg_0_2, 11);
        __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
        __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
        __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
        __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
        __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
        __LOAD(__reg_0_0, 12);
        __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
        __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
        __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
        __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
        __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
        __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
        __LOAD(__reg_0_1, 13);
        __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
        __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
        __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
        __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
        __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
        __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
        __LOAD(__reg_0_2, 14);
        __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
        __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
        __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
        __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
        __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
        __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
        __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
        __LOAD(__reg_0_0, 15);
        __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
        __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
        __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
        __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
        __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
        __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
        __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
        __LOAD(__reg_0_1, 16);
        __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
        __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
        __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
        __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
        __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
        __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
        __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
        __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
        __LOAD(__reg_0_2, 17);
        __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
        __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
        __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
        __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
        __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
        __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
        __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
        __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
        __LOAD(__reg_0_0, 18);
        __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
        __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
        __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
        __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
        __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
        __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
        __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
        __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
        __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
        __LOAD(__reg_0_1, 19);
        __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
        __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
        __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
        __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
        __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
        __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
        __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
        __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
        __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
        __LOAD(__reg_0_2, 20);
        __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
        __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
        __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
        __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
        __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
        __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
        __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
        __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
        __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
        __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2);
        // Keep the shared-memory double-buffer phase aligned with the
        // c1Id == 0 path (which executed one extra __DB_SWITCH per __STORE).
        __DB_SWITCH(); __syncthreads();
    }
    __b_sb = __b_sb_double + __blockSize * 1;
    // ---- Steady state + epilogue ----------------------------------------
    // Last c1-tile: stream the remaining rows, then drain the pipeline at
    // the bottom boundary (three cases depending on how many rows remain
    // after the unrolled-by-3 loop exits).
    if (__c1Id == __side1Num - 1)
    {
        for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
        {
            __LOAD(__reg_0_0, __h);
            __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
            __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
            __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
            __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
            __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
            __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
            __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
            __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
            __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
            __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
            __h++;
            __LOAD(__reg_0_1, __h);
            __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
            __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
            __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
            __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
            __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
            __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
            __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
            __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
            __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
            __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1);
            __h++;
            __LOAD(__reg_0_2, __h);
            __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
            __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
            __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
            __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
            __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
            __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
            __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
            __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
            __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
            __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2);
            __h++;
        }
        if (0) {}
        // Drain case 0: the last loaded row (__reg_0_2) is the bottom
        // boundary; reuse it as the "below" neighbor for every remaining
        // pipeline stage while flushing stores __h-10 .. __h-2.
        else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
        {
            __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
            __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
            __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
            __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
            __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
            __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
            __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
            __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
            __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
            __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
            __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
            __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
            __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
            __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
            __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
            __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
            __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1);
            __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
            __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
            __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
            __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
            __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
            __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
            __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2);
            __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
            __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
            __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
            __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
            __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
            __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0);
            __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
            __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
            __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
            __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
            __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1);
            __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
            __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
            __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
            __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2);
            __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
            __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
            __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0);
            __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
            __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1);
            __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_0_2);
        }
        // Drain case 1: one extra row to load before reaching the boundary.
        else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
        {
            __LOAD(__reg_0_0, __h + 0);
            __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
            __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
            __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
            __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
            __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
            __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
            __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
            __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
            __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
            __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
            __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
            __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
            __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
            __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
            __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
            __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
            __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
            __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
            __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1);
            __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
            __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
            __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
            __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
            __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
            __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
            __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
            __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2);
            __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
            __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
            __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
            __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
            __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
            __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
            __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0);
            __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
            __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
            __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
            __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
            __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
            __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1);
            __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
            __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
            __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
            __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
            __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2);
            __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
            __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
            __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
            __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0);
            __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
            __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
            __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1);
            __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
            __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2);
            __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_0_0);
        }
        // Drain case 2: two extra rows to load before reaching the boundary.
        else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
        {
            __LOAD(__reg_0_0, __h + 0);
            __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
            __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
            __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
            __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
            __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
            __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
            __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
            __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
            __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
            __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
            __LOAD(__reg_0_1, __h + 1);
            __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
            __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
            __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
            __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
            __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
            __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
            __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
            __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
            __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
            __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1);
            __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
            __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
            __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
            __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
            __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
            __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
            __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
            __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
            __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2);
            __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
            __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
            __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
            __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
            __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
            __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
            __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
            __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0);
            __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
            __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
            __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
            __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
            __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
            __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
            __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1);
            __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
            __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
            __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
            __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
            __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
            __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2);
            __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
            __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
            __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
            __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
            __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0);
            __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
            __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
            __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
            __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1);
            __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
            __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
            __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2);
            __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
            __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_9_0);
            __STORE(__h + 0, __reg_9_2, __reg_9_0, __reg_0_1);
        }
    }
    // Interior c1-tiles: stream to the end of the overlapped tile height;
    // the last rows belong to the next tile's overlap, so each remaining
    // iteration bails out once __h reaches __side1LenOl (no boundary drain).
    else
    {
        for (__h = 21; __h <= __side1LenOl - 3;)
        {
            __LOAD(__reg_0_0, __h);
            __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
            __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
            __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
            __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
            __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
            __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
            __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
            __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
            __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
            __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
            __h++;
            __LOAD(__reg_0_1, __h);
            __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
            __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
            __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
            __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
            __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
            __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
            __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
            __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
            __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
            __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1);
            __h++;
            __LOAD(__reg_0_2, __h);
            __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
            __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
            __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
            __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
            __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
            __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
            __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
            __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
            __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
            __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2);
            __h++;
        }
        if (__h == __side1LenOl) return;
        __LOAD(__reg_0_0, __h);
        __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
        __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
        __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
        __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
        __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
        __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
        __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
        __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
        __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
        __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
        __h++;
        if (__h == __side1LenOl) return;
        __LOAD(__reg_0_1, __h);
        __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
        __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
        __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
        __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
        __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
        __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
        __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
        __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
        __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
        __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1);
        __h++;
        if (__h == __side1LenOl) return;
        __LOAD(__reg_0_2, __h);
        __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
        __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
        __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
        __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
        __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
        __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
        __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
        __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
        __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
        __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2);
        __h++;
    }
}
// ===========================================================================
// kernel0_9: AN5D-generated 2D 5-point stencil kernel with a temporal
// blocking factor of 9 (__side0Len == 9).
//
// Data layout: A holds two dimsize x dimsize time planes; the current plane
// is selected by (c0 % 2) (see __LOAD) and results are written to plane
// ((c0 + 1) % 2) (see __DEST) — the usual ping-pong in time.
//
// Work decomposition: each block owns a __side2Len-wide strip of the inner
// (c2) axis, padded on both sides by __OlLen2 == 9 halo cells, and streams
// the outer (c1) axis one row at a time through a 9-deep register pipeline
// (__CALC1..__CALC8 plus the final __CALCEXPR inside __STORE).  Every
// stored row therefore has the 5-point stencil applied 9 times — 9 fused
// time steps per kernel launch.
//
// c2-direction neighbours (__SBREF at -1/+1) come from a double-buffered
// shared-memory row (__b_sb_double); c1-direction neighbours are the
// previous/next rows held in the rotating register triples __reg_k_{0,1,2}.
//
// NOTE(review): machine-generated code — the register rotation order and the
// __syncthreads() hidden inside __CALCSETUP are load-bearing; do not
// reorder statements by hand.
// ===========================================================================
__global__ void kernel0_9(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Problem extents: c0 is time, c1/c2 are the space axes.  The "- 1 - 1" and
// pad of 1 exclude the fixed boundary cells at index 0 and dimsize-1.
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
// 9 fused time steps; each block computes a 256 x 494 output tile and reads
// an extra 9-cell overlap (__OlLen*) on every side — one halo cell per
// fused step.
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 494;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
// One thread per c2 column of the overlapped strip; blockIdx.x linearizes
// the (c1 tile, c2 tile) pair.
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
// Rotating register pipeline: __reg_k_{0,1,2} hold three consecutive c1
// rows at pipeline depth k (k == 0 are raw loads; k == 8 feeds __STORE).
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
// Double-buffered shared row: one half is read (c2 neighbours) while the
// other is written; __DB_SWITCH flips __b_sb between the two halves.
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
// Validity masks: the writable strip shrinks by one halo cell per pipeline
// stage (writeValid1..writeValid9); threads outside a stage's strip just
// forward the centre value via the "else out = reg1" arm of __CALCk.
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __storeValid = __writeValid9;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// __LOAD: fetch row h of the current time plane into a stage-0 register.
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
// __CALCEXPR: the 5-point stencil — a/c are the c1 neighbours (registers),
// b is the centre row whose c2 neighbours come from shared memory.
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
// __CALCSETUP publishes the centre row to shared memory and barriers the
// block — this is the only cross-thread exchange per stage.
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
// __STORE applies the 9th (final) stencil step and writes to the other
// time plane.
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
// --- Pipeline prologue ----------------------------------------------------
if (__c1Id == 0)
{
// First c1-block: row 0 is the fixed domain boundary, so its raw value
// (kept in __reg_8_0) is fed into every stage as the upper neighbour
// while the 9-stage pipeline fills; first outputs land at rows 1..9.
__LOAD(__reg_8_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_8_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_8_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_8_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_8_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_8_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_8_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_8_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_8_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(2, __reg_8_1, __reg_8_2, __reg_8_0);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(3, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(5, __reg_8_1, __reg_8_2, __reg_8_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(6, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(7, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(8, __reg_8_1, __reg_8_2, __reg_8_0);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(9, __reg_8_2, __reg_8_0, __reg_8_1);
}
else
{
// Interior c1-blocks: fill the pipeline from overlap (halo) rows only;
// nothing useful is produced until all 9 stages are primed, so the first
// __STORE happens at row 9 of this tile's overlapped range.
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(9, __reg_8_2, __reg_8_0, __reg_8_1);
}
// Switch to the second shared-memory buffer before entering steady state.
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
// --- Steady state + epilogue for the LAST c1-block ----------------------
// Stream rows in groups of 3 (one full register rotation per iteration),
// emitting one fully-updated row per __STORE at offset __h - 9.
for (__h = 19; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
// Epilogue: drain the pipeline.  One of the three cases fires depending on
// where the tile's last row falls within the 3-row rotation phase; each
// flushes the remaining stages, feeding the last-loaded (bottom boundary)
// row in as the lower neighbour of every outstanding stage.
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_0_2);
}
}
else
{
// --- Steady state for INTERIOR c1-blocks --------------------------------
// Same 3-row rotation as above; the per-row tail below consumes the 0-2
// leftover rows of the overlapped range, returning once __side1LenOl
// rows have been processed (no boundary drain needed — the next block's
// overlap covers the remaining pipeline contents).
for (__h = 19; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1);
__h++;
}
}
__global__ void kernel0_8(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_7_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_7_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_7_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_7_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_7_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_7_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_7_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_7_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(1, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(3, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(5, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(6, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(7, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_0);
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_0_0);
}
}
else
{
for (__h = 17; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0);
__h++;
}
}
__global__ void kernel0_7(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 498;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_6_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_6_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_6_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_6_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_6_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_6_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_6_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(3, __reg_6_2, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(5, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(6, __reg_6_2, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h + 0, __reg_6_2, __reg_6_0, __reg_0_1);
}
}
else
{
for (__h = 15; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
}
}
// Auto-generated (AN5D-style) temporally-blocked 2D 5-point stencil kernel.
// This variant fuses __side0Len = 6 time steps per sweep: each fused step k
// has its own register stage (__reg_k_*) and validity predicate (__writeValidK),
// and only stage 6 (__storeValid) writes results back to global memory.
// A holds two time planes of the grid, selected by (__c0 % 2); the kernel reads
// plane (c0 % 2) and writes plane ((c0 + 1) % 2) — see __LOAD / __DEST below.
// The per-point update (see __CALCEXPR) is:
//   out = (5.1*up + 12.1*left + 15.0*center + 12.2*right + 5.2*down) / 118
// where up/center/down come from registers (c1 direction) and left/right from
// a double-buffered shared-memory row (c2 direction).
// Grid/block layout assumed: 1D grid; each block covers one (c1-tile, c2-tile)
// pair, with one thread per c2 column of the overlapped tile (__side2LenOl).
__global__ void kernel0_6(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// ---- problem geometry: interior extents exclude a halo of 1 on each side ----
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
// ---- tiling parameters: tiles are overlapped by __side0Len halo layers so
// that 6 fused time steps can be computed without inter-block communication ----
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
// ---- map the 1D launch onto (c1-tile, c2-column): blockIdx.x enumerates
// c1-tiles x c2-tiles; each thread owns one global c2 column of its tile ----
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
// ---- register pipeline: three-register rotation buffer per fused time step
// (stage k keeps rows h-1, h, h+1 of intermediate time level k) ----
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
// Double-buffered shared row used to exchange c2-neighbor values between
// adjacent threads; __CALCSETUP flips the buffer and synchronizes each use.
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
// ---- validity predicates: the writable region shrinks by one halo column per
// fused step; only stage 6 results (__storeValid) are committed to memory ----
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
// First c1 row handled by this block's tile (before halo adjustment).
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// NOTE: these macros repeat byte-identical definitions from the sibling
// generated kernels in this file (legal under C/C++ macro redefinition rules).
// __CALCSETUP — and therefore every __CALCk and __STORE — flips the shared
// double buffer, publishes the center value, and issues __syncthreads(); all
// invocations below are in uniform (block-wide) control flow.
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
// ---- pipeline warm-up ----
if (__c1Id == 0)
{
// Top-boundary tile: row 0 is the fixed boundary. __reg_5_0 is seeded with
// the boundary value and re-fed into each stage's first __CALC so the
// boundary propagates unchanged through all fused time levels.
__LOAD(__reg_5_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_5_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_5_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_5_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_5_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_5_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
// Pipeline full: first committed output row is c1 offset 1.
__STORE(1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(2, __reg_5_1, __reg_5_2, __reg_5_0);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(3, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(5, __reg_5_1, __reg_5_2, __reg_5_0);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(6, __reg_5_2, __reg_5_0, __reg_5_1);
}
else
{
// Interior (non-top) tile: warm up by streaming rows 0..12 through the
// stages; rows before the tile's own region are overlap/halo work and
// produce no stores until the pipeline is primed (first store at offset 6).
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(6, __reg_5_2, __reg_5_0, __reg_5_1);
// Keep the shared-buffer parity consistent with the boundary branch.
__DB_SWITCH(); __syncthreads();
}
// Reset the shared double buffer to a known parity before the steady state.
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
// ---- last (bottom-boundary) c1 tile ----
// Steady state: consume 3 rows per iteration, storing 6 time levels behind
// the load front (__h - 6); stop 3 rows short of the tile end for the drain.
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1);
__h++;
}
// ---- pipeline drain at the bottom boundary ----
// The remaining rows modulo 3 select one of three epilogues; each feeds the
// last loaded value (the fixed boundary row) into every remaining stage.
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h + 0, __reg_5_0, __reg_5_1, __reg_0_2);
}
}
else
{
// ---- interior c1 tiles: same steady-state pipeline over the overlapped
// tile height; tail rows are handled one at a time with early exits ----
for (__h = 13; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1);
__h++;
}
// Tail: up to 3 remaining rows; each early return keeps warp exits uniform
// at block scope (all threads share __h, which is thread-invariant here).
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1);
__h++;
}
}
// Auto-generated (AN5D-style) temporally-blocked stencil kernel.
// Fuses __side0Len (= 5) time steps of a 2-D 5-point weighted stencil
// (see __CALCEXPR) over a (dimsize x dimsize) grid in a single pass.
// Time planes ping-pong inside A: plane (c0 % 2) is read via __LOAD and
// plane ((c0 + 1) % 2) is written via __DEST.
// Pipeline stage k (k = 1..4, plus __STORE as the 5th/final stage) keeps a
// rotating 3-row window in registers (__reg_k_0/1/2); left/right neighbor
// values are exchanged between threads through the double-buffered shared
// row __b_sb_double. NOTE(review): the rotation/epilogue schedule is
// generator-emitted — do not hand-edit the unrolled bodies.
__global__ void kernel0_5(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Iteration-space extents: c0 = time, c1 = rows, c2 = columns.
// The computed domain excludes a 1-cell border on each side (dimsize - 1 - 1).
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
// halo = stencil radius per fused time step; the tile is padded ("overlap",
// Ol) by __side0Len * halo cells on each side so 5 steps stay valid.
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 502;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Num_dummy_never; // <- (review) placeholder? No: see next line.
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
// One thread per column of the overlapped tile; blockIdx.x linearizes the
// (c1-tile, c2-tile) pair.
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
// Register pipeline: stage k holds a 3-row rotating window __reg_k_{0,1,2}.
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
// Double-buffered shared row used to publish each thread's center value so
// neighbors can read the -1/+1 columns (__SBREF below).
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
// Validity predicates: each fused step shrinks the writable column range by
// one halo; only columns valid after all 5 steps are stored.
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// __LOAD: fetch row h of the current time plane (c0 % 2) into a register.
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
// __DEST: destination cell in the next time plane ((c0 + 1) % 2).
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
// 5-point weighted stencil: up/down neighbors come from registers (__a, __c),
// left/right from the shared row written by neighboring threads.
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
// __CALCSETUP flips the shared buffer, publishes this thread's center value,
// then barriers — it contains __syncthreads(), so every __CALCk/__STORE must
// be reached uniformly by the whole block (all control flow below is uniform).
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
// __CALCk: advance stage k by one row; threads outside stage k's valid column
// range just forward the center value unchanged.
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
// __STORE: the 5th (final) stage computes straight into global memory.
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
// First c1 tile: warm up the pipeline at the domain's top edge. Row 0 is
// kept in __reg_4_0 and reused as the missing upper neighbor for each
// stage's first computation (top-boundary handling).
__LOAD(__reg_4_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(1, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
}
else
{
// Interior/last c1 tiles: plain warm-up — rows 0..10 fill the five-stage
// pipeline (rows below come from the overlap region), first store at h = 5.
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
}
// Fix the shared-buffer parity for the steady-state loop (generator-chosen
// to match the number of __DB_SWITCH flips performed during warm-up).
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
// Last c1 tile: steady state (unrolled x3 to match the 3-register rotation;
// each iteration loads one row and stores the row 5 behind), then an
// epilogue that drains the pipeline at the bottom edge — the last loaded
// row stands in for the missing lower neighbor.
for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
// Epilogue: 0, 1, or 2 input rows remain depending on the tile height
// modulo the unroll factor; each case flushes the partially-filled stages.
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0);
}
}
else
{
// Not the last c1 tile: sweep the full overlapped extent. The tail emits
// up to 3 leftover rows; the early returns are uniform across the block
// (every thread computes the same __h), so the barriers inside the macros
// stay safe.
for (__h = 11; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
}
}
// Auto-generated (AN5D-style) temporally-blocked stencil kernel.
// Same scheme as kernel0_5 but fuses __side0Len (= 4) time steps of the 2-D
// 5-point weighted stencil (see __CALCEXPR): three register-pipeline stages
// (__CALC1..__CALC3) plus __STORE as the 4th/final stage.
// Time planes ping-pong inside A: plane (c0 % 2) is read (__LOAD), plane
// ((c0 + 1) % 2) is written (__DEST). NOTE(review): generator-emitted
// unrolled schedule — do not hand-edit the rotation bodies.
__global__ void kernel0_4(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Iteration-space extents (c0 = time, c1 = rows, c2 = columns); the computed
// domain excludes a 1-cell border on each side.
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
// 4 fused steps -> overlap of 4 halo cells per side; note __side2Len grows
// to 504 (vs 502 in kernel0_5) so the overlapped block width stays 512.
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
// One thread per column of the overlapped tile; blockIdx.x linearizes the
// (c1-tile, c2-tile) pair.
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
// Register pipeline: stage k holds a 3-row rotating window __reg_k_{0,1,2}.
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
// Double-buffered shared row for left/right neighbor exchange (__SBREF).
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
// Validity predicates: each fused step shrinks the writable column range by
// one halo; only columns valid after all 4 steps are stored.
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// __LOAD reads row h of time plane (c0 % 2); __DEST is the matching cell in
// plane ((c0 + 1) % 2). __CALCSETUP contains __syncthreads(), so all macro
// call sites below are reached uniformly by the block.
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
// 5-point weighted stencil: up/down neighbors from registers, left/right via
// the shared row.
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
// __CALCk advances stage k by one row; out-of-range threads forward the
// center value unchanged.
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
// __STORE: the 4th (final) stage writes straight to global memory.
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
// First c1 tile: top-edge warm-up; row 0 (__reg_3_0) is reused as the
// missing upper neighbor for each stage's first computation.
__LOAD(__reg_3_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_2, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
}
else
{
// Interior/last c1 tiles: plain warm-up — rows 0..8 fill the four-stage
// pipeline; first store at h = 4.
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
}
// Fix the shared-buffer parity for the steady-state loop (generator-chosen;
// differs from kernel0_5 because the warm-up flips an even number of times).
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
// Last c1 tile: steady state (unrolled x3; one row in, one row out, store
// lags by 4), then a bottom-edge epilogue where the last loaded row stands
// in for the missing lower neighbor.
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
// Epilogue: 0, 1, or 2 rows remain (tile height mod unroll factor).
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1);
}
}
else
{
// Not the last c1 tile: sweep the full overlapped extent; uniform early
// returns in the tail keep the in-macro barriers safe.
for (__h = 9; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
}
// Auto-generated (AN5D-style) temporally-blocked stencil kernel.
// Same scheme as kernel0_5/kernel0_4 but fuses __side0Len (= 3) time steps:
// two register-pipeline stages (__CALC1, __CALC2) plus __STORE as the
// 3rd/final stage. Time planes ping-pong inside A: plane (c0 % 2) is read
// (__LOAD) and plane ((c0 + 1) % 2) is written (__DEST).
// NOTE(review): generator-emitted unrolled schedule — do not hand-edit.
__global__ void kernel0_3(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Iteration-space extents (c0 = time, c1 = rows, c2 = columns); the computed
// domain excludes a 1-cell border on each side.
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
// 3 fused steps -> overlap of 3 halo cells per side; __side2Len = 506 keeps
// the overlapped block width at 512.
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 506;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
// One thread per column of the overlapped tile; blockIdx.x linearizes the
// (c1-tile, c2-tile) pair.
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
// Register pipeline: stage k holds a 3-row rotating window __reg_k_{0,1,2}.
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
// Double-buffered shared row for left/right neighbor exchange (__SBREF).
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
// Validity predicates: each fused step shrinks the writable column range by
// one halo; only columns valid after all 3 steps are stored.
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// __LOAD reads row h of time plane (c0 % 2); __DEST is the matching cell in
// plane ((c0 + 1) % 2). __CALCSETUP contains __syncthreads(), so all macro
// call sites below are reached uniformly by the block.
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
// 5-point weighted stencil: up/down neighbors from registers, left/right via
// the shared row.
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
// __CALCk advances stage k by one row; out-of-range threads forward the
// center value unchanged.
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
// __STORE: the 3rd (final) stage writes straight to global memory.
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
// First c1 tile: top-edge warm-up; row 0 (__reg_2_0) is reused as the
// missing upper neighbor for each stage's first computation.
__LOAD(__reg_2_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_1, __reg_2_2, __reg_2_0);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_2, __reg_2_0, __reg_2_1);
}
else
{
// Interior/last c1 tiles: plain warm-up — rows 0..6 fill the three-stage
// pipeline; first store at h = 3. The trailing __DB_SWITCH realigns the
// shared-buffer parity with the __c1Id == 0 path (generator-chosen).
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_2, __reg_2_0, __reg_2_1);
__DB_SWITCH(); __syncthreads();
}
// Fix the shared-buffer parity for the steady-state loop.
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
// Last c1 tile: steady state (unrolled x3; one row in, store lags by 3),
// then a bottom-edge epilogue where the last loaded row stands in for the
// missing lower neighbor.
for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
// Epilogue: 0, 1, or 2 rows remain (tile height mod unroll factor).
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2);
}
}
else
{
// Not the last c1 tile: sweep the full overlapped extent; uniform early
// returns in the tail keep the in-macro barriers safe.
for (__h = 7; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
}
}
// AN5D auto-generated kernel: advances a 2-D 5-point stencil on a
// (dimsize x dimsize) grid by __side0Len = 2 fused time steps per launch.
// A holds two time planes (double buffer): the kernel reads plane (c0 % 2)
// and writes plane ((c0 + 1) % 2) — see __LOAD and __DEST below.
// Each block covers one tile of __side2Len = 508 columns (plus a
// 2-column overlap halo on each side) and streams __side1Len = 256 rows
// through a 2-stage register pipeline (__reg_1_* feeds __STORE).
// Column neighbours are exchanged through the double-buffered shared
// array __b_sb_double; __CALCSETUP switches buffers and barriers before
// every stencil evaluation.
__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Iteration-space extents and padding for the time (c0), row (c1) and
// column (c2) dimensions; the physical boundary (width 1) is excluded.
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
// 2 time steps fused per launch; tile sizes chosen by the generator.
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 508;
// Overlap (redundant-computation halo) widths: halo * fused time steps.
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
// Map the 1-D grid onto (row-tile, column-tile) coordinates.
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
// Register pipeline: __reg_0_* hold raw loaded rows, __reg_1_* hold rows
// after the first fused time step; names rotate modulo 3 as rows stream.
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
// Double-buffered shared row used for the +/-1 column neighbours.
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
// Validity predicates: a thread may load inside the halo, but may only
// update interior cells, and only store once it is outside the region
// invalidated by both fused time steps.
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
/* Load row h of the current time plane into `reg` (guarded by __loadValid). */
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
/* Destination cell in the other time plane. */
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
/* 5-point weighted stencil: rows __a/__b/__c are c1-1/c1/c1+1; the c2-1 and
   c2+1 neighbours of the middle row come from the shared buffer. */
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
/* Publish the middle row to shared memory and barrier before any __SBREF. */
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
/* First fused time step; threads outside the valid band pass reg1 through. */
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
/* Second fused time step, written straight to the destination plane. */
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
// Pipeline prologue. For the first row tile (c1Id == 0) the physical top
// boundary row is loaded directly into a stage-1 register so it propagates
// unchanged; other tiles warm the pipeline from halo rows instead.
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(1, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_1, __reg_1_2, __reg_1_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_1, __reg_1_2, __reg_1_0);
__DB_SWITCH(); __syncthreads();
}
// Put the shared double buffer into a known phase before the main loop.
__b_sb = __b_sb_double + __blockSize * 1;
// Last row tile: steady-state loop plus an epilogue that drains the
// pipeline for the 0/1/2 leftover rows of a partial tile.
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0);
}
}
// Interior row tiles: steady-state loop, unrolled by 3 to rotate the
// register names; the trailing guarded steps stop exactly when the
// tile's overlapped extent (__side1LenOl) has been consumed.
else
{
for (__h = 5; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
}
// AN5D auto-generated kernel: single time step (__side0Len = 1) variant of
// the same 2-D 5-point stencil. A holds two time planes (double buffer):
// the kernel reads plane (c0 % 2) and writes plane ((c0 + 1) % 2).
// With only one fused step there is no intermediate register stage —
// __STORE evaluates the stencil directly from freshly loaded rows.
// Each block covers one tile of __side2Len = 510 columns (1-column halo
// each side) and streams __side1Len = 256 rows; column neighbours are
// exchanged through the double-buffered shared array __b_sb_double.
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Iteration-space extents and padding; the width-1 physical boundary is
// excluded from the updated region.
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 510;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
// Map the 1-D grid onto (row-tile, column-tile) coordinates.
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
// Three rotating registers holding consecutive loaded rows.
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
// Double-buffered shared row used for the +/-1 column neighbours.
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
// A thread may load inside the halo but only store interior, non-overlap
// columns (one fused step, so a single write-validity band suffices).
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
/* Load row h of the current time plane into `reg` (guarded by __loadValid). */
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
/* Destination cell in the other time plane. */
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
/* 5-point weighted stencil: rows __a/__b/__c are c1-1/c1/c1+1; the c2-1 and
   c2+1 neighbours of the middle row come from the shared buffer. */
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
/* Publish the middle row to shared memory and barrier before any __SBREF. */
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
/* Evaluate the single fused time step straight into the destination plane. */
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
// Prologue: load the first three rows and store row 1. With one fused
// step the generator emits identical code for the first and the other
// row tiles, but keeps both branches of its template.
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
}
// Put the shared double buffer into a known phase before the main loop.
__b_sb = __b_sb_double + __blockSize * 1;
// Last row tile: steady-state loop plus an epilogue for the 0/1/2
// leftover rows of a partial tile.
if (__c1Id == __side1Num - 1)
{
for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1);
}
}
// Interior row tiles: steady-state loop unrolled by 3 to rotate the
// register names; the trailing guarded steps stop exactly when the
// tile's overlapped extent (__side1LenOl) has been consumed.
else
{
for (__h = 3; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
}
}
| 5866305dcbefb198cb01c5a9841522181645608f.cu | #include "j2d5pt-512-13-256_kernel.hu"
// Tiny indexing helper used by the __SBREF macro: fetches one element of
// the block's shared-memory row buffer at the given offset.
__device__ float __sbref_wrap(float *sb, size_t index)
{
    return *(sb + index);
}
__global__ void kernel0_13(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 13;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 486;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_10_0;
float __reg_10_1;
float __reg_10_2;
float __reg_11_0;
float __reg_11_1;
float __reg_11_2;
float __reg_12_0;
float __reg_12_1;
float __reg_12_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13);
const AN5D_TYPE __storeValid = __writeValid13;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_12_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_12_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_12_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_12_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_12_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_12_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_12_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_12_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_12_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_12_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_12_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_12_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_12_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(1, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(2, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(3, __reg_12_2, __reg_12_0, __reg_12_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(4, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(5, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(6, __reg_12_2, __reg_12_0, __reg_12_1);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(7, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(8, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(9, __reg_12_2, __reg_12_0, __reg_12_1);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(10, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(11, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 25);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(12, __reg_12_2, __reg_12_0, __reg_12_1);
__LOAD(__reg_0_2, 26);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(13, __reg_12_0, __reg_12_1, __reg_12_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 25);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 26);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(13, __reg_12_0, __reg_12_1, __reg_12_2);
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 27; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2);
__STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0);
__STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 1, __reg_12_1, __reg_12_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1);
__STORE(__h - 1, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h + 0, __reg_12_2, __reg_12_0, __reg_0_1);
}
}
else
{
for (__h = 27; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2);
__h++;
}
}
__global__ void kernel0_12(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 12;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_10_0;
float __reg_10_1;
float __reg_10_2;
float __reg_11_0;
float __reg_11_1;
float __reg_11_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __storeValid = __writeValid12;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_11_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_11_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_11_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_11_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_11_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_11_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_11_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_11_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_11_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_11_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_11_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_11_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(1, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(2, __reg_11_1, __reg_11_2, __reg_11_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(3, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(4, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(5, __reg_11_1, __reg_11_2, __reg_11_0);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(6, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(7, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(8, __reg_11_1, __reg_11_2, __reg_11_0);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(9, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(10, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(11, __reg_11_1, __reg_11_2, __reg_11_0);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(12, __reg_11_2, __reg_11_0, __reg_11_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(12, __reg_11_2, __reg_11_0, __reg_11_1);
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0);
__STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1);
__STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 1, __reg_11_2, __reg_11_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2);
__STORE(__h - 1, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h + 0, __reg_11_0, __reg_11_1, __reg_0_2);
}
}
else
{
for (__h = 25; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1);
__h++;
}
}
__global__ void kernel0_11(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 11;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 490;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_10_0;
float __reg_10_1;
float __reg_10_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __storeValid = __writeValid11;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_10_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_10_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_10_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_10_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_10_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_10_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_10_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_10_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_10_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_10_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_10_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(1, __reg_10_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(2, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(3, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(4, __reg_10_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(5, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(6, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(7, __reg_10_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(8, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(9, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(10, __reg_10_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(11, __reg_10_1, __reg_10_2, __reg_10_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(11, __reg_10_1, __reg_10_2, __reg_10_0);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 23; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1);
__STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2);
__STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 1, __reg_10_0, __reg_10_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0);
__STORE(__h - 1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h + 0, __reg_10_1, __reg_10_2, __reg_0_0);
}
}
else
{
for (__h = 23; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0);
__h++;
}
}
__global__ void kernel0_10(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __storeValid = __writeValid10;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_9_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_9_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_9_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_9_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_9_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_9_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_9_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_9_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_9_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_9_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(3, __reg_9_2, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(4, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(5, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(6, __reg_9_2, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(7, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(8, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(9, __reg_9_2, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(10, __reg_9_0, __reg_9_1, __reg_9_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(10, __reg_9_0, __reg_9_1, __reg_9_2);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h + 0, __reg_9_2, __reg_9_0, __reg_0_1);
}
}
else
{
for (__h = 21; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
}
__global__ void kernel0_9(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 494;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __storeValid = __writeValid9;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_8_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_8_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_8_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_8_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_8_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_8_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_8_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_8_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_8_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(2, __reg_8_1, __reg_8_2, __reg_8_0);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(3, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(5, __reg_8_1, __reg_8_2, __reg_8_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(6, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(7, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(8, __reg_8_1, __reg_8_2, __reg_8_0);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(9, __reg_8_2, __reg_8_0, __reg_8_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(9, __reg_8_2, __reg_8_0, __reg_8_1);
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 19; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_0_2);
}
}
else
{
for (__h = 19; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1);
__h++;
}
}
__global__ void kernel0_8(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_7_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_7_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_7_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_7_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_7_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_7_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_7_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_7_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(1, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(3, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(5, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(6, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(7, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_0);
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_0_0);
}
}
else
{
for (__h = 17; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0);
__h++;
}
}
__global__ void kernel0_7(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 498;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_6_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_6_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_6_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_6_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_6_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_6_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_6_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(3, __reg_6_2, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(5, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(6, __reg_6_2, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h + 0, __reg_6_2, __reg_6_0, __reg_0_1);
}
}
else
{
for (__h = 15; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
}
}
// Machine-generated (AN5D-style) temporal-blocking stencil kernel.
//
// Applies the 5-point stencil defined in __CALCEXPR below —
//   out = (5.1*up + 12.1*left + 15.0*center + 12.2*right + 5.2*down) / 118
// — to the interior of a (dimsize x dimsize) plane, fusing __side0Len = 6
// time steps into a single kernel pass via a register pipeline
// (__reg_1_* .. __reg_5_* plus the final __STORE stage).
//
// Launch layout (grounded in the index math below):
//   * 1-D grid; blockIdx.x encodes a (c1-tile, c2-tile) pair:
//       __c1Id = blockIdx.x / __side2Num,  c2-tile = blockIdx.x % __side2Num.
//   * Threads cover the c2 (column) direction; __tid is flattened from
//     threadIdx.y/x. The block is expected to span __side2LenOl = 512
//     columns (500 + 2*6 halo) — NOTE(review): blockDim must match
//     __side2LenOl for the shared-memory indexing to line up; confirm at
//     the launch site (outside this view).
//   * A holds two time planes selected by (c0 % 2); __LOAD reads plane
//     (c0 % 2) and __STORE writes plane ((c0 + 1) % 2) via __DEST.
//
// Shared memory: __b_sb_double is a double buffer (2 * __blockSize floats);
// every __CALCSETUP flips the active half (__DB_SWITCH), publishes the
// center register, and barriers with __syncthreads(). The __syncthreads()
// calls inside the __c1Id branches are safe because __c1Id is derived from
// blockIdx.x only, so all threads of a block take the same branch.
__global__ void kernel0_6(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Tiling parameters: interior extent is [1, dimsize-1) in both c1 (rows)
// and c2 (columns); each pass advances 6 fused time steps (__side0Len),
// so each tile is widened by a 6-cell halo on every side (__OlLen*).
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
// Register pipeline: __reg_0_* hold raw loads of three consecutive rows;
// __reg_k_* (k = 1..5) hold the row values after k fused stencil steps.
// The rotating _0/_1/_2 suffix is a 3-deep circular row window per stage.
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
// Double-buffered shared row for horizontal (c2) neighbor exchange.
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
// Validity predicates: loads allow a 1-cell halo; each pipeline stage k
// shrinks the valid column band by __halo2 on each side (__writeValidk),
// so only threads valid after all 6 steps actually store (__storeValid).
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// __LOAD: fetch row h of this tile from time plane (c0 % 2); coalesced in c2.
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
// __DEST: output cell in the opposite time plane ((c0 + 1) % 2).
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
// __SBREF: left/right neighbor of the center row via shared memory
// (__sbref_wrap is defined elsewhere in this file).
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
// The 5-point stencil: __a/__c are the rows above/below (registers),
// __b is the center row whose horizontal neighbors come from shared memory.
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
// Flip to the other half of the shared double buffer.
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
// Publish the center row to shared memory and barrier. NOTE: this runs on
// EVERY __CALCk/__STORE invocation, so each macro call is a block-wide
// synchronization point — statement order below is load-bearing.
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
// __CALCk: one fused time step; threads outside the stage-k valid band
// pass the center value through unchanged (out = reg1).
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
// __STORE: sixth (final) fused step, written straight to the output plane.
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
// ---- Pipeline prologue. Branch is uniform per block (__c1Id depends only
// on blockIdx.x), so the __syncthreads() inside __CALCSETUP is safe here.
if (__c1Id == 0)
{
// First row-tile: row 0 is the domain boundary. __reg_5_0 keeps the
// unmodified boundary row and is fed as the "row above" into every stage.
__LOAD(__reg_5_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_5_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_5_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_5_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_5_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_5_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(2, __reg_5_1, __reg_5_2, __reg_5_0);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(3, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(5, __reg_5_1, __reg_5_2, __reg_5_0);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(6, __reg_5_2, __reg_5_0, __reg_5_1);
}
else
{
// Interior row-tile: warm up the pipeline over the 12-row top halo
// (rows 0..12) without storing until the pipeline is fully primed.
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(6, __reg_5_2, __reg_5_0, __reg_5_1);
__DB_SWITCH(); __syncthreads();
}
// Steady state starts on the second half of the shared double buffer.
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
// ---- Last row-tile: run the steady-state loop, then drain the pipeline
// against the bottom boundary row (the __h+0/+1/+2 epilogue cases below
// handle the three possible loop-exit phases of the 3-row rotation).
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1);
__h++;
}
// Epilogue: the last loaded row (the boundary) is reused as the "row
// below" for every remaining stage while the pipeline drains.
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h + 0, __reg_5_0, __reg_5_1, __reg_0_2);
}
}
else
{
// ---- Interior row-tile: steady-state loop over the overlapped extent,
// then up to 3 tail iterations with early return once __side1LenOl rows
// have been consumed (uniform exit — no thread left at a barrier).
for (__h = 13; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1);
__h++;
}
}
__global__ void kernel0_5(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 502;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_4_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(1, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0);
}
}
else
{
for (__h = 11; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
}
}
// ---------------------------------------------------------------------------
// kernel0_4 — machine-generated (AN5D-style) temporally-blocked 2D stencil
// kernel that fuses 4 consecutive timesteps (__side0Len = 4) per launch.
//
// Data layout: A holds two time planes of size dimsize*dimsize, selected by
// parity — __LOAD reads plane (c0 % 2), __STORE writes plane ((c0+1) % 2).
// Each thread owns one c2 column of the tile; rows (c1) are streamed through
// a 3-register rotating ring per pipeline stage (__reg_k_0/1/2), and the
// left/right c2 neighbors are exchanged through double-buffered shared
// memory (__b_sb_double, ping-ponged by __DB_SWITCH to avoid reusing a
// buffer half that other threads may still be reading).
//
// Stencil (see __CALCEXPR): 5-point weighted average over rows c1-1/c1/c1+1
// (register operands __a/__b/__c) and columns c2-1/c2+1 (shared-memory
// reads at tid-1/tid+1), normalized by 118. Division is float (numerator
// is float), not integer.
//
// Launch expectations (from the index math below): 1-D grid where
// blockIdx.x encodes (c1 tile id, c2 tile id) as __c1Id * __side2Num +
// c2-tile; blockDim must give one thread per overlapped c2 column
// (__blockSize = __side2LenOl). NOTE(review): generated code — callers
// must match these sizes; not checked at runtime.
// ---------------------------------------------------------------------------
__global__ void kernel0_4(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Iteration domain: c1/c2 exclude one boundary cell on each side.
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
// 4 fused timesteps -> 3 in-register stages (__CALC1..3) + the store stage.
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 504;
// Overlap (redundant halo) per side grows with the number of fused steps.
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
// One thread per overlapped c2 column.
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
// Decompose the 1-D grid into (c1 tile, c2 tile) coordinates.
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
// Register ring: stage k (k = 0 input .. 3) keeps three consecutive rows.
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
// Double-buffered shared row for the c2 (left/right) neighbor exchange.
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
// Validity shrinks by one halo per pipeline stage; only the innermost
// region (__writeValid4) commits results to global memory.
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// __LOAD: fetch row h of this tile from the (c0 % 2) time plane.
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
// __DEST: destination cell in the opposite-parity time plane.
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
// 5-point stencil: rows via registers (__a/__b/__c), columns via shared mem.
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
// Ping-pong the shared buffer half before publishing the next center row.
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
// __CALCk: stage k of the time pipeline; outside its valid band it just
// passes the center value through (out = reg1).
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
// __STORE: final (4th) application, written straight to the output plane.
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
// Top-boundary tile prologue: row 0 (kept in __reg_3_0) is the fixed
// boundary value and is re-fed as the upper halo of every stage.
__LOAD(__reg_3_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_2, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
}
else
{
// Interior-tile prologue: fill all pipeline stages from the halo rows
// before the first committed store (row 4).
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
}
// Re-anchor the shared ping-pong buffer for the steady-state loop.
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
// Bottom-boundary tile: steady-state loop (unrolled x3 to rotate the
// 3-register ring), then an epilogue for the 0/1/2 leftover rows.
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
// Epilogue: drain the pipeline, reusing the last loaded row as the
// lower boundary value for the remaining stages.
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1);
}
}
else
{
// Interior tile: steady-state loop over the overlapped row range, then
// up to three drain iterations guarded by early returns.
for (__h = 9; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
}
// ---------------------------------------------------------------------------
// kernel0_3 — machine-generated (AN5D-style) temporally-blocked 2D stencil
// kernel fusing 3 consecutive timesteps (__side0Len = 3) per launch.
//
// Same scheme as the sibling kernels: A holds two time planes selected by
// parity (__LOAD reads plane c0 % 2, __STORE writes plane (c0+1) % 2); each
// thread owns one c2 column; rows (c1) stream through 3-register rings, one
// per pipeline stage (__CALC1, __CALC2, then the final __STORE); the c2
// neighbors come from double-buffered shared memory (__b_sb_double,
// ping-ponged by __DB_SWITCH so a half still being read is not overwritten).
// The stencil itself (__CALCEXPR) is the 5-point weighted average with
// weights 5.1/12.1/15.0/12.2/5.2 normalized by 118 (float division).
//
// Launch expectations (from the index math): blockIdx.x encodes
// (__c1Id, c2 tile) as __c1Id * __side2Num + c2-tile; blockDim supplies one
// thread per overlapped c2 column (__blockSize = __side2LenOl).
// NOTE(review): generated code — launch shape is assumed, not validated.
// ---------------------------------------------------------------------------
__global__ void kernel0_3(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Iteration domain: c1/c2 exclude one boundary cell on each side.
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
// 3 fused timesteps -> 2 in-register stages (__CALC1/__CALC2) + store.
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 506;
// Redundant halo per side = halo * fused timesteps.
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
// Register rings: three consecutive rows per pipeline stage.
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
// Double-buffered shared row for the left/right (c2) neighbor exchange.
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
// Validity shrinks by one halo per stage; only __writeValid3 commits.
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// __LOAD: fetch row h of this tile from the (c0 % 2) time plane.
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
// __DEST: destination cell in the opposite-parity time plane.
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
// 5-point stencil: rows via registers, columns via shared memory (tid±1).
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
// Ping-pong the shared buffer half before publishing the next center row.
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
// __CALCk: stage k; outside its valid band it passes the center through.
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
// __STORE: final (3rd) application, written straight to the output plane.
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
// Top-boundary tile prologue: row 0 (kept in __reg_2_0) is the fixed
// boundary value, re-fed as the upper halo of each stage.
__LOAD(__reg_2_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_1, __reg_2_2, __reg_2_0);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_2, __reg_2_0, __reg_2_1);
}
else
{
// Interior-tile prologue: fill both pipeline stages from the halo rows
// before the first committed store (row 3).
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_2, __reg_2_0, __reg_2_1);
__DB_SWITCH(); __syncthreads();
}
// Re-anchor the shared ping-pong buffer for the steady-state loop.
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
// Bottom-boundary tile: steady-state loop (unrolled x3 to rotate the
// register ring), then an epilogue for the 0/1/2 leftover rows.
for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
// Epilogue: drain, reusing the last loaded row as the lower boundary.
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2);
}
}
else
{
// Interior tile: steady-state loop over the overlapped row range, then
// up to three drain iterations guarded by early returns.
for (__h = 7; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
}
}
__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(1, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_1, __reg_1_2, __reg_1_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_1, __reg_1_2, __reg_1_0);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
}
// AN5D-generated 1-D-in-space Jacobi-style stencil kernel, degree-1 time tile
// (one time step per sweep).  Each thread owns one c2 column; c1 rows are
// streamed through the rotating registers __reg_0_0..__reg_0_2 while the
// middle row is staged in double-buffered shared memory so the -1/+1 column
// neighbours can be read via __SBREF.  A holds two time planes selected by
// c0 % 2 (read) and (c0 + 1) % 2 (write).
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
    // Iteration-space extents and padding: c0 = time, c1/c2 = space
    // (interior points only, hence dimsize - 2 with a pad of 1).
    const AN5D_TYPE __c0Len = (timestep - 0);
    const AN5D_TYPE __c0Pad = (0);
    #define __c0 c0
    const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
    const AN5D_TYPE __c1Pad = (1);
    #define __c1 c1
    const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
    const AN5D_TYPE __c2Pad = (1);
    #define __c2 c2
    const AN5D_TYPE __halo1 = 1;
    const AN5D_TYPE __halo2 = 1;
    // Tile shape: 256 c1 rows per block, 510 c2 columns + 2-column halo
    // => __blockSize = 512 threads, one thread per (haloed) column.
    const AN5D_TYPE __side0Len = 1;
    const AN5D_TYPE __side1Len = 256;
    const AN5D_TYPE __side2Len = 510;
    const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
    const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
    const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
    const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
    const AN5D_TYPE __blockSize = 1 * __side2LenOl;
    const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
    const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
    // Map the 1-D grid onto (c1 tile, c2 tile) pairs; __c2 is this thread's
    // global column (may fall in the halo or outside the domain).
    const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
    const AN5D_TYPE __local_c2 = __tid;
    const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
    const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
    // Rotating row registers (previous / current / next c1 row).
    float __reg_0_0;
    float __reg_0_1;
    float __reg_0_2;
    // Double-buffered shared staging for the current row (ping-pong halves).
    __shared__ float __b_sb_double[__blockSize * 2];
    float *__b_sb = __b_sb_double;
    // Predicates: may this thread load / compute / store for its column?
    const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
    const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
    const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
    const AN5D_TYPE __storeValid = __writeValid1;
    AN5D_TYPE __c1;
    AN5D_TYPE __h;
    const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
    // __LOAD: fetch row h of the current time plane into a register.
    // __STORE: publish the current row to shared memory, sync, then write the
    // 5-point weighted stencil result into the next time plane.
    #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
    #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
    #define __REGREF(reg, i2) reg
    #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
    #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
    #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
    #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
    #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
    // Prologue: prime the register pipeline with the first three rows and
    // emit the first output row.  (Both branches are identical here because
    // this is a degree-1 tile; the generator keeps the split for symmetry
    // with deeper-tiled variants.)
    if (__c1Id == 0)
    {
        __LOAD(__reg_0_0, 0);
        __LOAD(__reg_0_1, 1);
        __LOAD(__reg_0_2, 2);
        __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
    }
    else
    {
        __LOAD(__reg_0_0, 0);
        __LOAD(__reg_0_1, 1);
        __LOAD(__reg_0_2, 2);
        __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
    }
    __b_sb = __b_sb_double + __blockSize * 1;
    if (__c1Id == __side1Num - 1)
    {
        // Last c1 tile: steady-state loop (3 rows per iteration, registers
        // rotate roles) followed by an epilogue that drains the remaining
        // 0/1/2 rows so the domain edge is handled exactly.
        for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
        {
            __LOAD(__reg_0_0, __h);
            __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
            __h++;
            __LOAD(__reg_0_1, __h);
            __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
            __h++;
            __LOAD(__reg_0_2, __h);
            __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
            __h++;
            __DB_SWITCH(); __syncthreads();
        }
        if (0) {}
        else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
        {
        }
        else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
        {
            __LOAD(__reg_0_0, __h + 0);
            __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
        }
        else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
        {
            __LOAD(__reg_0_0, __h + 0);
            __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
            __LOAD(__reg_0_1, __h + 1);
            __STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1);
        }
    }
    else
    {
        // Interior c1 tile: same steady-state loop over the full (haloed)
        // tile height, with early exits once the tile is exhausted.
        for (__h = 3; __h <= __side1LenOl - 3;)
        {
            __LOAD(__reg_0_0, __h);
            __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
            __h++;
            __LOAD(__reg_0_1, __h);
            __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
            __h++;
            __LOAD(__reg_0_2, __h);
            __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
            __h++;
            __DB_SWITCH(); __syncthreads();
        }
        if (__h == __side1LenOl) return;
        __LOAD(__reg_0_0, __h);
        __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
        __h++;
        if (__h == __side1LenOl) return;
        __LOAD(__reg_0_1, __h);
        __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
        __h++;
        if (__h == __side1LenOl) return;
        __LOAD(__reg_0_2, __h);
        __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
        __h++;
    }
}
|
a91c4424e2fced074acb121accc6057516dc444d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <vector>
#include <iostream>
#include "yololayer.h"
#include "cuda_utils.h"
// Tiny helpers for (de)serializing trivially-copyable values into a raw
// byte stream, advancing the caller's cursor as a side effect.
namespace Tn
{
    // Store `val` at the cursor and step the cursor past it.
    template<typename T>
    void write(char*& buffer, const T& val)
    {
        T* slot = reinterpret_cast<T*>(buffer);
        *slot = val;
        buffer += sizeof(T);
    }

    // Load one T from the cursor into `val` and step the cursor past it.
    template<typename T>
    void read(const char*& buffer, T& val)
    {
        const T* slot = reinterpret_cast<const T*>(buffer);
        val = *slot;
        buffer += sizeof(T);
    }
}
using namespace Yolo;
namespace nvinfer1
{
// Build the plugin from explicit parameters and copy each head's anchors to
// device memory (mAnchor is a pinned host array of device pointers, one per
// detection head).
YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel>& vYoloKernel)
{
    mClassCount = classCount;
    mYoloV5NetWidth = netWidth;
    mYoloV5NetHeight = netHeight;
    mMaxOutObject = maxOut;
    mYoloKernel = vYoloKernel; // one YoloKernel per head (xl,l,m,s): feature-map w, h, anchors
    mKernelCount = vYoloKernel.size(); // number of output scales (4)
    CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*)));
    size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
    for (int ii = 0; ii < mKernelCount; ii++)
    {
        CUDA_CHECK(hipMalloc(&mAnchor[ii], AnchorLen));
        const auto& yolo = mYoloKernel[ii];
        CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
    }
}
YoloLayerPlugin::~YoloLayerPlugin()
{
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(hipFree(mAnchor[ii]));
}
CUDA_CHECK(hipHostFree(mAnchor));
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
read(d, mYoloV5NetWidth);
read(d, mYoloV5NetHeight);
read(d, mMaxOutObject);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(mYoloKernel.data(), d, kernelSize);
d += kernelSize;
CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(hipMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
}
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
write(d, mYoloV5NetWidth);
write(d, mYoloV5NetHeight);
write(d, mMaxOutObject);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(d, mYoloKernel.data(), kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject);
}
int YoloLayerPlugin::initialize()
{
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalsize = mMaxOutObject * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() {}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel);
p->setPluginNamespace(mPluginNamespace);
return p;
}
// Device-side sigmoid helper: 1 / (1 + e^-x).
inline __device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); };
// __global__ decode kernel: launched from the host, runs on the GPU.
// One thread per feature-map cell (idx spans batch * yoloWidth * yoloHeight).
// Each thread decodes CHECK_COUNT anchor boxes for its cell and appends every
// detection whose objectness passes IGNORE_THRESH into `output`; slot 0 of
// each batch's output chunk is an atomically-incremented detection count,
// followed by packed Detection records.
__global__ void CalDetection(const float *input, float *output, int noElements,
const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[CHECK_COUNT * 2], int classes, int outputElem)
{
    // threadIdx: thread index in block, blockIdx: block index, blockDim: block size.
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    // noElements = number of cells in this scale's feature map (times batch).
    if (idx >= noElements) return;
    // printf("threadIdx.x:%d, blockDim.x:%d, blockIdx.x:%d, noElements:%d\n", threadIdx.x, blockDim.x, blockIdx.x, noElements);
    int total_grid = yoloWidth * yoloHeight;
    int bnIdx = idx / total_grid;      // batch index
    idx = idx - total_grid * bnIdx;    // cell index within this batch's map
    int info_len_i = 5 + classes;      // per-anchor channel count: x,y,w,h,obj + classes
    const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
    for (int k = 0; k < CHECK_COUNT; ++k) {
        // Objectness score for anchor k of this cell (channel 4).
        float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
        if (box_prob < IGNORE_THRESH) continue;
        // Pick the class with the highest (class * objectness) probability.
        int class_id = 0;
        float max_cls_prob = 0.0;
        for (int i = 5; i < info_len_i; ++i) {
            float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]) * box_prob;
            if (p > max_cls_prob) {
                max_cls_prob = p;
                class_id = i - 5;
            }
        }
        // Reserve an output slot via the per-batch atomic counter.
        float *res_count = output + bnIdx * outputElem;
        int count = (int)atomicAdd(res_count, 1);
        if (count >= maxoutobject) return;
        char* data = (char *)res_count + sizeof(float) + count * sizeof(Detection);
        Detection* det = (Detection*)(data);
        int row = idx / yoloWidth;
        int col = idx % yoloWidth;
        //Location
        // pytorch:
        //  y = x[i].sigmoid()
        //  y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i]  # xy
        //  y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
        // X center: (2*sigmoid(tx) - 0.5 + cx) / FeaturemapW * netwidth
        det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth;
        det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight;
        // printf("k:%d yoloHeight:%d yoloWidth:%d idx:%d row:%d col:%d det->bbox[0]:%f det->bbox[1]:%f\n", k, yoloHeight, yoloWidth, idx, row, col, det->bbox[0], det->bbox[1]);
        // W/H: (2*sigmoid(t))^2 * anchor  (yolov5 box decoding)
        // v5: https://github.com/ultralytics/yolov5/issues/471
        det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]);
        det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k];
        det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]);
        det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1];
        det->conf = max_cls_prob;
        det->class_id = class_id;
    }
}
// Zero each batch's detection counter, then launch one decode kernel per
// output scale.
// NOTE(review): the `stream` parameter is never used — the memsets and kernel
// launches all go to the default stream; confirm whether enqueue() is expected
// to honor TensorRT's stream.  Also note mThreadCount (a member) is mutated
// here, which makes forwardGpu non-const-correct for concurrent contexts.
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, hipStream_t stream, int batchSize)
{
    int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float);
    // Only the leading count float of each batch chunk needs clearing.
    for (int idx = 0; idx < batchSize; ++idx) {
        CUDA_CHECK(hipMemset(output + idx * outputElem, 0, sizeof(float)));
    }
    int numElem = 0;
    for (unsigned int i = 0; i < mYoloKernel.size(); ++i)
    {
        const auto& yolo = mYoloKernel[i];
        numElem = yolo.width*yolo.height*batchSize; // cell count for this scale (x batch)
        if (numElem < mThreadCount)
            mThreadCount = numElem;
        // printf("Net: %d %d \n", mYoloV5NetWidth, mYoloV5NetHeight);
        hipLaunchKernelGGL(( CalDetection) , dim3((yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount), dim3(mThreadCount) , 0, 0,
            inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float *)mAnchor[i], mClassCount, outputElem);
    }
}
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream)
{
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
// Build a YoloLayerPlugin from the PluginFieldCollection assembled by the
// network builder.  Expected fields:
//   "netdata"   : 4 ints -> class count, input W, input H, max output objects
//   "yolodataN" : ints   -> feature-map W, H, then the anchors for head N
// Heads arrive named yolodata1..4 and are stored reversed so that
// yolo_kernels ends up ordered {xl, l, m, s}.
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
    // -1 sentinels mark "not seen yet" until the matching field is parsed.
    int class_count = -1;
    int input_w = -1;
    int input_h = -1;
    int max_output_object_count = -1;
    std::vector<Yolo::YoloKernel> yolo_kernels(4); // one per detection head (xl,l,m,s)
    const PluginField* fields = fc->fields; // each field: [name, data, type, length]
    // Extract the network input size and the per-head anchor parameters.
    for (int i = 0; i < fc->nbFields; i++) { // nbFields=5
        if (strcmp(fields[i].name, "netdata") == 0) { // global network parameters
            assert(fields[i].type == PluginFieldType::kFLOAT32);
            // NOTE(review): field type is declared kFLOAT32 but the payload is
            // read as int — confirm the producer really packs ints here.
            int *tmp = (int*)(fields[i].data);
            class_count = tmp[0];              // number of classes
            input_w = tmp[1];                  // network input width
            input_h = tmp[2];                  // network input height
            max_output_object_count = tmp[3];  // max detections emitted
        } else if (strstr(fields[i].name, "yolodata") != NULL) {
            assert(fields[i].type == PluginFieldType::kFLOAT32);
            int *tmp = (int*)(fields[i].data);
            YoloKernel kernel;
            kernel.width = tmp[0];   // feature-map width
            kernel.height = tmp[1];  // feature-map height
            for (int j = 0; j < fields[i].length - 2; j++) {
                kernel.anchors[j] = tmp[j + 2]; // anchor sizes
            }
            // fields[i].name[8] is the digit '1'..'4' (s,m,l,xl); reverse it
            // so index 0 is the xl head: 1,2,3,4 -> 0,1,2,3 -> 3,2,1,0.
            yolo_kernels[3 - (fields[i].name[8] - '1')] = kernel;
        }
    }
    // BUGFIX: the sentinels are -1, which is truthy, so the previous
    // `assert(class_count && ...)` could never detect a missing "netdata"
    // field.  Require every value to have been parsed to a positive number.
    assert(class_count > 0 && input_w > 0 && input_h > 0 && max_output_object_count > 0);
    YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, yolo_kernels);
    obj->setPluginNamespace(mNamespace.c_str());
    return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call YoloLayerPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
| a91c4424e2fced074acb121accc6057516dc444d.cu | #include <assert.h>
#include <vector>
#include <iostream>
#include "yololayer.h"
#include "cuda_utils.h"
// Tiny helpers for (de)serializing trivially-copyable values into a raw
// byte stream, advancing the caller's cursor as a side effect.
namespace Tn
{
    // Store `val` at the cursor and step the cursor past it.
    template<typename T>
    void write(char*& buffer, const T& val)
    {
        T* slot = reinterpret_cast<T*>(buffer);
        *slot = val;
        buffer += sizeof(T);
    }

    // Load one T from the cursor into `val` and step the cursor past it.
    template<typename T>
    void read(const char*& buffer, T& val)
    {
        const T* slot = reinterpret_cast<const T*>(buffer);
        val = *slot;
        buffer += sizeof(T);
    }
}
using namespace Yolo;
namespace nvinfer1
{
// Build the plugin from explicit parameters and copy each head's anchors to
// device memory (mAnchor is a pinned host array of device pointers, one per
// detection head).
YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel>& vYoloKernel)
{
    mClassCount = classCount;
    mYoloV5NetWidth = netWidth;
    mYoloV5NetHeight = netHeight;
    mMaxOutObject = maxOut;
    mYoloKernel = vYoloKernel; // one YoloKernel per head (xl,l,m,s): feature-map w, h, anchors
    mKernelCount = vYoloKernel.size(); // number of output scales (4)
    CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
    size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
    for (int ii = 0; ii < mKernelCount; ii++)
    {
        CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen));
        const auto& yolo = mYoloKernel[ii];
        CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
    }
}
YoloLayerPlugin::~YoloLayerPlugin()
{
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(cudaFree(mAnchor[ii]));
}
CUDA_CHECK(cudaFreeHost(mAnchor));
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
read(d, mYoloV5NetWidth);
read(d, mYoloV5NetHeight);
read(d, mMaxOutObject);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(mYoloKernel.data(), d, kernelSize);
d += kernelSize;
CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
}
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
write(d, mYoloV5NetWidth);
write(d, mYoloV5NetHeight);
write(d, mMaxOutObject);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(d, mYoloKernel.data(), kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject);
}
int YoloLayerPlugin::initialize()
{
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalsize = mMaxOutObject * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() {}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel);
p->setPluginNamespace(mPluginNamespace);
return p;
}
// GPU调用
inline __device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); };
// __global__ decode kernel: launched from the host, runs on the GPU.
// One thread per feature-map cell (idx spans batch * yoloWidth * yoloHeight).
// Each thread decodes CHECK_COUNT anchor boxes for its cell and appends every
// detection whose objectness passes IGNORE_THRESH into `output`; slot 0 of
// each batch's output chunk is an atomically-incremented detection count,
// followed by packed Detection records.
__global__ void CalDetection(const float *input, float *output, int noElements,
const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[CHECK_COUNT * 2], int classes, int outputElem)
{
    // threadIdx: thread index in block, blockIdx: block index, blockDim: block size.
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    // noElements = number of cells in this scale's feature map (times batch).
    if (idx >= noElements) return;
    // printf("threadIdx.x:%d, blockDim.x:%d, blockIdx.x:%d, noElements:%d\n", threadIdx.x, blockDim.x, blockIdx.x, noElements);
    int total_grid = yoloWidth * yoloHeight;
    int bnIdx = idx / total_grid;      // batch index
    idx = idx - total_grid * bnIdx;    // cell index within this batch's map
    int info_len_i = 5 + classes;      // per-anchor channel count: x,y,w,h,obj + classes
    const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
    for (int k = 0; k < CHECK_COUNT; ++k) {
        // Objectness score for anchor k of this cell (channel 4).
        float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
        if (box_prob < IGNORE_THRESH) continue;
        // Pick the class with the highest (class * objectness) probability.
        int class_id = 0;
        float max_cls_prob = 0.0;
        for (int i = 5; i < info_len_i; ++i) {
            float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]) * box_prob;
            if (p > max_cls_prob) {
                max_cls_prob = p;
                class_id = i - 5;
            }
        }
        // Reserve an output slot via the per-batch atomic counter.
        float *res_count = output + bnIdx * outputElem;
        int count = (int)atomicAdd(res_count, 1);
        if (count >= maxoutobject) return;
        char* data = (char *)res_count + sizeof(float) + count * sizeof(Detection);
        Detection* det = (Detection*)(data);
        int row = idx / yoloWidth;
        int col = idx % yoloWidth;
        //Location
        // pytorch:
        //  y = x[i].sigmoid()
        //  y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i]  # xy
        //  y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
        // X center: (2*sigmoid(tx) - 0.5 + cx) / FeaturemapW * netwidth
        det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth;
        det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight;
        // printf("k:%d yoloHeight:%d yoloWidth:%d idx:%d row:%d col:%d det->bbox[0]:%f det->bbox[1]:%f\n", k, yoloHeight, yoloWidth, idx, row, col, det->bbox[0], det->bbox[1]);
        // W/H: (2*sigmoid(t))^2 * anchor  (yolov5 box decoding)
        // v5: https://github.com/ultralytics/yolov5/issues/471
        det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]);
        det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k];
        det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]);
        det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1];
        det->conf = max_cls_prob;
        det->class_id = class_id;
    }
}
// Zero each batch's detection counter, then launch one decode kernel per
// output scale.
// NOTE(review): the `stream` parameter is never used — the memsets and kernel
// launches all go to the default stream; confirm whether enqueue() is expected
// to honor TensorRT's stream.  Also note mThreadCount (a member) is mutated
// here, which makes forwardGpu non-const-correct for concurrent contexts.
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, cudaStream_t stream, int batchSize)
{
    int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float);
    // Only the leading count float of each batch chunk needs clearing.
    for (int idx = 0; idx < batchSize; ++idx) {
        CUDA_CHECK(cudaMemset(output + idx * outputElem, 0, sizeof(float)));
    }
    int numElem = 0;
    for (unsigned int i = 0; i < mYoloKernel.size(); ++i)
    {
        const auto& yolo = mYoloKernel[i];
        numElem = yolo.width*yolo.height*batchSize; // cell count for this scale (x batch)
        if (numElem < mThreadCount)
            mThreadCount = numElem;
        // printf("Net: %d %d \n", mYoloV5NetWidth, mYoloV5NetHeight);
        CalDetection <<< (yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount, mThreadCount >>>
            (inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float *)mAnchor[i], mClassCount, outputElem);
    }
}
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream)
{
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
// Build a YoloLayerPlugin from the PluginFieldCollection assembled by the
// network builder.  Expected fields:
//   "netdata"   : 4 ints -> class count, input W, input H, max output objects
//   "yolodataN" : ints   -> feature-map W, H, then the anchors for head N
// Heads arrive named yolodata1..4 and are stored reversed so that
// yolo_kernels ends up ordered {xl, l, m, s}.
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
    // -1 sentinels mark "not seen yet" until the matching field is parsed.
    int class_count = -1;
    int input_w = -1;
    int input_h = -1;
    int max_output_object_count = -1;
    std::vector<Yolo::YoloKernel> yolo_kernels(4); // one per detection head (xl,l,m,s)
    const PluginField* fields = fc->fields; // each field: [name, data, type, length]
    // Extract the network input size and the per-head anchor parameters.
    for (int i = 0; i < fc->nbFields; i++) { // nbFields=5
        if (strcmp(fields[i].name, "netdata") == 0) { // global network parameters
            assert(fields[i].type == PluginFieldType::kFLOAT32);
            // NOTE(review): field type is declared kFLOAT32 but the payload is
            // read as int — confirm the producer really packs ints here.
            int *tmp = (int*)(fields[i].data);
            class_count = tmp[0];              // number of classes
            input_w = tmp[1];                  // network input width
            input_h = tmp[2];                  // network input height
            max_output_object_count = tmp[3];  // max detections emitted
        } else if (strstr(fields[i].name, "yolodata") != NULL) {
            assert(fields[i].type == PluginFieldType::kFLOAT32);
            int *tmp = (int*)(fields[i].data);
            YoloKernel kernel;
            kernel.width = tmp[0];   // feature-map width
            kernel.height = tmp[1];  // feature-map height
            for (int j = 0; j < fields[i].length - 2; j++) {
                kernel.anchors[j] = tmp[j + 2]; // anchor sizes
            }
            // fields[i].name[8] is the digit '1'..'4' (s,m,l,xl); reverse it
            // so index 0 is the xl head: 1,2,3,4 -> 0,1,2,3 -> 3,2,1,0.
            yolo_kernels[3 - (fields[i].name[8] - '1')] = kernel;
        }
    }
    // BUGFIX: the sentinels are -1, which is truthy, so the previous
    // `assert(class_count && ...)` could never detect a missing "netdata"
    // field.  Require every value to have been parsed to a positive number.
    assert(class_count > 0 && input_w > 0 && input_h > 0 && max_output_object_count > 0);
    YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, yolo_kernels);
    obj->setPluginNamespace(mNamespace.c_str());
    return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call YoloLayerPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
|
4eb5b40c4e4f86a8b79d272456622422a0119c90.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include <algorithm>
#include <chrono>
#include <getopt.h>
#include <iostream>
#include <string>
#include <deque>
#include <mutex>
#include <future>
#include <thread>
#include <atomic>
#include <claragenomics/logging/logging.hpp>
#include <claragenomics/io/fasta_parser.hpp>
#include <claragenomics/utils/cudautils.hpp>
#include <claragenomics/utils/signed_integer_utils.hpp>
#include <claragenomics/cudamapper/index.hpp>
#include <claragenomics/cudamapper/matcher.hpp>
#include <claragenomics/cudamapper/overlapper.hpp>
#include "overlapper_triggered.hpp"
// Long-option table for getopt_long(); each entry maps a long flag to the
// short-option character handled in the main() option switch.
static struct option options[] = {
    {"kmer-size", required_argument, 0, 'k'},
    {"window-size", required_argument, 0, 'w'},
    {"num-devices", required_argument, 0, 'd'},
    {"max-index-cache-size", required_argument, 0, 'c'},
    {"max-cached-memory", required_argument, 0, 'm'},
    {"index-size", required_argument, 0, 'i'},
    {"target-index-size", required_argument, 0, 't'},
    {"filtering-parameter", required_argument, 0, 'F'},
    {"help", no_argument, 0, 'h'},
    // BUGFIX: getopt_long() requires the longopts array to be terminated by
    // an element containing all zeros; without it the scan reads past the
    // end of the table (undefined behavior on unrecognized long options).
    {0, 0, 0, 0},
};
void help(int32_t exit_code);
int main(int argc, char* argv[])
{
using claragenomics::get_size;
claragenomics::logging::Init();
uint32_t k = 15; // k
uint32_t w = 15; // w
std::int32_t num_devices = 1; // d
std::int32_t max_index_cache_size = 100; // c
std::int32_t max_cached_memory = 1; // m
std::int32_t index_size = 10000; // i
std::int32_t target_index_size = 10000; // t
double filtering_parameter = 1.0; // F
std::string optstring = "k:w:d:c:m:i:t:F:h:";
int32_t argument = 0;
while ((argument = getopt_long(argc, argv, optstring.c_str(), options, nullptr)) != -1)
{
switch (argument)
{
case 'k':
k = atoi(optarg);
break;
case 'w':
w = atoi(optarg);
break;
case 'd':
num_devices = atoi(optarg);
break;
case 'c':
max_index_cache_size = atoi(optarg);
break;
case 'm':
max_cached_memory = atoi(optarg);
break;
case 'i':
index_size = atoi(optarg);
break;
case 't':
target_index_size = atoi(optarg);
break;
case 'F':
filtering_parameter = atof(optarg);
break;
case 'h':
help(0);
default:
exit(1);
}
}
if (k > claragenomics::cudamapper::Index::maximum_kmer_size())
{
std::cerr << "kmer of size " << k << " is not allowed, maximum k = " << claragenomics::cudamapper::Index::maximum_kmer_size() << std::endl;
exit(1);
}
if (filtering_parameter > 1.0 || filtering_parameter < 0.0)
{
std::cerr << "-F / --filtering-parameter must be in range [0.0, 1.0]" << std::endl;
exit(1);
}
if (max_cached_memory <= 0)
{
std::cerr << "-m / --max-cached-memory must be larger than zero" << std::endl;
exit(1);
}
// Check remaining argument count.
if ((argc - optind) < 2)
{
std::cerr << "Invalid inputs. Please refer to the help function." << std::endl;
help(1);
}
std::string query_filepath = std::string(argv[optind++]);
std::string target_filepath = std::string(argv[optind++]);
bool all_to_all = false;
if (query_filepath == target_filepath)
{
all_to_all = true;
target_index_size = index_size;
std::cerr << "NOTE - Since query and target files are same, activating all_to_all mode. Query index size used for both files." << std::endl;
}
std::unique_ptr<claragenomics::io::FastaParser> query_parser = claragenomics::io::create_fasta_parser(query_filepath);
int32_t queries = query_parser->get_num_seqences();
std::unique_ptr<claragenomics::io::FastaParser> target_parser = claragenomics::io::create_fasta_parser(target_filepath);
int32_t targets = target_parser->get_num_seqences();
std::cerr << "Query " << query_filepath << " index " << queries << std::endl;
std::cerr << "Target " << target_filepath << " index " << targets << std::endl;
// Data structure for holding overlaps to be written out
std::mutex overlaps_writer_mtx;
struct query_target_range
{
std::pair<std::int32_t, int32_t> query_range;
std::vector<std::pair<std::int32_t, int32_t>> target_ranges;
};
//First generate all the ranges independently, then loop over them.
std::vector<query_target_range> query_target_ranges;
for (std::int32_t query_start_index = 0; query_start_index < queries; query_start_index += index_size)
{
std::int32_t query_end_index = ::min(query_start_index + index_size, queries);
query_target_range q;
q.query_range = std::make_pair(query_start_index, query_end_index);
std::int32_t target_start_index = 0;
// If all_to_all mode, then we can optimzie by starting the target sequences from the same index as
// query because all indices before the current query index are guaranteed to have been processed in
// a2a mapping.
if (all_to_all)
{
target_start_index = query_start_index;
}
for (; target_start_index < targets; target_start_index += target_index_size)
{
std::int32_t target_end_index = ::min(target_start_index + target_index_size,
targets);
q.target_ranges.push_back(std::make_pair(target_start_index, target_end_index));
}
query_target_ranges.push_back(q);
}
// This is a per-device cache, if it has the index it will return it, if not it will generate it, store and return it.
std::vector<std::map<std::pair<uint64_t, uint64_t>, std::shared_ptr<claragenomics::cudamapper::Index>>> index_cache(num_devices);
auto get_index = [&index_cache, max_index_cache_size](std::shared_ptr<claragenomics::DeviceAllocator> allocator,
claragenomics::io::FastaParser& parser,
const claragenomics::cudamapper::read_id_t start_index,
const claragenomics::cudamapper::read_id_t end_index,
const std::uint64_t k,
const std::uint64_t w,
const int device_id,
const bool allow_cache_index) {
CGA_NVTX_RANGE(profiler, "get index");
std::pair<uint64_t, uint64_t> key;
key.first = start_index;
key.second = end_index;
std::shared_ptr<claragenomics::cudamapper::Index> index;
if (index_cache[device_id].count(key))
{
index = index_cache[device_id][key];
}
else
{
index = std::move(claragenomics::cudamapper::Index::create_index(allocator, parser, start_index, end_index, k, w));
// If in all-to-all mode, put this query in the cache for later use.
// Cache eviction is handled later on by the calling thread
// using the evict_index function.
if (get_size<int32_t>(index_cache[device_id]) < max_index_cache_size && allow_cache_index)
{
index_cache[device_id][key] = index;
}
}
return index;
};
// When performing all-to-all mapping, indices are instantitated as start-end-ranges in the reads.
// As such, once a query index has been used it will not be needed again. For example, parsing ranges
// [0-999], [1000-1999], [2000-2999], the caching/eviction would be as follows:
//
// Round 1
// Query: [0-999] - Enter cache
// Target: [1000-1999] - Enter cache
// Target: [1999 - 2999] - Enter cache
// Evict [0-999]
// Round 2
// Query: [1000-1999] - Use cache entry (from previous use when now query was a target)
// Etc..
auto evict_index = [&index_cache](const claragenomics::cudamapper::read_id_t query_start_index,
const claragenomics::cudamapper::read_id_t query_end_index,
const int device_id) {
std::pair<uint64_t, uint64_t> key;
key.first = query_start_index;
key.second = query_end_index;
index_cache[device_id].erase(key);
};
#ifdef CGA_ENABLE_ALLOCATOR
auto max_cached_bytes = max_cached_memory * 1e9; // max_cached_memory is in GB
std::shared_ptr<claragenomics::DeviceAllocator> allocator(new claragenomics::CachingDeviceAllocator(max_cached_bytes));
#else
std::shared_ptr<claragenomics::DeviceAllocator> allocator(new claragenomics::CudaMallocAllocator());
#endif
auto compute_overlaps = [&](const query_target_range query_target_range, const int device_id) {
std::vector<std::shared_ptr<std::future<void>>> print_pafs_futures;
hipSetDevice(device_id);
auto query_start_index = query_target_range.query_range.first;
auto query_end_index = query_target_range.query_range.second;
std::cerr << "Processing query range: (" << query_start_index << " - " << query_end_index - 1 << ")" << std::endl;
std::shared_ptr<claragenomics::cudamapper::Index> query_index(nullptr);
std::shared_ptr<claragenomics::cudamapper::Index> target_index(nullptr);
std::unique_ptr<claragenomics::cudamapper::Matcher> matcher(nullptr);
{
CGA_NVTX_RANGE(profiler, "generate_query_index");
query_index = get_index(allocator, *query_parser, query_start_index, query_end_index, k, w, device_id, all_to_all);
}
//Main loop
for (const auto target_range : query_target_range.target_ranges)
{
auto target_start_index = target_range.first;
auto target_end_index = target_range.second;
{
CGA_NVTX_RANGE(profiler, "generate_target_index");
target_index = get_index(allocator, *target_parser, target_start_index, target_end_index, k, w, device_id, true);
}
{
CGA_NVTX_RANGE(profiler, "generate_matcher");
matcher = claragenomics::cudamapper::Matcher::create_matcher(allocator, *query_index, *target_index);
}
{
claragenomics::cudamapper::OverlapperTriggered overlapper(allocator);
CGA_NVTX_RANGE(profiler, "generate_overlaps");
// Get unfiltered overlaps
std::vector<claragenomics::cudamapper::Overlap> overlaps_to_add;
overlapper.get_overlaps(overlaps_to_add, matcher->anchors(), *query_index, *target_index);
std::shared_ptr<std::future<void>> write_and_filter_overlaps_future = std::make_shared<std::future<void>>(std::async(
std::launch::async,
[&overlaps_writer_mtx, overlaps_to_add](std::vector<claragenomics::cudamapper::Overlap> overlaps) {
std::vector<claragenomics::cudamapper::Overlap> filtered_overlaps;
claragenomics::cudamapper::Overlapper::filter_overlaps(filtered_overlaps, overlaps_to_add);
std::lock_guard<std::mutex> lck(overlaps_writer_mtx);
claragenomics::cudamapper::Overlapper::print_paf(filtered_overlaps);
},
overlaps_to_add));
print_pafs_futures.push_back(write_and_filter_overlaps_future);
}
// reseting the matcher releases the anchor device array back to memory pool
matcher.reset();
}
// If all-to-all mapping query will no longer be needed on device, remove it from the cache
if (all_to_all)
{
evict_index(query_start_index, query_end_index, device_id);
}
return print_pafs_futures;
};
// The application (File parsing, index generation, overlap generation etc) is all launched from here.
// The main application works as follows:
// 1. Launch a worker thread per device (GPU).
// 2. Each worker takes target-query ranges off a queue
// 3. Each worker pushes vector of futures (since overlap writing is dispatched to an async thread on host). All futures are waited for before the main application exits.
std::vector<std::thread> workers;
std::atomic<int> ranges_idx(0);
std::vector<std::vector<std::shared_ptr<std::future<void>>>> overlap_futures;
std::mutex overlap_futures_mtx;
// Launch worker threads
for (int device_id = 0; device_id < num_devices; device_id++)
{
workers.push_back(std::thread(
[&, device_id]() {
while (ranges_idx < get_size<int>(query_target_ranges))
{
int range_idx = ranges_idx.fetch_add(1);
//Need to perform this check again for thread-safety
if (range_idx < get_size<int>(query_target_ranges))
{
auto overlap_future = compute_overlaps(query_target_ranges[range_idx], device_id);
std::lock_guard<std::mutex> lck(overlap_futures_mtx);
overlap_futures.push_back(overlap_future);
}
}
}));
}
// Wait for all per-device threads to terminate
std::for_each(workers.begin(), workers.end(), [](std::thread& t) {
t.join();
});
// Wait for all futures (for overlap writing) to return
for (auto& overlap_future : overlap_futures)
{
for (auto future : overlap_future)
{
future->wait();
}
}
return 0;
}
void help(int32_t exit_code = 0)
{
std::cerr <<
R"(Usage: cudamapper [options ...] <query_sequences> <target_sequences>
<sequences>
Input file in FASTA/FASTQ format (can be compressed with gzip)
containing sequences used for all-to-all overlapping
options:
-k, --kmer-size
length of kmer to use for minimizers [15] (Max=)"
<< claragenomics::cudamapper::Index::maximum_kmer_size() << ")"
<< R"(
-w, --window-size
length of window to use for minimizers [15])"
<< R"(
-d, --num-devices
number of GPUs to use [1])"
<< R"(
-c, --max-index-cache-size
number of indices to keep in GPU memory [100])"
<< R"(
-m, --max-cached-memory
maximum aggregate cached memory per device in GB [1])"
<< R"(
-i, --index-size
length of batch size used for query [10000])"
<< R"(
-t --target-index-size
length of batch sized used for target [10000])"
<< R"(
-F --filtering-parameter
filter all representations for which sketch_elements_with_that_representation/total_sketch_elements >= filtering_parameter), filtering disabled if filtering_parameter == 1.0 [1'000'000'001] (Min = 0.0, Max = 1.0))"
<< std::endl;
exit(exit_code);
}
| 4eb5b40c4e4f86a8b79d272456622422a0119c90.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include <algorithm>
#include <chrono>
#include <getopt.h>
#include <iostream>
#include <string>
#include <deque>
#include <mutex>
#include <future>
#include <thread>
#include <atomic>
#include <claragenomics/logging/logging.hpp>
#include <claragenomics/io/fasta_parser.hpp>
#include <claragenomics/utils/cudautils.hpp>
#include <claragenomics/utils/signed_integer_utils.hpp>
#include <claragenomics/cudamapper/index.hpp>
#include <claragenomics/cudamapper/matcher.hpp>
#include <claragenomics/cudamapper/overlapper.hpp>
#include "overlapper_triggered.hpp"
static struct option options[] = {
{"kmer-size", required_argument, 0, 'k'},
{"window-size", required_argument, 0, 'w'},
{"num-devices", required_argument, 0, 'd'},
{"max-index-cache-size", required_argument, 0, 'c'},
{"max-cached-memory", required_argument, 0, 'm'},
{"index-size", required_argument, 0, 'i'},
{"target-index-size", required_argument, 0, 't'},
{"filtering-parameter", required_argument, 0, 'F'},
{"help", no_argument, 0, 'h'},
};
void help(int32_t exit_code);
int main(int argc, char* argv[])
{
using claragenomics::get_size;
claragenomics::logging::Init();
uint32_t k = 15; // k
uint32_t w = 15; // w
std::int32_t num_devices = 1; // d
std::int32_t max_index_cache_size = 100; // c
std::int32_t max_cached_memory = 1; // m
std::int32_t index_size = 10000; // i
std::int32_t target_index_size = 10000; // t
double filtering_parameter = 1.0; // F
std::string optstring = "k:w:d:c:m:i:t:F:h:";
int32_t argument = 0;
while ((argument = getopt_long(argc, argv, optstring.c_str(), options, nullptr)) != -1)
{
switch (argument)
{
case 'k':
k = atoi(optarg);
break;
case 'w':
w = atoi(optarg);
break;
case 'd':
num_devices = atoi(optarg);
break;
case 'c':
max_index_cache_size = atoi(optarg);
break;
case 'm':
max_cached_memory = atoi(optarg);
break;
case 'i':
index_size = atoi(optarg);
break;
case 't':
target_index_size = atoi(optarg);
break;
case 'F':
filtering_parameter = atof(optarg);
break;
case 'h':
help(0);
default:
exit(1);
}
}
if (k > claragenomics::cudamapper::Index::maximum_kmer_size())
{
std::cerr << "kmer of size " << k << " is not allowed, maximum k = " << claragenomics::cudamapper::Index::maximum_kmer_size() << std::endl;
exit(1);
}
if (filtering_parameter > 1.0 || filtering_parameter < 0.0)
{
std::cerr << "-F / --filtering-parameter must be in range [0.0, 1.0]" << std::endl;
exit(1);
}
if (max_cached_memory <= 0)
{
std::cerr << "-m / --max-cached-memory must be larger than zero" << std::endl;
exit(1);
}
// Check remaining argument count.
if ((argc - optind) < 2)
{
std::cerr << "Invalid inputs. Please refer to the help function." << std::endl;
help(1);
}
std::string query_filepath = std::string(argv[optind++]);
std::string target_filepath = std::string(argv[optind++]);
bool all_to_all = false;
if (query_filepath == target_filepath)
{
all_to_all = true;
target_index_size = index_size;
std::cerr << "NOTE - Since query and target files are same, activating all_to_all mode. Query index size used for both files." << std::endl;
}
std::unique_ptr<claragenomics::io::FastaParser> query_parser = claragenomics::io::create_fasta_parser(query_filepath);
int32_t queries = query_parser->get_num_seqences();
std::unique_ptr<claragenomics::io::FastaParser> target_parser = claragenomics::io::create_fasta_parser(target_filepath);
int32_t targets = target_parser->get_num_seqences();
std::cerr << "Query " << query_filepath << " index " << queries << std::endl;
std::cerr << "Target " << target_filepath << " index " << targets << std::endl;
// Data structure for holding overlaps to be written out
std::mutex overlaps_writer_mtx;
struct query_target_range
{
std::pair<std::int32_t, int32_t> query_range;
std::vector<std::pair<std::int32_t, int32_t>> target_ranges;
};
//First generate all the ranges independently, then loop over them.
std::vector<query_target_range> query_target_ranges;
for (std::int32_t query_start_index = 0; query_start_index < queries; query_start_index += index_size)
{
std::int32_t query_end_index = std::min(query_start_index + index_size, queries);
query_target_range q;
q.query_range = std::make_pair(query_start_index, query_end_index);
std::int32_t target_start_index = 0;
// If all_to_all mode, then we can optimzie by starting the target sequences from the same index as
// query because all indices before the current query index are guaranteed to have been processed in
// a2a mapping.
if (all_to_all)
{
target_start_index = query_start_index;
}
for (; target_start_index < targets; target_start_index += target_index_size)
{
std::int32_t target_end_index = std::min(target_start_index + target_index_size,
targets);
q.target_ranges.push_back(std::make_pair(target_start_index, target_end_index));
}
query_target_ranges.push_back(q);
}
// This is a per-device cache, if it has the index it will return it, if not it will generate it, store and return it.
std::vector<std::map<std::pair<uint64_t, uint64_t>, std::shared_ptr<claragenomics::cudamapper::Index>>> index_cache(num_devices);
auto get_index = [&index_cache, max_index_cache_size](std::shared_ptr<claragenomics::DeviceAllocator> allocator,
claragenomics::io::FastaParser& parser,
const claragenomics::cudamapper::read_id_t start_index,
const claragenomics::cudamapper::read_id_t end_index,
const std::uint64_t k,
const std::uint64_t w,
const int device_id,
const bool allow_cache_index) {
CGA_NVTX_RANGE(profiler, "get index");
std::pair<uint64_t, uint64_t> key;
key.first = start_index;
key.second = end_index;
std::shared_ptr<claragenomics::cudamapper::Index> index;
if (index_cache[device_id].count(key))
{
index = index_cache[device_id][key];
}
else
{
index = std::move(claragenomics::cudamapper::Index::create_index(allocator, parser, start_index, end_index, k, w));
// If in all-to-all mode, put this query in the cache for later use.
// Cache eviction is handled later on by the calling thread
// using the evict_index function.
if (get_size<int32_t>(index_cache[device_id]) < max_index_cache_size && allow_cache_index)
{
index_cache[device_id][key] = index;
}
}
return index;
};
// When performing all-to-all mapping, indices are instantitated as start-end-ranges in the reads.
// As such, once a query index has been used it will not be needed again. For example, parsing ranges
// [0-999], [1000-1999], [2000-2999], the caching/eviction would be as follows:
//
// Round 1
// Query: [0-999] - Enter cache
// Target: [1000-1999] - Enter cache
// Target: [1999 - 2999] - Enter cache
// Evict [0-999]
// Round 2
// Query: [1000-1999] - Use cache entry (from previous use when now query was a target)
// Etc..
auto evict_index = [&index_cache](const claragenomics::cudamapper::read_id_t query_start_index,
const claragenomics::cudamapper::read_id_t query_end_index,
const int device_id) {
std::pair<uint64_t, uint64_t> key;
key.first = query_start_index;
key.second = query_end_index;
index_cache[device_id].erase(key);
};
#ifdef CGA_ENABLE_ALLOCATOR
auto max_cached_bytes = max_cached_memory * 1e9; // max_cached_memory is in GB
std::shared_ptr<claragenomics::DeviceAllocator> allocator(new claragenomics::CachingDeviceAllocator(max_cached_bytes));
#else
std::shared_ptr<claragenomics::DeviceAllocator> allocator(new claragenomics::CudaMallocAllocator());
#endif
auto compute_overlaps = [&](const query_target_range query_target_range, const int device_id) {
std::vector<std::shared_ptr<std::future<void>>> print_pafs_futures;
cudaSetDevice(device_id);
auto query_start_index = query_target_range.query_range.first;
auto query_end_index = query_target_range.query_range.second;
std::cerr << "Processing query range: (" << query_start_index << " - " << query_end_index - 1 << ")" << std::endl;
std::shared_ptr<claragenomics::cudamapper::Index> query_index(nullptr);
std::shared_ptr<claragenomics::cudamapper::Index> target_index(nullptr);
std::unique_ptr<claragenomics::cudamapper::Matcher> matcher(nullptr);
{
CGA_NVTX_RANGE(profiler, "generate_query_index");
query_index = get_index(allocator, *query_parser, query_start_index, query_end_index, k, w, device_id, all_to_all);
}
//Main loop
for (const auto target_range : query_target_range.target_ranges)
{
auto target_start_index = target_range.first;
auto target_end_index = target_range.second;
{
CGA_NVTX_RANGE(profiler, "generate_target_index");
target_index = get_index(allocator, *target_parser, target_start_index, target_end_index, k, w, device_id, true);
}
{
CGA_NVTX_RANGE(profiler, "generate_matcher");
matcher = claragenomics::cudamapper::Matcher::create_matcher(allocator, *query_index, *target_index);
}
{
claragenomics::cudamapper::OverlapperTriggered overlapper(allocator);
CGA_NVTX_RANGE(profiler, "generate_overlaps");
// Get unfiltered overlaps
std::vector<claragenomics::cudamapper::Overlap> overlaps_to_add;
overlapper.get_overlaps(overlaps_to_add, matcher->anchors(), *query_index, *target_index);
std::shared_ptr<std::future<void>> write_and_filter_overlaps_future = std::make_shared<std::future<void>>(std::async(
std::launch::async,
[&overlaps_writer_mtx, overlaps_to_add](std::vector<claragenomics::cudamapper::Overlap> overlaps) {
std::vector<claragenomics::cudamapper::Overlap> filtered_overlaps;
claragenomics::cudamapper::Overlapper::filter_overlaps(filtered_overlaps, overlaps_to_add);
std::lock_guard<std::mutex> lck(overlaps_writer_mtx);
claragenomics::cudamapper::Overlapper::print_paf(filtered_overlaps);
},
overlaps_to_add));
print_pafs_futures.push_back(write_and_filter_overlaps_future);
}
// reseting the matcher releases the anchor device array back to memory pool
matcher.reset();
}
// If all-to-all mapping query will no longer be needed on device, remove it from the cache
if (all_to_all)
{
evict_index(query_start_index, query_end_index, device_id);
}
return print_pafs_futures;
};
// The application (File parsing, index generation, overlap generation etc) is all launched from here.
// The main application works as follows:
// 1. Launch a worker thread per device (GPU).
// 2. Each worker takes target-query ranges off a queue
// 3. Each worker pushes vector of futures (since overlap writing is dispatched to an async thread on host). All futures are waited for before the main application exits.
std::vector<std::thread> workers;
std::atomic<int> ranges_idx(0);
std::vector<std::vector<std::shared_ptr<std::future<void>>>> overlap_futures;
std::mutex overlap_futures_mtx;
// Launch worker threads
for (int device_id = 0; device_id < num_devices; device_id++)
{
workers.push_back(std::thread(
[&, device_id]() {
while (ranges_idx < get_size<int>(query_target_ranges))
{
int range_idx = ranges_idx.fetch_add(1);
//Need to perform this check again for thread-safety
if (range_idx < get_size<int>(query_target_ranges))
{
auto overlap_future = compute_overlaps(query_target_ranges[range_idx], device_id);
std::lock_guard<std::mutex> lck(overlap_futures_mtx);
overlap_futures.push_back(overlap_future);
}
}
}));
}
// Wait for all per-device threads to terminate
std::for_each(workers.begin(), workers.end(), [](std::thread& t) {
t.join();
});
// Wait for all futures (for overlap writing) to return
for (auto& overlap_future : overlap_futures)
{
for (auto future : overlap_future)
{
future->wait();
}
}
return 0;
}
void help(int32_t exit_code = 0)
{
std::cerr <<
R"(Usage: cudamapper [options ...] <query_sequences> <target_sequences>
<sequences>
Input file in FASTA/FASTQ format (can be compressed with gzip)
containing sequences used for all-to-all overlapping
options:
-k, --kmer-size
length of kmer to use for minimizers [15] (Max=)"
<< claragenomics::cudamapper::Index::maximum_kmer_size() << ")"
<< R"(
-w, --window-size
length of window to use for minimizers [15])"
<< R"(
-d, --num-devices
number of GPUs to use [1])"
<< R"(
-c, --max-index-cache-size
number of indices to keep in GPU memory [100])"
<< R"(
-m, --max-cached-memory
maximum aggregate cached memory per device in GB [1])"
<< R"(
-i, --index-size
length of batch size used for query [10000])"
<< R"(
-t --target-index-size
length of batch sized used for target [10000])"
<< R"(
-F --filtering-parameter
filter all representations for which sketch_elements_with_that_representation/total_sketch_elements >= filtering_parameter), filtering disabled if filtering_parameter == 1.0 [1'000'000'001] (Min = 0.0, Max = 1.0))"
<< std::endl;
exit(exit_code);
}
|
c8b2b5c6d73774ffe173a54aa011de6457f1ace2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialAveragePooling.cu"
#else
#include <THHUNN/common.h>
static inline void THNN_(SpatialAveragePooling_shapeCheck)(
THCState *state,
THCTensor *input, THCTensor *gradOutput,
int kH, int kW, int dH, int dW, int padH, int padW, bool ceil_mode) {
THArgCheck(kW > 0 && kH > 0, 5,
"kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
THArgCheck(dW > 0 && dH > 0, 8,
"stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
int ndim = input->dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
THCUNN_argCheck(state, !input->is_empty() && (ndim == 3 || ndim == 4), 2, input,
"non-empty 3D or 4D input tensor expected but got: %s");
THArgCheck(kW/2 >= padW && kH/2 >= padH, 2,
"pad should be smaller than half of kernel size, but got "
"padW = %d, padH = %d, kW = %d, kH = %d",
padW, padH, kW, kH);
int64_t nInputPlane = input->size(dimh-1);
int64_t nInputRows = input->size(dimh);
int64_t nInputCols = input->size(dimw);
int64_t nOutputRows, nOutputCols;
int64_t nOutputPlane = nInputPlane;
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
if (nOutputCols < 1 || nOutputRows < 1)
THError("Given input size: (%dx%dx%d). "
"Calculated output size: (%dx%dx%d). Output size is too small",
nInputPlane,nInputRows,nInputCols,nInputPlane,nOutputRows,nOutputCols);
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane);
THCUNN_check_dim_size(state, gradOutput, ndim, dimh, nOutputRows);
THCUNN_check_dim_size(state, gradOutput, ndim, dimw, nOutputCols);
}
}
void THNN_(SpatialAveragePooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
int kW, int kH,
int dW, int dH,
int padW, int padH,
bool ceil_mode,
bool count_include_pad)
{
THCUNN_assertSameGPU(state, 2, input, output);
THNN_(SpatialAveragePooling_shapeCheck)
(state, input, NULL, kH, kW, dH, dW,
padH, padW, ceil_mode);
int64_t nInputCols, nInputRows, nInputPlane, batchSize;
int64_t nOutputCols, nOutputRows;
if (input->dim() == 3) {
nInputCols = input->size(2);
nInputRows = input->size(1);
nInputPlane = input->size(0);
batchSize = 1;
}
else
{
nInputCols = input->size(3);
nInputRows = input->size(2);
nInputPlane = input->size(1);
batchSize = input->size(0);
}
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
input = THCTensor_(newContiguous)(state, input);
scalar_t* input_data = THCTensor_(data)(state, input);
THCTensor_(resize4d)(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols);
scalar_t* output_data = THCTensor_(data)(state, output);
int count = THCTensor_(nElement)(state, output);
if(count_include_pad)
hipLaunchKernelGGL(( AvePoolForward<scalar_t, accreal, true>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
count, input_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW, output_data);
else
hipLaunchKernelGGL(( AvePoolForward<scalar_t, accreal, false>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
count, input_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW, output_data);
THCudaCheck(hipGetLastError());
if(input->dim() == 3)
THCTensor_(resize3d)(state, output, nInputPlane, nOutputRows, nOutputCols);
THCTensor_(free)(state, input);
}
void THNN_(SpatialAveragePooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
int kW, int kH,
int dW, int dH,
int padW, int padH,
bool ceil_mode,
bool count_include_pad)
{
THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput);
THNN_(SpatialAveragePooling_shapeCheck)
(state, input, gradOutput, kH, kW, dH, dW,
padH, padW, ceil_mode);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int64_t nInputCols, nInputRows, nInputPlane, batchSize;
int64_t nOutputCols, nOutputRows;
int dimCol = 2;
int dimRow = 1;
if (input->dim() == 3) {
nInputPlane = input->size(0);
batchSize = 1;
}
else
{
dimCol = 3;
dimRow = 2;
nInputPlane = input->size(1);
batchSize = input->size(0);
}
nInputCols = input->size(dimCol);
nInputRows = input->size(dimRow);
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
THCUNN_check_dim_size(state, gradOutput, input->dim(), dimRow, nOutputRows);
THCUNN_check_dim_size(state, gradOutput, input->dim(), dimCol, nOutputCols);
THCTensor_(resizeAs)(state, gradInput, input);
int count = THCTensor_(nElement)(state, input);
if(count_include_pad)
hipLaunchKernelGGL(( AvePoolBackward<scalar_t, accreal, true>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
count,
THCTensor_(data)(state, gradOutput),
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW,
THCTensor_(data)(state, gradInput));
else
hipLaunchKernelGGL(( AvePoolBackward<scalar_t, accreal, false>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
count,
THCTensor_(data)(state, gradOutput),
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW,
THCTensor_(data)(state, gradInput));
THCudaCheck(hipGetLastError());
// clean
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
#endif
| c8b2b5c6d73774ffe173a54aa011de6457f1ace2.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialAveragePooling.cu"
#else
#include <THCUNN/common.h>
static inline void THNN_(SpatialAveragePooling_shapeCheck)(
THCState *state,
THCTensor *input, THCTensor *gradOutput,
int kH, int kW, int dH, int dW, int padH, int padW, bool ceil_mode) {
THArgCheck(kW > 0 && kH > 0, 5,
"kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
THArgCheck(dW > 0 && dH > 0, 8,
"stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
int ndim = input->dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
THCUNN_argCheck(state, !input->is_empty() && (ndim == 3 || ndim == 4), 2, input,
"non-empty 3D or 4D input tensor expected but got: %s");
THArgCheck(kW/2 >= padW && kH/2 >= padH, 2,
"pad should be smaller than half of kernel size, but got "
"padW = %d, padH = %d, kW = %d, kH = %d",
padW, padH, kW, kH);
int64_t nInputPlane = input->size(dimh-1);
int64_t nInputRows = input->size(dimh);
int64_t nInputCols = input->size(dimw);
int64_t nOutputRows, nOutputCols;
int64_t nOutputPlane = nInputPlane;
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
if (nOutputCols < 1 || nOutputRows < 1)
THError("Given input size: (%dx%dx%d). "
"Calculated output size: (%dx%dx%d). Output size is too small",
nInputPlane,nInputRows,nInputCols,nInputPlane,nOutputRows,nOutputCols);
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane);
THCUNN_check_dim_size(state, gradOutput, ndim, dimh, nOutputRows);
THCUNN_check_dim_size(state, gradOutput, ndim, dimw, nOutputCols);
}
}
// Forward pass of 2D average pooling.
//
// input:  3D (C, H, W) or 4D (N, C, H, W) tensor; made contiguous below.
// output: resized here to the pooled shape (collapsed back to 3D for 3D input).
// kW/kH:  pooling window size; dW/dH: stride; padW/padH: implicit zero padding.
// ceil_mode: round the output extent up instead of down.
// count_include_pad: divide each window sum by the full kernel area rather than
//                    by the number of non-padded elements.
void THNN_(SpatialAveragePooling_updateOutput)(
           THCState *state,
           THCTensor *input,
           THCTensor *output,
           int kW, int kH,
           int dW, int dH,
           int padW, int padH,
           bool ceil_mode,
           bool count_include_pad)
{
  THCUNN_assertSameGPU(state, 2, input, output);
  THNN_(SpatialAveragePooling_shapeCheck)
       (state, input, NULL, kH, kW, dH, dW,
        padH, padW, ceil_mode);

  int64_t nInputCols, nInputRows, nInputPlane, batchSize;
  int64_t nOutputCols, nOutputRows;

  if (input->dim() == 3) {
    nInputCols = input->size(2);
    nInputRows = input->size(1);
    nInputPlane = input->size(0);
    batchSize = 1;
  }
  else
  {
    nInputCols = input->size(3);
    nInputRows = input->size(2);
    nInputPlane = input->size(1);
    batchSize = input->size(0);
  }

  // Output extent: (in - k + 2*pad) / stride + 1, rounded up or down.
  if(ceil_mode) {
    nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
    nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
  }
  else {
    nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
    nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
  }
  if (padW || padH)
  {
    // ensure that the last pooling starts inside the image
    // needed to avoid problems in ceil mode
    if ((nOutputRows - 1)*dH >= nInputRows + padH)
      --nOutputRows;
    if ((nOutputCols - 1)*dW >= nInputCols + padW)
      --nOutputCols;
  }

  input = THCTensor_(newContiguous)(state, input);
  scalar_t* input_data = THCTensor_(data)(state, input);

  THCTensor_(resize4d)(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols);
  scalar_t* output_data = THCTensor_(data)(state, output);

  // One thread per output element.
  int count = THCTensor_(nElement)(state, output);

  if(count_include_pad)
    AvePoolForward<scalar_t, accreal, true>
      <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>(
        count, input_data,
        batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
        kH, kW, dH, dW, padH, padW, output_data);
  else
    AvePoolForward<scalar_t, accreal, false>
      <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>(
        count, input_data,
        batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
        kH, kW, dH, dW, padH, padW, output_data);
  // This file is otherwise hipified (hipSetDevice, hipStreamCreate, ...);
  // use the hip runtime call for launch-error checking instead of the CUDA
  // name that hipify left behind.
  THCudaCheck(hipGetLastError());

  // Collapse the batch dimension again for non-batched input.
  if(input->dim() == 3)
    THCTensor_(resize3d)(state, output, nInputPlane, nOutputRows, nOutputCols);

  THCTensor_(free)(state, input);
}
// Backward pass of 2D average pooling: scatters gradOutput back over each
// pooling window into gradInput (resized to match input here).
// Parameter semantics match SpatialAveragePooling_updateOutput.
void THNN_(SpatialAveragePooling_updateGradInput)(
           THCState *state,
           THCTensor *input,
           THCTensor *gradOutput,
           THCTensor *gradInput,
           int kW, int kH,
           int dW, int dH,
           int padW, int padH,
           bool ceil_mode,
           bool count_include_pad)
{
  THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput);
  THNN_(SpatialAveragePooling_shapeCheck)
       (state, input, gradOutput, kH, kW, dH, dW,
        padH, padW, ceil_mode);

  input = THCTensor_(newContiguous)(state, input);
  gradOutput = THCTensor_(newContiguous)(state, gradOutput);

  int64_t nInputCols, nInputRows, nInputPlane, batchSize;
  int64_t nOutputCols, nOutputRows;
  int dimCol = 2;
  int dimRow = 1;

  if (input->dim() == 3) {
    nInputPlane = input->size(0);
    batchSize = 1;
  }
  else
  {
    dimCol = 3;
    dimRow = 2;
    nInputPlane = input->size(1);
    batchSize = input->size(0);
  }
  nInputCols = input->size(dimCol);
  nInputRows = input->size(dimRow);

  // Recompute the expected output extent so gradOutput's shape can be checked.
  if(ceil_mode) {
    nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
    nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
  }
  else {
    nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
    nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
  }
  if (padW || padH)
  {
    // ensure that the last pooling starts inside the image
    // needed to avoid problems in ceil mode
    if ((nOutputRows - 1)*dH >= nInputRows + padH)
      --nOutputRows;
    if ((nOutputCols - 1)*dW >= nInputCols + padW)
      --nOutputCols;
  }
  THCUNN_check_dim_size(state, gradOutput, input->dim(), dimRow, nOutputRows);
  THCUNN_check_dim_size(state, gradOutput, input->dim(), dimCol, nOutputCols);

  THCTensor_(resizeAs)(state, gradInput, input);

  // One thread per gradInput element.
  int count = THCTensor_(nElement)(state, input);

  if(count_include_pad)
    AvePoolBackward<scalar_t, accreal, true>
      <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>
        (count,
         THCTensor_(data)(state, gradOutput),
         batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
         kH, kW, dH, dW, padH, padW,
         THCTensor_(data)(state, gradInput));
  else
    AvePoolBackward<scalar_t, accreal, false>
      <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>
        (count,
         THCTensor_(data)(state, gradOutput),
         batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
         kH, kW, dH, dW, padH, padW,
         THCTensor_(data)(state, gradInput));
  // Hip runtime call instead of the CUDA name hipify left behind (the rest of
  // this hipified file consistently uses hip* APIs).
  THCudaCheck(hipGetLastError());

  // clean
  THCTensor_(free)(state, input);
  THCTensor_(free)(state, gradOutput);
}
#endif
|
18415e7cd5149fe0733aefa905d9cb0ee659d69a.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2023 XGBoost contributors
*/
#if defined(XGBOOST_USE_NCCL)
#include "nccl_device_communicator.cuh"
namespace xgboost {
namespace collective {
// Sets up an NCCL/RCCL communicator for this process on the given device.
// Also verifies (via device UUID exchange) that no two ranks in the group
// share the same physical GPU, which NCCL does not support.
NcclDeviceCommunicator::NcclDeviceCommunicator(int device_ordinal)
    : device_ordinal_{device_ordinal}, world_size_{GetWorldSize()}, rank_{GetRank()} {
  if (device_ordinal_ < 0) {
    LOG(FATAL) << "Invalid device ordinal: " << device_ordinal_;
  }
  // A single-worker group needs no communicator or stream at all; the
  // destructor and every collective use the same early-out.
  if (world_size_ == 1) {
    return;
  }

  // One kUuidLength-wide slot per rank; only this rank's slot is filled.
  std::vector<uint64_t> uuids(world_size_ * kUuidLength, 0);
  auto s_uuid = xgboost::common::Span<uint64_t>{uuids.data(), uuids.size()};
  auto s_this_uuid = s_uuid.subspan(rank_ * kUuidLength, kUuidLength);
  GetCudaUUID(s_this_uuid);

  // TODO(rongou): replace this with allgather.
  // Summing works as a gather because every slot except this rank's is zero.
  Allreduce(uuids.data(), uuids.size(), DataType::kUInt64, Operation::kSum);

  std::vector<xgboost::common::Span<uint64_t, kUuidLength>> converted(world_size_);
  size_t j = 0;
  for (size_t i = 0; i < uuids.size(); i += kUuidLength) {
    converted[j] = xgboost::common::Span<uint64_t, kUuidLength>{uuids.data() + i, kUuidLength};
    j++;
  }

  // NOTE(review): std::unique removes only *adjacent* duplicates, so this
  // check assumes ranks on the same device are consecutive — confirm.
  auto iter = std::unique(converted.begin(), converted.end());
  auto n_uniques = std::distance(converted.begin(), iter);

  CHECK_EQ(n_uniques, world_size_)
      << "Multiple processes within communication group running on same CUDA "
      << "device is not supported. " << PrintUUID(s_this_uuid) << "\n";

  // Device must be current before creating the communicator and its stream.
  nccl_unique_id_ = GetUniqueId();
  dh::safe_cuda(hipSetDevice(device_ordinal_));
  dh::safe_nccl(ncclCommInitRank(&nccl_comm_, world_size_, nccl_unique_id_, rank_));
  dh::safe_cuda(hipStreamCreate(&cuda_stream_));
}
// Tears down the stream and communicator created in the constructor and,
// at debug verbosity, reports aggregate allreduce statistics.
NcclDeviceCommunicator::~NcclDeviceCommunicator() {
  // Mirrors the constructor's early-out: nothing was created for one worker.
  if (world_size_ == 1) {
    return;
  }
  if (cuda_stream_) {
    dh::safe_cuda(hipStreamDestroy(cuda_stream_));
  }
  if (nccl_comm_) {
    dh::safe_nccl(ncclCommDestroy(nccl_comm_));
  }
  if (xgboost::ConsoleLogger::ShouldLog(xgboost::ConsoleLogger::LV::kDebug)) {
    LOG(CONSOLE) << "======== NCCL Statistics========";
    LOG(CONSOLE) << "AllReduce calls: " << allreduce_calls_;
    LOG(CONSOLE) << "AllReduce total MiB communicated: " << allreduce_bytes_ / 1048576;
  }
}
namespace {
// Translate the collective-framework DataType enum into the matching
// NCCL data type. Aborts (LOG(FATAL)) on an unrecognized value.
ncclDataType_t GetNcclDataType(DataType const &data_type) {
  switch (data_type) {
    case DataType::kInt8:
      return ncclInt8;
    case DataType::kUInt8:
      return ncclUint8;
    case DataType::kInt32:
      return ncclInt32;
    case DataType::kUInt32:
      return ncclUint32;
    case DataType::kInt64:
      return ncclInt64;
    case DataType::kUInt64:
      return ncclUint64;
    case DataType::kFloat:
      return ncclFloat;
    case DataType::kDouble:
      return ncclDouble;
    default:
      LOG(FATAL) << "Unknown data type.";
  }
  return ncclInt8;  // unreachable: LOG(FATAL) aborts; keeps the default result
}
// True exactly for the three bitwise reductions, which have no native
// NCCL reduction operator and are emulated via allgather + local reduce.
bool IsBitwiseOp(Operation const &op) {
  switch (op) {
    case Operation::kBitwiseAND:
    case Operation::kBitwiseOR:
    case Operation::kBitwiseXOR:
      return true;
    default:
      return false;
  }
}
// Translate a (non-bitwise) reduction Operation into the NCCL reduction
// operator. Aborts (LOG(FATAL)) on unsupported operations.
ncclRedOp_t GetNcclRedOp(Operation const &op) {
  switch (op) {
    case Operation::kMax:
      return ncclMax;
    case Operation::kMin:
      return ncclMin;
    case Operation::kSum:
      return ncclSum;
    default:
      LOG(FATAL) << "Unsupported reduce operation.";
  }
  return ncclMax;  // unreachable: LOG(FATAL) aborts; keeps the default result
}
// Locally combine the world_size per-rank copies laid out back-to-back in
// device_buffer (rank r's copy starts at r * size) into out_buffer, one byte
// per thread: out[idx] = func(...func(buf[0][idx], buf[1][idx])..., buf[w-1][idx]).
// `size` is the per-rank byte count; runs asynchronously on `stream`.
template <typename Func>
void RunBitwiseAllreduce(char *out_buffer, char const *device_buffer, Func func, int world_size,
                         std::size_t size, hipStream_t stream) {
  dh::LaunchN(size, stream, [=] __device__(std::size_t idx) {
    auto result = device_buffer[idx];
    for (auto rank = 1; rank < world_size; rank++) {
      result = func(result, device_buffer[rank * size + idx]);
    }
    out_buffer[idx] = result;
  });
}
} // anonymous namespace
// Emulates a bitwise allreduce (AND/OR/XOR), which NCCL lacks natively:
// allgather every rank's buffer, then reduce the copies byte-wise on-device,
// writing the result back into send_receive_buffer.
void NcclDeviceCommunicator::BitwiseAllReduce(void *send_receive_buffer, std::size_t count,
                                              DataType data_type, Operation op) {
  auto const size = count * GetTypeSize(data_type);
  // Temporary holding one `size`-byte copy per rank, rank-contiguous.
  dh::caching_device_vector<char> buffer(size * world_size_);
  auto *device_buffer = buffer.data().get();

  // First gather data from all the workers.
  dh::safe_nccl(ncclAllGather(send_receive_buffer, device_buffer, count, GetNcclDataType(data_type),
                              nccl_comm_, cuda_stream_));

  // Then reduce locally.
  auto *out_buffer = static_cast<char *>(send_receive_buffer);
  switch (op) {
    case Operation::kBitwiseAND:
      RunBitwiseAllreduce(out_buffer, device_buffer, thrust::bit_and<char>(), world_size_, size,
                          cuda_stream_);
      break;
    case Operation::kBitwiseOR:
      RunBitwiseAllreduce(out_buffer, device_buffer, thrust::bit_or<char>(), world_size_, size,
                          cuda_stream_);
      break;
    case Operation::kBitwiseXOR:
      RunBitwiseAllreduce(out_buffer, device_buffer, thrust::bit_xor<char>(), world_size_, size,
                          cuda_stream_);
      break;
    default:
      LOG(FATAL) << "Not a bitwise reduce operation.";
  }
}
// In-place allreduce over the group. Bitwise operations are routed through
// the allgather-based emulation; everything else maps to ncclAllReduce.
// Asynchronous on cuda_stream_; no-op for a single-worker group.
void NcclDeviceCommunicator::AllReduce(void *send_receive_buffer, std::size_t count,
                                       DataType data_type, Operation op) {
  if (world_size_ == 1) {
    return;
  }
  dh::safe_cuda(hipSetDevice(device_ordinal_));
  if (IsBitwiseOp(op)) {
    BitwiseAllReduce(send_receive_buffer, count, data_type, op);
  } else {
    dh::safe_nccl(ncclAllReduce(send_receive_buffer, send_receive_buffer, count,
                                GetNcclDataType(data_type), GetNcclRedOp(op), nccl_comm_,
                                cuda_stream_));
  }
  // Bookkeeping for the statistics reported in the destructor.
  allreduce_bytes_ += count * GetTypeSize(data_type);
  allreduce_calls_ += 1;
}
// Variable-length allgather: collects each rank's length_bytes-sized buffer
// into receive_buffer, rank by rank. On return, segments->at(i) holds rank
// i's byte count and receive_buffer holds the concatenated payloads.
// No-op for a single-worker group.
void NcclDeviceCommunicator::AllGatherV(void const *send_buffer, size_t length_bytes,
                                        std::vector<std::size_t> *segments,
                                        dh::caching_device_vector<char> *receive_buffer) {
  if (world_size_ == 1) {
    return;
  }
  dh::safe_cuda(hipSetDevice(device_ordinal_));

  // Exchange segment sizes: each rank fills only its own slot (others are 0),
  // so a max-allreduce reconstructs every rank's length.
  segments->clear();
  segments->resize(world_size_, 0);
  segments->at(rank_) = length_bytes;
  Allreduce(segments->data(), segments->size(), DataType::kUInt64, Operation::kMax);
  auto total_bytes = std::accumulate(segments->cbegin(), segments->cend(), 0UL);
  receive_buffer->resize(total_bytes);

  // One broadcast per rank; grouped so NCCL can overlap them. The send
  // pointer is only read when i == rank_ (this rank is the broadcast root).
  size_t offset = 0;
  dh::safe_nccl(ncclGroupStart());
  for (int32_t i = 0; i < world_size_; ++i) {
    size_t as_bytes = segments->at(i);
    dh::safe_nccl(ncclBroadcast(send_buffer, receive_buffer->data().get() + offset, as_bytes,
                                ncclChar, i, nccl_comm_, cuda_stream_));
    offset += as_bytes;
  }
  dh::safe_nccl(ncclGroupEnd());
}
// Block the host until every collective queued on this communicator's
// stream has finished. Single-worker groups created no stream, so there
// is nothing to wait on.
void NcclDeviceCommunicator::Synchronize() {
  if (world_size_ > 1) {
    dh::safe_cuda(hipSetDevice(device_ordinal_));
    dh::safe_cuda(hipStreamSynchronize(cuda_stream_));
  }
}
} // namespace collective
} // namespace xgboost
#endif
| 18415e7cd5149fe0733aefa905d9cb0ee659d69a.cu | /*!
* Copyright 2023 XGBoost contributors
*/
#if defined(XGBOOST_USE_NCCL)
#include "nccl_device_communicator.cuh"
namespace xgboost {
namespace collective {
// Sets up an NCCL communicator for this process on the given CUDA device and
// verifies (via device UUID exchange) that no two ranks share the same GPU.
NcclDeviceCommunicator::NcclDeviceCommunicator(int device_ordinal)
    : device_ordinal_{device_ordinal}, world_size_{GetWorldSize()}, rank_{GetRank()} {
  if (device_ordinal_ < 0) {
    LOG(FATAL) << "Invalid device ordinal: " << device_ordinal_;
  }
  // Single-worker groups need no communicator or stream.
  if (world_size_ == 1) {
    return;
  }

  // One kUuidLength-wide slot per rank; only this rank's slot is filled.
  std::vector<uint64_t> uuids(world_size_ * kUuidLength, 0);
  auto s_uuid = xgboost::common::Span<uint64_t>{uuids.data(), uuids.size()};
  auto s_this_uuid = s_uuid.subspan(rank_ * kUuidLength, kUuidLength);
  GetCudaUUID(s_this_uuid);

  // TODO(rongou): replace this with allgather.
  // Summing works as a gather because every slot except this rank's is zero.
  Allreduce(uuids.data(), uuids.size(), DataType::kUInt64, Operation::kSum);

  std::vector<xgboost::common::Span<uint64_t, kUuidLength>> converted(world_size_);
  size_t j = 0;
  for (size_t i = 0; i < uuids.size(); i += kUuidLength) {
    converted[j] = xgboost::common::Span<uint64_t, kUuidLength>{uuids.data() + i, kUuidLength};
    j++;
  }

  // NOTE(review): std::unique removes only *adjacent* duplicates, so this
  // check assumes ranks on the same device are consecutive — confirm.
  auto iter = std::unique(converted.begin(), converted.end());
  auto n_uniques = std::distance(converted.begin(), iter);

  CHECK_EQ(n_uniques, world_size_)
      << "Multiple processes within communication group running on same CUDA "
      << "device is not supported. " << PrintUUID(s_this_uuid) << "\n";

  // Device must be current before creating the communicator and its stream.
  nccl_unique_id_ = GetUniqueId();
  dh::safe_cuda(cudaSetDevice(device_ordinal_));
  dh::safe_nccl(ncclCommInitRank(&nccl_comm_, world_size_, nccl_unique_id_, rank_));
  dh::safe_cuda(cudaStreamCreate(&cuda_stream_));
}
// Tears down the stream and communicator created in the constructor and,
// at debug verbosity, reports aggregate allreduce statistics.
NcclDeviceCommunicator::~NcclDeviceCommunicator() {
  // Mirrors the constructor's early-out: nothing was created for one worker.
  if (world_size_ == 1) {
    return;
  }
  if (cuda_stream_) {
    dh::safe_cuda(cudaStreamDestroy(cuda_stream_));
  }
  if (nccl_comm_) {
    dh::safe_nccl(ncclCommDestroy(nccl_comm_));
  }
  if (xgboost::ConsoleLogger::ShouldLog(xgboost::ConsoleLogger::LV::kDebug)) {
    LOG(CONSOLE) << "======== NCCL Statistics========";
    LOG(CONSOLE) << "AllReduce calls: " << allreduce_calls_;
    LOG(CONSOLE) << "AllReduce total MiB communicated: " << allreduce_bytes_ / 1048576;
  }
}
namespace {
// Translate the collective-framework DataType enum into the matching
// NCCL data type. Aborts (LOG(FATAL)) on an unrecognized value.
ncclDataType_t GetNcclDataType(DataType const &data_type) {
  switch (data_type) {
    case DataType::kInt8:
      return ncclInt8;
    case DataType::kUInt8:
      return ncclUint8;
    case DataType::kInt32:
      return ncclInt32;
    case DataType::kUInt32:
      return ncclUint32;
    case DataType::kInt64:
      return ncclInt64;
    case DataType::kUInt64:
      return ncclUint64;
    case DataType::kFloat:
      return ncclFloat;
    case DataType::kDouble:
      return ncclDouble;
    default:
      LOG(FATAL) << "Unknown data type.";
  }
  return ncclInt8;  // unreachable: LOG(FATAL) aborts; keeps the default result
}
// True exactly for the three bitwise reductions, which have no native
// NCCL reduction operator and are emulated via allgather + local reduce.
bool IsBitwiseOp(Operation const &op) {
  switch (op) {
    case Operation::kBitwiseAND:
    case Operation::kBitwiseOR:
    case Operation::kBitwiseXOR:
      return true;
    default:
      return false;
  }
}
// Translate a (non-bitwise) reduction Operation into the NCCL reduction
// operator. Aborts (LOG(FATAL)) on unsupported operations.
ncclRedOp_t GetNcclRedOp(Operation const &op) {
  switch (op) {
    case Operation::kMax:
      return ncclMax;
    case Operation::kMin:
      return ncclMin;
    case Operation::kSum:
      return ncclSum;
    default:
      LOG(FATAL) << "Unsupported reduce operation.";
  }
  return ncclMax;  // unreachable: LOG(FATAL) aborts; keeps the default result
}
// Locally combine the world_size per-rank copies laid out back-to-back in
// device_buffer (rank r's copy starts at r * size) into out_buffer, one byte
// per thread: out[idx] = func(...func(buf[0][idx], buf[1][idx])..., buf[w-1][idx]).
// `size` is the per-rank byte count; runs asynchronously on `stream`.
template <typename Func>
void RunBitwiseAllreduce(char *out_buffer, char const *device_buffer, Func func, int world_size,
                         std::size_t size, cudaStream_t stream) {
  dh::LaunchN(size, stream, [=] __device__(std::size_t idx) {
    auto result = device_buffer[idx];
    for (auto rank = 1; rank < world_size; rank++) {
      result = func(result, device_buffer[rank * size + idx]);
    }
    out_buffer[idx] = result;
  });
}
} // anonymous namespace
// Emulates a bitwise allreduce (AND/OR/XOR), which NCCL lacks natively:
// allgather every rank's buffer, then reduce the copies byte-wise on-device,
// writing the result back into send_receive_buffer.
void NcclDeviceCommunicator::BitwiseAllReduce(void *send_receive_buffer, std::size_t count,
                                              DataType data_type, Operation op) {
  auto const size = count * GetTypeSize(data_type);
  // Temporary holding one `size`-byte copy per rank, rank-contiguous.
  dh::caching_device_vector<char> buffer(size * world_size_);
  auto *device_buffer = buffer.data().get();

  // First gather data from all the workers.
  dh::safe_nccl(ncclAllGather(send_receive_buffer, device_buffer, count, GetNcclDataType(data_type),
                              nccl_comm_, cuda_stream_));

  // Then reduce locally.
  auto *out_buffer = static_cast<char *>(send_receive_buffer);
  switch (op) {
    case Operation::kBitwiseAND:
      RunBitwiseAllreduce(out_buffer, device_buffer, thrust::bit_and<char>(), world_size_, size,
                          cuda_stream_);
      break;
    case Operation::kBitwiseOR:
      RunBitwiseAllreduce(out_buffer, device_buffer, thrust::bit_or<char>(), world_size_, size,
                          cuda_stream_);
      break;
    case Operation::kBitwiseXOR:
      RunBitwiseAllreduce(out_buffer, device_buffer, thrust::bit_xor<char>(), world_size_, size,
                          cuda_stream_);
      break;
    default:
      LOG(FATAL) << "Not a bitwise reduce operation.";
  }
}
// In-place allreduce over the group. Bitwise operations are routed through
// the allgather-based emulation; everything else maps to ncclAllReduce.
// Asynchronous on cuda_stream_; no-op for a single-worker group.
void NcclDeviceCommunicator::AllReduce(void *send_receive_buffer, std::size_t count,
                                       DataType data_type, Operation op) {
  if (world_size_ == 1) {
    return;
  }
  dh::safe_cuda(cudaSetDevice(device_ordinal_));
  if (IsBitwiseOp(op)) {
    BitwiseAllReduce(send_receive_buffer, count, data_type, op);
  } else {
    dh::safe_nccl(ncclAllReduce(send_receive_buffer, send_receive_buffer, count,
                                GetNcclDataType(data_type), GetNcclRedOp(op), nccl_comm_,
                                cuda_stream_));
  }
  // Bookkeeping for the statistics reported in the destructor.
  allreduce_bytes_ += count * GetTypeSize(data_type);
  allreduce_calls_ += 1;
}
// Variable-length allgather: collects each rank's length_bytes-sized buffer
// into receive_buffer, rank by rank. On return, segments->at(i) holds rank
// i's byte count and receive_buffer holds the concatenated payloads.
// No-op for a single-worker group.
void NcclDeviceCommunicator::AllGatherV(void const *send_buffer, size_t length_bytes,
                                        std::vector<std::size_t> *segments,
                                        dh::caching_device_vector<char> *receive_buffer) {
  if (world_size_ == 1) {
    return;
  }
  dh::safe_cuda(cudaSetDevice(device_ordinal_));

  // Exchange segment sizes: each rank fills only its own slot (others are 0),
  // so a max-allreduce reconstructs every rank's length.
  segments->clear();
  segments->resize(world_size_, 0);
  segments->at(rank_) = length_bytes;
  Allreduce(segments->data(), segments->size(), DataType::kUInt64, Operation::kMax);
  auto total_bytes = std::accumulate(segments->cbegin(), segments->cend(), 0UL);
  receive_buffer->resize(total_bytes);

  // One broadcast per rank; grouped so NCCL can overlap them. The send
  // pointer is only read when i == rank_ (this rank is the broadcast root).
  size_t offset = 0;
  dh::safe_nccl(ncclGroupStart());
  for (int32_t i = 0; i < world_size_; ++i) {
    size_t as_bytes = segments->at(i);
    dh::safe_nccl(ncclBroadcast(send_buffer, receive_buffer->data().get() + offset, as_bytes,
                                ncclChar, i, nccl_comm_, cuda_stream_));
    offset += as_bytes;
  }
  dh::safe_nccl(ncclGroupEnd());
}
// Block the host until every collective queued on this communicator's
// stream has finished. Single-worker groups created no stream, so there
// is nothing to wait on.
void NcclDeviceCommunicator::Synchronize() {
  if (world_size_ > 1) {
    dh::safe_cuda(cudaSetDevice(device_ordinal_));
    dh::safe_cuda(cudaStreamSynchronize(cuda_stream_));
  }
}
} // namespace collective
} // namespace xgboost
#endif
|
3f7084e5f68acce578b1ea14cca0e89a4ed8b0aa.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Implementing the FFT algorithm for general input
* Input should be fp32 vectors with size equals to the power of 4
* Number of vectors is given by BATCH (B)
* Recursive algorithm
* Base case is fft4
*/
// C includes
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
// CUDA includes
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <hip/hip_fp16.h>
#include "nvidia_helper/checkCudaErrors.h"
// Matrix and vector
#include "helper/my_vector.h"
#include "helper/my_matrix.h"
#include "helper/my_const.h"
// Utility programs
#include "util/debug_fp32_to_fp16.h"
#include "util/fourier_matrix_4.h"
#include "util/debug_fft4.h"
#define PI 3.14159265
const float UPPER_BOUND = 1.0f;
const int BATCH = 4;
const int SIZE = 16;
extern fft::MatrixH F4_re;
extern fft::MatrixH F4_im;
fft::MatrixF buffer_m1;
fft::MatrixF buffer_m2;
float* buffer;
__global__ void multiply_twiddle(int N, int m, int n, float* matrix_re, float* matrix_im)
{
    /*
     * Multiply every element of the m x n input matrix (column-major, m = N/4
     * rows per column at the call site) with the twiddle factor w = e^{-2*pi*i*j/N}:
     * Re.element(i, j) [0 based] = xre * cos(2pi/N * i * j) + xim * sin(2pi/N * i * j)
     * Im.element(i, j) [0 based] = -xre * sin(2pi/N * i * j) + xim * cos(2pi/N * i * j)
     * Block and thread layout should be 2D; out-of-range threads simply exit.
     */
    // Calculate position (0 based)
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int i = blockIdx.y * blockDim.y + threadIdx.y;

    if (i < m && j < n){
        // Per-thread local variables
        int index = j * m + i;
        // Use single-precision transcendentals: PI is a double #define, so the
        // original `cos(2 * PI / N * ...)` silently promoted to double math on
        // the device. Everything here is fp32 data, so fp32 math suffices.
        // NOTE(review): i * j is computed in int — fine for the sizes used
        // here, but would overflow for very large N; confirm if generalized.
        float angle = 2.0f * (float)PI / (float)N * (float)(i * j);
        float tw_re = cosf(angle);
        float tw_im = sinf(angle);
        float result_re = matrix_re[index] * tw_re + matrix_im[index] * tw_im;
        float result_im = -1.0f * matrix_re[index] * tw_im + matrix_im[index] * tw_re;
        matrix_re[index] = result_re;
        matrix_im[index] = result_im;
    }
}
// Recursive radix-4 FFT over B column vectors of length N (N a power of 4).
// X_re/X_im hold the inputs, FX_re/FX_im receive the transforms. Uses the
// module-level scratch `buffer`, `buffer_m1`, `buffer_m2`, which are swapped
// with the output arrays instead of copied. Heavily instrumented with debug
// printf output. Returns FFT_SUCCESS / FFT_FAILURE.
FFT_S gfft(int N, int B, fft::MatrixF& X_re, fft::MatrixF& X_im, fft::MatrixF& FX_re, fft::MatrixF& FX_im)
{
    FFT_S fft_status;

    printf("_____calling gfft______: \n N=%d, B=%d\n", N, B);
    for (int j = 1; j <= B; j++){
        printf("Input vector %d: \n", j);
        for (int i = 1; i <= N; i++){
            printf("X[%d] = (%.10f, %.10f) \n", i, X_re.element(i, j), X_im.element(i, j));
        }
    }

    // Base case: delegate directly to the hand-written radix-4 butterfly.
    if (N == 4) {
        fft_status = fft4(B, X_re, X_im, FX_re, FX_im);
        return fft_status;
    }

    // cublas variable declaration
    hipblasStatus_t status;
    hipblasHandle_t handle;
    // Scaling variables
    float alpha = 1.0f, beta = 0.0f;
    // Temporary variables for intermediate result swapping
    float* temp;

    // Initialize cublas
    status = hipblasCreate(&handle);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! CUBLAS initialization error\n");
        return FFT_FAILURE;
    }

    // Reshape input and output matrix: (N -(Reshape)->4*(N/4)) * B
    X_re.width = X_re.width * N / 4; X_re.height = 4;
    X_im.width = X_im.width * N / 4; X_im.height = 4;
    FX_re.width = FX_re.width * N / 4; FX_re.height = 4;
    FX_im.width = FX_im.width * N / 4; FX_im.height = 4;

    // Transpose input matrix: (4*(N/4) -(Transpose)-> (N/4)*4) * B
    // Store temporary result first in buffer, then in FX_re.array and FX_im.array
    //// Real matrix
    for (int j = 0; j < B; j++){
        status = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, N/4, 4, &alpha, X_re.array + j * N, 4,
                              &beta, X_re.array + j * N, 4, buffer + j * N, N/4);
        if (status != HIPBLAS_STATUS_SUCCESS) {
            fprintf(stderr, "!!!! CUBLAS kernel execution error (transpose real input).\n");
            return FFT_FAILURE;
        }
    }
    ////// Swap FX_re.array and buffer to store the transposition result in FX_re.array
    temp = FX_re.array; FX_re.array = buffer; buffer = temp;
    //// Imaginary matrix
    for (int j = 0; j < B; j++){
        status = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, N/4, 4, &alpha, X_im.array + j * N, 4,
                              &beta, X_im.array + j * N, 4, buffer + j * N, N/4);
        if (status != HIPBLAS_STATUS_SUCCESS) {
            fprintf(stderr, "!!!! CUBLAS kernel execution error (transpose imaginary input).\n");
            return FFT_FAILURE;
        }
    }
    ////// Swap FX_im.array and buffer to store the transposition result in FX_im.array
    temp = FX_im.array; FX_im.array = buffer; buffer = temp;

    hipDeviceSynchronize();

    // Recursively call gfft function on the N/4-point sub-transforms.
    buffer_m1.width =4; buffer_m2.width = 4; buffer_m1.height = 4; buffer_m2.height = 4;
    fft_status = gfft(N / 4, 4 * B, FX_re, FX_im, buffer_m1, buffer_m2);
    if (fft_status != FFT_SUCCESS){
        fprintf(stderr, "!!!!! Execution error (recursively call gfft).\n");
        return FFT_FAILURE;
    }
    temp = FX_re.array; FX_re.array = buffer_m1.array; buffer_m1.array = temp;
    temp = FX_im.array; FX_im.array = buffer_m2.array; buffer_m2.array = temp;

    // NOTE(review): these debug loops print FX_*.array[0..N-1] for every j,
    // i.e. the same first N elements B times — presumably intentional tracing.
    printf("_____After recursive______: \n");
    for (int j = 1; j <= B; j++){
        printf("Resulting vector %d: \n", j);
        for (int i = 0; i < N; i++){
            printf("FX[%d] = (%.10f, %.10f) \n", i, FX_re.array[i], FX_im.array[i]);
        }
    }

    // Multiplication with twiddle factors
    //// Set grid and block size
    dim3 threadsPerBlock(4, 16);
    dim3 numBlocks(1, (N + 63)/64); // Make sure blocks are enough
    //// Call kernel function
    for (int j = 0; j < B; j++){
        hipLaunchKernelGGL(( multiply_twiddle), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, N, N/4, 4, FX_re.array + j * N, FX_im.array + j * N);
    }

    printf("_____After combination______: \n");
    for (int j = 1; j <= B; j++){
        printf("Resulting vector %d: \n", j);
        for (int i = 0; i < N; i++){
            printf("FX[%d] = (%.10f, %.10f) \n", i, FX_re.array[i], FX_im.array[i]);
        }
    }

    hipDeviceSynchronize();

    // Transpose the matrix again
    // Store temporary result first in buffer, then in FX_re.array and FX_im.array
    //// Real matrix
    for (int j = 0; j < B; j++){
        status = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, 4, N/4, &alpha, FX_re.array + j * N, N/4,
                              &beta, FX_re.array + j * N, N/4, buffer + j * N, 4);
        if (status != HIPBLAS_STATUS_SUCCESS) {
            fprintf(stderr, "!!!! CUBLAS kernel execution error (intermediate transpose real).\n");
            return FFT_FAILURE;
        }
    }
    ////// Swap FX_re.array and buffer to store the transposition result in FX_re.array
    temp = FX_re.array; FX_re.array = buffer; buffer = temp;
    //// Imaginary matrix
    for (int j = 0; j < B; j++){
        status = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, 4, N/4, &alpha, FX_im.array + j * N, N/4,
                              &beta, FX_im.array + j * N, N/4, buffer + j * N, 4);
        if (status != HIPBLAS_STATUS_SUCCESS) {
            fprintf(stderr, "!!!! CUBLAS kernel execution error (intermediate transpose imaginary).\n");
            return FFT_FAILURE;
        }
    }
    ////// Swap FX_im.array and buffer to store the transposition result in FX_im.array
    temp = FX_im.array; FX_im.array = buffer; buffer = temp;

    hipDeviceSynchronize();

    printf("_____After Second Transpose______: \n");
    for (int j = 1; j <= B; j++){
        printf("Resulting vector %d: \n", j);
        for (int i = 0; i < N; i++){
            printf("FX[%d] = (%.10f, %.10f) \n", i, FX_re.array[i], FX_im.array[i]);
        }
    }

    // Call fft4 to combine the sub-transform results.
    buffer_m1.width =4; buffer_m2.width = 4; buffer_m1.height = 4; buffer_m2.height = 4;
    // BUG FIX: the last argument printed buffer_m1.width twice; it should be
    // buffer_m2.width to match the "%d, %d" pairs for each matrix.
    printf("Size: %d, %d, %d, %d, %d, %d, %d, %d\n", FX_re.height, FX_re.width, FX_im.height, FX_im.width, buffer_m1.height, buffer_m1.width, buffer_m2.height, buffer_m2.width);
    printf("_____Before final fft4______: \n");
    for (int j = 1; j <= B; j++){
        printf("Resulting vector %d: \n", j);
        for (int i = 1; i <= N; i++){
            printf("FX[%d] = (%.10f, %.10f) \n", i, FX_re.element(i, j), FX_im.element(i, j));
        }
    }
    fft_status = fft4(N / 4 * B, FX_re, FX_im, buffer_m1, buffer_m2);
    if (fft_status != FFT_SUCCESS){
        fprintf(stderr, "!!!!! Execution error (combine step calling fft4).\n");
        return FFT_FAILURE;
    }
    temp = FX_re.array; FX_re.array = buffer_m1.array; buffer_m1.array = temp;
    temp = FX_im.array; FX_im.array = buffer_m2.array; buffer_m2.array = temp;

    printf("_____After final fft4______: \n");
    for (int j = 1; j <= B; j++){
        printf("Resulting vector %d: \n", j);
        for (int i = 1; i <= N; i++){
            printf("FX[%d] = (%.10f, %.10f) \n", i, FX_re.element(i, j), FX_im.element(i, j));
        }
    }

    // Do the final transpose to get the output
    //// Real matrix
    for (int j = 0; j < B; j++){
        status = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, N/4, 4, &alpha, FX_re.array + j * N, 4,
                              &beta, FX_re.array + j * N, 4, buffer + j * N, N/4);
        if (status != HIPBLAS_STATUS_SUCCESS) {
            fprintf(stderr, "!!!! CUBLAS kernel execution error (final transpose real).\n");
            return FFT_FAILURE;
        }
    }
    ////// Swap FX_re.array and buffer to store the transposition result in FX_re.array
    temp = FX_re.array; FX_re.array = buffer; buffer = temp;
    //// Imaginary matrix
    for (int j = 0; j < B; j++){
        status = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, N/4, 4, &alpha, FX_im.array + j * N, 4,
                              &beta, FX_im.array + j * N, 4, buffer + j * N, N/4);
        if (status != HIPBLAS_STATUS_SUCCESS) {
            fprintf(stderr, "!!!! CUBLAS kernel execution error (final transpose imaginary).\n");
            return FFT_FAILURE;
        }
    }
    ////// Swap FX_im.array and buffer to store the transposition result in FX_im.array
    temp = FX_im.array; FX_im.array = buffer; buffer = temp;

    hipDeviceSynchronize();

    // Reshape back input and output matrix: (4*(N/4) --Reshape--> N) * B
    X_re.width = X_re.width * 4 / N; X_re.height = N;
    X_im.width = X_im.width * 4 / N; X_im.height = N;
    FX_re.width = FX_re.width * 4 / N; FX_re.height = N;
    FX_im.width = FX_im.width * 4 / N; FX_im.height = N;

    // Shutdown cublas
    // NOTE(review): the early error returns above leak `handle`; left as-is
    // because the program aborts on those paths anyway.
    status = hipblasDestroy(handle);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! shutdown error (A)\n");
        return FFT_FAILURE;
    }

    return FFT_SUCCESS;
}
// Driver: builds a SIZE x BATCH complex input in unified memory, runs gfft,
// prints the result, and releases every allocation (the original leaked the
// three module-level scratch buffers).
int main()
{
    int mem_size;

    // allocate unified memory for input matrix
    fft::MatrixF input_re;
    input_re.width = BATCH;
    input_re.height = SIZE;
    mem_size = input_re.width * input_re.height * sizeof(float);
    checkCudaErrors(hipMallocManaged((void **) &(input_re.array), mem_size));
    fft::MatrixF input_im;
    input_im.width = BATCH;
    input_im.height = SIZE;
    mem_size = input_im.width * input_im.height * sizeof(float);
    checkCudaErrors(hipMallocManaged((void **) &(input_im.array), mem_size));

    // Initialize the input matrix
    srand(time(NULL));
    printf("The input is: \n");
    for (int j = 1; j <= BATCH; j++){
        printf("Vector %d: \n", j);
        for (int i = 1; i <= SIZE; i++){
            input_re.element(i, j) = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND;
            input_im.element(i, j) = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND;
            // NOTE: the random values above are immediately overwritten with
            // the constant (1, 0) — kept as in the original for deterministic
            // debugging output.
            input_re.element(i, j) = (float)1.0f;
            input_im.element(i, j) = (float)0.0f;
            printf("X[%d] = (%.10f, %.10f) \n", i, input_re.element(i, j), input_im.element(i, j));
        }
        printf("\n");
    }

    // allocate unified memory for output matrix
    fft::MatrixF output_re;
    output_re.width = BATCH;
    output_re.height = SIZE;
    mem_size = output_re.width * output_re.height * sizeof(float);
    checkCudaErrors(hipMallocManaged((void **) &(output_re.array), mem_size));
    fft::MatrixF output_im;
    output_im.width = BATCH;
    output_im.height = SIZE;
    mem_size = output_im.width * output_im.height * sizeof(float);
    checkCudaErrors(hipMallocManaged((void **) &(output_im.array), mem_size));

    // allocate unified memory for the scratch areas used by gfft
    mem_size = SIZE * BATCH * sizeof(float);
    checkCudaErrors(hipMallocManaged((void **) &buffer, mem_size));
    buffer_m1.width = BATCH;
    buffer_m1.height = SIZE;
    mem_size = buffer_m1.width * buffer_m1.height * sizeof(float);
    checkCudaErrors(hipMallocManaged((void **) &(buffer_m1.array), mem_size));
    buffer_m2.width = BATCH;
    buffer_m2.height = SIZE;
    mem_size = buffer_m2.width * buffer_m2.height * sizeof(float);
    checkCudaErrors(hipMallocManaged((void **) &(buffer_m2.array), mem_size));

    FFT_S status;
    status = init_F4();
    if (status != FFT_SUCCESS){
        fprintf(stderr, "!!!!! Matrix initialization error (init Fourier matrix).\n");
        return FFT_FAILURE;
    }
    status = gfft(SIZE, BATCH, input_re, input_im, output_re, output_im);
    if (status != FFT_SUCCESS){
        printf("Error in running fft algorithm\n");
        exit(1);
    }

    printf("Result: \n");
    for (int j = 1; j <= BATCH; j++){
        printf("Resulting vector %d: \n", j);
        for (int i = 1; i <= SIZE; i++){
            printf("FX[%d] = (%.10f, %.10f) \n", i, output_re.element(i, j), output_im.element(i, j));
        }
    }

    // Release every unified-memory allocation. The original freed only the
    // input/output arrays and leaked buffer, buffer_m1 and buffer_m2.
    // gfft swaps these pointers around internally, but each variable still
    // holds exactly one allocation, so freeing all of them is correct.
    checkCudaErrors(hipFree(input_re.array));
    checkCudaErrors(hipFree(input_im.array));
    checkCudaErrors(hipFree(output_re.array));
    checkCudaErrors(hipFree(output_im.array));
    checkCudaErrors(hipFree(buffer));
    checkCudaErrors(hipFree(buffer_m1.array));
    checkCudaErrors(hipFree(buffer_m2.array));

    return 0;
}
| 3f7084e5f68acce578b1ea14cca0e89a4ed8b0aa.cu | /*
* Implementing the FFT algorithm for general input
* Input should be fp32 vectors with size equals to the power of 4
* Number of vectors is given by BATCH (B)
* Recursive algorithm
* Base case is fft4
*/
// C includes
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
// CUDA includes
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cuda_fp16.h>
#include "nvidia_helper/checkCudaErrors.h"
// Matrix and vector
#include "helper/my_vector.h"
#include "helper/my_matrix.h"
#include "helper/my_const.h"
// Utility programs
#include "util/debug_fp32_to_fp16.h"
#include "util/fourier_matrix_4.h"
#include "util/debug_fft4.h"
#define PI 3.14159265
const float UPPER_BOUND = 1.0f;
const int BATCH = 4;
const int SIZE = 16;
extern fft::MatrixH F4_re;
extern fft::MatrixH F4_im;
fft::MatrixF buffer_m1;
fft::MatrixF buffer_m2;
float* buffer;
__global__ void multiply_twiddle(int N, int m, int n, float* matrix_re, float* matrix_im)
{
    /*
     * Multiply every element of the m x n input matrix (column-major, m = N/4
     * rows per column at the call site) with the twiddle factor w = e^{-2*pi*i*j/N}:
     * Re.element(i, j) [0 based] = xre * cos(2pi/N * i * j) + xim * sin(2pi/N * i * j)
     * Im.element(i, j) [0 based] = -xre * sin(2pi/N * i * j) + xim * cos(2pi/N * i * j)
     * Block and thread layout should be 2D; out-of-range threads simply exit.
     */
    // Calculate position (0 based)
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int i = blockIdx.y * blockDim.y + threadIdx.y;

    if (i < m && j < n){
        // Per-thread local variables
        int index = j * m + i;
        // Use single-precision transcendentals: PI is a double #define, so the
        // original `cos(2 * PI / N * ...)` silently promoted to double math on
        // the device. Everything here is fp32 data, so fp32 math suffices.
        // NOTE(review): i * j is computed in int — fine for the sizes used
        // here, but would overflow for very large N; confirm if generalized.
        float angle = 2.0f * (float)PI / (float)N * (float)(i * j);
        float tw_re = cosf(angle);
        float tw_im = sinf(angle);
        float result_re = matrix_re[index] * tw_re + matrix_im[index] * tw_im;
        float result_im = -1.0f * matrix_re[index] * tw_im + matrix_im[index] * tw_re;
        matrix_re[index] = result_re;
        matrix_im[index] = result_im;
    }
}
/*
 * Recursive radix-4 FFT over B batched column vectors of length N.
 * Algorithm (classic four-step): reshape each length-N vector to a 4 x (N/4)
 * matrix, transpose, recursively transform the N/4-point sub-problems,
 * multiply by twiddle factors, transpose again, run a final batched 4-point
 * FFT (fft4), and transpose back.
 *
 * Parameters:
 *   N           - transform length; must be a power of 4 (base case N == 4).
 *   B           - number of batched vectors (columns of the matrices).
 *   X_re, X_im  - input real/imaginary parts (N x B, 1-based element()).
 *   FX_re, FX_im- output real/imaginary parts (N x B).
 * Returns FFT_SUCCESS or FFT_FAILURE.
 *
 * Uses the file-scope scratch storage `buffer`, `buffer_m1`, `buffer_m2`
 * (allocated in main), swapping raw array pointers to avoid copies.
 * NOTE(review): because the scratch buffers are globals, this function is not
 * reentrant/thread-safe — TODO confirm single-threaded use only.
 */
FFT_S gfft(int N, int B, fft::MatrixF& X_re, fft::MatrixF& X_im, fft::MatrixF& FX_re, fft::MatrixF& FX_im)
{
    FFT_S fft_status;

    // Debug trace of the input vectors (noisy; leftover diagnostics).
    printf("_____calling gfft______: \n N=%d, B=%d\n", N, B);
    for (int j = 1; j <= B; j++){
        printf("Input vector %d: \n", j);
        for (int i = 1; i <= N; i++){
            printf("X[%d] = (%.10f, %.10f) \n", i, X_re.element(i, j), X_im.element(i, j));
        }
    }

    // Base case: a 4-point transform is computed directly by fft4.
    if (N == 4) {
        fft_status = fft4(B, X_re, X_im, FX_re, FX_im);
        return fft_status;
    }

    // cuBLAS state used for the transpose (geam) steps below.
    cublasStatus_t status;
    cublasHandle_t handle;
    // geam computes C = alpha*op(A) + beta*op(B); beta == 0 gives a pure transpose.
    float alpha = 1.0f, beta = 0.0f;
    // Scratch pointer used to swap result arrays with the global buffer.
    float* temp;

    // Initialize cublas.
    status = cublasCreate(&handle);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! CUBLAS initialization error\n");
        return FFT_FAILURE;
    }

    // Reshape input and output matrices: (N -(Reshape)-> 4*(N/4)) * B.
    // NOTE(review): the error paths below do not roll these shape changes back,
    // so on FFT_FAILURE the caller's matrices are left reshaped — TODO confirm
    // callers discard them after a failure.
    X_re.width = X_re.width * N / 4; X_re.height = 4;
    X_im.width = X_im.width * N / 4; X_im.height = 4;
    FX_re.width = FX_re.width * N / 4; FX_re.height = 4;
    FX_im.width = FX_im.width * N / 4; FX_im.height = 4;

    // Transpose input matrix: (4*(N/4) -(Transpose)-> (N/4)*4) * B.
    // Store the result in `buffer` first, then swap it into FX_*.array.
    //// Real matrix
    for (int j = 0; j < B; j++){
        status = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, N/4, 4, &alpha, X_re.array + j * N, 4,
                             &beta, X_re.array + j * N, 4, buffer + j * N, N/4);
        if (status != CUBLAS_STATUS_SUCCESS) {
            fprintf(stderr, "!!!! CUBLAS kernel execution error (transpose real input).\n");
            cublasDestroy(handle); // bug fix: the handle used to leak on every error path
            return FFT_FAILURE;
        }
    }
    ////// Swap FX_re.array and buffer to store the transposition result in FX_re.array
    temp = FX_re.array; FX_re.array = buffer; buffer = temp;
    //// Imaginary matrix
    for (int j = 0; j < B; j++){
        status = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, N/4, 4, &alpha, X_im.array + j * N, 4,
                             &beta, X_im.array + j * N, 4, buffer + j * N, N/4);
        if (status != CUBLAS_STATUS_SUCCESS) {
            fprintf(stderr, "!!!! CUBLAS kernel execution error (transpose imaginary input).\n");
            cublasDestroy(handle);
            return FFT_FAILURE;
        }
    }
    ////// Swap FX_im.array and buffer to store the transposition result in FX_im.array
    temp = FX_im.array; FX_im.array = buffer; buffer = temp;
    cudaDeviceSynchronize();

    // Recursively transform the N/4-point sub-problems (4*B batches).
    buffer_m1.width = 4; buffer_m2.width = 4; buffer_m1.height = 4; buffer_m2.height = 4;
    fft_status = gfft(N / 4, 4 * B, FX_re, FX_im, buffer_m1, buffer_m2);
    if (fft_status != FFT_SUCCESS){
        fprintf(stderr, "!!!!! Execution error (recursively call gfft).\n");
        cublasDestroy(handle);
        return FFT_FAILURE;
    }
    // Swap the recursion's results back into FX_*.array.
    temp = FX_re.array; FX_re.array = buffer_m1.array; buffer_m1.array = temp;
    temp = FX_im.array; FX_im.array = buffer_m2.array; buffer_m2.array = temp;

    printf("_____After recursive______: \n");
    for (int j = 1; j <= B; j++){
        printf("Resulting vector %d: \n", j);
        for (int i = 0; i < N; i++){
            printf("FX[%d] = (%.10f, %.10f) \n", i, FX_re.array[i], FX_im.array[i]);
        }
    }

    // Multiplication with twiddle factors.
    //// Set grid and block size: 4 x 16 threads per block, enough blocks to cover N/4 rows.
    dim3 threadsPerBlock(4, 16);
    dim3 numBlocks(1, (N + 63)/64); // Make sure blocks are enough
    //// Call kernel function once per batch.
    // NOTE(review): kernel launch errors are not checked here (no
    // cudaGetLastError after the loop) — failures surface only at the next sync.
    for (int j = 0; j < B; j++){
        multiply_twiddle<<<numBlocks, threadsPerBlock>>>(N, N/4, 4, FX_re.array + j * N, FX_im.array + j * N);
    }

    printf("_____After combination______: \n");
    for (int j = 1; j <= B; j++){
        printf("Resulting vector %d: \n", j);
        for (int i = 0; i < N; i++){
            printf("FX[%d] = (%.10f, %.10f) \n", i, FX_re.array[i], FX_im.array[i]);
        }
    }
    cudaDeviceSynchronize();

    // Transpose the matrix again ((N/4)*4 -> 4*(N/4)), via `buffer`.
    //// Real matrix
    for (int j = 0; j < B; j++){
        status = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, 4, N/4, &alpha, FX_re.array + j * N, N/4,
                             &beta, FX_re.array + j * N, N/4, buffer + j * N, 4);
        if (status != CUBLAS_STATUS_SUCCESS) {
            fprintf(stderr, "!!!! CUBLAS kernel execution error (intermediate transpose real).\n");
            cublasDestroy(handle);
            return FFT_FAILURE;
        }
    }
    ////// Swap FX_re.array and buffer to store the transposition result in FX_re.array
    temp = FX_re.array; FX_re.array = buffer; buffer = temp;
    //// Imaginary matrix
    for (int j = 0; j < B; j++){
        status = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, 4, N/4, &alpha, FX_im.array + j * N, N/4,
                             &beta, FX_im.array + j * N, N/4, buffer + j * N, 4);
        if (status != CUBLAS_STATUS_SUCCESS) {
            fprintf(stderr, "!!!! CUBLAS kernel execution error (intermediate transpose imaginary).\n");
            cublasDestroy(handle);
            return FFT_FAILURE;
        }
    }
    ////// Swap FX_im.array and buffer to store the transposition result in FX_im.array
    temp = FX_im.array; FX_im.array = buffer; buffer = temp;
    cudaDeviceSynchronize();

    printf("_____After Second Transpose______: \n");
    for (int j = 1; j <= B; j++){
        printf("Resulting vector %d: \n", j);
        for (int i = 0; i < N; i++){
            printf("FX[%d] = (%.10f, %.10f) \n", i, FX_re.array[i], FX_im.array[i]);
        }
    }

    // Final batched 4-point FFT (N/4 * B batches of length 4).
    buffer_m1.width = 4; buffer_m2.width = 4; buffer_m1.height = 4; buffer_m2.height = 4;
    printf("Size: %d, %d, %d, %d, %d, %d, %d, %d\n", FX_re.height, FX_re.width, FX_im.height, FX_im.width, buffer_m1.height, buffer_m1.width, buffer_m2.height, buffer_m1.width);
    printf("_____Before final fft4______: \n");
    for (int j = 1; j <= B; j++){
        printf("Resulting vector %d: \n", j);
        for (int i = 1; i <= N; i++){
            printf("FX[%d] = (%.10f, %.10f) \n", i, FX_re.element(i, j), FX_im.element(i, j));
        }
    }
    fft_status = fft4(N / 4 * B, FX_re, FX_im, buffer_m1, buffer_m2);
    if (fft_status != FFT_SUCCESS){
        fprintf(stderr, "!!!!! Execution error (combine step calling fft4).\n");
        cublasDestroy(handle);
        return FFT_FAILURE;
    }
    temp = FX_re.array; FX_re.array = buffer_m1.array; buffer_m1.array = temp;
    temp = FX_im.array; FX_im.array = buffer_m2.array; buffer_m2.array = temp;

    printf("_____After final fft4______: \n");
    for (int j = 1; j <= B; j++){
        printf("Resulting vector %d: \n", j);
        for (int i = 1; i <= N; i++){
            printf("FX[%d] = (%.10f, %.10f) \n", i, FX_re.element(i, j), FX_im.element(i, j));
        }
    }

    // Final transpose to restore the natural layout of the output.
    //// Real matrix
    for (int j = 0; j < B; j++){
        status = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, N/4, 4, &alpha, FX_re.array + j * N, 4,
                             &beta, FX_re.array + j * N, 4, buffer + j * N, N/4);
        if (status != CUBLAS_STATUS_SUCCESS) {
            fprintf(stderr, "!!!! CUBLAS kernel execution error (final transpose real).\n");
            cublasDestroy(handle);
            return FFT_FAILURE;
        }
    }
    ////// Swap FX_re.array and buffer to store the transposition result in FX_re.array
    temp = FX_re.array; FX_re.array = buffer; buffer = temp;
    //// Imaginary matrix
    for (int j = 0; j < B; j++){
        status = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, N/4, 4, &alpha, FX_im.array + j * N, 4,
                             &beta, FX_im.array + j * N, 4, buffer + j * N, N/4);
        if (status != CUBLAS_STATUS_SUCCESS) {
            fprintf(stderr, "!!!! CUBLAS kernel execution error (final transpose imaginary).\n");
            cublasDestroy(handle);
            return FFT_FAILURE;
        }
    }
    ////// Swap FX_im.array and buffer to store the transposition result in FX_im.array
    temp = FX_im.array; FX_im.array = buffer; buffer = temp;
    cudaDeviceSynchronize();

    // Reshape the matrices back: (4*(N/4) --Reshape--> N) * B.
    X_re.width = X_re.width * 4 / N; X_re.height = N;
    X_im.width = X_im.width * 4 / N; X_im.height = N;
    FX_re.width = FX_re.width * 4 / N; FX_re.height = N;
    FX_im.width = FX_im.width * 4 / N; FX_im.height = N;

    // Shutdown cublas.
    status = cublasDestroy(handle);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "!!!! shutdown error (A)\n");
        return FFT_FAILURE;
    }

    return FFT_SUCCESS;
}
// Driver: builds a BATCH of SIZE-point complex inputs in unified memory,
// runs the recursive gfft, prints the result, and releases all allocations.
// Returns 0 on success; a non-zero FFT status code (or exit(1)) on failure.
int main()
{
    int mem_size; // byte counts below fit in int for the SIZE*BATCH used here — TODO confirm for larger runs

    // Allocate unified memory for the input matrix (one column per batch vector).
    fft::MatrixF input_re;
    input_re.width = BATCH;
    input_re.height = SIZE;
    mem_size = input_re.width * input_re.height * sizeof(float);
    checkCudaErrors(cudaMallocManaged((void **) &(input_re.array), mem_size));
    fft::MatrixF input_im;
    input_im.width = BATCH;
    input_im.height = SIZE;
    mem_size = input_im.width * input_im.height * sizeof(float);
    checkCudaErrors(cudaMallocManaged((void **) &(input_im.array), mem_size));

    // Initialize the input matrix.
    srand(time(NULL));
    printf("The input is: \n");
    for (int j = 1; j <= BATCH; j++){
        printf("Vector %d: \n", j);
        for (int i = 1; i <= SIZE; i++){
            input_re.element(i, j) = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND;
            input_im.element(i, j) = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND;
            // NOTE(review): the random values above are immediately overwritten
            // with a constant (1, 0) — this looks like leftover debug input.
            // Remove the next two lines to exercise random data.
            input_re.element(i, j) = (float)1.0f;
            input_im.element(i, j) = (float)0.0f;
            printf("X[%d] = (%.10f, %.10f) \n", i, input_re.element(i, j), input_im.element(i, j));
        }
        printf("\n");
    }

    // Allocate unified memory for the output matrix.
    fft::MatrixF output_re;
    output_re.width = BATCH;
    output_re.height = SIZE;
    mem_size = output_re.width * output_re.height * sizeof(float);
    checkCudaErrors(cudaMallocManaged((void **) &(output_re.array), mem_size));
    fft::MatrixF output_im;
    output_im.width = BATCH;
    output_im.height = SIZE;
    mem_size = output_im.width * output_im.height * sizeof(float);
    checkCudaErrors(cudaMallocManaged((void **) &(output_im.array), mem_size));

    // Allocate unified memory for the global scratch storage used by gfft.
    mem_size = SIZE * BATCH * sizeof(float);
    checkCudaErrors(cudaMallocManaged((void **) &buffer, mem_size));
    buffer_m1.width = BATCH;
    buffer_m1.height = SIZE;
    mem_size = buffer_m1.width * buffer_m1.height * sizeof(float);
    checkCudaErrors(cudaMallocManaged((void **) &(buffer_m1.array), mem_size));
    buffer_m2.width = BATCH;
    buffer_m2.height = SIZE;
    mem_size = buffer_m2.width * buffer_m2.height * sizeof(float);
    checkCudaErrors(cudaMallocManaged((void **) &(buffer_m2.array), mem_size));

    // Prepare the 4-point Fourier matrix, then run the transform.
    FFT_S status;
    status = init_F4();
    if (status != FFT_SUCCESS){
        fprintf(stderr, "!!!!! Matrix initialization error (init Fourier matrix).\n");
        return FFT_FAILURE;
    }
    status = gfft(SIZE, BATCH, input_re, input_im, output_re, output_im);
    if (status != FFT_SUCCESS){
        printf("Error in running fft algorithm\n");
        exit(1);
    }

    printf("Result: \n");
    for (int j = 1; j <= BATCH; j++){
        printf("Resulting vector %d: \n", j);
        for (int i = 1; i <= SIZE; i++){
            printf("FX[%d] = (%.10f, %.10f) \n", i, output_re.element(i, j), output_im.element(i, j));
        }
    }

    // Release all unified-memory allocations.
    checkCudaErrors(cudaFree(input_re.array));
    checkCudaErrors(cudaFree(input_im.array));
    checkCudaErrors(cudaFree(output_re.array));
    checkCudaErrors(cudaFree(output_im.array));
    // Bug fix: the scratch buffers below were previously leaked.
    checkCudaErrors(cudaFree(buffer));
    checkCudaErrors(cudaFree(buffer_m1.array));
    checkCudaErrors(cudaFree(buffer_m2.array));
    return 0;
}
|
d0a99a02266c9e73da45e1d4c59cfd92404678c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MatrixMult.h"
#include "MatrixMult.cu.h"
//WIDTH_A = U, HEIGHT_A = M, WIDTH_B = N
#define WIDTH_A 1024 //1024 //1024//2048
#define HEIGHT_A 1024 //2048//2048//2048
#define WIDTH_B 1024 //1536//4096//2048
#define TILE 16
/////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////
// Benchmarks dense matrix multiplication C = A * B, where A is HEIGHT_A x WIDTH_A
// and B is WIDTH_A x WIDTH_B: runs a sequential CPU reference, a tiled-layout
// GPU kernel (TILE x TILE blocks), reports GFlop/s, and validates GPU vs CPU.
int main() {
    // set seed for rand()
    srand(2006);

    // 1. allocate host memory for the two input matrices
    unsigned int size_A = WIDTH_A * HEIGHT_A;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float* h_A = (float*) malloc(mem_size_A);
    unsigned int size_B = WIDTH_B * WIDTH_A;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float* h_B = (float*) malloc(mem_size_B);

    // 2. initialize host memory with random values
    randomInit(h_A, size_A);
    randomInit(h_B, size_B);

    // 3. allocate device memory
    // NOTE(review): hipMalloc/hipMemcpy return codes are not checked here —
    // failures would only show up later as wrong results.
    float* d_A;
    float* d_B;
    hipMalloc((void**) &d_A, mem_size_A);
    hipMalloc((void**) &d_B, mem_size_B);

    // 4. copy host memory to device
    hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);

    // 5. allocate host memory for the GPU result (h_C) and CPU reference (seq_C)
    unsigned int size_C = HEIGHT_A * WIDTH_B;
    unsigned int mem_size_C = sizeof(float) * size_C;
    float* h_C = (float*) malloc(mem_size_C);
    float* seq_C = (float*) malloc(mem_size_C);

    // 6. allocate device memory for the result
    float* d_C;
    hipMalloc((void**) &d_C, mem_size_C);

    // 7. compute and time the sequential reference multiplication
    {
        unsigned long int elapsed;
        struct timeval t_start, t_end, t_diff;
        gettimeofday(&t_start, NULL);
        // WIDTH_A = U, HEIGHT_A = M, WIDTH_B = N
        matMult<float>(h_A, h_B, seq_C, WIDTH_A, HEIGHT_A, WIDTH_B);
        gettimeofday(&t_end, NULL);
        timeval_subtract(&t_diff, &t_end, &t_start);
        elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
        printf("Sequential Naive version runs in: %lu microsecs\n", elapsed);
    }

    // 8. launch the GPU kernel with a TILE x TILE block grid covering C
    int dimy = ceil( ((float)HEIGHT_A) / TILE );
    int dimx = ceil( ((float)WIDTH_B) / TILE );
    dim3 block(TILE, TILE, 1);
    dim3 grid (dimx, dimy, 1);
    {
        unsigned long int elapsed;
        struct timeval t_start, t_end, t_diff;
        gettimeofday(&t_start, NULL);
        hipLaunchKernelGGL(( matMultKer<float>) , dim3(grid), dim3(block) , 0, 0, d_A, d_B, d_C, WIDTH_A, HEIGHT_A, WIDTH_B);
        //matMultTiledKer<float,TILE> <<< grid, block >>>(d_A, d_B, d_C, WIDTH_A, HEIGHT_A, WIDTH_B);
        hipDeviceSynchronize();
        gettimeofday(&t_end, NULL);
        timeval_subtract(&t_diff, &t_end, &t_start);
        elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
        printf("GPU version runs in: %lu microsecs\n", elapsed);
        // Throughput: 2*M*N*U floating-point operations per multiply.
        float microsecPerMatrixMul = elapsed;
        double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
        double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
        printf( "Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
    }

    // 9. copy result from device to host
    hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);

    // 10. validate GPU result against the CPU reference
    validate<float>(seq_C, h_C, size_C);

    // 11. clean up memory
    free(h_A);
    free(h_B);
    free(h_C);
    free(seq_C); // bug fix: the CPU reference buffer was previously leaked
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    return 0;
}
| d0a99a02266c9e73da45e1d4c59cfd92404678c2.cu | #include "MatrixMult.h"
#include "MatrixMult.cu.h"
//WIDTH_A = U, HEIGHT_A = M, WIDTH_B = N
#define WIDTH_A 1024 //1024 //1024//2048
#define HEIGHT_A 1024 //2048//2048//2048
#define WIDTH_B 1024 //1536//4096//2048
#define TILE 16
/////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////
// Benchmarks dense matrix multiplication C = A * B, where A is HEIGHT_A x WIDTH_A
// and B is WIDTH_A x WIDTH_B: runs a sequential CPU reference, a GPU kernel
// (TILE x TILE blocks), reports GFlop/s, and validates GPU vs CPU.
int main() {
    // set seed for rand()
    srand(2006);

    // 1. allocate host memory for the two input matrices
    unsigned int size_A = WIDTH_A * HEIGHT_A;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float* h_A = (float*) malloc(mem_size_A);
    unsigned int size_B = WIDTH_B * WIDTH_A;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float* h_B = (float*) malloc(mem_size_B);

    // 2. initialize host memory with random values
    randomInit(h_A, size_A);
    randomInit(h_B, size_B);

    // 3. allocate device memory
    // NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked here —
    // failures would only show up later as wrong results.
    float* d_A;
    float* d_B;
    cudaMalloc((void**) &d_A, mem_size_A);
    cudaMalloc((void**) &d_B, mem_size_B);

    // 4. copy host memory to device
    cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);

    // 5. allocate host memory for the GPU result (h_C) and CPU reference (seq_C)
    unsigned int size_C = HEIGHT_A * WIDTH_B;
    unsigned int mem_size_C = sizeof(float) * size_C;
    float* h_C = (float*) malloc(mem_size_C);
    float* seq_C = (float*) malloc(mem_size_C);

    // 6. allocate device memory for the result
    float* d_C;
    cudaMalloc((void**) &d_C, mem_size_C);

    // 7. compute and time the sequential reference multiplication
    {
        unsigned long int elapsed;
        struct timeval t_start, t_end, t_diff;
        gettimeofday(&t_start, NULL);
        // WIDTH_A = U, HEIGHT_A = M, WIDTH_B = N
        matMult<float>(h_A, h_B, seq_C, WIDTH_A, HEIGHT_A, WIDTH_B);
        gettimeofday(&t_end, NULL);
        timeval_subtract(&t_diff, &t_end, &t_start);
        elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
        printf("Sequential Naive version runs in: %lu microsecs\n", elapsed);
    }

    // 8. launch the GPU kernel with a TILE x TILE block grid covering C
    int dimy = ceil( ((float)HEIGHT_A) / TILE );
    int dimx = ceil( ((float)WIDTH_B) / TILE );
    dim3 block(TILE, TILE, 1);
    dim3 grid (dimx, dimy, 1);
    {
        unsigned long int elapsed;
        struct timeval t_start, t_end, t_diff;
        gettimeofday(&t_start, NULL);
        matMultKer<float> <<< grid, block >>>(d_A, d_B, d_C, WIDTH_A, HEIGHT_A, WIDTH_B);
        //matMultTiledKer<float,TILE> <<< grid, block >>>(d_A, d_B, d_C, WIDTH_A, HEIGHT_A, WIDTH_B);
        // Bug fix: cudaThreadSynchronize() is deprecated; use the supported
        // cudaDeviceSynchronize() (identical semantics).
        cudaDeviceSynchronize();
        gettimeofday(&t_end, NULL);
        timeval_subtract(&t_diff, &t_end, &t_start);
        elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
        printf("GPU version runs in: %lu microsecs\n", elapsed);
        // Throughput: 2*M*N*U floating-point operations per multiply.
        float microsecPerMatrixMul = elapsed;
        double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
        double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
        printf( "Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
    }

    // 9. copy result from device to host
    cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);

    // 10. validate GPU result against the CPU reference
    validate<float>(seq_C, h_C, size_C);

    // 11. clean up memory
    free(h_A);
    free(h_B);
    free(h_C);
    free(seq_C); // bug fix: the CPU reference buffer was previously leaked
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
|
320e8a20b7b7bc25df313b36f0635ce7e8d5c804.hip | // !!! This is a file automatically generated by hipify!!!
/*
The contents of this file are dedicated by all of its authors, including
Michael S. Gashler,
anonymous contributors,
to the public domain (http://creativecommons.org/publicdomain/zero/1.0/).
Note that some moral obligations still exist in the absence of legal ones.
For example, it would still be dishonest to deliberately misrepresent the
origin of a work. Although we impose no legal requirements to obtain a
license, it is beseeming for those who build on the works of others to
give back useful improvements, or find a way to pay it forward. If
you would like to cite us, a published paper about Waffles can be found
at http://jmlr.org/papers/volume12/gashler11a/gashler11a.pdf. If you find
our code to be useful, the Waffles team would love to hear how you use it.
*/
#include "GCudaMatrix.h"
#include "../../GClasses/GError.h"
#include "../../GClasses/GMatrix.h"
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
namespace GClasses {

// Process-wide flag used to enforce the single-instance constraint on GCudaEngine.
bool g_haveEngine = false;

// Creates the GPU math context: a hipBLAS handle (m_handle) and a hipRAND
// pseudo-random generator (m_prng, fixed seed 1234 for reproducibility).
// Throws Ex if a second engine is created or if any library call fails.
GCudaEngine::GCudaEngine()
{
if(g_haveEngine)
throw Ex("There should only be one GCudaEngine in existence at any time");
g_haveEngine = true;
if(hipblasCreate((hipblasHandle_t*)&m_handle) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasCreate failed");
m_blockSize = 64; // default block size; not otherwise used in this file
if(hiprandCreateGenerator((hiprandGenerator_t*)&m_prng, HIPRAND_RNG_PSEUDO_DEFAULT) != HIPRAND_STATUS_SUCCESS)
throw Ex("hiprandCreateGenerator failed");
if(hiprandSetPseudoRandomGeneratorSeed((hiprandGenerator_t)m_prng, 1234ULL) != HIPRAND_STATUS_SUCCESS)
throw Ex("hiprandSetPseudoRandomGeneratorSeed failed");
m_hogWild = false; // when true, sync() becomes a no-op (see below)
}

// Destroys the hipBLAS handle and clears the singleton flag.
// NOTE(review): throwing from a destructor can terminate the program if it
// runs during stack unwinding; also m_prng is never destroyed here
// (hiprandDestroyGenerator) — possible resource leak, TODO confirm.
GCudaEngine::~GCudaEngine()
{
if(hipblasDestroy((hipblasHandle_t)m_handle) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasDestroy failed");
g_haveEngine = false;
}

// Blocks until all queued device work completes, unless "hog wild" mode is
// enabled, in which case synchronization is deliberately skipped.
void GCudaEngine::sync()
{
if(!m_hogWild)
{
if(hipDeviceSynchronize() != hipSuccess)
throw Ex(hipGetErrorString(hipGetLastError()));
}
}

// Constructs an empty vector; no device memory is allocated until resize().
GCudaVector::GCudaVector()
: m_size(0), d_vals(NULL)
{
}

// Frees the device buffer, if any.
GCudaVector::~GCudaVector()
{
if(d_vals)
hipFree(d_vals);
}

// Frees any existing device buffer and allocates a fresh, uninitialized one of
// `size` doubles. Old contents are NOT preserved.
// NOTE(review): if hipMalloc fails, d_vals is left pointing at freed memory
// before the throw — the destructor would then double-free. TODO confirm.
void GCudaVector::resize(size_t size)
{
if(d_vals)
hipFree(d_vals);
if(hipMalloc((void**)&d_vals, size * sizeof(double)) != hipSuccess)
throw Ex(hipGetErrorString(hipGetLastError()));
m_size = size;
}

// Copies `size` doubles from host memory to the device, resizing first if needed.
void GCudaVector::upload(const double* pHostVector, size_t size)
{
if(m_size != size)
resize(size);
if(hipblasSetVector(m_size, sizeof(double), pHostVector, 1, d_vals, 1) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasSetVector failed");
}

// Copies the vector's m_size doubles from the device into host memory.
// The caller must supply a buffer of at least m_size doubles.
void GCudaVector::download(double* pOutHostVector)
{
if(hipblasGetVector(m_size, sizeof(double), d_vals, 1, pOutHostVector, 1) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasGetVector failed");
}

// Makes this vector a device-side copy of `that`, resizing if needed.
void GCudaVector::copy(GCudaEngine& engine, const GCudaVector& that)
{
if(m_size != that.m_size)
resize(that.m_size);
if(hipblasDcopy((hipblasHandle_t)engine.m_handle, m_size, that.d_vals, 1, d_vals, 1) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasDcopy failed");
}

// Computes this += thatScalar * that (axpy). Sizes must already match.
void GCudaVector::add(GCudaEngine& engine, GCudaVector& that, double thatScalar)
{
GAssert(m_size == that.m_size);
if(hipblasDaxpy((hipblasHandle_t)engine.m_handle, m_size, &thatScalar,
that.d_vals, 1, d_vals, 1) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasDaxpy failed");
}

// Multiplies every element of this vector by `scalar` in place.
void GCudaVector::scale(GCudaEngine& engine, double scalar)
{
if(hipblasDscal((hipblasHandle_t)engine.m_handle, m_size, &scalar, d_vals, 1) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasDscal failed");
}

// Fills the vector with uniformly-distributed doubles from the engine's RNG.
void GCudaVector::randomUniform(GCudaEngine& engine)
{
if(hiprandGenerateUniformDouble((hiprandGenerator_t)engine.m_prng, d_vals, m_size) != HIPRAND_STATUS_SUCCESS)
throw Ex("hiprandGenerateUniformDouble failed");
}

// Fills the vector with normally-distributed doubles (given mean and deviation).
void GCudaVector::randomNormal(GCudaEngine& engine, double mean, double dev)
{
if(hiprandGenerateNormalDouble((hiprandGenerator_t)engine.m_prng, d_vals, m_size, mean, dev) != HIPRAND_STATUS_SUCCESS)
throw Ex("hiprandGenerateNormalDouble failed");
}
// Constructs an empty matrix; no device memory is allocated until resize().
GCudaMatrix::GCudaMatrix()
: m_rows(0), m_cols(0), d_vals(NULL)
{
}

// Frees the device buffer, if any.
GCudaMatrix::~GCudaMatrix()
{
if(d_vals)
hipFree(d_vals);
}

// Frees any existing device buffer and allocates a fresh, uninitialized
// rows*cols buffer of doubles. Old contents are NOT preserved.
void GCudaMatrix::resize(size_t rows, size_t cols)
{
if(d_vals)
hipFree(d_vals);
if(hipMalloc((void**)&d_vals, rows * cols * sizeof(double)) != hipSuccess)
throw Ex(hipGetErrorString(hipGetLastError()));
m_rows = rows;
m_cols = cols;
}

// Copies a host-side GMatrix to the device, row by row (the device buffer is
// one contiguous row-major block; host rows are copied individually,
// presumably because GMatrix rows need not be contiguous — TODO confirm).
void GCudaMatrix::upload(const GMatrix& m)
{
if(m_rows != m.rows() || m_cols != m.cols())
resize(m.rows(), m.cols());
double* pVals = d_vals;
for(size_t i = 0; i < m_rows; i++)
{
if(hipblasSetVector(m_cols, sizeof(double), m[i], 1, pVals, 1) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasSetVector failed");
pVals += m_cols;
}
}

// Copies the device matrix back into a host-side GMatrix, resizing it if needed.
void GCudaMatrix::download(GMatrix& m)
{
if(m.rows() != m_rows || m.cols() != m_cols)
m.resize(m_rows, m_cols);
double* pVals = d_vals;
for(size_t i = 0; i < m_rows; i++)
{
if(hipblasGetVector(m_cols, sizeof(double), pVals, 1, m[i], 1) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasGetVector failed");
pVals += m_cols;
}
}

// Multiplies every element of the matrix by `scalar` in place.
void GCudaMatrix::scale(GCudaEngine& engine, double scalar)
{
if(hipblasDscal((hipblasHandle_t)engine.m_handle, m_rows * m_cols, &scalar, d_vals, 1) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasDscal failed");
}
// Computes this += thatScalar * that, element-wise over the whole
// rows*cols device buffer (axpy). `that` must have the same dimensions.
void GCudaMatrix::add(GCudaEngine& engine, GCudaMatrix& that, double thatScalar)
{
	// Bug fix: the assertion previously referenced an undeclared identifier `m`
	// (GAssert(m.rows() == m_rows && m.cols() == m_cols)); compare against the
	// actual operand `that` instead.
	GAssert(that.m_rows == m_rows && that.m_cols == m_cols);
	if(hipblasDaxpy((hipblasHandle_t)engine.m_handle, m_rows * m_cols, &thatScalar,
		that.d_vals, 1, d_vals, 1) != HIPBLAS_STATUS_SUCCESS)
		throw Ex("hipblasDaxpy failed");
}
// out = in * M, where `in` has length m_rows and `out` gets length m_cols.
// Implemented with gemv over the row-major device storage (leading dim m_cols).
void GCudaMatrix::rowVectorTimesThis(GCudaEngine& engine, const GCudaVector& in, GCudaVector& out)
{
GAssert(in.m_size == m_rows);
if(out.size() != m_cols)
out.resize(m_cols);
double alpha = 1.0f;
double beta = 0.0f;
if(hipblasDgemv((hipblasHandle_t)engine.m_handle, HIPBLAS_OP_N,
m_cols, m_rows, &alpha, d_vals,
m_cols, in.d_vals, 1, &beta, out.d_vals, 1) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasDgemv failed");
}

// out = M * in, where `in` has length m_cols and `out` gets length m_rows.
// Same storage as above but with the transposed gemv op.
void GCudaMatrix::thisTimesColumnVector(GCudaEngine& engine, const GCudaVector& in, GCudaVector& out)
{
GAssert(in.m_size == m_cols);
if(out.m_size != m_rows)
out.resize(m_rows);
double alpha = 1.0f;
double beta = 0.0f;
if(hipblasDgemv((hipblasHandle_t)engine.m_handle, HIPBLAS_OP_T,
m_cols, m_rows, &alpha, d_vals,
m_cols, in.d_vals, 1, &beta, out.d_vals, 1) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasDgemv failed");
}

// Accumulates into `out` (beta == 1) the product of `in` with the sub-block of
// rows starting at `inputStart`. `out` must already be sized to m_cols.
void GCudaMatrix::feedIn(GCudaEngine& engine, const GCudaVector& in, GCudaVector& out, size_t inputStart)
{
GAssert(inputStart + in.m_size <= m_rows);
GAssert(out.m_size == m_cols);
double alpha = 1.0f;
double beta = 1.0f;
if(hipblasDgemv((hipblasHandle_t)engine.m_handle, HIPBLAS_OP_N,
m_cols, in.m_size, &alpha, d_vals + inputStart * m_cols,
m_cols, in.d_vals, 1, &beta, out.d_vals, 1) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasDgemv failed");
}

// Transposed gemv reading `in` from offset `inputStart`, overwriting `out`.
// NOTE(review): unlike the methods above, `out` is not resized here — the
// caller must have sized it to m_rows already. TODO confirm intentional.
void GCudaMatrix::backPropError(GCudaEngine& engine, const GCudaVector& in, GCudaVector& out, size_t inputStart)
{
GAssert(in.m_size == m_cols);
double alpha = 1.0f;
double beta = 0.0f;
if(hipblasDgemv((hipblasHandle_t)engine.m_handle, HIPBLAS_OP_T,
in.size(), m_rows, &alpha, d_vals,
m_cols, in.d_vals + inputStart, 1, &beta, out.d_vals, 1) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasDgemv failed");
}

// Rank-1 update (ger): this += learningRate * outer(downStreamError, upStreamInput).
void GCudaMatrix::addOuterProduct(GCudaEngine& engine, GCudaVector& upStreamInput, GCudaVector& downStreamError, double learningRate)
{
if(hipblasDger((hipblasHandle_t)engine.m_handle, m_cols, upStreamInput.size(), &learningRate,
downStreamError.d_vals, 1, upStreamInput.d_vals, 1,
d_vals, m_cols) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasDger failed");
}

// Returns the sum of absolute values of the given row (contiguous, stride 1).
double GCudaMatrix::rowSumAbs(GCudaEngine& engine, size_t row)
{
double res;
if(hipblasDasum((hipblasHandle_t)engine.m_handle, m_cols, d_vals + row * m_cols, 1, &res) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasDasum failed");
return res;
}

// Returns the sum of squares of the given row (dot of the row with itself).
double GCudaMatrix::rowSumSquare(GCudaEngine& engine, size_t row)
{
double res;
if(hipblasDdot((hipblasHandle_t)engine.m_handle, m_cols, d_vals + row * m_cols, 1, d_vals + row * m_cols, 1, &res) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasDdot failed");
return res;
}

// Returns the sum of absolute values of the given column (stride m_cols
// walks down a column of the row-major buffer).
double GCudaMatrix::colSumAbs(GCudaEngine& engine, size_t col)
{
double res;
if(hipblasDasum((hipblasHandle_t)engine.m_handle, m_rows, d_vals + col, m_cols, &res) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasDasum failed");
return res;
}

// Returns the sum of squares of the given column.
double GCudaMatrix::colSumSquare(GCudaEngine& engine, size_t col)
{
double res;
if(hipblasDdot((hipblasHandle_t)engine.m_handle, m_rows, d_vals + col, m_cols, d_vals + col, m_cols, &res) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasDdot failed");
return res;
}

// Multiplies every element of the given row by `scalar` in place.
void GCudaMatrix::scaleRow(GCudaEngine& engine, size_t row, double scalar)
{
if(hipblasDscal((hipblasHandle_t)engine.m_handle, m_cols, &scalar, d_vals + row * m_cols, 1) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasDscal failed");
}

// Multiplies every element of the given column by `scalar` in place.
void GCudaMatrix::scaleCol(GCudaEngine& engine, size_t col, double scalar)
{
if(hipblasDscal((hipblasHandle_t)engine.m_handle, m_rows, &scalar, d_vals + col, m_cols) != HIPBLAS_STATUS_SUCCESS)
throw Ex("hipblasDscal failed");
}
} // namespace GClasses
| 320e8a20b7b7bc25df313b36f0635ce7e8d5c804.cu | /*
The contents of this file are dedicated by all of its authors, including
Michael S. Gashler,
anonymous contributors,
to the public domain (http://creativecommons.org/publicdomain/zero/1.0/).
Note that some moral obligations still exist in the absence of legal ones.
For example, it would still be dishonest to deliberately misrepresent the
origin of a work. Although we impose no legal requirements to obtain a
license, it is beseeming for those who build on the works of others to
give back useful improvements, or find a way to pay it forward. If
you would like to cite us, a published paper about Waffles can be found
at http://jmlr.org/papers/volume12/gashler11a/gashler11a.pdf. If you find
our code to be useful, the Waffles team would love to hear how you use it.
*/
#include "GCudaMatrix.h"
#include "../../GClasses/GError.h"
#include "../../GClasses/GMatrix.h"
#include <cuda.h>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <curand.h>
namespace GClasses {

// Process-wide flag used to enforce the single-instance constraint on GCudaEngine.
bool g_haveEngine = false;

// Creates the GPU math context: a cuBLAS handle (m_handle) and a cuRAND
// pseudo-random generator (m_prng, fixed seed 1234 for reproducibility).
// Throws Ex if a second engine is created or if any library call fails.
GCudaEngine::GCudaEngine()
{
if(g_haveEngine)
throw Ex("There should only be one GCudaEngine in existence at any time");
g_haveEngine = true;
if(cublasCreate((cublasHandle_t*)&m_handle) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasCreate failed");
m_blockSize = 64; // default block size; not otherwise used in this file
if(curandCreateGenerator((curandGenerator_t*)&m_prng, CURAND_RNG_PSEUDO_DEFAULT) != CURAND_STATUS_SUCCESS)
throw Ex("curandCreateGenerator failed");
if(curandSetPseudoRandomGeneratorSeed((curandGenerator_t)m_prng, 1234ULL) != CURAND_STATUS_SUCCESS)
throw Ex("curandSetPseudoRandomGeneratorSeed failed");
m_hogWild = false; // when true, sync() becomes a no-op (see below)
}

// Destroys the cuBLAS handle and clears the singleton flag.
// NOTE(review): throwing from a destructor can terminate the program if it
// runs during stack unwinding; also m_prng is never destroyed here
// (curandDestroyGenerator) — possible resource leak, TODO confirm.
GCudaEngine::~GCudaEngine()
{
if(cublasDestroy((cublasHandle_t)m_handle) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasDestroy failed");
g_haveEngine = false;
}

// Blocks until all queued device work completes, unless "hog wild" mode is
// enabled, in which case synchronization is deliberately skipped.
void GCudaEngine::sync()
{
if(!m_hogWild)
{
if(cudaDeviceSynchronize() != cudaSuccess)
throw Ex(cudaGetErrorString(cudaGetLastError()));
}
}

// Constructs an empty vector; no device memory is allocated until resize().
GCudaVector::GCudaVector()
: m_size(0), d_vals(NULL)
{
}

// Frees the device buffer, if any.
GCudaVector::~GCudaVector()
{
if(d_vals)
cudaFree(d_vals);
}

// Frees any existing device buffer and allocates a fresh, uninitialized one of
// `size` doubles. Old contents are NOT preserved.
// NOTE(review): if cudaMalloc fails, d_vals is left pointing at freed memory
// before the throw — the destructor would then double-free. TODO confirm.
void GCudaVector::resize(size_t size)
{
if(d_vals)
cudaFree(d_vals);
if(cudaMalloc((void**)&d_vals, size * sizeof(double)) != cudaSuccess)
throw Ex(cudaGetErrorString(cudaGetLastError()));
m_size = size;
}

// Copies `size` doubles from host memory to the device, resizing first if needed.
void GCudaVector::upload(const double* pHostVector, size_t size)
{
if(m_size != size)
resize(size);
if(cublasSetVector(m_size, sizeof(double), pHostVector, 1, d_vals, 1) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasSetVector failed");
}

// Copies the vector's m_size doubles from the device into host memory.
// The caller must supply a buffer of at least m_size doubles.
void GCudaVector::download(double* pOutHostVector)
{
if(cublasGetVector(m_size, sizeof(double), d_vals, 1, pOutHostVector, 1) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasGetVector failed");
}

// Makes this vector a device-side copy of `that`, resizing if needed.
void GCudaVector::copy(GCudaEngine& engine, const GCudaVector& that)
{
if(m_size != that.m_size)
resize(that.m_size);
if(cublasDcopy((cublasHandle_t)engine.m_handle, m_size, that.d_vals, 1, d_vals, 1) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasDcopy failed");
}

// Computes this += thatScalar * that (axpy). Sizes must already match.
void GCudaVector::add(GCudaEngine& engine, GCudaVector& that, double thatScalar)
{
GAssert(m_size == that.m_size);
if(cublasDaxpy((cublasHandle_t)engine.m_handle, m_size, &thatScalar,
that.d_vals, 1, d_vals, 1) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasDaxpy failed");
}

// Multiplies every element of this vector by `scalar` in place.
void GCudaVector::scale(GCudaEngine& engine, double scalar)
{
if(cublasDscal((cublasHandle_t)engine.m_handle, m_size, &scalar, d_vals, 1) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasDscal failed");
}

// Fills the vector with uniformly-distributed doubles from the engine's RNG.
void GCudaVector::randomUniform(GCudaEngine& engine)
{
if(curandGenerateUniformDouble((curandGenerator_t)engine.m_prng, d_vals, m_size) != CURAND_STATUS_SUCCESS)
throw Ex("curandGenerateUniformDouble failed");
}

// Fills the vector with normally-distributed doubles (given mean and deviation).
void GCudaVector::randomNormal(GCudaEngine& engine, double mean, double dev)
{
if(curandGenerateNormalDouble((curandGenerator_t)engine.m_prng, d_vals, m_size, mean, dev) != CURAND_STATUS_SUCCESS)
throw Ex("curandGenerateNormalDouble failed");
}
// Constructs an empty matrix; no device memory is allocated until resize().
GCudaMatrix::GCudaMatrix()
: m_rows(0), m_cols(0), d_vals(NULL)
{
}

// Frees the device buffer, if any.
GCudaMatrix::~GCudaMatrix()
{
if(d_vals)
cudaFree(d_vals);
}

// Frees any existing device buffer and allocates a fresh, uninitialized
// rows*cols buffer of doubles. Old contents are NOT preserved.
void GCudaMatrix::resize(size_t rows, size_t cols)
{
if(d_vals)
cudaFree(d_vals);
if(cudaMalloc((void**)&d_vals, rows * cols * sizeof(double)) != cudaSuccess)
throw Ex(cudaGetErrorString(cudaGetLastError()));
m_rows = rows;
m_cols = cols;
}

// Copies a host-side GMatrix to the device, row by row (the device buffer is
// one contiguous row-major block; host rows are copied individually,
// presumably because GMatrix rows need not be contiguous — TODO confirm).
void GCudaMatrix::upload(const GMatrix& m)
{
if(m_rows != m.rows() || m_cols != m.cols())
resize(m.rows(), m.cols());
double* pVals = d_vals;
for(size_t i = 0; i < m_rows; i++)
{
if(cublasSetVector(m_cols, sizeof(double), m[i], 1, pVals, 1) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasSetVector failed");
pVals += m_cols;
}
}

// Copies the device matrix back into a host-side GMatrix, resizing it if needed.
void GCudaMatrix::download(GMatrix& m)
{
if(m.rows() != m_rows || m.cols() != m_cols)
m.resize(m_rows, m_cols);
double* pVals = d_vals;
for(size_t i = 0; i < m_rows; i++)
{
if(cublasGetVector(m_cols, sizeof(double), pVals, 1, m[i], 1) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasGetVector failed");
pVals += m_cols;
}
}

// Multiplies every element of the matrix by `scalar` in place.
void GCudaMatrix::scale(GCudaEngine& engine, double scalar)
{
if(cublasDscal((cublasHandle_t)engine.m_handle, m_rows * m_cols, &scalar, d_vals, 1) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasDscal failed");
}
// Computes this += thatScalar * that, element-wise over the whole
// rows*cols device buffer (axpy). `that` must have the same dimensions.
void GCudaMatrix::add(GCudaEngine& engine, GCudaMatrix& that, double thatScalar)
{
	// Bug fix: the assertion previously referenced an undeclared identifier `m`
	// (GAssert(m.rows() == m_rows && m.cols() == m_cols)); compare against the
	// actual operand `that` instead.
	GAssert(that.m_rows == m_rows && that.m_cols == m_cols);
	if(cublasDaxpy((cublasHandle_t)engine.m_handle, m_rows * m_cols, &thatScalar,
		that.d_vals, 1, d_vals, 1) != CUBLAS_STATUS_SUCCESS)
		throw Ex("cublasDaxpy failed");
}
// out = in * M, where `in` has length m_rows and `out` gets length m_cols.
// Implemented with gemv over the row-major device storage (leading dim m_cols).
void GCudaMatrix::rowVectorTimesThis(GCudaEngine& engine, const GCudaVector& in, GCudaVector& out)
{
GAssert(in.m_size == m_rows);
if(out.size() != m_cols)
out.resize(m_cols);
double alpha = 1.0f;
double beta = 0.0f;
if(cublasDgemv((cublasHandle_t)engine.m_handle, CUBLAS_OP_N,
m_cols, m_rows, &alpha, d_vals,
m_cols, in.d_vals, 1, &beta, out.d_vals, 1) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasDgemv failed");
}

// out = M * in, where `in` has length m_cols and `out` gets length m_rows.
// Same storage as above but with the transposed gemv op.
void GCudaMatrix::thisTimesColumnVector(GCudaEngine& engine, const GCudaVector& in, GCudaVector& out)
{
GAssert(in.m_size == m_cols);
if(out.m_size != m_rows)
out.resize(m_rows);
double alpha = 1.0f;
double beta = 0.0f;
if(cublasDgemv((cublasHandle_t)engine.m_handle, CUBLAS_OP_T,
m_cols, m_rows, &alpha, d_vals,
m_cols, in.d_vals, 1, &beta, out.d_vals, 1) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasDgemv failed");
}

// Accumulates into `out` (beta == 1) the product of `in` with the sub-block of
// rows starting at `inputStart`. `out` must already be sized to m_cols.
void GCudaMatrix::feedIn(GCudaEngine& engine, const GCudaVector& in, GCudaVector& out, size_t inputStart)
{
GAssert(inputStart + in.m_size <= m_rows);
GAssert(out.m_size == m_cols);
double alpha = 1.0f;
double beta = 1.0f;
if(cublasDgemv((cublasHandle_t)engine.m_handle, CUBLAS_OP_N,
m_cols, in.m_size, &alpha, d_vals + inputStart * m_cols,
m_cols, in.d_vals, 1, &beta, out.d_vals, 1) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasDgemv failed");
}

// Transposed gemv reading `in` from offset `inputStart`, overwriting `out`.
// NOTE(review): unlike the methods above, `out` is not resized here — the
// caller must have sized it to m_rows already. TODO confirm intentional.
void GCudaMatrix::backPropError(GCudaEngine& engine, const GCudaVector& in, GCudaVector& out, size_t inputStart)
{
GAssert(in.m_size == m_cols);
double alpha = 1.0f;
double beta = 0.0f;
if(cublasDgemv((cublasHandle_t)engine.m_handle, CUBLAS_OP_T,
in.size(), m_rows, &alpha, d_vals,
m_cols, in.d_vals + inputStart, 1, &beta, out.d_vals, 1) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasDgemv failed");
}

// Rank-1 update (ger): this += learningRate * outer(downStreamError, upStreamInput).
void GCudaMatrix::addOuterProduct(GCudaEngine& engine, GCudaVector& upStreamInput, GCudaVector& downStreamError, double learningRate)
{
if(cublasDger((cublasHandle_t)engine.m_handle, m_cols, upStreamInput.size(), &learningRate,
downStreamError.d_vals, 1, upStreamInput.d_vals, 1,
d_vals, m_cols) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasDger failed");
}

// Returns the sum of absolute values of the given row (contiguous, stride 1).
double GCudaMatrix::rowSumAbs(GCudaEngine& engine, size_t row)
{
double res;
if(cublasDasum((cublasHandle_t)engine.m_handle, m_cols, d_vals + row * m_cols, 1, &res) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasDasum failed");
return res;
}

// Returns the sum of squares of the given row (dot of the row with itself).
double GCudaMatrix::rowSumSquare(GCudaEngine& engine, size_t row)
{
double res;
if(cublasDdot((cublasHandle_t)engine.m_handle, m_cols, d_vals + row * m_cols, 1, d_vals + row * m_cols, 1, &res) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasDdot failed");
return res;
}

// Returns the sum of absolute values of the given column (stride m_cols
// walks down a column of the row-major buffer).
double GCudaMatrix::colSumAbs(GCudaEngine& engine, size_t col)
{
double res;
if(cublasDasum((cublasHandle_t)engine.m_handle, m_rows, d_vals + col, m_cols, &res) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasDasum failed");
return res;
}

// Returns the sum of squares of the given column.
double GCudaMatrix::colSumSquare(GCudaEngine& engine, size_t col)
{
double res;
if(cublasDdot((cublasHandle_t)engine.m_handle, m_rows, d_vals + col, m_cols, d_vals + col, m_cols, &res) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasDdot failed");
return res;
}

// Multiplies every element of the given row by `scalar` in place.
void GCudaMatrix::scaleRow(GCudaEngine& engine, size_t row, double scalar)
{
if(cublasDscal((cublasHandle_t)engine.m_handle, m_cols, &scalar, d_vals + row * m_cols, 1) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasDscal failed");
}

// Multiplies every element of the given column by `scalar` in place.
void GCudaMatrix::scaleCol(GCudaEngine& engine, size_t col, double scalar)
{
if(cublasDscal((cublasHandle_t)engine.m_handle, m_rows, &scalar, d_vals + col, m_cols) != CUBLAS_STATUS_SUCCESS)
throw Ex("cublasDscal failed");
}
} // namespace GClasses
|
8d88527b5ebe3bf7e1cda721d842cce4e51b67b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpu_bc_node.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define THREAD_NUM 256
#define DEBUG
__global__ void forward_kernel (int *outgoing_starts, int *outgoing_edges,
int *d, int *sigma, bool *cont, int *dist, int num_nodes) {
int v = blockIdx.x * blockDim.x + threadIdx.x;
if(v >= num_nodes){
return;
}
if(d[v] == *dist) {
int start = outgoing_starts[v];
int end = outgoing_starts[v + 1];
for(int p = start; p < end; p++) {
int w = outgoing_edges[p];
if(d[w] == NOT_VISITED_MARKER) {
d[w] = *dist + 1;
*cont = true;
}
if(d[w] == *dist + 1) {
atomicAdd(&sigma[w], sigma[v]);
}
}
}
}
__global__ void backward_kernel (int *outgoing_starts,
int* outgoing_edges, int *d, int *sigma, float *delta, float* bc,
int *dist, int num_nodes) {
int v = blockIdx.x * blockDim.x + threadIdx.x;
if(v >= num_nodes) {
return;
}
if(d[v] == *dist - 1) {
int start = outgoing_starts[v];
int end = outgoing_starts[v + 1];
float sum = 0;
for(int p = start; p < end; p++) {
int w = outgoing_edges[p];
if(d[w] == *dist) {
sum += (float)sigma[v] / sigma[w] * (delta[w] + 1);
}
}
delta[v] += sum;
}
}
__global__ void compute_bc_kernel (int node_id, int *d, float *delta,
float *bc, int num_nodes) {
int v = blockIdx.x * blockDim.x + threadIdx.x;
if(v < num_nodes && v != node_id && d[v] != NOT_VISITED_MARKER) {
bc[v] += delta[v];
}
}
/*
__global__ void compute_bc_kernel_deg1 (int s, int *d, float *delta, float *bc, int num_nodes, int* d_weight) {
int v = blockIdx.x * blockDim.x + threadIdx.x;
if(v < num_nodes && v != s && d[v] != -1) {
bc[v] += delta[v] * d_weight[s];
}
}
*/
__global__ void init_params_kernel (int s, int *d, int *sigma,
int num_nodes, int* dist){
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i >= num_nodes) {
return;
}
if(s == i) {
d[i] = 0;
sigma[i] = 1;
*dist = 0;
} else {
d[i] = -1;
sigma[i] = 0;
}
}
/*
__global__ void set_int_vertex (int* dest, int val){
*dest = val;
}
*/
/*
__global__ void init_delta (int *d_weight, float* delta, int num_nodes) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < num_nodes) {
delta[i] = d_weight[i]-1;
}
}
*/
void setup(const graph *g, int **outgoing_starts, int **outgoing_edges,
int **d, int **sigma, float **delta, int **dist, float **bc,
bool **d_cont) {
int num_nodes = g->num_nodes;
int num_edges = g->num_edges;
hipMalloc((void **)outgoing_starts, sizeof(int) * (num_nodes + 1));
hipMalloc((void **)outgoing_edges, sizeof(int) * num_edges);
hipMemcpy(*outgoing_starts, g->outgoing_starts,
sizeof(int) * (num_nodes + 1), hipMemcpyHostToDevice);
hipMemcpy(*outgoing_edges, g->outgoing_edges,
sizeof(int) * num_edges, hipMemcpyHostToDevice);
hipMalloc((void **)d, sizeof(int) * num_nodes);
hipMalloc((void **)sigma, sizeof(int) * num_nodes);
hipMalloc((void **)delta, sizeof(float) * num_nodes);
hipMalloc((void **)dist, sizeof(int));
hipMalloc((void **)bc, sizeof(float) * num_nodes);
hipMemset(*bc, 0, sizeof(float) * num_nodes);
hipMalloc((void **)d_cont, sizeof(bool));
}
void clean(int *outgoing_starts, int *outgoing_edges, int *d,
int *sigma, float *delta, int *dist, float *bc, bool *d_cont) {
hipFree(outgoing_starts);
hipFree(outgoing_edges);
hipFree(d);
hipFree(sigma);
hipFree(delta);
hipFree(dist);
hipFree(bc);
hipFree(d_cont);
}
int gpu_bc_node (const graph *g, float *bc) {
int *device_outgoing_starts, *device_outgoing_edges;
int *device_d, *device_sigma, *device_dist, distance;
float *device_delta, *device_bc;
bool cont, *device_cont;
int num_nodes = g->num_nodes;
setup(g, &device_outgoing_starts, &device_outgoing_edges, &device_d,
&device_sigma, &device_delta, &device_dist, &device_bc, &device_cont);
dim3 blockDim(THREAD_NUM);
dim3 gridDim((g->num_nodes + blockDim.x - 1) / blockDim.x);
for(int node_id = 0; node_id < num_nodes; node_id++) {
distance = 0;
hipLaunchKernelGGL(( init_params_kernel), dim3(gridDim),dim3(blockDim), 0, 0, node_id, device_d, device_sigma,
num_nodes, device_dist);
// BFS
do {
hipMemset(device_cont, false, sizeof(bool));
hipLaunchKernelGGL(( forward_kernel), dim3(gridDim), dim3(blockDim), 0, 0, device_outgoing_starts,
device_outgoing_edges, device_d, device_sigma, device_cont,
device_dist, num_nodes);
hipDeviceSynchronize();
hipMemcpy(device_dist, &(++distance), sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&cont, device_cont, sizeof(bool), hipMemcpyDeviceToHost);
} while (cont);
//Back propagation
hipMemset(device_delta, 0, sizeof(int) * num_nodes);
hipMemcpy(device_dist, &(--distance), sizeof(int), hipMemcpyHostToDevice);
while (distance > 1) {
hipLaunchKernelGGL(( backward_kernel), dim3(gridDim), dim3(blockDim), 0, 0, device_outgoing_starts,
device_outgoing_edges, device_d, device_sigma, device_delta,
device_bc, device_dist, num_nodes);
hipDeviceSynchronize();
hipMemcpy(device_dist, &(--distance), sizeof(int), hipMemcpyHostToDevice);
}
hipLaunchKernelGGL(( compute_bc_kernel), dim3(gridDim), dim3(blockDim), 0, 0, node_id, device_d,
device_delta, device_bc, num_nodes);
}
hipMemcpy(bc, device_bc, sizeof(float)*num_nodes, hipMemcpyDeviceToHost);
clean(device_outgoing_starts, device_outgoing_edges, device_d,
device_sigma, device_delta, device_dist, device_bc, device_cont);
return 0;
}
| 8d88527b5ebe3bf7e1cda721d842cce4e51b67b7.cu | #include "gpu_bc_node.h"
#include <cuda.h>
#include <cuda_runtime.h>
#define THREAD_NUM 256
#define DEBUG
__global__ void forward_kernel (int *outgoing_starts, int *outgoing_edges,
int *d, int *sigma, bool *cont, int *dist, int num_nodes) {
int v = blockIdx.x * blockDim.x + threadIdx.x;
if(v >= num_nodes){
return;
}
if(d[v] == *dist) {
int start = outgoing_starts[v];
int end = outgoing_starts[v + 1];
for(int p = start; p < end; p++) {
int w = outgoing_edges[p];
if(d[w] == NOT_VISITED_MARKER) {
d[w] = *dist + 1;
*cont = true;
}
if(d[w] == *dist + 1) {
atomicAdd(&sigma[w], sigma[v]);
}
}
}
}
__global__ void backward_kernel (int *outgoing_starts,
int* outgoing_edges, int *d, int *sigma, float *delta, float* bc,
int *dist, int num_nodes) {
int v = blockIdx.x * blockDim.x + threadIdx.x;
if(v >= num_nodes) {
return;
}
if(d[v] == *dist - 1) {
int start = outgoing_starts[v];
int end = outgoing_starts[v + 1];
float sum = 0;
for(int p = start; p < end; p++) {
int w = outgoing_edges[p];
if(d[w] == *dist) {
sum += (float)sigma[v] / sigma[w] * (delta[w] + 1);
}
}
delta[v] += sum;
}
}
__global__ void compute_bc_kernel (int node_id, int *d, float *delta,
float *bc, int num_nodes) {
int v = blockIdx.x * blockDim.x + threadIdx.x;
if(v < num_nodes && v != node_id && d[v] != NOT_VISITED_MARKER) {
bc[v] += delta[v];
}
}
/*
__global__ void compute_bc_kernel_deg1 (int s, int *d, float *delta, float *bc, int num_nodes, int* d_weight) {
int v = blockIdx.x * blockDim.x + threadIdx.x;
if(v < num_nodes && v != s && d[v] != -1) {
bc[v] += delta[v] * d_weight[s];
}
}
*/
__global__ void init_params_kernel (int s, int *d, int *sigma,
int num_nodes, int* dist){
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i >= num_nodes) {
return;
}
if(s == i) {
d[i] = 0;
sigma[i] = 1;
*dist = 0;
} else {
d[i] = -1;
sigma[i] = 0;
}
}
/*
__global__ void set_int_vertex (int* dest, int val){
*dest = val;
}
*/
/*
__global__ void init_delta (int *d_weight, float* delta, int num_nodes) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < num_nodes) {
delta[i] = d_weight[i]-1;
}
}
*/
void setup(const graph *g, int **outgoing_starts, int **outgoing_edges,
int **d, int **sigma, float **delta, int **dist, float **bc,
bool **d_cont) {
int num_nodes = g->num_nodes;
int num_edges = g->num_edges;
cudaMalloc((void **)outgoing_starts, sizeof(int) * (num_nodes + 1));
cudaMalloc((void **)outgoing_edges, sizeof(int) * num_edges);
cudaMemcpy(*outgoing_starts, g->outgoing_starts,
sizeof(int) * (num_nodes + 1), cudaMemcpyHostToDevice);
cudaMemcpy(*outgoing_edges, g->outgoing_edges,
sizeof(int) * num_edges, cudaMemcpyHostToDevice);
cudaMalloc((void **)d, sizeof(int) * num_nodes);
cudaMalloc((void **)sigma, sizeof(int) * num_nodes);
cudaMalloc((void **)delta, sizeof(float) * num_nodes);
cudaMalloc((void **)dist, sizeof(int));
cudaMalloc((void **)bc, sizeof(float) * num_nodes);
cudaMemset(*bc, 0, sizeof(float) * num_nodes);
cudaMalloc((void **)d_cont, sizeof(bool));
}
void clean(int *outgoing_starts, int *outgoing_edges, int *d,
int *sigma, float *delta, int *dist, float *bc, bool *d_cont) {
cudaFree(outgoing_starts);
cudaFree(outgoing_edges);
cudaFree(d);
cudaFree(sigma);
cudaFree(delta);
cudaFree(dist);
cudaFree(bc);
cudaFree(d_cont);
}
int gpu_bc_node (const graph *g, float *bc) {
int *device_outgoing_starts, *device_outgoing_edges;
int *device_d, *device_sigma, *device_dist, distance;
float *device_delta, *device_bc;
bool cont, *device_cont;
int num_nodes = g->num_nodes;
setup(g, &device_outgoing_starts, &device_outgoing_edges, &device_d,
&device_sigma, &device_delta, &device_dist, &device_bc, &device_cont);
dim3 blockDim(THREAD_NUM);
dim3 gridDim((g->num_nodes + blockDim.x - 1) / blockDim.x);
for(int node_id = 0; node_id < num_nodes; node_id++) {
distance = 0;
init_params_kernel<<<gridDim,blockDim>>>(node_id, device_d, device_sigma,
num_nodes, device_dist);
// BFS
do {
cudaMemset(device_cont, false, sizeof(bool));
forward_kernel<<<gridDim, blockDim>>>(device_outgoing_starts,
device_outgoing_edges, device_d, device_sigma, device_cont,
device_dist, num_nodes);
cudaDeviceSynchronize();
cudaMemcpy(device_dist, &(++distance), sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&cont, device_cont, sizeof(bool), cudaMemcpyDeviceToHost);
} while (cont);
//Back propagation
cudaMemset(device_delta, 0, sizeof(int) * num_nodes);
cudaMemcpy(device_dist, &(--distance), sizeof(int), cudaMemcpyHostToDevice);
while (distance > 1) {
backward_kernel<<<gridDim, blockDim>>>(device_outgoing_starts,
device_outgoing_edges, device_d, device_sigma, device_delta,
device_bc, device_dist, num_nodes);
cudaDeviceSynchronize();
cudaMemcpy(device_dist, &(--distance), sizeof(int), cudaMemcpyHostToDevice);
}
compute_bc_kernel<<<gridDim, blockDim>>>(node_id, device_d,
device_delta, device_bc, num_nodes);
}
cudaMemcpy(bc, device_bc, sizeof(float)*num_nodes, cudaMemcpyDeviceToHost);
clean(device_outgoing_starts, device_outgoing_edges, device_d,
device_sigma, device_delta, device_dist, device_bc, device_cont);
return 0;
}
|
f341e62ef0fa41baa02f09adc27206b3bd38b815.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void repeat0(float* in, float* out, int outStride0, int outStride1, int outScalarCount) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < outScalarCount; tid += stride) {
int linearIndex = tid;
int outIndex0 = linearIndex / outStride0;
linearIndex = linearIndex - outIndex0 * outStride0;
int outIndex1 = linearIndex / outStride1;
int outIndex2 = linearIndex - outIndex1 * outStride1;
int inIndex = outIndex2 + (outIndex0 + outIndex1) * outStride1;
out[tid] = in[inIndex];
}
} | f341e62ef0fa41baa02f09adc27206b3bd38b815.cu | #include "includes.h"
__global__ void repeat0(float* in, float* out, int outStride0, int outStride1, int outScalarCount) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < outScalarCount; tid += stride) {
int linearIndex = tid;
int outIndex0 = linearIndex / outStride0;
linearIndex = linearIndex - outIndex0 * outStride0;
int outIndex1 = linearIndex / outStride1;
int outIndex2 = linearIndex - outIndex1 * outStride1;
int inIndex = outIndex2 + (outIndex0 + outIndex1) * outStride1;
out[tid] = in[inIndex];
}
} |
3bc96d87c0e615e3d757f9337958831fac8a7ed7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include "lattice.h"
using namespace std;
__global__ void _Metropolis(int *a, const double beta, const int res, const double *real_dist)
{
const int N = blockDim.x;
int i = blockIdx.x;
int j = threadIdx.x;
if(((i+j)&1)==res) // flip spins of 'res'
{
// indexes around spin (i,j)
int i_ = _res(i+1,N)*N, _i = _res(i-1,N)*N;
i *= N;
int j_ = _res(j+1,N), _j = _res(j-1,N);
int I = i+j;
// calculate energy difference
int spin_sum = a[i_+j]+a[_i+j]+a[i+j_]+a[i+_j];
double h_diff = 2*beta*a[I]*spin_sum;
// attempt to flip the spin
if(h_diff<0||exp(-h_diff)>real_dist[I]) a[I] *= -1;
}
}
void Metropolis_sweep(const lattice &sigma, const double beta)
{
const int N = sigma.N;
hiprandGenerateUniformDouble(sigma.gen, sigma.real_dist, N*N); // generate (0,1) random numbers
hipLaunchKernelGGL(( _Metropolis), dim3(N),dim3(N), 0, 0, sigma.a, beta, 0, sigma.real_dist); // sweep even (0) spins
hipLaunchKernelGGL(( _Metropolis), dim3(N),dim3(N), 0, 0, sigma.a, beta, 1, sigma.real_dist); // sweep odd (1) spins
}
| 3bc96d87c0e615e3d757f9337958831fac8a7ed7.cu | #include <cmath>
#include "lattice.h"
using namespace std;
__global__ void _Metropolis(int *a, const double beta, const int res, const double *real_dist)
{
const int N = blockDim.x;
int i = blockIdx.x;
int j = threadIdx.x;
if(((i+j)&1)==res) // flip spins of 'res'
{
// indexes around spin (i,j)
int i_ = _res(i+1,N)*N, _i = _res(i-1,N)*N;
i *= N;
int j_ = _res(j+1,N), _j = _res(j-1,N);
int I = i+j;
// calculate energy difference
int spin_sum = a[i_+j]+a[_i+j]+a[i+j_]+a[i+_j];
double h_diff = 2*beta*a[I]*spin_sum;
// attempt to flip the spin
if(h_diff<0||exp(-h_diff)>real_dist[I]) a[I] *= -1;
}
}
void Metropolis_sweep(const lattice &sigma, const double beta)
{
const int N = sigma.N;
curandGenerateUniformDouble(sigma.gen, sigma.real_dist, N*N); // generate (0,1) random numbers
_Metropolis<<<N,N>>>(sigma.a, beta, 0, sigma.real_dist); // sweep even (0) spins
_Metropolis<<<N,N>>>(sigma.a, beta, 1, sigma.real_dist); // sweep odd (1) spins
}
|
0779391ee09e6f7d520a275917fd6637ba286494.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @brief Countmin-CU sketch
*
* CUDA implementation
*
* @file sketch.cpp
* @author Hans Lehnert
*/
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <chrono>
#include <limits>
#include <unordered_set>
#include "fasta.hpp"
const unsigned int N_HASH = 4;
const unsigned int M = 14;
const unsigned int RHO = 145;
// __constant__ unsigned short d_seeds[N_HASH * 32];
/**
* @brief Compute H3 hash
*/
template <int bits>
__device__ unsigned int hashH3(unsigned long key, unsigned short* seeds) {
unsigned int result = 0;
for (int i = 0; i < bits; i++) {
if (key & 1)
result ^= seeds[i];
key >>= 1;
}
return result;
}
template <int n_hash, int bits>
__global__ void countminCu(
unsigned int n,
unsigned long* keys,
unsigned short* seeds,
unsigned int* sketch,
unsigned long* heavy_hitters,
unsigned int* count) {
unsigned int start_index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = start_index; i < n; i += stride) {
unsigned int min_hits = ~0;
unsigned short hashes[n_hash];
for (unsigned int j = 0; j < n_hash; j++) {
hashes[j] = hashH3<32>(keys[i], seeds + j * bits);
if (sketch[hashes[j] + (j << M)] < min_hits) {
min_hits = sketch[hashes[j] + (j << M)];
}
}
for (unsigned int j = 0; j < n_hash; j++) {
if (sketch[hashes[j] + (j << M)] == min_hits) {
atomicAdd(&sketch[hashes[j] + (j << M)], 1);
}
}
if (min_hits + 1 == RHO) {
unsigned long pos = atomicAdd(count, 1);
heavy_hitters[pos] = keys[i];
}
}
}
int main(int argc, char* argv[]) {
if (argc < 2) {
std::cerr << "Missing dataset file." << std::endl;
return 1;
}
// Generate hash vectors
unsigned short* d_seeds;
unsigned short h_seeds[N_HASH * 32];
for (unsigned int i = 0; i < N_HASH * 32; i++)
h_seeds[i] = rand() & ((1 << M) - 1);
hipMalloc(&d_seeds, sizeof(h_seeds));
hipMemcpy(d_seeds, h_seeds, sizeof(h_seeds), hipMemcpyHostToDevice);
// Start time measurement
auto start = std::chrono::steady_clock::now();
// Parse data set and transfer to device
std::ifstream dataset_file(argv[1]);
std::vector<unsigned long> data_vector = parseFasta(dataset_file, 16);
dataset_file.close();
unsigned long n_data = data_vector.size();
unsigned long* h_data;
unsigned long* d_data;
hipHostMalloc(
&h_data, n_data * sizeof(unsigned long), hipHostMallocWriteCombined);
hipMemcpyAsync(
h_data,
data_vector.data(),
n_data * sizeof(unsigned long),
hipMemcpyHostToHost);
hipMalloc(&d_data, n_data * sizeof(unsigned long));
hipMemcpyAsync(
d_data,
h_data,
n_data * sizeof(unsigned long),
hipMemcpyHostToDevice);
// Hash values
unsigned int* sketch;
unsigned long* d_heavy_hitters;
unsigned int* heavy_hitters_count;
hipMalloc(&sketch, (N_HASH << M) * sizeof(unsigned int));
hipMalloc(&d_heavy_hitters, (1 << 10) * sizeof(unsigned long));
hipMallocManaged(&heavy_hitters_count, sizeof(unsigned int));
hipDeviceSynchronize();
int block_size = 256;
int num_blocks = 16;
hipLaunchKernelGGL(( countminCu<N_HASH, 32>), dim3(num_blocks), dim3(block_size), 0, 0,
n_data,
d_data,
d_seeds,
sketch,
d_heavy_hitters,
heavy_hitters_count);
hipPeekAtLastError();
hipDeviceSynchronize();
unsigned long* h_heavy_hitters = new unsigned long[*heavy_hitters_count];
hipMemcpy(
h_heavy_hitters,
d_heavy_hitters,
*heavy_hitters_count * sizeof(unsigned long),
hipMemcpyDeviceToHost);
// End time measurement
auto end = std::chrono::steady_clock::now();
std::chrono::duration<double> diff = end - start;
std::clog << "Execution time: " << diff.count() << " s" << std::endl;
std::clog << "Data vectors: " << n_data << std::endl;
std::clog << "Heavy-hitters: " << *heavy_hitters_count << std::endl;
// Print heavy-hitters
for (int i = 0; i < *heavy_hitters_count; i++) {
std::cout << sequenceToString(h_heavy_hitters[i], 16) << std::endl;
}
// Free shared memory
hipFree(d_data);
hipHostFree(h_data);
return 0;
}
| 0779391ee09e6f7d520a275917fd6637ba286494.cu | /**
* @brief Countmin-CU sketch
*
* CUDA implementation
*
* @file sketch.cpp
* @author Hans Lehnert
*/
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <chrono>
#include <limits>
#include <unordered_set>
#include "fasta.hpp"
const unsigned int N_HASH = 4;
const unsigned int M = 14;
const unsigned int RHO = 145;
// __constant__ unsigned short d_seeds[N_HASH * 32];
/**
* @brief Compute H3 hash
*/
template <int bits>
__device__ unsigned int hashH3(unsigned long key, unsigned short* seeds) {
unsigned int result = 0;
for (int i = 0; i < bits; i++) {
if (key & 1)
result ^= seeds[i];
key >>= 1;
}
return result;
}
template <int n_hash, int bits>
__global__ void countminCu(
unsigned int n,
unsigned long* keys,
unsigned short* seeds,
unsigned int* sketch,
unsigned long* heavy_hitters,
unsigned int* count) {
unsigned int start_index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = start_index; i < n; i += stride) {
unsigned int min_hits = ~0;
unsigned short hashes[n_hash];
for (unsigned int j = 0; j < n_hash; j++) {
hashes[j] = hashH3<32>(keys[i], seeds + j * bits);
if (sketch[hashes[j] + (j << M)] < min_hits) {
min_hits = sketch[hashes[j] + (j << M)];
}
}
for (unsigned int j = 0; j < n_hash; j++) {
if (sketch[hashes[j] + (j << M)] == min_hits) {
atomicAdd(&sketch[hashes[j] + (j << M)], 1);
}
}
if (min_hits + 1 == RHO) {
unsigned long pos = atomicAdd(count, 1);
heavy_hitters[pos] = keys[i];
}
}
}
int main(int argc, char* argv[]) {
if (argc < 2) {
std::cerr << "Missing dataset file." << std::endl;
return 1;
}
// Generate hash vectors
unsigned short* d_seeds;
unsigned short h_seeds[N_HASH * 32];
for (unsigned int i = 0; i < N_HASH * 32; i++)
h_seeds[i] = rand() & ((1 << M) - 1);
cudaMalloc(&d_seeds, sizeof(h_seeds));
cudaMemcpy(d_seeds, h_seeds, sizeof(h_seeds), cudaMemcpyHostToDevice);
// Start time measurement
auto start = std::chrono::steady_clock::now();
// Parse data set and transfer to device
std::ifstream dataset_file(argv[1]);
std::vector<unsigned long> data_vector = parseFasta(dataset_file, 16);
dataset_file.close();
unsigned long n_data = data_vector.size();
unsigned long* h_data;
unsigned long* d_data;
cudaHostAlloc(
&h_data, n_data * sizeof(unsigned long), cudaHostAllocWriteCombined);
cudaMemcpyAsync(
h_data,
data_vector.data(),
n_data * sizeof(unsigned long),
cudaMemcpyHostToHost);
cudaMalloc(&d_data, n_data * sizeof(unsigned long));
cudaMemcpyAsync(
d_data,
h_data,
n_data * sizeof(unsigned long),
cudaMemcpyHostToDevice);
// Hash values
unsigned int* sketch;
unsigned long* d_heavy_hitters;
unsigned int* heavy_hitters_count;
cudaMalloc(&sketch, (N_HASH << M) * sizeof(unsigned int));
cudaMalloc(&d_heavy_hitters, (1 << 10) * sizeof(unsigned long));
cudaMallocManaged(&heavy_hitters_count, sizeof(unsigned int));
cudaDeviceSynchronize();
int block_size = 256;
int num_blocks = 16;
countminCu<N_HASH, 32><<<num_blocks, block_size>>>(
n_data,
d_data,
d_seeds,
sketch,
d_heavy_hitters,
heavy_hitters_count);
cudaPeekAtLastError();
cudaDeviceSynchronize();
unsigned long* h_heavy_hitters = new unsigned long[*heavy_hitters_count];
cudaMemcpy(
h_heavy_hitters,
d_heavy_hitters,
*heavy_hitters_count * sizeof(unsigned long),
cudaMemcpyDeviceToHost);
// End time measurement
auto end = std::chrono::steady_clock::now();
std::chrono::duration<double> diff = end - start;
std::clog << "Execution time: " << diff.count() << " s" << std::endl;
std::clog << "Data vectors: " << n_data << std::endl;
std::clog << "Heavy-hitters: " << *heavy_hitters_count << std::endl;
// Print heavy-hitters
for (int i = 0; i < *heavy_hitters_count; i++) {
std::cout << sequenceToString(h_heavy_hitters[i], 16) << std::endl;
}
// Free shared memory
cudaFree(d_data);
cudaFreeHost(h_data);
return 0;
}
|
831599a791acaa571ede16f643e677e109a36fd2.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=1024 --gridDim=1 --no-inline
#include <hip/hip_runtime.h>
#include <stdio.h>
#define N 2 //1024
__global__ void definitions (unsigned int* B)
{
atomicInc(B,7);//0111 -> 1000 -> 0000 -> 0001 -> 0010 -> 0011 -> 0100 -> 0101 -> 0110 ...
/*the second argument on atomicInc() is a limit for increments. When this limit is reached, B receives 0*/
}
| 831599a791acaa571ede16f643e677e109a36fd2.cu | //pass
//--blockDim=1024 --gridDim=1 --no-inline
#include <cuda.h>
#include <stdio.h>
#define N 2 //1024
__global__ void definitions (unsigned int* B)
{
atomicInc(B,7);//0111 -> 1000 -> 0000 -> 0001 -> 0010 -> 0011 -> 0100 -> 0101 -> 0110 ...
/*the second argument on atomicInc() is a limit for increments. When this limit is reached, B receives 0*/
}
|
004c15cf561c1006ec9fc0513fedf4c2fce8bdf4.hip | // !!! This is a file automatically generated by hipify!!!
#include <unistd.h>
#include <time.h>
#include <string.h>
#include <unistd.h>
#include <math.h>
#include <helper_functions.h> // helper functions for string parsing
#include <helper_cuda.h>
#include "elastic_kernel.h"
#include <hip/hip_runtime_api.h>
// Tables to store results for solo exectuions
extern t_smk_solo *smk_solo; //
extern t_smt_solo *smt_solo;
// Tables to store coexecution results
extern t_smk_coBlocks **smk_conc;
extern t_smt_coBlocks **smt_conc; //tpms of each kernel in coexection
// Table to store better speedups in coexecution
extern t_co_speedup **smk_best_sp;
extern t_co_speedup **smt_best_sp;
int interleaved_launch_coexec(t_kcoexec *coexec)
{
//Launch interleaved BSUs (streams) belonging to different kernels
int new_streams_k0 = coexec->num_streams[0]-coexec->kstr[0]->num_streams;
int new_streams_k1 = coexec->num_streams[1]-coexec->kstr[1]->num_streams;
if (new_streams_k0 < new_streams_k1) {
// launch interleaved streams
for (int i=0; i< new_streams_k0; i++){
launch_SMK_kernel(coexec->kstr[0], 1);
launch_SMK_kernel(coexec->kstr[1], 1);
}
// Laiuch remainning streams of k1
launch_SMK_kernel(coexec->kstr[1], new_streams_k1-new_streams_k0);
}
else {
// launch interleaved streams
for (int i=0; i< new_streams_k1; i++){
launch_SMK_kernel(coexec->kstr[0], 1);
launch_SMK_kernel(coexec->kstr[1], 1);
}
launch_SMK_kernel(coexec->kstr[0], new_streams_k0-new_streams_k1);
}
return 0;
}
typedef enum {START, FORWARD, BACKWARD} t_search; // Search in co-execution configuration set. START: first search using FORWARD direction.
int pair_overhed(t_kernel_stub *kstub0, t_kernel_stub *kstub1)
{
struct timespec now;
char name0[20], name1[20]; // Kernel names
// Temporal model variables: waiting times in seconds
double w_launch = 0.000200; //0.000120; // Wait until all streas are running
double w_sample = 0.000030; // waiting time between samples (executed_tasks), that is, the inverse of sampling rate: fixed to the minimum eviction time divided by two
// Load profiling tables: only set of coexecution configurations are necessary
read_profling_tables();
// Create streams kernel info for coexecution
t_kstreams *kstr = (t_kstreams *)calloc(2, sizeof(t_kstreams));
create_kstreams(kstub0, &kstr[0]);
create_kstreams(kstub1, &kstr[1]);
// Coxecution info
t_kcoexec coexec;
create_coexec(&coexec, 2);
// Launch proxy
t_sched sched;
create_sched(&sched);
launch_generic_proxy((void *)&sched); // Launch proxy
// Kids
int kid0 = kstub0->id;
int kid1 = kstub1->id;
kid_from_index(kid0, name0);
kid_from_index(kid1, name1);
// Get coexecution configuration located in central position
int num_confs = smk_conc[kid0][kid1].num_configs;
int index = num_confs/2;
int prev_index;
int b0 = smk_conc[kid0][kid1].pairs[index][0];
int b1 = smk_conc[kid0][kid1].pairs[index][1];
int prev_b0, prev_b1;
double prev_ter0 = -1, prev_ter1=-1;
double ter0, ter1;
t_search search = START;
// Add streams (BSUs)
add_kernel_for_coexecution(&coexec, &sched, &kstr[0], b0, 0); // Add b0 streams
add_kernel_for_coexecution(&coexec, &sched, &kstr[1], b1, 1); // Add b1 streams
// Execute kernels (launching streams) in coexec structure
interleaved_launch_coexec(&coexec);
clock_gettime(CLOCK_REALTIME, &now);
double time1 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
double time2 = time1;
double start_time = time2;
double prof_ini = time1;
// Wait until launching ends
while ((time2 - time1) < w_launch){
clock_gettime(CLOCK_REALTIME, &now);
time2 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
}
time1 = time2;
//printf("PROF: Conf(%d,%d)\n", b0, b1);
while (1) {
// Set start time: start of sampling of a co-execution configuration
clock_gettime(CLOCK_REALTIME, &now);
start_time = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
// Get number of task at the beginning of the sampling
int executed_tasks0 = *(sched.cont_tasks_zc + 0);
int executed_tasks1 = *(sched.cont_tasks_zc + 1);
// Take samples until they are stable
double lprev_ter0=0, lprev_ter1=0;
while (1) {
// Interval between consecutive samples
while ((time2 - time1) < w_sample){
clock_gettime(CLOCK_REALTIME, &now);
time2 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
}
time1 = time2;
// Performance with WS
int cont_task0 = *(sched.cont_tasks_zc + 0);
int cont_task1 = *(sched.cont_tasks_zc + 1);
if (cont_task0 >= kstub0->total_tasks || cont_task1 >= kstub1->total_tasks) {
// Evict proxy
sched.kernel_evict_zc[0] = PROXY_EVICT;
hipDeviceSynchronize();
// If a kernel finished during profiling, exit
if (cont_task0 >= kstub0->total_tasks)
printf("PROF:, %s-%s, %f, %f First kernel has finished\n", name0, name1, ter0, ter1);
else
printf("PROF:, %s-%s, %f, %f Second kernel has finished\n", name0, name1, ter0, ter1);
// Free
remove_kstreams(kstr);
free(kstr);
remove_coexec(&coexec);
return -1;
}
ter0 = (double)(cont_task0-executed_tasks0)/(time2 - start_time);
ter1 = (double)(cont_task1-executed_tasks1)/(time2 - start_time);
// Compare previous samples with the current ones
if (ter0 !=0 && ter1 !=0) // Is some ter is zero, go on taking samples
if (fabs(ter0-lprev_ter0)<0.2*ter0 && fabs(ter1-lprev_ter1)<0.2*ter1) { // Is samples are stable go to WS calculation
printf("(ter0=%f ter1=%f) (prev_ter0=%f prev_ter1=%f) \n", ter0, ter1, lprev_ter0, lprev_ter1);
printf("(ter0-prev_ter0=%f, %f ter1-prev_ter1=%f, %f) \n", fabs(ter0-lprev_ter0), 0.1*ter0, fabs(ter1-lprev_ter1), 0.1*ter1);
break;
}
lprev_ter0=ter0;
lprev_ter1=ter1;
}
if (prev_ter0 != -1){ // Flasg to indicate that TER for two configurations have been calculated
double ws = 0.5*(ter0/prev_ter0 + ter1/prev_ter1);
printf("WS Conf(%d->%d) %f\n", index, prev_index, ws);
if (search == START) { //If first ws calculated calculated
if (ws < 1) { // change direction of the search
search = BACKWARD;
ter0 = prev_ter0;
ter1 = prev_ter1;
index = prev_index;
b0 = prev_b0;
b1 = prev_b1;
}
else
search = FORWARD;
}
else
if (ws < 1) {
printf("Best configuration achieved %s/%s=(%d,%d)\n", name0, name1, smk_conc[kid0][kid1].pairs[prev_index][0], smk_conc[kid0][kid1].pairs[prev_index][1]);
break;
}
}
// Save values from current configuration
prev_index = index;
prev_ter0 = ter0;
prev_ter1 = ter1;
prev_b0 = b0;
prev_b1 = b1;
// Change search config
if (search == START || search == FORWARD)
index = index +1; //New condifuracion
else
index = index-1;
if (index >= num_confs || index < 0) {
printf("Configuracion extrema alcanzada %s/%s=(%d,%d)\n", name0, name1, smk_conc[kid0][kid1].pairs[prev_index][0], smk_conc[kid0][kid1].pairs[prev_index][1]);
break;
}
b0 = smk_conc[kid0][kid1].pairs[index][0];
b1 = smk_conc[kid0][kid1].pairs[index][1];
//printf("PROF: Conf(%d,%d)\n", b0, b1);
if (b0 < coexec.num_streams[0]){ // If k0 has less BSUs
evict_streams(coexec.kstr[0], coexec.num_streams[0] - b0); // Evict BSU(s)
//printf("Conf=%d Eviciting %d BSUs the kernel 0\n", index, coexec.num_streams[0] - b0);
coexec.num_streams[0] -= (coexec.num_streams[0] - b0);
//printf("Conf=%d Lanzando %d BSUs the kernel 1\n", index, b1 - coexec.num_streams[1]);
add_streams_to_kernel(&coexec, &sched, coexec.kstr[1], b1-coexec.num_streams[1]);
launch_coexec(&coexec);
}
else { // b1 has less BSUs
evict_streams(coexec.kstr[1], coexec.num_streams[1] - b1); // Evict BSU(s)
//printf("Conf=%d Eviciting %d BSUs the kernel 1\n", index, coexec.num_streams[1] - b1);
coexec.num_streams[1] -= (coexec.num_streams[1] - b1);
//printf("Conf=%d Lanzando %d BSUs the kernel 0\n", index, b0 - coexec.num_streams[0]);
add_streams_to_kernel(&coexec, &sched, coexec.kstr[0], b0-coexec.num_streams[0]);
launch_coexec(&coexec);
}
}
// Read executed tasks during profiling for each kernel
int executed_task0 = *(sched.cont_tasks_zc + 0);
int executed_task1 = *(sched.cont_tasks_zc + 1);
// Get tpms achieved for each kernel during solo execution
double tpms0 = smk_solo[kid0].tpms[smk_solo[kid0].num_configs-1];
double tpms1 = smk_solo[kid1].tpms[smk_solo[kid1].num_configs-1];
// Calculate the time taken for both kernels, sequentially executed, to run those tasks
double seq_time = (double)executed_task0/tpms0 + (double)executed_task1/tpms1;
// Get time to calculate profiling time;
clock_gettime(CLOCK_REALTIME, &now);
time1 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
// Compare profiling time with sequential time executing the sae number of tasks
printf("Label, K0/K1, Config, Ptime(ms), Stime(ms), HyperQtime (ms)\n");
printf("PROF:, %s-%s, %d-%d, %f, %f", name0, name1, smk_conc[kid0][kid1].pairs[prev_index][0], smk_conc[kid0][kid1].pairs[prev_index][1], (time1-prof_ini)*1000, seq_time);
// Evict proxy
sched.kernel_evict_zc[0] = PROXY_EVICT;
hipDeviceSynchronize();
// Check sequential time using two streams
kstub0->kconf.max_persistent_blocks = 8;
int save_total_task0 = kstub0->total_tasks;
kstub0->total_tasks = executed_task0;
kstub1->kconf.max_persistent_blocks = 8;
int save_total_task1 = kstub1->total_tasks;
kstub1->total_tasks = executed_task1;
for (int i=0; i<MAX_STREAMS_PER_KERNEL; i++)
kstub0->h_state[i] = PREP;
hipMemcpy(kstub0->gm_state, kstub0->h_state, sizeof(State) * MAX_STREAMS_PER_KERNEL, hipMemcpyHostToDevice);
kstub0->execution_s = &(kstr[0].str[0]); //Stream id
kstub0->stream_index = 0; // Index used by kernel to test state of i-esimo stream
for (int i=0; i<MAX_STREAMS_PER_KERNEL; i++)
kstub1->h_state[i] = PREP;
hipMemcpy(kstub1->gm_state, kstub1->h_state, sizeof(State) * MAX_STREAMS_PER_KERNEL, hipMemcpyHostToDevice);
kstub1->execution_s = &(kstr[1].str[0]); //Stream id
kstub1->stream_index = 0; // Index used by kernel to test state of i-esimo stream
// Use all the SMs
int idSMs[2];
idSMs[0] = 0;idSMs[1] = kstr->kstub->kconf.numSMs-1;
kstub0->idSMs = idSMs;
kstub1->idSMs = idSMs;
hipMemset(kstub0->d_executed_tasks, 0, sizeof(int));
hipMemset(kstub1->d_executed_tasks, 0, sizeof(int));
// Get time to calculate profiling time;
clock_gettime(CLOCK_REALTIME, &now);
time1 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
(kstub0->launchCKEkernel)(kstub0);
(kstub1->launchCKEkernel)(kstub1);
hipDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &now);
time2 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
printf(", %f\n", (time2-time1)*1000);
// Restore original value
kstub0->total_tasks = save_total_task0;
kstub1->total_tasks = save_total_task1;
// Free
remove_kstreams(kstr);
free(kstr);
remove_coexec(&coexec);
return 0;
}
// Applications is composed of one or several kernels
typedef struct{
int num_kernels;
int index;
t_Kernel kid[8]; // Max: 8 kernels per application
t_kernel_stub* kstubs[8]; // One kernel stub per kernel
}t_application;
int online_profiler_overhead(t_Kernel *kid, int num_kernels, int deviceId)
{
struct timespec now;
hipError_t err;
// Select device
hipSetDevice(deviceId);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, deviceId);
printf("Device=%s\n", deviceProp.name);
/** Create commom streams for all kernels: two for asynchronous transfers, one for preemption commands*/
hipStream_t *transfers_s;
transfers_s = (hipStream_t *)calloc(2, sizeof(hipStream_t));
for (int i=0;i<2;i++){
err = hipStreamCreate(&transfers_s[i]);
checkCudaErrors(err);
}
hipStream_t preemp_s;
checkCudaErrors(hipStreamCreateWithFlags(&preemp_s, hipStreamNonBlocking));
/** Create stubs ***/
int max_num_kernels=13;
t_kernel_stub **kstubs = (t_kernel_stub **)calloc(max_num_kernels, sizeof(t_kernel_stub*));
int cont=0;
for (int i=0; i<num_kernels; i++) {
create_stubinfo(&kstubs[cont++], deviceId, kid[i], transfers_s, &preemp_s);
if (kid[i] == GCEDD) {
create_stubinfo_with_params(&kstubs[cont++], deviceId, SCEDD, transfers_s, &preemp_s, kstubs[i]->params);
create_stubinfo_with_params(&kstubs[cont++], deviceId, NCEDD, transfers_s, &preemp_s, kstubs[i]->params);
create_stubinfo_with_params(&kstubs[cont++], deviceId, HCEDD, transfers_s, &preemp_s, kstubs[i]->params);
}
if (kid[i] == RCONV){
create_stubinfo_with_params(&kstubs[cont++], deviceId, CCONV, transfers_s, &preemp_s, kstubs[i]->params);
}
}
// Make allocation and HtD transfer for kernels
make_transfers(kstubs, cont);
for (int i=0; i<cont; i++)
for (int j=i+1; j<cont; j++)
pair_overhed(kstubs[i], kstubs[j]);
return 0;
} | 004c15cf561c1006ec9fc0513fedf4c2fce8bdf4.cu | #include <unistd.h>
#include <time.h>
#include <string.h>
#include <unistd.h>
#include <math.h>
#include <helper_functions.h> // helper functions for string parsing
#include <helper_cuda.h>
#include "elastic_kernel.h"
#include <cuda_profiler_api.h>
// Tables to store results for solo exectuions
extern t_smk_solo *smk_solo; //
extern t_smt_solo *smt_solo;
// Tables to store coexecution results
extern t_smk_coBlocks **smk_conc;
extern t_smt_coBlocks **smt_conc; //tpms of each kernel in coexection
// Table to store better speedups in coexecution
extern t_co_speedup **smk_best_sp;
extern t_co_speedup **smt_best_sp;
int interleaved_launch_coexec(t_kcoexec *coexec)
{
//Launch interleaved BSUs (streams) belonging to different kernels
int new_streams_k0 = coexec->num_streams[0]-coexec->kstr[0]->num_streams;
int new_streams_k1 = coexec->num_streams[1]-coexec->kstr[1]->num_streams;
if (new_streams_k0 < new_streams_k1) {
// launch interleaved streams
for (int i=0; i< new_streams_k0; i++){
launch_SMK_kernel(coexec->kstr[0], 1);
launch_SMK_kernel(coexec->kstr[1], 1);
}
// Laiuch remainning streams of k1
launch_SMK_kernel(coexec->kstr[1], new_streams_k1-new_streams_k0);
}
else {
// launch interleaved streams
for (int i=0; i< new_streams_k1; i++){
launch_SMK_kernel(coexec->kstr[0], 1);
launch_SMK_kernel(coexec->kstr[1], 1);
}
launch_SMK_kernel(coexec->kstr[0], new_streams_k0-new_streams_k1);
}
return 0;
}
typedef enum {START, FORWARD, BACKWARD} t_search; // Search in co-execution configuration set. START: first search using FORWARD direction.
int pair_overhed(t_kernel_stub *kstub0, t_kernel_stub *kstub1)
{
struct timespec now;
char name0[20], name1[20]; // Kernel names
// Temporal model variables: waiting times in seconds
double w_launch = 0.000200; //0.000120; // Wait until all streas are running
double w_sample = 0.000030; // waiting time between samples (executed_tasks), that is, the inverse of sampling rate: fixed to the minimum eviction time divided by two
// Load profiling tables: only set of coexecution configurations are necessary
read_profling_tables();
// Create streams kernel info for coexecution
t_kstreams *kstr = (t_kstreams *)calloc(2, sizeof(t_kstreams));
create_kstreams(kstub0, &kstr[0]);
create_kstreams(kstub1, &kstr[1]);
// Coxecution info
t_kcoexec coexec;
create_coexec(&coexec, 2);
// Launch proxy
t_sched sched;
create_sched(&sched);
launch_generic_proxy((void *)&sched); // Launch proxy
// Kids
int kid0 = kstub0->id;
int kid1 = kstub1->id;
kid_from_index(kid0, name0);
kid_from_index(kid1, name1);
// Get coexecution configuration located in central position
int num_confs = smk_conc[kid0][kid1].num_configs;
int index = num_confs/2;
int prev_index;
int b0 = smk_conc[kid0][kid1].pairs[index][0];
int b1 = smk_conc[kid0][kid1].pairs[index][1];
int prev_b0, prev_b1;
double prev_ter0 = -1, prev_ter1=-1;
double ter0, ter1;
t_search search = START;
// Add streams (BSUs)
add_kernel_for_coexecution(&coexec, &sched, &kstr[0], b0, 0); // Add b0 streams
add_kernel_for_coexecution(&coexec, &sched, &kstr[1], b1, 1); // Add b1 streams
// Execute kernels (launching streams) in coexec structure
interleaved_launch_coexec(&coexec);
clock_gettime(CLOCK_REALTIME, &now);
double time1 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
double time2 = time1;
double start_time = time2;
double prof_ini = time1;
// Wait until launching ends
while ((time2 - time1) < w_launch){
clock_gettime(CLOCK_REALTIME, &now);
time2 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
}
time1 = time2;
//printf("PROF: Conf(%d,%d)\n", b0, b1);
while (1) {
// Set start time: start of sampling of a co-execution configuration
clock_gettime(CLOCK_REALTIME, &now);
start_time = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
// Get number of task at the beginning of the sampling
int executed_tasks0 = *(sched.cont_tasks_zc + 0);
int executed_tasks1 = *(sched.cont_tasks_zc + 1);
// Take samples until they are stable
double lprev_ter0=0, lprev_ter1=0;
while (1) {
// Interval between consecutive samples
while ((time2 - time1) < w_sample){
clock_gettime(CLOCK_REALTIME, &now);
time2 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
}
time1 = time2;
// Performance with WS
int cont_task0 = *(sched.cont_tasks_zc + 0);
int cont_task1 = *(sched.cont_tasks_zc + 1);
if (cont_task0 >= kstub0->total_tasks || cont_task1 >= kstub1->total_tasks) {
// Evict proxy
sched.kernel_evict_zc[0] = PROXY_EVICT;
cudaDeviceSynchronize();
// If a kernel finished during profiling, exit
if (cont_task0 >= kstub0->total_tasks)
printf("PROF:, %s-%s, %f, %f First kernel has finished\n", name0, name1, ter0, ter1);
else
printf("PROF:, %s-%s, %f, %f Second kernel has finished\n", name0, name1, ter0, ter1);
// Free
remove_kstreams(kstr);
free(kstr);
remove_coexec(&coexec);
return -1;
}
ter0 = (double)(cont_task0-executed_tasks0)/(time2 - start_time);
ter1 = (double)(cont_task1-executed_tasks1)/(time2 - start_time);
// Compare previous samples with the current ones
if (ter0 !=0 && ter1 !=0) // Is some ter is zero, go on taking samples
if (fabs(ter0-lprev_ter0)<0.2*ter0 && fabs(ter1-lprev_ter1)<0.2*ter1) { // Is samples are stable go to WS calculation
printf("(ter0=%f ter1=%f) (prev_ter0=%f prev_ter1=%f) \n", ter0, ter1, lprev_ter0, lprev_ter1);
printf("(ter0-prev_ter0=%f, %f ter1-prev_ter1=%f, %f) \n", fabs(ter0-lprev_ter0), 0.1*ter0, fabs(ter1-lprev_ter1), 0.1*ter1);
break;
}
lprev_ter0=ter0;
lprev_ter1=ter1;
}
if (prev_ter0 != -1){ // Flasg to indicate that TER for two configurations have been calculated
double ws = 0.5*(ter0/prev_ter0 + ter1/prev_ter1);
printf("WS Conf(%d->%d) %f\n", index, prev_index, ws);
if (search == START) { //If first ws calculated calculated
if (ws < 1) { // change direction of the search
search = BACKWARD;
ter0 = prev_ter0;
ter1 = prev_ter1;
index = prev_index;
b0 = prev_b0;
b1 = prev_b1;
}
else
search = FORWARD;
}
else
if (ws < 1) {
printf("Best configuration achieved %s/%s=(%d,%d)\n", name0, name1, smk_conc[kid0][kid1].pairs[prev_index][0], smk_conc[kid0][kid1].pairs[prev_index][1]);
break;
}
}
// Save values from current configuration
prev_index = index;
prev_ter0 = ter0;
prev_ter1 = ter1;
prev_b0 = b0;
prev_b1 = b1;
// Change search config
if (search == START || search == FORWARD)
index = index +1; //New condifuracion
else
index = index-1;
if (index >= num_confs || index < 0) {
printf("Configuracion extrema alcanzada %s/%s=(%d,%d)\n", name0, name1, smk_conc[kid0][kid1].pairs[prev_index][0], smk_conc[kid0][kid1].pairs[prev_index][1]);
break;
}
b0 = smk_conc[kid0][kid1].pairs[index][0];
b1 = smk_conc[kid0][kid1].pairs[index][1];
//printf("PROF: Conf(%d,%d)\n", b0, b1);
if (b0 < coexec.num_streams[0]){ // If k0 has less BSUs
evict_streams(coexec.kstr[0], coexec.num_streams[0] - b0); // Evict BSU(s)
//printf("Conf=%d Eviciting %d BSUs the kernel 0\n", index, coexec.num_streams[0] - b0);
coexec.num_streams[0] -= (coexec.num_streams[0] - b0);
//printf("Conf=%d Lanzando %d BSUs the kernel 1\n", index, b1 - coexec.num_streams[1]);
add_streams_to_kernel(&coexec, &sched, coexec.kstr[1], b1-coexec.num_streams[1]);
launch_coexec(&coexec);
}
else { // b1 has less BSUs
evict_streams(coexec.kstr[1], coexec.num_streams[1] - b1); // Evict BSU(s)
//printf("Conf=%d Eviciting %d BSUs the kernel 1\n", index, coexec.num_streams[1] - b1);
coexec.num_streams[1] -= (coexec.num_streams[1] - b1);
//printf("Conf=%d Lanzando %d BSUs the kernel 0\n", index, b0 - coexec.num_streams[0]);
add_streams_to_kernel(&coexec, &sched, coexec.kstr[0], b0-coexec.num_streams[0]);
launch_coexec(&coexec);
}
}
// Read executed tasks during profiling for each kernel
int executed_task0 = *(sched.cont_tasks_zc + 0);
int executed_task1 = *(sched.cont_tasks_zc + 1);
// Get tpms achieved for each kernel during solo execution
double tpms0 = smk_solo[kid0].tpms[smk_solo[kid0].num_configs-1];
double tpms1 = smk_solo[kid1].tpms[smk_solo[kid1].num_configs-1];
// Calculate the time taken for both kernels, sequentially executed, to run those tasks
double seq_time = (double)executed_task0/tpms0 + (double)executed_task1/tpms1;
// Get time to calculate profiling time;
clock_gettime(CLOCK_REALTIME, &now);
time1 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
// Compare profiling time with sequential time executing the sae number of tasks
printf("Label, K0/K1, Config, Ptime(ms), Stime(ms), HyperQtime (ms)\n");
printf("PROF:, %s-%s, %d-%d, %f, %f", name0, name1, smk_conc[kid0][kid1].pairs[prev_index][0], smk_conc[kid0][kid1].pairs[prev_index][1], (time1-prof_ini)*1000, seq_time);
// Evict proxy
sched.kernel_evict_zc[0] = PROXY_EVICT;
cudaDeviceSynchronize();
// Check sequential time using two streams
kstub0->kconf.max_persistent_blocks = 8;
int save_total_task0 = kstub0->total_tasks;
kstub0->total_tasks = executed_task0;
kstub1->kconf.max_persistent_blocks = 8;
int save_total_task1 = kstub1->total_tasks;
kstub1->total_tasks = executed_task1;
for (int i=0; i<MAX_STREAMS_PER_KERNEL; i++)
kstub0->h_state[i] = PREP;
cudaMemcpy(kstub0->gm_state, kstub0->h_state, sizeof(State) * MAX_STREAMS_PER_KERNEL, cudaMemcpyHostToDevice);
kstub0->execution_s = &(kstr[0].str[0]); //Stream id
kstub0->stream_index = 0; // Index used by kernel to test state of i-esimo stream
for (int i=0; i<MAX_STREAMS_PER_KERNEL; i++)
kstub1->h_state[i] = PREP;
cudaMemcpy(kstub1->gm_state, kstub1->h_state, sizeof(State) * MAX_STREAMS_PER_KERNEL, cudaMemcpyHostToDevice);
kstub1->execution_s = &(kstr[1].str[0]); //Stream id
kstub1->stream_index = 0; // Index used by kernel to test state of i-esimo stream
// Use all the SMs
int idSMs[2];
idSMs[0] = 0;idSMs[1] = kstr->kstub->kconf.numSMs-1;
kstub0->idSMs = idSMs;
kstub1->idSMs = idSMs;
cudaMemset(kstub0->d_executed_tasks, 0, sizeof(int));
cudaMemset(kstub1->d_executed_tasks, 0, sizeof(int));
// Get time to calculate profiling time;
clock_gettime(CLOCK_REALTIME, &now);
time1 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
(kstub0->launchCKEkernel)(kstub0);
(kstub1->launchCKEkernel)(kstub1);
cudaDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &now);
time2 = (double)now.tv_sec+(double)now.tv_nsec*1e-9;
printf(", %f\n", (time2-time1)*1000);
// Restore original value
kstub0->total_tasks = save_total_task0;
kstub1->total_tasks = save_total_task1;
// Free
remove_kstreams(kstr);
free(kstr);
remove_coexec(&coexec);
return 0;
}
// Applications is composed of one or several kernels
typedef struct{
int num_kernels;
int index;
t_Kernel kid[8]; // Max: 8 kernels per application
t_kernel_stub* kstubs[8]; // One kernel stub per kernel
}t_application;
int online_profiler_overhead(t_Kernel *kid, int num_kernels, int deviceId)
{
struct timespec now;
cudaError_t err;
// Select device
cudaSetDevice(deviceId);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, deviceId);
printf("Device=%s\n", deviceProp.name);
/** Create commom streams for all kernels: two for asynchronous transfers, one for preemption commands*/
cudaStream_t *transfers_s;
transfers_s = (cudaStream_t *)calloc(2, sizeof(cudaStream_t));
for (int i=0;i<2;i++){
err = cudaStreamCreate(&transfers_s[i]);
checkCudaErrors(err);
}
cudaStream_t preemp_s;
checkCudaErrors(cudaStreamCreateWithFlags(&preemp_s, cudaStreamNonBlocking));
/** Create stubs ***/
int max_num_kernels=13;
t_kernel_stub **kstubs = (t_kernel_stub **)calloc(max_num_kernels, sizeof(t_kernel_stub*));
int cont=0;
for (int i=0; i<num_kernels; i++) {
create_stubinfo(&kstubs[cont++], deviceId, kid[i], transfers_s, &preemp_s);
if (kid[i] == GCEDD) {
create_stubinfo_with_params(&kstubs[cont++], deviceId, SCEDD, transfers_s, &preemp_s, kstubs[i]->params);
create_stubinfo_with_params(&kstubs[cont++], deviceId, NCEDD, transfers_s, &preemp_s, kstubs[i]->params);
create_stubinfo_with_params(&kstubs[cont++], deviceId, HCEDD, transfers_s, &preemp_s, kstubs[i]->params);
}
if (kid[i] == RCONV){
create_stubinfo_with_params(&kstubs[cont++], deviceId, CCONV, transfers_s, &preemp_s, kstubs[i]->params);
}
}
// Make allocation and HtD transfer for kernels
make_transfers(kstubs, cont);
for (int i=0; i<cont; i++)
for (int j=i+1; j<cont; j++)
pair_overhed(kstubs[i], kstubs[j]);
return 0;
} |
33a2305e2cc4e8012ad412a82d23df59f5e03f36.hip | // !!! This is a file automatically generated by hipify!!!
#include <gtest/gtest.h>
#include "functions/hinge.h"
#include "random/rng.h"
#include "test_utils.h"
namespace MLCommon {
namespace Functions {
template <typename T>
struct HingeLossInputs {
T tolerance;
T n_rows;
T n_cols;
int len;
};
template <typename T>
class HingeLossTest: public ::testing::TestWithParam<HingeLossInputs<T> > {
protected:
void SetUp() override {
params = ::testing::TestWithParam<HingeLossInputs<T>>::GetParam();
int len = params.len;
int n_rows = params.n_rows;
int n_cols = params.n_cols;
T *labels, *coef;
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
allocator.reset(new defaultDeviceAllocator);
allocate(in, len);
allocate(out, 1);
allocate(out_lasso, 1);
allocate(out_ridge, 1);
allocate(out_elasticnet, 1);
allocate(out_grad, n_cols);
allocate(out_lasso_grad, n_cols);
allocate(out_ridge_grad, n_cols);
allocate(out_elasticnet_grad, n_cols);
allocate(out_ref, 1);
allocate(out_lasso_ref, 1);
allocate(out_ridge_ref, 1);
allocate(out_elasticnet_ref, 1);
allocate(out_grad_ref, n_cols);
allocate(out_lasso_grad_ref, n_cols);
allocate(out_ridge_grad_ref, n_cols);
allocate(out_elasticnet_grad_ref, n_cols);
allocate(labels, params.n_rows);
allocate(coef, params.n_cols);
T h_in[len] = {0.1, 0.35, -0.9, -1.4, 2.0, 3.1};
updateDevice(in, h_in, len, stream);
T h_labels[n_rows] = {0.3, 2.0, -1.1};
updateDevice(labels, h_labels, n_rows, stream);
T h_coef[n_cols] = {0.35, -0.24};
updateDevice(coef, h_coef, n_cols, stream);
T h_out_ref[1] = {2.6037};
updateDevice(out_ref, h_out_ref, 1, stream);
T h_out_lasso_ref[1] = {2.9577};
updateDevice(out_lasso_ref, h_out_lasso_ref, 1, stream);
T h_out_ridge_ref[1] = {2.71176};
updateDevice(out_ridge_ref, h_out_ridge_ref, 1, stream);
T h_out_elasticnet_ref[1] = {2.83473};
updateDevice(out_elasticnet_ref, h_out_elasticnet_ref, 1, stream);
T h_out_grad_ref[n_cols] = {-0.24333, -1.1933};
updateDevice(out_grad_ref, h_out_grad_ref, n_cols, stream);
T h_out_lasso_grad_ref[n_cols] = {0.3566, -1.7933};
updateDevice(out_lasso_grad_ref, h_out_lasso_grad_ref, n_cols, stream);
T h_out_ridge_grad_ref[n_cols] = {0.1766, -1.4813};
updateDevice(out_ridge_grad_ref, h_out_ridge_grad_ref, n_cols, stream);
T h_out_elasticnet_grad_ref[n_cols] = {0.2666, -1.63733};
updateDevice(out_elasticnet_grad_ref, h_out_elasticnet_grad_ref, n_cols, stream);
T alpha = 0.6;
T l1_ratio = 0.5;
hingeLoss(in, params.n_rows, params.n_cols, labels, coef, out, penalty::NONE,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
hingeLossGrads(in, params.n_rows, params.n_cols, labels, coef, out_grad, penalty::NONE,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
hingeLoss(in, params.n_rows, params.n_cols, labels, coef, out_lasso, penalty::L1,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
hingeLossGrads(in, params.n_rows, params.n_cols, labels, coef, out_lasso_grad, penalty::L1,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
hingeLoss(in, params.n_rows, params.n_cols, labels, coef, out_ridge, penalty::L2,
alpha, l1_ratio, cublas_handle, allocator, stream);
hingeLossGrads(in, params.n_rows, params.n_cols, labels, coef, out_ridge_grad, penalty::L2,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
hingeLoss(in, params.n_rows, params.n_cols, labels, coef, out_elasticnet, penalty::ELASTICNET,
alpha, l1_ratio, cublas_handle, allocator, stream);
hingeLossGrads(in, params.n_rows, params.n_cols, labels, coef, out_elasticnet_grad, penalty::ELASTICNET,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(labels));
CUDA_CHECK(hipFree(coef));
}
void TearDown() override {
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipFree(out_lasso));
CUDA_CHECK(hipFree(out_ridge));
CUDA_CHECK(hipFree(out_elasticnet));
CUDA_CHECK(hipFree(out_grad));
CUDA_CHECK(hipFree(out_lasso_grad));
CUDA_CHECK(hipFree(out_ridge_grad));
CUDA_CHECK(hipFree(out_elasticnet_grad));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out_lasso_ref));
CUDA_CHECK(hipFree(out_ridge_ref));
CUDA_CHECK(hipFree(out_elasticnet_ref));
CUDA_CHECK(hipFree(out_grad_ref));
CUDA_CHECK(hipFree(out_lasso_grad_ref));
CUDA_CHECK(hipFree(out_ridge_grad_ref));
CUDA_CHECK(hipFree(out_elasticnet_grad_ref));
}
protected:
HingeLossInputs<T> params;
T *in;
T *out, *out_lasso, *out_ridge, *out_elasticnet;
T *out_ref, *out_lasso_ref, *out_ridge_ref, *out_elasticnet_ref;
T *out_grad, *out_lasso_grad, *out_ridge_grad, *out_elasticnet_grad;
T *out_grad_ref, *out_lasso_grad_ref, *out_ridge_grad_ref, *out_elasticnet_grad_ref;
std::shared_ptr<deviceAllocator> allocator;
};
const std::vector<HingeLossInputs<float> > inputsf = {
{0.01f, 3, 2, 6}
};
const std::vector<HingeLossInputs<double> > inputsd = {
{0.01, 3, 2, 6}
};
typedef HingeLossTest<float> HingeLossTestF;
TEST_P(HingeLossTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_ref, out_lasso, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_ref, out_ridge, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_grad_ref, out_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_grad_ref, out_lasso_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_grad_ref, out_ridge_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
}
typedef HingeLossTest<double> HingeLossTestD;
TEST_P(HingeLossTestD, Result){
ASSERT_TRUE(devArrMatch(out_ref, out, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_ref, out_lasso, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_ref, out_ridge, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_grad_ref, out_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_grad_ref, out_lasso_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_grad_ref, out_ridge_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(HingeLossTests, HingeLossTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(HingeLossTests, HingeLossTestD, ::testing::ValuesIn(inputsd));
} // end namespace Functions
} // end namespace MLCommon
| 33a2305e2cc4e8012ad412a82d23df59f5e03f36.cu | #include <gtest/gtest.h>
#include "functions/hinge.h"
#include "random/rng.h"
#include "test_utils.h"
namespace MLCommon {
namespace Functions {
template <typename T>
struct HingeLossInputs {
T tolerance;
T n_rows;
T n_cols;
int len;
};
template <typename T>
class HingeLossTest: public ::testing::TestWithParam<HingeLossInputs<T> > {
protected:
void SetUp() override {
params = ::testing::TestWithParam<HingeLossInputs<T>>::GetParam();
int len = params.len;
int n_rows = params.n_rows;
int n_cols = params.n_cols;
T *labels, *coef;
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
allocator.reset(new defaultDeviceAllocator);
allocate(in, len);
allocate(out, 1);
allocate(out_lasso, 1);
allocate(out_ridge, 1);
allocate(out_elasticnet, 1);
allocate(out_grad, n_cols);
allocate(out_lasso_grad, n_cols);
allocate(out_ridge_grad, n_cols);
allocate(out_elasticnet_grad, n_cols);
allocate(out_ref, 1);
allocate(out_lasso_ref, 1);
allocate(out_ridge_ref, 1);
allocate(out_elasticnet_ref, 1);
allocate(out_grad_ref, n_cols);
allocate(out_lasso_grad_ref, n_cols);
allocate(out_ridge_grad_ref, n_cols);
allocate(out_elasticnet_grad_ref, n_cols);
allocate(labels, params.n_rows);
allocate(coef, params.n_cols);
T h_in[len] = {0.1, 0.35, -0.9, -1.4, 2.0, 3.1};
updateDevice(in, h_in, len, stream);
T h_labels[n_rows] = {0.3, 2.0, -1.1};
updateDevice(labels, h_labels, n_rows, stream);
T h_coef[n_cols] = {0.35, -0.24};
updateDevice(coef, h_coef, n_cols, stream);
T h_out_ref[1] = {2.6037};
updateDevice(out_ref, h_out_ref, 1, stream);
T h_out_lasso_ref[1] = {2.9577};
updateDevice(out_lasso_ref, h_out_lasso_ref, 1, stream);
T h_out_ridge_ref[1] = {2.71176};
updateDevice(out_ridge_ref, h_out_ridge_ref, 1, stream);
T h_out_elasticnet_ref[1] = {2.83473};
updateDevice(out_elasticnet_ref, h_out_elasticnet_ref, 1, stream);
T h_out_grad_ref[n_cols] = {-0.24333, -1.1933};
updateDevice(out_grad_ref, h_out_grad_ref, n_cols, stream);
T h_out_lasso_grad_ref[n_cols] = {0.3566, -1.7933};
updateDevice(out_lasso_grad_ref, h_out_lasso_grad_ref, n_cols, stream);
T h_out_ridge_grad_ref[n_cols] = {0.1766, -1.4813};
updateDevice(out_ridge_grad_ref, h_out_ridge_grad_ref, n_cols, stream);
T h_out_elasticnet_grad_ref[n_cols] = {0.2666, -1.63733};
updateDevice(out_elasticnet_grad_ref, h_out_elasticnet_grad_ref, n_cols, stream);
T alpha = 0.6;
T l1_ratio = 0.5;
hingeLoss(in, params.n_rows, params.n_cols, labels, coef, out, penalty::NONE,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
hingeLossGrads(in, params.n_rows, params.n_cols, labels, coef, out_grad, penalty::NONE,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
hingeLoss(in, params.n_rows, params.n_cols, labels, coef, out_lasso, penalty::L1,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
hingeLossGrads(in, params.n_rows, params.n_cols, labels, coef, out_lasso_grad, penalty::L1,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
hingeLoss(in, params.n_rows, params.n_cols, labels, coef, out_ridge, penalty::L2,
alpha, l1_ratio, cublas_handle, allocator, stream);
hingeLossGrads(in, params.n_rows, params.n_cols, labels, coef, out_ridge_grad, penalty::L2,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
hingeLoss(in, params.n_rows, params.n_cols, labels, coef, out_elasticnet, penalty::ELASTICNET,
alpha, l1_ratio, cublas_handle, allocator, stream);
hingeLossGrads(in, params.n_rows, params.n_cols, labels, coef, out_elasticnet_grad, penalty::ELASTICNET,
alpha, l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(labels));
CUDA_CHECK(cudaFree(coef));
}
void TearDown() override {
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaFree(out_lasso));
CUDA_CHECK(cudaFree(out_ridge));
CUDA_CHECK(cudaFree(out_elasticnet));
CUDA_CHECK(cudaFree(out_grad));
CUDA_CHECK(cudaFree(out_lasso_grad));
CUDA_CHECK(cudaFree(out_ridge_grad));
CUDA_CHECK(cudaFree(out_elasticnet_grad));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out_lasso_ref));
CUDA_CHECK(cudaFree(out_ridge_ref));
CUDA_CHECK(cudaFree(out_elasticnet_ref));
CUDA_CHECK(cudaFree(out_grad_ref));
CUDA_CHECK(cudaFree(out_lasso_grad_ref));
CUDA_CHECK(cudaFree(out_ridge_grad_ref));
CUDA_CHECK(cudaFree(out_elasticnet_grad_ref));
}
protected:
HingeLossInputs<T> params;
T *in;
T *out, *out_lasso, *out_ridge, *out_elasticnet;
T *out_ref, *out_lasso_ref, *out_ridge_ref, *out_elasticnet_ref;
T *out_grad, *out_lasso_grad, *out_ridge_grad, *out_elasticnet_grad;
T *out_grad_ref, *out_lasso_grad_ref, *out_ridge_grad_ref, *out_elasticnet_grad_ref;
std::shared_ptr<deviceAllocator> allocator;
};
const std::vector<HingeLossInputs<float> > inputsf = {
{0.01f, 3, 2, 6}
};
const std::vector<HingeLossInputs<double> > inputsd = {
{0.01, 3, 2, 6}
};
typedef HingeLossTest<float> HingeLossTestF;
TEST_P(HingeLossTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_ref, out_lasso, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_ref, out_ridge, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_grad_ref, out_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_grad_ref, out_lasso_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_grad_ref, out_ridge_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
}
typedef HingeLossTest<double> HingeLossTestD;
TEST_P(HingeLossTestD, Result){
ASSERT_TRUE(devArrMatch(out_ref, out, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_ref, out_lasso, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_ref, out_ridge, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_grad_ref, out_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_grad_ref, out_lasso_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_grad_ref, out_ridge_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(HingeLossTests, HingeLossTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(HingeLossTests, HingeLossTestD, ::testing::ValuesIn(inputsd));
} // end namespace Functions
} // end namespace MLCommon
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.