hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
b27a02ea496c5104b62b9676a286ce0b4ec2503e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define RADIUS 3
#define BLOCK_SIZE 256
#define NUM_ELEMENTS (4096*2)
#define MAX 49152
// CUDA API error checking macro
#define cudaCheck(error) \
if (error != hipSuccess) { \
printf("Fatal error: %s at %s:%d\n", \
hipGetErrorString(error), \
__FILE__, __LINE__); \
exit(1); \
}
__global__ void stencil_1d(int *in, int *out)
{
//__shared__ int temp[BLOCK_SIZE + 2 * RADIUS];
// int = 4 bit
// 49152/4 = 12288
__shared__ int temp[12289];
int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + RADIUS;
int lindex = threadIdx.x + RADIUS;
// Read input elements into shared memory
temp[lindex] = in[gindex];
if (threadIdx.x < RADIUS)
{
temp[lindex - RADIUS] = in[gindex - RADIUS];
temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
}
// Make sure all threads get to this point before proceeding!
__syncthreads();
// Apply the stencil
int result = 0;
for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
result += temp[lindex + offset];
// Store the result
out[gindex-RADIUS] = result;
}
int main()
{
unsigned int i;
int h_in[NUM_ELEMENTS + 2 * RADIUS], h_out[NUM_ELEMENTS];
int *d_in, *d_out;
// Initialize host data
for( i = 0; i < (NUM_ELEMENTS + 2*RADIUS); ++i )
h_in[i] = 1; // With a value of 1 and RADIUS of 3, all output values should be 7
// Allocate space on the device
cudaCheck( hipMalloc( &d_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int)) );
cudaCheck( hipMalloc( &d_out, NUM_ELEMENTS * sizeof(int)) );
// Copy input data to device
cudaCheck( hipMemcpy( d_in, h_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int), hipMemcpyHostToDevice) );
int blockNum = (NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE;
printf("%d\n", blockNum);
hipLaunchKernelGGL(( stencil_1d), dim3(blockNum), dim3(BLOCK_SIZE) , 0, 0, d_in, d_out);
cudaCheck( hipMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int), hipMemcpyDeviceToHost) );
// Verify every out value is 7
for( i = 0; i < NUM_ELEMENTS; ++i )
if (h_out[i] != 7)
{
printf("Element h_out[%d] == %d != 7\n", i, h_out[i]);
break;
}
if (i == NUM_ELEMENTS)
printf("SUCCESS!\n");
// Free out memory
hipFree(d_in);
hipFree(d_out);
return 0;
}
| b27a02ea496c5104b62b9676a286ce0b4ec2503e.cu | #include <stdio.h>
#define RADIUS 3
#define BLOCK_SIZE 256
#define NUM_ELEMENTS (4096*2)
#define MAX 49152
// CUDA API error checking macro
#define cudaCheck(error) \
if (error != cudaSuccess) { \
printf("Fatal error: %s at %s:%d\n", \
cudaGetErrorString(error), \
__FILE__, __LINE__); \
exit(1); \
}
__global__ void stencil_1d(int *in, int *out)
{
//__shared__ int temp[BLOCK_SIZE + 2 * RADIUS];
// int = 4 bit
// 49152/4 = 12288
__shared__ int temp[12289];
int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + RADIUS;
int lindex = threadIdx.x + RADIUS;
// Read input elements into shared memory
temp[lindex] = in[gindex];
if (threadIdx.x < RADIUS)
{
temp[lindex - RADIUS] = in[gindex - RADIUS];
temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
}
// Make sure all threads get to this point before proceeding!
__syncthreads();
// Apply the stencil
int result = 0;
for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
result += temp[lindex + offset];
// Store the result
out[gindex-RADIUS] = result;
}
int main()
{
unsigned int i;
int h_in[NUM_ELEMENTS + 2 * RADIUS], h_out[NUM_ELEMENTS];
int *d_in, *d_out;
// Initialize host data
for( i = 0; i < (NUM_ELEMENTS + 2*RADIUS); ++i )
h_in[i] = 1; // With a value of 1 and RADIUS of 3, all output values should be 7
// Allocate space on the device
cudaCheck( cudaMalloc( &d_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int)) );
cudaCheck( cudaMalloc( &d_out, NUM_ELEMENTS * sizeof(int)) );
// Copy input data to device
cudaCheck( cudaMemcpy( d_in, h_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int), cudaMemcpyHostToDevice) );
int blockNum = (NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE;
printf("%d\n", blockNum);
stencil_1d<<< blockNum, BLOCK_SIZE >>> (d_in, d_out);
cudaCheck( cudaMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int), cudaMemcpyDeviceToHost) );
// Verify every out value is 7
for( i = 0; i < NUM_ELEMENTS; ++i )
if (h_out[i] != 7)
{
printf("Element h_out[%d] == %d != 7\n", i, h_out[i]);
break;
}
if (i == NUM_ELEMENTS)
printf("SUCCESS!\n");
// Free out memory
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
f940ed4fbe64e1502b8a1cf437dc3b602de97799.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
template <typename T, typename Operator>
__global__ void transform(const T* input1, const T* input2, T* out, const size_t numItems)
{
Operator op;
#pragma unroll UNROLL_FACTOR
for (int element = 0; element < ELEMENTS_PER_THREAD; ++element)
{
#ifdef MEMORY_MAPPING_PATTERN == 0
const size_t index = ELEMENTS_PER_THREAD * (blockIdx.x * blockDim.x + threadIdx.x);
const size_t offsetIndex = index + element;
#elif MEMORY_MAPPING_PATTERN == 1
const size_t index = blockIdx.x * blockDim.x + threadIdx.x;
const size_t offsetIndex = index + ELEMENTS_PER_THREAD * blockDim.x * element;
#endif
if (offsetIndex < numItems)
{
out[offsetIndex] = op(input1[offsetIndex], input2[offsetIndex]);
}
}
};
| f940ed4fbe64e1502b8a1cf437dc3b602de97799.cu | template <typename T, typename Operator>
__global__ void transform(const T* input1, const T* input2, T* out, const size_t numItems)
{
Operator op;
#pragma unroll UNROLL_FACTOR
for (int element = 0; element < ELEMENTS_PER_THREAD; ++element)
{
#ifdef MEMORY_MAPPING_PATTERN == 0
const size_t index = ELEMENTS_PER_THREAD * (blockIdx.x * blockDim.x + threadIdx.x);
const size_t offsetIndex = index + element;
#elif MEMORY_MAPPING_PATTERN == 1
const size_t index = blockIdx.x * blockDim.x + threadIdx.x;
const size_t offsetIndex = index + ELEMENTS_PER_THREAD * blockDim.x * element;
#endif
if (offsetIndex < numItems)
{
out[offsetIndex] = op(input1[offsetIndex], input2[offsetIndex]);
}
}
};
|
0fb6b0df7aa118e769f558abec7c9f0596c39c44.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudaRelated.h"
#include <cutil.h>
#include <cutil_inline.h>
#include "cudaKernel.cu"
#include <iostream>
using namespace std;
void memAlloc(void ** address, int size) {
hipHostMalloc(address, size, hipHostMallocDefault);
}
void memDealloc(void * address) {
if (address != 0)
hipHostFree(address);
}
void cudaMemAlloc(void **address, int size,bool isMemset) {
hipMalloc(address, size);
if(isMemset == true){
hipMemset(address, 0, size);
}
}
void cudaMemFree(void * address) {
if (address != 0)
hipFree(address);
}
void cudaMemoryCopy(void * h_address, void * d_address,int size){
hipMemcpy(d_address, h_address,size, hipMemcpyHostToDevice);
}
void cudaDistance(cudaGrid * h_cudaArray, cudaGrid * d_cudeArray, cudaGrid * h_curGrid, cudaGrid * d_curGrid, int size, int * d_result, int * h_result,int type,int * h_pointCnt,int * d_pointCnt) {
unsigned int timer = 0;
cutCreateTimer(&timer);
cutStartTimer(timer);
hipMemset(d_result,0,sizeof(int)*size);
hipLaunchKernelGGL(( expand), dim3(size/MAXTHREAD),dim3(MAXTHREAD), 0, 0, d_cudeArray,d_curGrid,d_result,type,d_pointCnt,size,eps,DIMENSION,minPts);
hipMemcpy(h_pointCnt,d_pointCnt,sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(h_result, d_result, sizeof(int)*size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
cutStopTimer(timer);
//cout << "timer = " << cutGetTimerValue(timer) << endl;
}
void cudaMap(cudaGrid * hipArray, int localGridNum, int * gridnum, int * stride1d, int * stride2d, int * h_intermediate)//h_intermediate will be allocated outside.
{
unsigned int timer = 0;
for (int i = 0; i < 100; i++) {
cout << h_intermediate[i] << ",";
}
cout << endl;
int globalGridNum = stride1d[localGridNum - 1] + gridnum[localGridNum - 1];
int globalGridNum2 = stride2d[localGridNum - 1] + gridnum[localGridNum - 1] * gridnum[localGridNum - 1];
cutCreateTimer(&timer);
cutStartTimer(timer);
cudaGrid * d_cudaGrid = 0;
hipMalloc((void**) &d_cudaGrid, sizeof(cudaGrid) * globalGridNum);
hipMemset(d_cudaGrid, 0, sizeof(cudaGrid) * globalGridNum);
hipMemcpy(d_cudaGrid, hipArray, sizeof(cudaGrid) * globalGridNum, hipMemcpyHostToDevice);
int * d_intermediate = 0;
hipMalloc((void**) &d_intermediate, sizeof(int) * globalGridNum2);
hipMemset(d_intermediate, 0, sizeof(int) * globalGridNum2);
int * d_gridnum = 0;
hipMalloc((void**) &d_gridnum, sizeof(int) * localGridNum);
hipMemcpy(d_gridnum, gridnum, sizeof(int) * localGridNum, hipMemcpyHostToDevice);
int * d_stride1d = 0;
hipMalloc((void**) &d_stride1d, sizeof(int) * localGridNum);
hipMemcpy(d_stride1d, stride1d, sizeof(int) * localGridNum, hipMemcpyHostToDevice);
int * d_stride2d = 0;
hipMalloc((void**) &d_stride2d, sizeof(int) * localGridNum);
hipMemcpy(d_stride2d, stride2d, sizeof(int) * localGridNum, hipMemcpyHostToDevice);
hipDeviceSynchronize();
dim3 blocksize(localGridNum, MAXTHREAD);
hipLaunchKernelGGL(( mapGrid), dim3(blocksize),dim3(MAXTHREAD), 0, 0, d_cudaGrid,d_intermediate,d_gridnum,d_stride1d,d_stride2d);
hipDeviceSynchronize();
hipMemcpy(h_intermediate, d_intermediate, sizeof(int) * globalGridNum2, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
cutStopTimer(timer);
cout << "timer = " << cutGetTimerValue(timer) << endl;
hipFree(d_cudaGrid);
hipFree(d_intermediate);
hipFree(d_gridnum);
hipFree(d_stride1d);
hipFree(d_stride2d);
}
| 0fb6b0df7aa118e769f558abec7c9f0596c39c44.cu | #include "cudaRelated.h"
#include <cutil.h>
#include <cutil_inline.h>
#include "cudaKernel.cu"
#include <iostream>
using namespace std;
void memAlloc(void ** address, int size) {
cudaHostAlloc(address, size, cudaHostAllocDefault);
}
void memDealloc(void * address) {
if (address != 0)
cudaFreeHost(address);
}
void cudaMemAlloc(void **address, int size,bool isMemset) {
cudaMalloc(address, size);
if(isMemset == true){
cudaMemset(address, 0, size);
}
}
void cudaMemFree(void * address) {
if (address != 0)
cudaFree(address);
}
void cudaMemoryCopy(void * h_address, void * d_address,int size){
cudaMemcpy(d_address, h_address,size, cudaMemcpyHostToDevice);
}
void cudaDistance(cudaGrid * h_cudaArray, cudaGrid * d_cudeArray, cudaGrid * h_curGrid, cudaGrid * d_curGrid, int size, int * d_result, int * h_result,int type,int * h_pointCnt,int * d_pointCnt) {
unsigned int timer = 0;
cutCreateTimer(&timer);
cutStartTimer(timer);
cudaMemset(d_result,0,sizeof(int)*size);
expand<<<size/MAXTHREAD,MAXTHREAD>>>(d_cudeArray,d_curGrid,d_result,type,d_pointCnt,size,eps,DIMENSION,minPts);
cudaMemcpy(h_pointCnt,d_pointCnt,sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(h_result, d_result, sizeof(int)*size, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
cutStopTimer(timer);
//cout << "timer = " << cutGetTimerValue(timer) << endl;
}
void cudaMap(cudaGrid * cudaArray, int localGridNum, int * gridnum, int * stride1d, int * stride2d, int * h_intermediate)//h_intermediate will be allocated outside.
{
unsigned int timer = 0;
for (int i = 0; i < 100; i++) {
cout << h_intermediate[i] << ",";
}
cout << endl;
int globalGridNum = stride1d[localGridNum - 1] + gridnum[localGridNum - 1];
int globalGridNum2 = stride2d[localGridNum - 1] + gridnum[localGridNum - 1] * gridnum[localGridNum - 1];
cutCreateTimer(&timer);
cutStartTimer(timer);
cudaGrid * d_cudaGrid = 0;
cudaMalloc((void**) &d_cudaGrid, sizeof(cudaGrid) * globalGridNum);
cudaMemset(d_cudaGrid, 0, sizeof(cudaGrid) * globalGridNum);
cudaMemcpy(d_cudaGrid, cudaArray, sizeof(cudaGrid) * globalGridNum, cudaMemcpyHostToDevice);
int * d_intermediate = 0;
cudaMalloc((void**) &d_intermediate, sizeof(int) * globalGridNum2);
cudaMemset(d_intermediate, 0, sizeof(int) * globalGridNum2);
int * d_gridnum = 0;
cudaMalloc((void**) &d_gridnum, sizeof(int) * localGridNum);
cudaMemcpy(d_gridnum, gridnum, sizeof(int) * localGridNum, cudaMemcpyHostToDevice);
int * d_stride1d = 0;
cudaMalloc((void**) &d_stride1d, sizeof(int) * localGridNum);
cudaMemcpy(d_stride1d, stride1d, sizeof(int) * localGridNum, cudaMemcpyHostToDevice);
int * d_stride2d = 0;
cudaMalloc((void**) &d_stride2d, sizeof(int) * localGridNum);
cudaMemcpy(d_stride2d, stride2d, sizeof(int) * localGridNum, cudaMemcpyHostToDevice);
cudaThreadSynchronize();
dim3 blocksize(localGridNum, MAXTHREAD);
mapGrid<<<blocksize,MAXTHREAD>>>(d_cudaGrid,d_intermediate,d_gridnum,d_stride1d,d_stride2d);
cudaThreadSynchronize();
cudaMemcpy(h_intermediate, d_intermediate, sizeof(int) * globalGridNum2, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
cutStopTimer(timer);
cout << "timer = " << cutGetTimerValue(timer) << endl;
cudaFree(d_cudaGrid);
cudaFree(d_intermediate);
cudaFree(d_gridnum);
cudaFree(d_stride1d);
cudaFree(d_stride2d);
}
|
8a9a65e97ca5ee251a3708dba6d3e4bd28cd660e.hip | // !!! This is a file automatically generated by hipify!!!
/**********************************************************************************
This code performs a calculation of pi using the monte carlo method
using cuda GPU parallelisation.
Created by: George Tall
Email: george.tall@seh.ox.ac.uk
/*********************************************************************************/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
__global__ void point_test(int *N, float *d_x, float *d_y, float *d_R, float *d_A){
long int index = blockDim.x*blockIdx.x + threadIdx.x;
//printf("%d\n", index);
// Now each value of d_R is computed
d_R[index] = d_x[index]*d_x[index] + d_y[index]*d_y[index];
//printf("Thread %d d_R is %f \n", index, d_R[index]);
// sync threads at this point to prevent deadlock
__syncthreads();
if(d_R[index] < 1.0f) atomicAdd(&d_A[blockIdx.x], 1);
//printf("\nPoints in block %d = %d", blockIdx.x, d_A[blockIdx.x]);
}
__global__ void area_reduction(float *d_A){
// allocate shared memory
extern __shared__ float shared_array[];
// copy passed array into shared array
int tid = threadIdx.x;
//long int index = blockIdx.x*blockDim.x + threadIdx.x;
shared_array[tid] = d_A[tid];
__syncthreads();
for(long int d = blockDim.x/2; d > 0; d /= 2){
if(tid < d){
atomicAdd(&shared_array[tid], shared_array[tid+d]);
}
__syncthreads();
}
__syncthreads();
// if you're the first thread get the value from shared array
if(tid == 0){
d_A[0] = shared_array[0];
}
}
int main() {
// N is the number of random points.
// area stores the number of random points that fall into
// the area of the quadrant of a circle of radius 1
//size_t N = 2^10;
int N = 6536;
float area=0;
//initalize the GPU
int nBlocks = N/256;
int nThreads = 256;
int deviceid = 0; // using GPU with id 0
int devCount;
// gets number of GPU available
hipGetDeviceCount(&devCount);
// check if we have enough GPUs
if(deviceid<devCount) {
// tell CUDA that we want to use GPU 0
hipSetDevice(deviceid);
}
else return(1);
//random variable gen
hiprandGenerator_t gen;
//pointers to host memory and device memory we have a pointer for a radius in the device to calculate that before conditional is operated
//we are also going to have an area count per block to prevent confusion in the kernal if statement later
float *h_x, *h_y, *h_A;
float *d_x, *d_y, *d_R, *d_A;
//allocate host memory
h_x = (float*)malloc(N*sizeof(float));
h_y = (float*)malloc(N*sizeof(float));
h_A = (float*)malloc(nBlocks*sizeof(float));
//allocate device memory
hipMalloc((void**)&d_x, N*sizeof(float));
hipMalloc((void**)&d_y, N*sizeof(float));
hipMalloc((void**)&d_R, N*sizeof(float));
hipMalloc((void**)&d_A, nBlocks*sizeof(float));
// Create a pseudo-random number generator
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
// Set a seed
hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
// Generate N pseudo random numbers on device for x
hiprandGenerateUniform(gen, d_x, N);
hiprandGenerateUniform(gen, d_y, N);
// Kernal for testing if points lie in area or not
hipLaunchKernelGGL(( point_test), dim3(nBlocks), dim3(nThreads), 0, 0, &N, d_x, d_y, d_R, d_A);
// Syncronise the device here in order for all the blocks to finish calculating their area data points
hipDeviceSynchronize();
// Kernal for reducing the sum of the areas of the blocks
hipLaunchKernelGGL(( area_reduction), dim3(nBlocks/nThreads), dim3(nThreads), nBlocks*sizeof(float), 0, d_A);
//Copy the generated numbers back to host
// I've bought the other data back aside from h_A because it appears my reduction doesn't work
hipMemcpy(h_x, d_x, N*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(h_y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(h_A, d_A, nBlocks*sizeof(float), hipMemcpyDeviceToHost);
for(int i = 0; i < nBlocks; i++){
printf("%f \n",h_A[i]);
}
area = h_A[0];
printf("\nPi from reduction:\t%f\n", (4.0*area)/(float)N);
// reset area to zero so that we can do a monte carlo method on the host, this now limits the number of points we can use
area = 0;
for(int i = 0; i < N; i++){
if(h_x[i]*h_x[i] + h_y[i]*h_y[i] < 1.0f) area++;
}
printf("\nPi from host:\t%f\n", (4.0*area)/(float)N);
/*
area = 0;
for(int i=0; i<N; i++) {
double x = ((double)rand())/RAND_MAX;
double y = ((double)rand())/RAND_MAX;
if(x*x + y*y <= 1.0) area++;
}
printf("\nPi:\t%f\n", (4.0*area)/(double)N);
*/
// Free memory on host and device
hipFree(d_x); hipFree(d_y); hipFree(d_R); hipFree(d_A);
free(h_x); free(h_y); free(h_A);
return(0);
}
| 8a9a65e97ca5ee251a3708dba6d3e4bd28cd660e.cu | /**********************************************************************************
This code performs a calculation of pi using the monte carlo method
using cuda GPU parallelisation.
Created by: George Tall
Email: george.tall@seh.ox.ac.uk
/*********************************************************************************/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
__global__ void point_test(int *N, float *d_x, float *d_y, float *d_R, float *d_A){
long int index = blockDim.x*blockIdx.x + threadIdx.x;
//printf("%d\n", index);
// Now each value of d_R is computed
d_R[index] = d_x[index]*d_x[index] + d_y[index]*d_y[index];
//printf("Thread %d d_R is %f \n", index, d_R[index]);
// sync threads at this point to prevent deadlock
__syncthreads();
if(d_R[index] < 1.0f) atomicAdd(&d_A[blockIdx.x], 1);
//printf("\nPoints in block %d = %d", blockIdx.x, d_A[blockIdx.x]);
}
__global__ void area_reduction(float *d_A){
// allocate shared memory
extern __shared__ float shared_array[];
// copy passed array into shared array
int tid = threadIdx.x;
//long int index = blockIdx.x*blockDim.x + threadIdx.x;
shared_array[tid] = d_A[tid];
__syncthreads();
for(long int d = blockDim.x/2; d > 0; d /= 2){
if(tid < d){
atomicAdd(&shared_array[tid], shared_array[tid+d]);
}
__syncthreads();
}
__syncthreads();
// if you're the first thread get the value from shared array
if(tid == 0){
d_A[0] = shared_array[0];
}
}
int main() {
// N is the number of random points.
// area stores the number of random points that fall into
// the area of the quadrant of a circle of radius 1
//size_t N = 2^10;
int N = 6536;
float area=0;
//initalize the GPU
int nBlocks = N/256;
int nThreads = 256;
int deviceid = 0; // using GPU with id 0
int devCount;
// gets number of GPU available
cudaGetDeviceCount(&devCount);
// check if we have enough GPUs
if(deviceid<devCount) {
// tell CUDA that we want to use GPU 0
cudaSetDevice(deviceid);
}
else return(1);
//random variable gen
curandGenerator_t gen;
//pointers to host memory and device memory we have a pointer for a radius in the device to calculate that before conditional is operated
//we are also going to have an area count per block to prevent confusion in the kernal if statement later
float *h_x, *h_y, *h_A;
float *d_x, *d_y, *d_R, *d_A;
//allocate host memory
h_x = (float*)malloc(N*sizeof(float));
h_y = (float*)malloc(N*sizeof(float));
h_A = (float*)malloc(nBlocks*sizeof(float));
//allocate device memory
cudaMalloc((void**)&d_x, N*sizeof(float));
cudaMalloc((void**)&d_y, N*sizeof(float));
cudaMalloc((void**)&d_R, N*sizeof(float));
cudaMalloc((void**)&d_A, nBlocks*sizeof(float));
// Create a pseudo-random number generator
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
// Set a seed
curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
// Generate N pseudo random numbers on device for x
curandGenerateUniform(gen, d_x, N);
curandGenerateUniform(gen, d_y, N);
// Kernal for testing if points lie in area or not
point_test<<<nBlocks, nThreads>>>(&N, d_x, d_y, d_R, d_A);
// Syncronise the device here in order for all the blocks to finish calculating their area data points
cudaDeviceSynchronize();
// Kernal for reducing the sum of the areas of the blocks
area_reduction<<<nBlocks/nThreads, nThreads, nBlocks*sizeof(float)>>>(d_A);
//Copy the generated numbers back to host
// I've bought the other data back aside from h_A because it appears my reduction doesn't work
cudaMemcpy(h_x, d_x, N*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(h_y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(h_A, d_A, nBlocks*sizeof(float), cudaMemcpyDeviceToHost);
for(int i = 0; i < nBlocks; i++){
printf("%f \n",h_A[i]);
}
area = h_A[0];
printf("\nPi from reduction:\t%f\n", (4.0*area)/(float)N);
// reset area to zero so that we can do a monte carlo method on the host, this now limits the number of points we can use
area = 0;
for(int i = 0; i < N; i++){
if(h_x[i]*h_x[i] + h_y[i]*h_y[i] < 1.0f) area++;
}
printf("\nPi from host:\t%f\n", (4.0*area)/(float)N);
/*
area = 0;
for(int i=0; i<N; i++) {
double x = ((double)rand())/RAND_MAX;
double y = ((double)rand())/RAND_MAX;
if(x*x + y*y <= 1.0) area++;
}
printf("\nPi:\t%f\n", (4.0*area)/(double)N);
*/
// Free memory on host and device
cudaFree(d_x); cudaFree(d_y); cudaFree(d_R); cudaFree(d_A);
free(h_x); free(h_y); free(h_A);
return(0);
}
|
49929de89da5633f4bd0274728693ee3b2b0190b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime_api.h>
#include<cuda.h>
#include <stdio.h>
#include "Matrix.h"
#include <vector>
#include<random>
#include<iostream>
#include <hip/device_functions.h>
#include<time.h>
using namespace std;
void load_matrix_to_device(Matrix* pA, Matrix* pdA) {
int ht = pA->height;
int wt = pA->width;
pdA->width = wt;
pdA->height = ht;
size_t size = ht*wt * sizeof(double);
hipMalloc(&(pdA->array_), size);
hipMemcpy(pdA->array_, pA->array_, size, hipMemcpyHostToDevice);
}
void load_matrix_to_host(Matrix* pA, Matrix*pdA) {
int ht = pdA->height;
int wt = pdA->width;
size_t size = ht*wt * sizeof(double);
hipMemcpy(pA->array_, pdA->array_, size, hipMemcpyDeviceToHost);
}
__global__ void MatMulKernel (Matrix d_W,Matrix d_H, Matrix d_AR) {
int row = threadIdx.y + blockIdx.y*blockDim.y; // t
int col = threadIdx.x + blockIdx.x*blockDim.x;
double res = 0;
for (int e = 0; e < d_W.width; ++e) {
res += d_W.array_[row * d_W.width + e] * d_H.array_[col * d_H.width + e];
}
d_AR.array_[row * d_AR.width + col] = res;
}
__global__ void CCDKernel(Matrix A, Matrix W, Matrix R, Matrix H, double lambda) {
int row = threadIdx.y + blockIdx.y*blockDim.y; // t
int col = threadIdx.x + blockIdx.x*blockDim.x; // the same for each block of k threads , i
int m = A.height;
int n = A.width;
int k = W.width;
double z_star = 0;
double num_z_star = 0;
double denum_z_star = lambda;
double s_star = 0;
double num_s_star = 0;
double denum_s_star = lambda;
// update Rij for all j in omega_i
if (col < m) {
// we're still updating W
for (int j = 0; j < n; ++j) {
double res = 0;
if (A.array_[col*A.width + j] != 0.0){
for (int e = 0; e < k; ++e) {
res = res + W.array_[col*k + e] * H.array_[e*n + j];
}
R.array_[col*n + j] = A.array_[col*n + j] - res;
num_z_star += (R.array_[col*n + j] + W.array_[col*W.width + row] * H.array_[j*H.width + row])*H.array_[j*H.width + row];
denum_z_star += H.array_[j*H.width + row] * H.array_[j*H.width + row];
}
}
// Rij update for all j in omega_i ( i =col )
z_star = num_z_star / denum_z_star;
for (int j = 0; j < n; ++j) {
if (A.array_[col*A.width + j] != 0.0) {
R.array_[col*A.width + j] = R.array_[col*A.width + j] - (z_star - W.array_[col*W.width + row])*H.array_[j*H.width + row];
}
}
W.array_[col*k + row] = z_star;
}
// we must synchronyze threads before updating H
void __syncthreads();
if ( col >= m ) {
// we're now updating H
for (int i = 0; i < m; ++i) {
if (A.array_[i*A.width + col-m] != 0.0) {
num_s_star += (R.array_[i*A.width + col-m] + W.array_[i*W.width + row] * H.array_[(col-m)*H.width + row])*W.array_[i*W.width + row];
denum_s_star += W.array_[i*W.width + row] * W.array_[i*W.width + row];
}
}
// Rij update for all j in omega_i ( i =col )
s_star = num_s_star / denum_s_star;
for (int i = 0; i < m; ++i) {
if (A.array_[i*A.width + col-m] != 0) {
R.array_[i*A.width + col-m] = R.array_[i*A.width + col-m] - (s_star - H.array_[(col-m)*H.width + row])*W.array_[i*W.width + row];
}
}
H.array_[(col-m)*H.width + row] = s_star;
}
}
int main() {
for (int iter = 1; iter < 50; iter++) {
clock_t tStart = clock();
//int iter = 3;
double lambda = 500;
// height <-> rows , width <-> column
// matrix A is a squared matrix with missing values
// we first generate A randomly
double* ele = new double[9216];
double* el = new double[9216];
double* elem = new double[9216];
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<> dis(0, 5);
for (int i = 0; i < 9216; i++) {
double num_gene = dis(gen);
if (num_gene <= 1) {
ele[i] = 0.0;
}
else {
ele[i] = num_gene;
}
elem[i] = 0.0;
el[i] = num_gene;
}
Matrix A = Matrix(96, 96, ele);
Matrix W = Matrix(96, 96, elem); // zeros
Matrix H = Matrix(96, 96, el); // zeros
Matrix R = A;
// load A,W,H,R to the device memory
Matrix d_A;
Matrix d_W;
Matrix d_H;
Matrix d_R;
// Invoke kernel
//vector<double> error;
dim3 dimBlock(16, 16);
int gri_wd = W.height + H.height;
// verify division
dim3 dimGrid(12, 6);
for (int i = 0; i < iter; ++i) {
load_matrix_to_device(&A, &d_A);
//W
load_matrix_to_device(&W, &d_W);
//H
load_matrix_to_device(&H, &d_H);
// R
load_matrix_to_device(&R, &d_R);
CCDKernel << <dimGrid, dimBlock >> > (d_A, d_W, d_R, d_H, lambda);
// store error in host
load_matrix_to_host(&W, &d_W);
load_matrix_to_host(&H, &d_H);
load_matrix_to_host(&R, &d_R);
load_matrix_to_host(&A, &d_A);
/*double res = 0;
for (int i = 0; i < 10; i++) {
res += W.array_[70 + i] * H.array_[30 + i];
}
cout << "iter " << i << " A : " << A.array_[73] << " R : " << R.array_[73] << " res " << res << "\n" ;
*/
}
dim3 dimBlock1(16, 16);
dim3 dimGrid1(6, 6);
Matrix AR = Matrix(96, 96, elem);
Matrix d_AR;
load_matrix_to_device(&AR, &d_AR);
MatMulKernel << <dimGrid1, dimBlock1 >> > (d_W, d_H, d_AR);
load_matrix_to_host(&AR, &d_AR);
// Read W,H,R from device memory
double erro = 0;
for (int i = 0; i < 9216; i++) {
if (A.array_[i] != 0.0) {
erro += (AR.array_[i] - A.array_[i])*(AR.array_[i] - A.array_[i]);
}
//cout << "AR : " << AR.array_[i] << " A :" << A.array_[i] << "\n " ;
}
erro = erro / 9216;
cout << sqrt(erro) << " iter : " <<iter ;
printf("Time taken: %.2fs\n", (double)(clock() - tStart) / CLOCKS_PER_SEC);
// Free device memory
hipFree(d_W.array_); hipFree(d_H.array_); hipFree(d_R.array_); hipFree(d_A.array_);
}
system("pause");
} | 49929de89da5633f4bd0274728693ee3b2b0190b.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda_runtime_api.h>
#include<cuda.h>
#include <stdio.h>
#include "Matrix.h"
#include <vector>
#include<random>
#include<iostream>
#include <device_functions.h>
#include<time.h>
using namespace std;
void load_matrix_to_device(Matrix* pA, Matrix* pdA) {
int ht = pA->height;
int wt = pA->width;
pdA->width = wt;
pdA->height = ht;
size_t size = ht*wt * sizeof(double);
cudaMalloc(&(pdA->array_), size);
cudaMemcpy(pdA->array_, pA->array_, size, cudaMemcpyHostToDevice);
}
void load_matrix_to_host(Matrix* pA, Matrix*pdA) {
int ht = pdA->height;
int wt = pdA->width;
size_t size = ht*wt * sizeof(double);
cudaMemcpy(pA->array_, pdA->array_, size, cudaMemcpyDeviceToHost);
}
__global__ void MatMulKernel (Matrix d_W,Matrix d_H, Matrix d_AR) {
int row = threadIdx.y + blockIdx.y*blockDim.y; // t
int col = threadIdx.x + blockIdx.x*blockDim.x;
double res = 0;
for (int e = 0; e < d_W.width; ++e) {
res += d_W.array_[row * d_W.width + e] * d_H.array_[col * d_H.width + e];
}
d_AR.array_[row * d_AR.width + col] = res;
}
__global__ void CCDKernel(Matrix A, Matrix W, Matrix R, Matrix H, double lambda) {
int row = threadIdx.y + blockIdx.y*blockDim.y; // t
int col = threadIdx.x + blockIdx.x*blockDim.x; // the same for each block of k threads , i
int m = A.height;
int n = A.width;
int k = W.width;
double z_star = 0;
double num_z_star = 0;
double denum_z_star = lambda;
double s_star = 0;
double num_s_star = 0;
double denum_s_star = lambda;
// update Rij for all j in omega_i
if (col < m) {
// we're still updating W
for (int j = 0; j < n; ++j) {
double res = 0;
if (A.array_[col*A.width + j] != 0.0){
for (int e = 0; e < k; ++e) {
res = res + W.array_[col*k + e] * H.array_[e*n + j];
}
R.array_[col*n + j] = A.array_[col*n + j] - res;
num_z_star += (R.array_[col*n + j] + W.array_[col*W.width + row] * H.array_[j*H.width + row])*H.array_[j*H.width + row];
denum_z_star += H.array_[j*H.width + row] * H.array_[j*H.width + row];
}
}
// Rij update for all j in omega_i ( i =col )
z_star = num_z_star / denum_z_star;
for (int j = 0; j < n; ++j) {
if (A.array_[col*A.width + j] != 0.0) {
R.array_[col*A.width + j] = R.array_[col*A.width + j] - (z_star - W.array_[col*W.width + row])*H.array_[j*H.width + row];
}
}
W.array_[col*k + row] = z_star;
}
// we must synchronyze threads before updating H
void __syncthreads();
if ( col >= m ) {
// we're now updating H
for (int i = 0; i < m; ++i) {
if (A.array_[i*A.width + col-m] != 0.0) {
num_s_star += (R.array_[i*A.width + col-m] + W.array_[i*W.width + row] * H.array_[(col-m)*H.width + row])*W.array_[i*W.width + row];
denum_s_star += W.array_[i*W.width + row] * W.array_[i*W.width + row];
}
}
// Rij update for all j in omega_i ( i =col )
s_star = num_s_star / denum_s_star;
for (int i = 0; i < m; ++i) {
if (A.array_[i*A.width + col-m] != 0) {
R.array_[i*A.width + col-m] = R.array_[i*A.width + col-m] - (s_star - H.array_[(col-m)*H.width + row])*W.array_[i*W.width + row];
}
}
H.array_[(col-m)*H.width + row] = s_star;
}
}
// CCD (cyclic coordinate descent) matrix-factorization driver.
// For iter = 1..49 it builds a random 96x96 matrix A (entries drawn from
// U(0,5); draws <= 1 are zeroed to simulate missing values), runs `iter`
// rounds of the CCD kernel to factor A ~= W * H, then reports an RMSE-style
// error over the observed entries and the wall-clock time for the run.
int main() {
	for (int iter = 1; iter < 50; iter++) {
		clock_t tStart = clock();
		double lambda = 500;  // regularization weight passed to CCDKernel
		// height <-> rows , width <-> column
		// matrix A is a squared matrix with missing values;
		// we first generate A randomly.
		double* ele = new double[9216];   // 96*96 entries of A
		double* el = new double[9216];    // initial H
		double* elem = new double[9216];  // zeros (initial W / reconstruction)
		std::random_device rd;
		std::mt19937 gen(rd());
		std::uniform_real_distribution<> dis(0, 5);
		for (int i = 0; i < 9216; i++) {
			double num_gene = dis(gen);
			// Draws <= 1 become 0, i.e. "missing" entries of A.
			ele[i] = (num_gene <= 1) ? 0.0 : num_gene;
			elem[i] = 0.0;
			el[i] = num_gene;
		}
		Matrix A = Matrix(96, 96, ele);
		Matrix W = Matrix(96, 96, elem); // zeros
		Matrix H = Matrix(96, 96, el);
		Matrix R = A;  // residual starts as A itself
		// Device-side handles filled by load_matrix_to_device below.
		Matrix d_A;
		Matrix d_W;
		Matrix d_H;
		Matrix d_R;
		// Invoke kernel
		dim3 dimBlock(16, 16);
		// verify division
		dim3 dimGrid(12, 6);
		for (int i = 0; i < iter; ++i) {
			// NOTE(review): uploading/downloading all four matrices every
			// round is expensive; if load_matrix_to_device allocates on each
			// call, only the final d_* buffers are freed below -- confirm
			// against its definition.
			load_matrix_to_device(&A, &d_A);
			load_matrix_to_device(&W, &d_W);
			load_matrix_to_device(&H, &d_H);
			load_matrix_to_device(&R, &d_R);
			CCDKernel << <dimGrid, dimBlock >> > (d_A, d_W, d_R, d_H, lambda);
			load_matrix_to_host(&W, &d_W);
			load_matrix_to_host(&H, &d_H);
			load_matrix_to_host(&R, &d_R);
			load_matrix_to_host(&A, &d_A);
		}
		// Reconstruct AR = W * H on the device and pull it back.
		dim3 dimBlock1(16, 16);
		dim3 dimGrid1(6, 6);
		Matrix AR = Matrix(96, 96, elem);
		Matrix d_AR;
		load_matrix_to_device(&AR, &d_AR);
		MatMulKernel << <dimGrid1, dimBlock1 >> > (d_W, d_H, d_AR);
		load_matrix_to_host(&AR, &d_AR);
		// Squared error over the observed (non-zero) entries of A.
		// NOTE(review): the sum covers only observed entries but is divided
		// by all 9216 cells, so this under-reports the per-observation RMSE.
		double erro = 0;
		for (int i = 0; i < 9216; i++) {
			if (A.array_[i] != 0.0) {
				erro += (AR.array_[i] - A.array_[i])*(AR.array_[i] - A.array_[i]);
			}
		}
		erro = erro / 9216;
		cout << sqrt(erro) << " iter : " << iter;
		printf("Time taken: %.2fs\n", (double)(clock() - tStart) / CLOCKS_PER_SEC);
		// Free device memory, including the reconstruction buffer d_AR,
		// which previously leaked every outer iteration.
		cudaFree(d_W.array_); cudaFree(d_H.array_); cudaFree(d_R.array_); cudaFree(d_A.array_);
		cudaFree(d_AR.array_);
		// Free the host buffers allocated with new[] above (previously
		// leaked 3 x 9216 doubles per outer iteration; freed manually here
		// because Matrix does not free array_ itself -- the explicit
		// cudaFree calls above show callers own the storage).
		delete[] ele;
		delete[] el;
		delete[] elem;
	}
	system("pause");
}
8acbad9faab48ffc505ee3a225b45c8acf06c32b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif // USE_GREENTEA
namespace caffe {
#ifdef USE_ROCM
// Max-pooling forward pass with "strided kernels" (SK): the kernel_h x
// kernel_w taps are dilated by kstride_h/kstride_w, so the window spans
// ext_kernel_h x ext_kernel_w input pixels.  One output element is handled
// per index supplied by CUDA_KERNEL_LOOP (nthreads = top element count).
// The argmax is recorded either in `mask` (int blob) or, if mask == NULL,
// in `top_mask` (Dtype blob exposed as a second top).
template<typename Dtype>
__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data,
                               const int num, const int channels,
                               const int height, const int width,
                               const int pooled_height, const int pooled_width,
                               const int kernel_h, const int kernel_w,
                               const int ext_kernel_h, const int ext_kernel_w,
                               const int stride_h, const int stride_w,
                               const int kstride_h, const int kstride_w,
                               const int pad_h, const int pad_w,
                               Dtype* top_data, int* mask, Dtype* top_mask) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Decompose the flat output index into (n, c, ph, pw).
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    // Window bounds in input coordinates, clipped to the image.
    int hstart = ph * stride_h - pad_h;
    int wstart = pw * stride_w - pad_w;
    int hend = min(hstart + ext_kernel_h, height);
    int wend = min(wstart + ext_kernel_w, width);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    Dtype maxval = -FLT_MAX;
    int maxidx = -1;
    // Advance to the (n, c) input plane; maxidx below is relative to it.
    bottom_data += (n * channels + c) * height * width;
    for (int h = hstart; h < hend; h += kstride_h) {
      for (int w = wstart; w < wend; w += kstride_w) {
        if (bottom_data[h * width + w] > maxval) {
          maxidx = h * width + w;
          maxval = bottom_data[maxidx];
        }
      }
    }
    top_data[index] = maxval;
    if (mask) {
      mask[index] = maxidx;
    } else {
      top_mask[index] = maxidx;
    }
  }
}
// Average-pooling forward pass (SK variant).  One output element per
// CUDA_KERNEL_LOOP index; averages over the clipped window.
template<typename Dtype>
__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data,
                               const int num, const int channels,
                               const int height, const int width,
                               const int pooled_height, const int pooled_width,
                               const int kernel_h, const int kernel_w,
                               const int ext_kernel_h, const int ext_kernel_w,
                               const int stride_h, const int stride_w,
                               const int kstride_h, const int kstride_w,
                               const int pad_h, const int pad_w,
                               Dtype* top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    int hstart = ph * stride_h - pad_h;
    int wstart = pw * stride_w - pad_w;
    // First clip against the padded extent, then against the real image.
    int hend = min(hstart + ext_kernel_h, height + pad_h);
    int wend = min(wstart + ext_kernel_w, width + pad_w);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    hend = min(hend, height);
    wend = min(wend, width);
    Dtype aveval = 0;
    bottom_data += (n * channels + c) * height * width;
    // Unlike the max-pool kernel this walks every pixel in the clipped
    // window (stride 1) and divides by the number actually visited.
    int pool_size = 0;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        aveval += bottom_data[h * width + w];
        ++pool_size;
      }
    }
    top_data[index] = aveval / pool_size;
  }
}
// Stochastic-pooling forward pass, training mode (SK variant).  For each
// output element, one input position inside the dilated window is sampled
// with probability proportional to its (assumed non-negative) activation,
// using the uniform random number pre-generated into rand_idx[index].
// The chosen flat bottom offset is written back into rand_idx for the
// backward pass.
template<typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
                                    const Dtype* bottom_data, const int num,
                                    const int channels, const int height,
                                    const int width, const int pooled_height,
                                    const int pooled_width, const int kernel_h,
                                    const int kernel_w, const int ext_kernel_h,
                                    const int ext_kernel_w, const int stride_h,
                                    const int stride_w, const int kstride_h,
                                    const int kstride_w, Dtype* rand_idx,
                                    Dtype* top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    // Note: no padding is applied in the stochastic variant.
    int hstart = ph * stride_h;
    int hend = min(hstart + ext_kernel_h, height);
    int wstart = pw * stride_w;
    int wend = min(wstart + ext_kernel_w, width);
    Dtype cumsum = 0.;
    bottom_data += (n * channels + c) * height * width;
    // First pass: get sum
    for (int h = hstart; h < hend; h += kstride_h) {
      for (int w = wstart; w < wend; w += kstride_w) {
        cumsum += bottom_data[h * width + w];
      }
    }
    // Scale the uniform sample into the cumulative-sum domain.
    float thres = rand_idx[index] * cumsum;
    // Second pass: get value, and set index.
    cumsum = 0;
    for (int h = hstart; h < hend; h += kstride_h) {
      for (int w = wstart; w < wend; w += kstride_w) {
        cumsum += bottom_data[h * width + w];
        if (cumsum >= thres) {
          rand_idx[index] = ((n * channels + c) * height + h) * width + w;
          top_data[index] = bottom_data[h * width + w];
          // NOTE(review): this return exits the enclosing CUDA_KERNEL_LOOP,
          // so each thread handles at most one output element -- relies on
          // the launch covering all nthreads indices; confirm launch config.
          return;
        }
      }
    }
  }
}
// Stochastic-pooling forward pass, test mode (SK variant): instead of
// sampling, outputs the probability-weighted average sum(x^2)/sum(x)
// over the window.
template<typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads, const Dtype* bottom_data,
                                   const int num, const int channels,
                                   const int height, const int width,
                                   const int pooled_height,
                                   const int pooled_width, const int kernel_h,
                                   const int kernel_w, const int ext_kernel_h,
                                   const int ext_kernel_w, const int stride_h,
                                   const int stride_w, const int kstride_h,
                                   const int kstride_w, Dtype* top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    int hstart = ph * stride_h;
    int hend = min(hstart + ext_kernel_h, height);
    int wstart = pw * stride_w;
    int wend = min(wstart + ext_kernel_w, width);
    // Seed cumsum with FLT_MIN (not 0) to avoid divide-by-zero below.
    Dtype cumsum = FLT_MIN;
    Dtype cumvalues = 0.;
    bottom_data += (n * channels + c) * height * width;
    // First pass: get sum
    for (int h = hstart; h < hend; h += kstride_h) {
      for (int w = wstart; w < wend; w += kstride_w) {
        cumsum += bottom_data[h * width + w];
        cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w];
      }
    }
    top_data[index] = cumvalues / cumsum;
  }
}
#endif // USE_ROCM
// GPU forward pass for the strided-kernel (SK) pooling layer.  Dispatches
// on the configured pool method (MAX / AVE / STOCHASTIC) and on the device
// backend: the CUDA/ROCm kernels defined above, or the equivalent GreenTea
// (ViennaCL/OpenCL) kernels otherwise.
template<typename Dtype>
void PoolingSKLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
                                        const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  int count = top[0]->count();
  // We'll output the mask to top[1] if it's of size >1.
  const bool use_top_mask = top.size() > 1;
  int* mask = NULL;
  Dtype* top_mask = NULL;
  // Extent of the dilated pooling window in input pixels.
  int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1;
  int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1;
  if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
    switch (this->layer_param_.pooling_param().pool()) {
      case PoolingParameter_PoolMethod_MAX:
        if (use_top_mask) {
          top_mask = top[1]->mutable_gpu_data();
        } else {
          mask = max_idx_.mutable_gpu_data();
        }
        // NOLINT_NEXT_LINE(whitespace/operators)
        MaxPoolForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
                                          CAFFE_CUDA_NUM_THREADS)(
            count, bottom_data, bottom[0]->num(), channels_,
            height_, width_, pooled_height_, pooled_width_, kernel_h_,
            kernel_w_, ext_kernel_h, ext_kernel_w,
            stride_h_, stride_w_, kstride_h_, kstride_w_,
            pad_h_, pad_w_, top_data,
            mask, top_mask);
        break;
      case PoolingParameter_PoolMethod_AVE:
        // NOLINT_NEXT_LINE(whitespace/operators)
        AvePoolForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
                                          CAFFE_CUDA_NUM_THREADS)(
            count, bottom_data, bottom[0]->num(), channels_,
            height_, width_, pooled_height_, pooled_width_, kernel_h_,
            kernel_w_, ext_kernel_h, ext_kernel_w,
            stride_h_, stride_w_, kstride_h_, kstride_w_,
            pad_h_, pad_w_, top_data);
        break;
      case PoolingParameter_PoolMethod_STOCHASTIC:
        if (this->phase_ == caffe::TRAIN) {
          // We need to create the random index as well.
          caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
                                rand_idx_.mutable_gpu_data());
          // NOLINT_NEXT_LINE(whitespace/operators)
          StoPoolForwardTrain<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
                                                 CAFFE_CUDA_NUM_THREADS)(
              count, bottom_data, bottom[0]->num(), channels_,
              height_, width_, pooled_height_, pooled_width_, kernel_h_,
              kernel_w_, ext_kernel_h, ext_kernel_w,
              stride_h_, stride_w_, kstride_h_, kstride_w_,
              rand_idx_.mutable_gpu_data(), top_data);
        } else {
          // Test phase: deterministic probability-weighted average.
          // NOLINT_NEXT_LINE(whitespace/operators)
          StoPoolForwardTest<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
                                                CAFFE_CUDA_NUM_THREADS)(
              count, bottom_data, bottom[0]->num(), channels_,
              height_, width_, pooled_height_, pooled_width_, kernel_h_,
              kernel_w_, ext_kernel_h, ext_kernel_w,
              stride_h_, stride_w_, kstride_h_, kstride_w_, top_data);
        }
        break;
      default: {
        LOG(FATAL)<< "Unknown pooling method.";
      }
    }
    CUDA_POST_KERNEL_CHECK;
#endif  // USE_ROCM
  } else {
#ifdef USE_GREENTEA
    // OpenCL path: enqueue the matching pre-compiled GreenTea kernel.
    viennacl::ocl::context &ctx = viennacl::ocl::get_context(
        this->device_context_->id());
    viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
        this->device_context_->id());
    switch (this->layer_param_.pooling_param().pool()) {
      case PoolingParameter_PoolMethod_MAX: {
        if (use_top_mask) {
          top_mask = top[1]->mutable_gpu_data();
        } else {
          mask = max_idx_.mutable_gpu_data();
        }
        viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel(
            CL_KERNEL_SELECT("max_pool_forward_sk"));
        viennacl::ocl::enqueue(
            oclk_max_pool_forward(count,
                WrapHandle((cl_mem) bottom_data, &ctx),
                bottom[0]->num(), channels_, height_, width_,
                pooled_height_, pooled_width_, kernel_h_,
                kernel_w_, ext_kernel_h, ext_kernel_w,
                stride_h_, stride_w_, kstride_h_, kstride_w_,
                pad_h_, pad_w_,
                WrapHandle((cl_mem) top_data, &ctx),
                mask == NULL ? 0 : 1,
                WrapHandle((cl_mem) mask, &ctx),
                WrapHandle((cl_mem) top_mask, &ctx)),
            ctx.get_queue());
      }
        break;
      case PoolingParameter_PoolMethod_AVE: {
        viennacl::ocl::kernel &oclk_ave_pool_forward = program.get_kernel(
            CL_KERNEL_SELECT("ave_pool_forward_sk"));
        viennacl::ocl::enqueue(
            oclk_ave_pool_forward(count,
                WrapHandle((cl_mem) bottom_data, &ctx),
                bottom[0]->num(), channels_,
                height_, width_, pooled_height_, pooled_width_, kernel_h_,
                kernel_w_, ext_kernel_h, ext_kernel_w,
                stride_h_, stride_w_, kstride_h_, kstride_w_,
                pad_h_, pad_w_, WrapHandle((cl_mem)top_data, &ctx)),
            ctx.get_queue());
      }
        break;
      case PoolingParameter_PoolMethod_STOCHASTIC: {
        if (this->phase_ == caffe::TRAIN) {
          // We need to create the random index as well.
          greentea_gpu_rng_uniform(this->device_context_->id(), count,
                                   Dtype(0), Dtype(1),
                                   (cl_mem)(rand_idx_.mutable_gpu_data()), 0);
          viennacl::ocl::kernel &oclk_sto_pool_forward = program.get_kernel(
              CL_KERNEL_SELECT("sto_pool_forward_train_sk"));
          viennacl::ocl::enqueue(
              oclk_sto_pool_forward(count,
                  WrapHandle((cl_mem)bottom_data, &ctx),
                  bottom[0]->num(), channels_,
                  height_, width_, pooled_height_, pooled_width_, kernel_h_,
                  kernel_w_, ext_kernel_h, ext_kernel_w,
                  stride_h_, stride_w_, kstride_h_, kstride_w_,
                  WrapHandle((cl_mem)(rand_idx_.mutable_gpu_data()), &ctx),
                  WrapHandle((cl_mem)(top_data), &ctx)),
              ctx.get_queue());
        } else {
          viennacl::ocl::kernel &oclk_sto_pool_forward = program.get_kernel(
              CL_KERNEL_SELECT("sto_pool_forward_test_sk"));
          viennacl::ocl::enqueue(
              oclk_sto_pool_forward(count,
                  WrapHandle((cl_mem)bottom_data, &ctx),
                  bottom[0]->num(), channels_,
                  height_, width_, pooled_height_, pooled_width_, kernel_h_,
                  kernel_w_, ext_kernel_h, ext_kernel_w,
                  stride_h_, stride_w_, kstride_h_, kstride_w_,
                  WrapHandle((cl_mem)top_data, &ctx)),
              ctx.get_queue());
        }
      }
        break;
      default: {
        LOG(FATAL)<< "Unknown pooling method.";
      }
    }
#endif  // USE_GREENTEA
  }
}
#ifdef USE_ROCM
// Backward pass for SK max pooling.  One thread per *bottom* element
// (h, w): it scans every pooled output whose dilated window could have
// covered this pixel and accumulates top_diff wherever the recorded
// argmax equals this position.
template<typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff,
                                const int* mask, const Dtype* top_mask,
                                const int num, const int channels,
                                const int height, const int width,
                                const int pooled_height, const int pooled_width,
                                const int kernel_h, const int kernel_w,
                                const int ext_kernel_h, const int ext_kernel_w,
                                const int stride_h, const int stride_w,
                                const int kstride_h, const int kstride_w,
                                const int pad_h, const int pad_w,
                                Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local index
    // find out the local offset
    int w = index % width;
    int h = (index / width) % height;
    int c = (index / width / height) % channels;
    int n = index / width / height / channels;
    int pooled_height_1 = pooled_height - 1;
    int pooled_width_1 = pooled_width - 1;
    // Range of pooled rows/cols whose dilated window can include (h, w).
    int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;
    int phend =
        (h >= pooled_height) ?
            pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;
    int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;
    int pwend =
        (w >= pooled_width) ?
            pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;
    Dtype gradient = 0;
    // Advance both diff and mask pointers to the (n, c) plane.
    int offset = (n * channels + c) * pooled_height * pooled_width;
    top_diff += offset;
    if (mask) {
      mask += offset;
      for (int ph = phstart; ph <= phend; ph += kstride_h) {
        for (int pw = pwstart; pw <= pwend; pw += kstride_w) {
          if (mask[ph * pooled_width + pw] == h * width + w) {
            gradient += top_diff[ph * pooled_width + pw];
          }
        }
      }
    } else {
      // Same scan, but the argmax was stored in the Dtype top_mask blob.
      top_mask += offset;
      for (int ph = phstart; ph <= phend; ph += kstride_h) {
        for (int pw = pwstart; pw <= pwend; pw += kstride_w) {
          if (top_mask[ph * pooled_width + pw] == h * width + w) {
            gradient += top_diff[ph * pooled_width + pw];
          }
        }
      }
    }
    bottom_diff[index] = gradient;
  }
}
#endif // USE_ROCM
// GPU backward pass for SK pooling.  Only MAX pooling is supported; the
// gradient is routed through the recorded argmax positions.
// NOTE(review): propagate_down is never consulted -- confirm whether the
// backward pass should be skipped when propagate_down[0] is false.
template<typename Dtype>
void PoolingSKLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
                                         const vector<bool>& propagate_down,
                                         const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  const int count = bottom[0]->count();
  // We'll output the mask to top[1] if it's of size >1.
  const bool use_top_mask = top.size() > 1;
  const int* mask = NULL;
  const Dtype* top_mask = NULL;
  int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1;
  int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1;
  if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
    // Clear the gradient buffer before scattering into it.
    caffe_gpu_set(count, Dtype(0.), bottom_diff);
    switch (this->layer_param_.pooling_param().pool()) {
      case PoolingParameter_PoolMethod_MAX:
        if (use_top_mask) {
          top_mask = top[1]->gpu_data();
        } else {
          mask = max_idx_.gpu_data();
        }
        // NOLINT_NEXT_LINE(whitespace/operators)
        MaxPoolBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
                                           CAFFE_CUDA_NUM_THREADS)(
            count, top_diff, mask, top_mask, top[0]->num(), channels_,
            height_, width_, pooled_height_, pooled_width_,
            kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w,
            stride_h_, stride_w_, kstride_h_, kstride_w_,
            pad_h_, pad_w_,
            bottom_diff);
        break;
      default:
        LOG(FATAL)<<
        "Unknown or unsupported pooling method in Backward_gpu().";
    }
    CUDA_POST_KERNEL_CHECK;
#endif  // USE_ROCM
  } else {
#ifdef USE_GREENTEA
    // OpenCL path mirroring the CUDA branch above.
    viennacl::ocl::context &ctx = viennacl::ocl::get_context(
        this->device_context_->id());
    viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
        this->device_context_->id());
    greentea_gpu_set(this->device_context_->id(), count, Dtype(0.),
                     (cl_mem) bottom_diff, 0);
    switch (this->layer_param_.pooling_param().pool()) {
      case PoolingParameter_PoolMethod_MAX: {
        if (use_top_mask) {
          top_mask = top[1]->gpu_data();
        } else {
          mask = max_idx_.gpu_data();
        }
        viennacl::ocl::kernel &oclk_max_pool_backward = program.get_kernel(
            CL_KERNEL_SELECT("max_pool_backward_sk"));
        viennacl::ocl::enqueue(
            oclk_max_pool_backward(count, WrapHandle((cl_mem) top_diff, &ctx),
                mask == NULL ? 0 : 1,
                WrapHandle((cl_mem) mask, &ctx),
                WrapHandle((cl_mem) top_mask, &ctx),
                top[0]->num(), channels_, height_, width_,
                pooled_height_, pooled_width_, kernel_h_,
                kernel_w_, ext_kernel_h, ext_kernel_w,
                stride_h_, stride_w_, kstride_h_, kstride_w_,
                pad_h_, pad_w_,
                WrapHandle((cl_mem) bottom_diff, &ctx)),
            ctx.get_queue());
      }
        break;
      default:
        LOG(FATAL)<<
        "Unknown or unsupported pooling method in Backward_gpu().";
    }
#endif  // USE_GREENTEA
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingSKLayer);
} // namespace caffe
| 8acbad9faab48ffc505ee3a225b45c8acf06c32b.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif // USE_GREENTEA
namespace caffe {
#ifdef USE_CUDA
// Max-pooling forward pass with "strided kernels" (SK): the kernel_h x
// kernel_w taps are dilated by kstride_h/kstride_w, so the window spans
// ext_kernel_h x ext_kernel_w input pixels.  One output element is handled
// per index supplied by CUDA_KERNEL_LOOP (nthreads = top element count).
// The argmax is recorded either in `mask` (int blob) or, if mask == NULL,
// in `top_mask` (Dtype blob exposed as a second top).
template<typename Dtype>
__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data,
                               const int num, const int channels,
                               const int height, const int width,
                               const int pooled_height, const int pooled_width,
                               const int kernel_h, const int kernel_w,
                               const int ext_kernel_h, const int ext_kernel_w,
                               const int stride_h, const int stride_w,
                               const int kstride_h, const int kstride_w,
                               const int pad_h, const int pad_w,
                               Dtype* top_data, int* mask, Dtype* top_mask) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Decompose the flat output index into (n, c, ph, pw).
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    // Window bounds in input coordinates, clipped to the image.
    int hstart = ph * stride_h - pad_h;
    int wstart = pw * stride_w - pad_w;
    int hend = min(hstart + ext_kernel_h, height);
    int wend = min(wstart + ext_kernel_w, width);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    Dtype maxval = -FLT_MAX;
    int maxidx = -1;
    // Advance to the (n, c) input plane; maxidx below is relative to it.
    bottom_data += (n * channels + c) * height * width;
    for (int h = hstart; h < hend; h += kstride_h) {
      for (int w = wstart; w < wend; w += kstride_w) {
        if (bottom_data[h * width + w] > maxval) {
          maxidx = h * width + w;
          maxval = bottom_data[maxidx];
        }
      }
    }
    top_data[index] = maxval;
    if (mask) {
      mask[index] = maxidx;
    } else {
      top_mask[index] = maxidx;
    }
  }
}
// Average-pooling forward pass (SK variant).  One output element per
// CUDA_KERNEL_LOOP index; averages over the clipped window.
template<typename Dtype>
__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data,
                               const int num, const int channels,
                               const int height, const int width,
                               const int pooled_height, const int pooled_width,
                               const int kernel_h, const int kernel_w,
                               const int ext_kernel_h, const int ext_kernel_w,
                               const int stride_h, const int stride_w,
                               const int kstride_h, const int kstride_w,
                               const int pad_h, const int pad_w,
                               Dtype* top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    int hstart = ph * stride_h - pad_h;
    int wstart = pw * stride_w - pad_w;
    // First clip against the padded extent, then against the real image.
    int hend = min(hstart + ext_kernel_h, height + pad_h);
    int wend = min(wstart + ext_kernel_w, width + pad_w);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    hend = min(hend, height);
    wend = min(wend, width);
    Dtype aveval = 0;
    bottom_data += (n * channels + c) * height * width;
    // Unlike the max-pool kernel this walks every pixel in the clipped
    // window (stride 1) and divides by the number actually visited.
    int pool_size = 0;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        aveval += bottom_data[h * width + w];
        ++pool_size;
      }
    }
    top_data[index] = aveval / pool_size;
  }
}
// Stochastic-pooling forward pass, training mode (SK variant).  For each
// output element, one input position inside the dilated window is sampled
// with probability proportional to its (assumed non-negative) activation,
// using the uniform random number pre-generated into rand_idx[index].
// The chosen flat bottom offset is written back into rand_idx for the
// backward pass.
template<typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
                                    const Dtype* bottom_data, const int num,
                                    const int channels, const int height,
                                    const int width, const int pooled_height,
                                    const int pooled_width, const int kernel_h,
                                    const int kernel_w, const int ext_kernel_h,
                                    const int ext_kernel_w, const int stride_h,
                                    const int stride_w, const int kstride_h,
                                    const int kstride_w, Dtype* rand_idx,
                                    Dtype* top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    // Note: no padding is applied in the stochastic variant.
    int hstart = ph * stride_h;
    int hend = min(hstart + ext_kernel_h, height);
    int wstart = pw * stride_w;
    int wend = min(wstart + ext_kernel_w, width);
    Dtype cumsum = 0.;
    bottom_data += (n * channels + c) * height * width;
    // First pass: get sum
    for (int h = hstart; h < hend; h += kstride_h) {
      for (int w = wstart; w < wend; w += kstride_w) {
        cumsum += bottom_data[h * width + w];
      }
    }
    // Scale the uniform sample into the cumulative-sum domain.
    float thres = rand_idx[index] * cumsum;
    // Second pass: get value, and set index.
    cumsum = 0;
    for (int h = hstart; h < hend; h += kstride_h) {
      for (int w = wstart; w < wend; w += kstride_w) {
        cumsum += bottom_data[h * width + w];
        if (cumsum >= thres) {
          rand_idx[index] = ((n * channels + c) * height + h) * width + w;
          top_data[index] = bottom_data[h * width + w];
          // NOTE(review): this return exits the enclosing CUDA_KERNEL_LOOP,
          // so each thread handles at most one output element -- relies on
          // the launch covering all nthreads indices; confirm launch config.
          return;
        }
      }
    }
  }
}
// Stochastic-pooling forward pass, test mode (SK variant): instead of
// sampling, outputs the probability-weighted average sum(x^2)/sum(x)
// over the window.
template<typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads, const Dtype* bottom_data,
                                   const int num, const int channels,
                                   const int height, const int width,
                                   const int pooled_height,
                                   const int pooled_width, const int kernel_h,
                                   const int kernel_w, const int ext_kernel_h,
                                   const int ext_kernel_w, const int stride_h,
                                   const int stride_w, const int kstride_h,
                                   const int kstride_w, Dtype* top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    int hstart = ph * stride_h;
    int hend = min(hstart + ext_kernel_h, height);
    int wstart = pw * stride_w;
    int wend = min(wstart + ext_kernel_w, width);
    // Seed cumsum with FLT_MIN (not 0) to avoid divide-by-zero below.
    Dtype cumsum = FLT_MIN;
    Dtype cumvalues = 0.;
    bottom_data += (n * channels + c) * height * width;
    // First pass: get sum
    for (int h = hstart; h < hend; h += kstride_h) {
      for (int w = wstart; w < wend; w += kstride_w) {
        cumsum += bottom_data[h * width + w];
        cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w];
      }
    }
    top_data[index] = cumvalues / cumsum;
  }
}
#endif // USE_CUDA
// GPU forward pass for the strided-kernel (SK) pooling layer.  Dispatches
// on the configured pool method (MAX / AVE / STOCHASTIC) and on the device
// backend: the CUDA kernels defined above, or the equivalent GreenTea
// (ViennaCL/OpenCL) kernels otherwise.
template<typename Dtype>
void PoolingSKLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
                                        const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  int count = top[0]->count();
  // We'll output the mask to top[1] if it's of size >1.
  const bool use_top_mask = top.size() > 1;
  int* mask = NULL;
  Dtype* top_mask = NULL;
  // Extent of the dilated pooling window in input pixels.
  int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1;
  int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1;
  if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
    switch (this->layer_param_.pooling_param().pool()) {
      case PoolingParameter_PoolMethod_MAX:
        if (use_top_mask) {
          top_mask = top[1]->mutable_gpu_data();
        } else {
          mask = max_idx_.mutable_gpu_data();
        }
        // NOLINT_NEXT_LINE(whitespace/operators)
        MaxPoolForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
                                          CAFFE_CUDA_NUM_THREADS)(
            count, bottom_data, bottom[0]->num(), channels_,
            height_, width_, pooled_height_, pooled_width_, kernel_h_,
            kernel_w_, ext_kernel_h, ext_kernel_w,
            stride_h_, stride_w_, kstride_h_, kstride_w_,
            pad_h_, pad_w_, top_data,
            mask, top_mask);
        break;
      case PoolingParameter_PoolMethod_AVE:
        // NOLINT_NEXT_LINE(whitespace/operators)
        AvePoolForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
                                          CAFFE_CUDA_NUM_THREADS)(
            count, bottom_data, bottom[0]->num(), channels_,
            height_, width_, pooled_height_, pooled_width_, kernel_h_,
            kernel_w_, ext_kernel_h, ext_kernel_w,
            stride_h_, stride_w_, kstride_h_, kstride_w_,
            pad_h_, pad_w_, top_data);
        break;
      case PoolingParameter_PoolMethod_STOCHASTIC:
        if (this->phase_ == caffe::TRAIN) {
          // We need to create the random index as well.
          caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
                                rand_idx_.mutable_gpu_data());
          // NOLINT_NEXT_LINE(whitespace/operators)
          StoPoolForwardTrain<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
                                                 CAFFE_CUDA_NUM_THREADS)(
              count, bottom_data, bottom[0]->num(), channels_,
              height_, width_, pooled_height_, pooled_width_, kernel_h_,
              kernel_w_, ext_kernel_h, ext_kernel_w,
              stride_h_, stride_w_, kstride_h_, kstride_w_,
              rand_idx_.mutable_gpu_data(), top_data);
        } else {
          // Test phase: deterministic probability-weighted average.
          // NOLINT_NEXT_LINE(whitespace/operators)
          StoPoolForwardTest<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
                                                CAFFE_CUDA_NUM_THREADS)(
              count, bottom_data, bottom[0]->num(), channels_,
              height_, width_, pooled_height_, pooled_width_, kernel_h_,
              kernel_w_, ext_kernel_h, ext_kernel_w,
              stride_h_, stride_w_, kstride_h_, kstride_w_, top_data);
        }
        break;
      default: {
        LOG(FATAL)<< "Unknown pooling method.";
      }
    }
    CUDA_POST_KERNEL_CHECK;
#endif  // USE_CUDA
  } else {
#ifdef USE_GREENTEA
    // OpenCL path: enqueue the matching pre-compiled GreenTea kernel.
    viennacl::ocl::context &ctx = viennacl::ocl::get_context(
        this->device_context_->id());
    viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
        this->device_context_->id());
    switch (this->layer_param_.pooling_param().pool()) {
      case PoolingParameter_PoolMethod_MAX: {
        if (use_top_mask) {
          top_mask = top[1]->mutable_gpu_data();
        } else {
          mask = max_idx_.mutable_gpu_data();
        }
        viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel(
            CL_KERNEL_SELECT("max_pool_forward_sk"));
        viennacl::ocl::enqueue(
            oclk_max_pool_forward(count,
                WrapHandle((cl_mem) bottom_data, &ctx),
                bottom[0]->num(), channels_, height_, width_,
                pooled_height_, pooled_width_, kernel_h_,
                kernel_w_, ext_kernel_h, ext_kernel_w,
                stride_h_, stride_w_, kstride_h_, kstride_w_,
                pad_h_, pad_w_,
                WrapHandle((cl_mem) top_data, &ctx),
                mask == NULL ? 0 : 1,
                WrapHandle((cl_mem) mask, &ctx),
                WrapHandle((cl_mem) top_mask, &ctx)),
            ctx.get_queue());
      }
        break;
      case PoolingParameter_PoolMethod_AVE: {
        viennacl::ocl::kernel &oclk_ave_pool_forward = program.get_kernel(
            CL_KERNEL_SELECT("ave_pool_forward_sk"));
        viennacl::ocl::enqueue(
            oclk_ave_pool_forward(count,
                WrapHandle((cl_mem) bottom_data, &ctx),
                bottom[0]->num(), channels_,
                height_, width_, pooled_height_, pooled_width_, kernel_h_,
                kernel_w_, ext_kernel_h, ext_kernel_w,
                stride_h_, stride_w_, kstride_h_, kstride_w_,
                pad_h_, pad_w_, WrapHandle((cl_mem)top_data, &ctx)),
            ctx.get_queue());
      }
        break;
      case PoolingParameter_PoolMethod_STOCHASTIC: {
        if (this->phase_ == caffe::TRAIN) {
          // We need to create the random index as well.
          greentea_gpu_rng_uniform(this->device_context_->id(), count,
                                   Dtype(0), Dtype(1),
                                   (cl_mem)(rand_idx_.mutable_gpu_data()), 0);
          viennacl::ocl::kernel &oclk_sto_pool_forward = program.get_kernel(
              CL_KERNEL_SELECT("sto_pool_forward_train_sk"));
          viennacl::ocl::enqueue(
              oclk_sto_pool_forward(count,
                  WrapHandle((cl_mem)bottom_data, &ctx),
                  bottom[0]->num(), channels_,
                  height_, width_, pooled_height_, pooled_width_, kernel_h_,
                  kernel_w_, ext_kernel_h, ext_kernel_w,
                  stride_h_, stride_w_, kstride_h_, kstride_w_,
                  WrapHandle((cl_mem)(rand_idx_.mutable_gpu_data()), &ctx),
                  WrapHandle((cl_mem)(top_data), &ctx)),
              ctx.get_queue());
        } else {
          viennacl::ocl::kernel &oclk_sto_pool_forward = program.get_kernel(
              CL_KERNEL_SELECT("sto_pool_forward_test_sk"));
          viennacl::ocl::enqueue(
              oclk_sto_pool_forward(count,
                  WrapHandle((cl_mem)bottom_data, &ctx),
                  bottom[0]->num(), channels_,
                  height_, width_, pooled_height_, pooled_width_, kernel_h_,
                  kernel_w_, ext_kernel_h, ext_kernel_w,
                  stride_h_, stride_w_, kstride_h_, kstride_w_,
                  WrapHandle((cl_mem)top_data, &ctx)),
              ctx.get_queue());
        }
      }
        break;
      default: {
        LOG(FATAL)<< "Unknown pooling method.";
      }
    }
#endif  // USE_GREENTEA
  }
}
#ifdef USE_CUDA
// Backward pass for SK max pooling.  One thread per *bottom* element
// (h, w): it scans every pooled output whose dilated window could have
// covered this pixel and accumulates top_diff wherever the recorded
// argmax equals this position.
template<typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff,
                                const int* mask, const Dtype* top_mask,
                                const int num, const int channels,
                                const int height, const int width,
                                const int pooled_height, const int pooled_width,
                                const int kernel_h, const int kernel_w,
                                const int ext_kernel_h, const int ext_kernel_w,
                                const int stride_h, const int stride_w,
                                const int kstride_h, const int kstride_w,
                                const int pad_h, const int pad_w,
                                Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local index
    // find out the local offset
    int w = index % width;
    int h = (index / width) % height;
    int c = (index / width / height) % channels;
    int n = index / width / height / channels;
    int pooled_height_1 = pooled_height - 1;
    int pooled_width_1 = pooled_width - 1;
    // Range of pooled rows/cols whose dilated window can include (h, w).
    int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;
    int phend =
        (h >= pooled_height) ?
            pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;
    int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;
    int pwend =
        (w >= pooled_width) ?
            pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;
    Dtype gradient = 0;
    // Advance both diff and mask pointers to the (n, c) plane.
    int offset = (n * channels + c) * pooled_height * pooled_width;
    top_diff += offset;
    if (mask) {
      mask += offset;
      for (int ph = phstart; ph <= phend; ph += kstride_h) {
        for (int pw = pwstart; pw <= pwend; pw += kstride_w) {
          if (mask[ph * pooled_width + pw] == h * width + w) {
            gradient += top_diff[ph * pooled_width + pw];
          }
        }
      }
    } else {
      // Same scan, but the argmax was stored in the Dtype top_mask blob.
      top_mask += offset;
      for (int ph = phstart; ph <= phend; ph += kstride_h) {
        for (int pw = pwstart; pw <= pwend; pw += kstride_w) {
          if (top_mask[ph * pooled_width + pw] == h * width + w) {
            gradient += top_diff[ph * pooled_width + pw];
          }
        }
      }
    }
    bottom_diff[index] = gradient;
  }
}
#endif // USE_CUDA
// GPU backward pass for SK pooling.  Only MAX pooling is supported; the
// gradient is routed through the recorded argmax positions.
// NOTE(review): propagate_down is never consulted -- confirm whether the
// backward pass should be skipped when propagate_down[0] is false.
template<typename Dtype>
void PoolingSKLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
                                         const vector<bool>& propagate_down,
                                         const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  const int count = bottom[0]->count();
  // We'll output the mask to top[1] if it's of size >1.
  const bool use_top_mask = top.size() > 1;
  const int* mask = NULL;
  const Dtype* top_mask = NULL;
  int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1;
  int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1;
  if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
    // Clear the gradient buffer before scattering into it.
    caffe_gpu_set(count, Dtype(0.), bottom_diff);
    switch (this->layer_param_.pooling_param().pool()) {
      case PoolingParameter_PoolMethod_MAX:
        if (use_top_mask) {
          top_mask = top[1]->gpu_data();
        } else {
          mask = max_idx_.gpu_data();
        }
        // NOLINT_NEXT_LINE(whitespace/operators)
        MaxPoolBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
                                           CAFFE_CUDA_NUM_THREADS)(
            count, top_diff, mask, top_mask, top[0]->num(), channels_,
            height_, width_, pooled_height_, pooled_width_,
            kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w,
            stride_h_, stride_w_, kstride_h_, kstride_w_,
            pad_h_, pad_w_,
            bottom_diff);
        break;
      default:
        LOG(FATAL)<<
        "Unknown or unsupported pooling method in Backward_gpu().";
    }
    CUDA_POST_KERNEL_CHECK;
#endif  // USE_CUDA
  } else {
#ifdef USE_GREENTEA
    // OpenCL path mirroring the CUDA branch above.
    viennacl::ocl::context &ctx = viennacl::ocl::get_context(
        this->device_context_->id());
    viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
        this->device_context_->id());
    greentea_gpu_set(this->device_context_->id(), count, Dtype(0.),
                     (cl_mem) bottom_diff, 0);
    switch (this->layer_param_.pooling_param().pool()) {
      case PoolingParameter_PoolMethod_MAX: {
        if (use_top_mask) {
          top_mask = top[1]->gpu_data();
        } else {
          mask = max_idx_.gpu_data();
        }
        viennacl::ocl::kernel &oclk_max_pool_backward = program.get_kernel(
            CL_KERNEL_SELECT("max_pool_backward_sk"));
        viennacl::ocl::enqueue(
            oclk_max_pool_backward(count, WrapHandle((cl_mem) top_diff, &ctx),
                mask == NULL ? 0 : 1,
                WrapHandle((cl_mem) mask, &ctx),
                WrapHandle((cl_mem) top_mask, &ctx),
                top[0]->num(), channels_, height_, width_,
                pooled_height_, pooled_width_, kernel_h_,
                kernel_w_, ext_kernel_h, ext_kernel_w,
                stride_h_, stride_w_, kstride_h_, kstride_w_,
                pad_h_, pad_w_,
                WrapHandle((cl_mem) bottom_diff, &ctx)),
            ctx.get_queue());
      }
        break;
      default:
        LOG(FATAL)<<
        "Unknown or unsupported pooling method in Backward_gpu().";
    }
#endif  // USE_GREENTEA
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingSKLayer);
} // namespace caffe
|
9d980f98b930c8b4310c9bf2bef98db06dc56412.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hiprand/hiprand.h>
//best results: 41.3 BUPS
// hipRAND throughput micro-benchmark: fills a 2 GiB device buffer with
// uniform floats 100 times using the XORWOW generator, then prints
// cumulative wall-clock timestamps (all relative to the same `start`).
int main(){
  size_t n = 536870912;  // 2^29 floats = 2 GiB on the device
  int i;
  hiprandGenerator_t gen;
  float *devData;
  float f;
  // NOTE(review): all API return codes below are unchecked; a failed
  // 2 GiB hipMalloc would make the reported timings meaningless.
  hipMalloc((void **)&devData, n * sizeof(float));
  //hiprandCreateGenerator(&gen,HIPRAND_RNG_PSEUDO_MT19937);
  hiprandCreateGenerator(&gen,HIPRAND_RNG_PSEUDO_XORWOW);
  hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
  clock_t start=clock();
  for(i=0;i<100;i++) hiprandGenerateUniform(gen, devData, n);
  // Generation runs asynchronously; time1 is only meaningful after this sync.
  hipDeviceSynchronize();
  printf("time1 = %f seconds\n",(float)(clock()-start)/CLOCKS_PER_SEC);
  // time2: after copying a single float back (also forces completion).
  hipMemcpy(&f, devData, sizeof(float),hipMemcpyDeviceToHost);
  printf("time2 = %f seconds\n",(float)(clock()-start)/CLOCKS_PER_SEC);
  hiprandDestroyGenerator(gen);
  hipFree(devData);
  // time3: after generator teardown and buffer release.
  printf("time3 = %f seconds\n",(float)(clock()-start)/CLOCKS_PER_SEC);
  return 0;
}
| 9d980f98b930c8b4310c9bf2bef98db06dc56412.cu | #include <stdio.h>
#include <curand.h>
//best results: 41.3 BUPS
int main(){
size_t n = 536870912;
int i;
curandGenerator_t gen;
float *devData;
float f;
cudaMalloc((void **)&devData, n * sizeof(float));
//curandCreateGenerator(&gen,CURAND_RNG_PSEUDO_MT19937);
curandCreateGenerator(&gen,CURAND_RNG_PSEUDO_XORWOW);
curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
clock_t start=clock();
for(i=0;i<100;i++) curandGenerateUniform(gen, devData, n);
cudaDeviceSynchronize();
printf("time1 = %f seconds\n",(float)(clock()-start)/CLOCKS_PER_SEC);
cudaMemcpy(&f, devData, sizeof(float),cudaMemcpyDeviceToHost);
printf("time2 = %f seconds\n",(float)(clock()-start)/CLOCKS_PER_SEC);
curandDestroyGenerator(gen);
cudaFree(devData);
printf("time3 = %f seconds\n",(float)(clock()-start)/CLOCKS_PER_SEC);
return 0;
}
|
6311c2b001ebebfa3e5deee9ab5198d7b7b4da80.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "gputimer.h"
const int N= 1024; // matrix size is NxN
const int K= 32; // tile size is KxK
// Utility functions: compare, print, and fill matrices
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line)
{
if (err != hipSuccess) {
fprintf(stderr, "CUDA error at: %s : %d\n", file,line);
fprintf(stderr, "%s %s\n", hipGetErrorString(err), func);;
exit(1);
}
}
int compare_matrices(float *gpu, float *ref)
{
int result = 0;
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
if (ref[i + j*N] != gpu[i + j*N])
{
// printf("reference(%d,%d) = %f but test(%d,%d) = %f\n",
// i,j,ref[i+j*N],i,j,test[i+j*N]);
result = 1;
}
return result;
}
void print_matrix(float *mat)
{
for(int j=0; j < N; j++)
{
for(int i=0; i < N; i++) { printf("%4.4g ", mat[i + j*N]); }
printf("\n");
}
}
// fill a matrix with sequential numbers in the range 0..N-1
void fill_matrix(float *mat)
{
for(int j=0; j < N * N; j++)
mat[j] = (float) j;
}
void
transpose_CPU(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched on a single thread
__global__ void
transpose_serial(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per row of output matrix
__global__ void
transpose_parallel_per_row(float in[], float out[])
{
int i = threadIdx.x;
for(int j=0; j < N; j++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per element, in KxK threadblocks
// thread (x,y) in grid writes element (i,j) of output matrix
__global__ void
transpose_parallel_per_element(float in[], float out[])
{
int i = blockIdx.x * K + threadIdx.x;
int j = blockIdx.y * K + threadIdx.y;
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks
// thread blocks read & write tiles, in coalesced fashion
// adjacent threads read adjacent input elements, write adjacent output elmts
__global__ void
transpose_parallel_per_element_tiled(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[K][K];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks
// thread blocks read & write tiles, in coalesced fashion
// adjacent threads read adjacent input elements, write adjacent output elmts
__global__ void
transpose_parallel_per_element_tiled16(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16;
int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[16][16];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in KxK threadblocks
// thread blocks read & write tiles, in coalesced fashion
// shared memory array padded to avoid bank conflicts
__global__ void
transpose_parallel_per_element_tiled_padded(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[K][K+1];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in KxK threadblocks
// thread blocks read & write tiles, in coalesced fashion
// shared memory array padded to avoid bank conflicts
__global__ void
transpose_parallel_per_element_tiled_padded16(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16;
int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[16][16+1];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
int main(int argc, char **argv)
{
int numbytes = N * N * sizeof(float);
float *in = (float *) malloc(numbytes);
float *out = (float *) malloc(numbytes);
float *gold = (float *) malloc(numbytes);
fill_matrix(in);
transpose_CPU(in, gold);
float *d_in, *d_out;
hipMalloc(&d_in, numbytes);
hipMalloc(&d_out, numbytes);
hipMemcpy(d_in, in, numbytes, hipMemcpyHostToDevice);
GpuTimer timer;
/*
* Now time each kernel and verify that it produces the correct result.
*
* To be really careful about benchmarking purposes, we should run every kernel once
* to "warm" the system and avoid any compilation or code-caching effects, then run
* every kernel 10 or 100 times and average the timings to smooth out any variance.
* But this makes for messy code and our goal is teaching, not detailed benchmarking.
*/
timer.Start();
hipLaunchKernelGGL(( transpose_serial), dim3(1),dim3(1), 0, 0, d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_serial: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_row), dim3(1),dim3(N), 0, 0, d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_row: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
dim3 blocks(N/K,N/K); // blocks per grid
dim3 threads(K,K); // threads per block
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_element), dim3(blocks),dim3(threads), 0, 0, d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_element_tiled), dim3(blocks),dim3(threads), 0, 0, d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled %dx%d: %g ms.\nVerifying ...%s\n",
K, K, timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
dim3 blocks16x16(N/16,N/16); // blocks per grid
dim3 threads16x16(16,16); // threads per block
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_element_tiled16), dim3(blocks16x16),dim3(threads16x16), 0, 0, d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled 16x16: %g ms.\nVerifying ...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_element_tiled_padded16), dim3(blocks16x16),dim3(threads16x16), 0, 0, d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled_padded 16x16: %g ms.\nVerifying...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
hipFree(d_in);
hipFree(d_out);
}
| 6311c2b001ebebfa3e5deee9ab5198d7b7b4da80.cu | #include <stdio.h>
#include "gputimer.h"
const int N= 1024; // matrix size is NxN
const int K= 32; // tile size is KxK
// Utility functions: compare, print, and fill matrices
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line)
{
if (err != cudaSuccess) {
fprintf(stderr, "CUDA error at: %s : %d\n", file,line);
fprintf(stderr, "%s %s\n", cudaGetErrorString(err), func);;
exit(1);
}
}
int compare_matrices(float *gpu, float *ref)
{
int result = 0;
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
if (ref[i + j*N] != gpu[i + j*N])
{
// printf("reference(%d,%d) = %f but test(%d,%d) = %f\n",
// i,j,ref[i+j*N],i,j,test[i+j*N]);
result = 1;
}
return result;
}
void print_matrix(float *mat)
{
for(int j=0; j < N; j++)
{
for(int i=0; i < N; i++) { printf("%4.4g ", mat[i + j*N]); }
printf("\n");
}
}
// fill a matrix with sequential numbers in the range 0..N-1
void fill_matrix(float *mat)
{
for(int j=0; j < N * N; j++)
mat[j] = (float) j;
}
void
transpose_CPU(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched on a single thread
__global__ void
transpose_serial(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per row of output matrix
__global__ void
transpose_parallel_per_row(float in[], float out[])
{
int i = threadIdx.x;
for(int j=0; j < N; j++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per element, in KxK threadblocks
// thread (x,y) in grid writes element (i,j) of output matrix
__global__ void
transpose_parallel_per_element(float in[], float out[])
{
int i = blockIdx.x * K + threadIdx.x;
int j = blockIdx.y * K + threadIdx.y;
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks
// thread blocks read & write tiles, in coalesced fashion
// adjacent threads read adjacent input elements, write adjacent output elmts
__global__ void
transpose_parallel_per_element_tiled(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[K][K];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks
// thread blocks read & write tiles, in coalesced fashion
// adjacent threads read adjacent input elements, write adjacent output elmts
__global__ void
transpose_parallel_per_element_tiled16(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16;
int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[16][16];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in KxK threadblocks
// thread blocks read & write tiles, in coalesced fashion
// shared memory array padded to avoid bank conflicts
__global__ void
transpose_parallel_per_element_tiled_padded(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[K][K+1];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in KxK threadblocks
// thread blocks read & write tiles, in coalesced fashion
// shared memory array padded to avoid bank conflicts
__global__ void
transpose_parallel_per_element_tiled_padded16(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16;
int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[16][16+1];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
int main(int argc, char **argv)
{
int numbytes = N * N * sizeof(float);
float *in = (float *) malloc(numbytes);
float *out = (float *) malloc(numbytes);
float *gold = (float *) malloc(numbytes);
fill_matrix(in);
transpose_CPU(in, gold);
float *d_in, *d_out;
cudaMalloc(&d_in, numbytes);
cudaMalloc(&d_out, numbytes);
cudaMemcpy(d_in, in, numbytes, cudaMemcpyHostToDevice);
GpuTimer timer;
/*
* Now time each kernel and verify that it produces the correct result.
*
* To be really careful about benchmarking purposes, we should run every kernel once
* to "warm" the system and avoid any compilation or code-caching effects, then run
* every kernel 10 or 100 times and average the timings to smooth out any variance.
* But this makes for messy code and our goal is teaching, not detailed benchmarking.
*/
timer.Start();
transpose_serial<<<1,1>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_serial: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
timer.Start();
transpose_parallel_per_row<<<1,N>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_row: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
dim3 blocks(N/K,N/K); // blocks per grid
dim3 threads(K,K); // threads per block
timer.Start();
transpose_parallel_per_element<<<blocks,threads>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
timer.Start();
transpose_parallel_per_element_tiled<<<blocks,threads>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled %dx%d: %g ms.\nVerifying ...%s\n",
K, K, timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
dim3 blocks16x16(N/16,N/16); // blocks per grid
dim3 threads16x16(16,16); // threads per block
timer.Start();
transpose_parallel_per_element_tiled16<<<blocks16x16,threads16x16>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled 16x16: %g ms.\nVerifying ...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
timer.Start();
transpose_parallel_per_element_tiled_padded16<<<blocks16x16,threads16x16>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled_padded 16x16: %g ms.\nVerifying...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
cudaFree(d_in);
cudaFree(d_out);
}
|
41982512be2252fd1b4353124879cf393a9e4a55.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Simplest matrix multiplication in CUDA
*
* Scott B. Baden, University of California, San Diego
* April 2010
*
* We compute C = A * B
*
* This code assumes that the matrices are square though there
* are hooks to facilitate extending the code to non-square matrices
*
*/
// system includes
#include <stdio.h>
#include <assert.h>
#include <iostream>
// include the kernel
#include "mmpy_kernel.cu"
#include "types.h"
#include "utils.h"
// External function definitions
void genMatrix(_DOUBLE_ *a, unsigned int m, unsigned int n);
void genMatrix_bt(_DOUBLE_ *a, _DOUBLE_ *b, unsigned int n);
void genMatrix_rand(_DOUBLE_ *a, _DOUBLE_ *b, unsigned int n);
void verify(_DOUBLE_ *c, unsigned int m, unsigned int n, _DOUBLE_ eps, const char *mesg);
void verify_bt(_DOUBLE_ *c, unsigned int n, const char *mesg);
void verify(_DOUBLE_ *c_d, _DOUBLE_ *c_h, unsigned int m, unsigned int n, _DOUBLE_ eps, const char *mesg);
void verify_bt(_DOUBLE_ *c_d, _DOUBLE_ *c_h, unsigned int n, const char *mesg);
void verify_bt(_DOUBLE_ *c_d, _DOUBLE_ *c_h, unsigned int m, unsigned int n, const char *mesg);
void verify_rand(_DOUBLE_ *a, _DOUBLE_ *b, _DOUBLE_ *c, unsigned int n);
void printMatrix(_DOUBLE_ *a, unsigned int m, unsigned int n);
void cmdLine(int argc, char *argv[], int& n, int& reps, int& ntx, int& nty, _DOUBLE_ & eps, int& do_host, int& prefer_l1, int& use_rand, int& use_bt, int& use_shm_double);
void perfString(int n, int ntx, int nty, int reps, double t_h, double gflops_h, double t_d, double gflops_d, int do_host, int prefer_l1, int use_rand, int use_bt, int use_shm_double);
// extern "C"{
double getTime();
double gflops(int n, int niter, double time);
//}
void matMulHost(_DOUBLE_ *, const _DOUBLE_ *, const _DOUBLE_ *, unsigned int, unsigned int);
void setGrid(int n, dim3 &blockDim, dim3 &gridDim);
int
main(int argc, char **argv)
{
// To improve repeatabilty of measurements taken on the device,
// we multiply the number of reps by this scale factor
// Adjust as needed
const int SCALE = 10;
// Read in the command line elements
int n, reps, ntx, nty, do_host, prefer_l1, use_rand, use_bt, use_shm_double;
_DOUBLE_ eps;
cmdLine(argc, argv, n, reps, ntx, nty, eps, do_host, prefer_l1, use_rand, use_bt, use_shm_double);
// The thread geometry must evenly divide N
/*if ((n % ntx != 0) || (n % nty != 0) )
* {
* printf("Thread geometry: %d x %d\n",ntx, nty);
* printf("The length of the thread geometry axis ");
* printf("[ %d x %d]\n",ntx, nty);
* printf(" nust divide N [%d] evenly\n",n);
* exit(-1);
* }
*/
// Total amount of storage for entries
unsigned int n2 = n * n * sizeof(_DOUBLE_);
// Report on Device Characteristics
int capability = ReportDevice();
#ifdef _DOUBLE
int major = capability / 100;
int minor = capability % 100;
if ((major == 1) && (minor < 3)) {
printf(" *** You are running on a capability %d.%d device\n", major, minor);
printf(" which does not support double precision arithmetic.\n");
printf(" Recompile with single precision.\n\n");
exit(-1);
}
#endif
// setup execution configurations
int _ntx, _nty;
#if (!defined(BLOCKDIM_X) && !defined(BLOCKDIM_Y))
_ntx = ntx;
_nty = nty;
#else
_ntx = BLOCKDIM_X;
_nty = BLOCKDIM_Y;
#endif
dim3 threads(_ntx, _nty, 1);
int numblocksX = n / _ntx;
int numblocksY = n / _nty;
if (n % _ntx != 0)
numblocksX++;
if (n % _nty != 0)
numblocksY++;
dim3 grid(numblocksX, numblocksY, 1);
setGrid(n, threads, grid);
// print configurations
printf("n: %d, tx: %d, ty: %d, gridX: %d, gridY: %d, reps: %d, epsilon: %g\n\n", n, threads.x, threads.y, grid.x, grid.y, reps, eps);
#ifndef _DOUBLE
printf("Using Single precision arithmetic\n\n");
#else
printf("Using Double precision arithmetic\n\n");
#endif
if (use_bt)
printf("Using bidiagonal inputs\n");
if (use_rand)
printf("Using random inputs\n");
if (do_host)
printf("Doing host computation for comparison\n\n");
printf("\n");
// allocate an initialize host memory for A and B matrices
_DOUBLE_ *h_A = (_DOUBLE_ *)malloc(n2);
assert(h_A);
_DOUBLE_ *h_B = (_DOUBLE_ *)malloc(n2);
assert(h_B);
if (use_bt) {
genMatrix_bt(h_A, h_B, n);
} else if (use_rand) {
genMatrix_rand(h_A, h_B, n);
} else {
genMatrix(h_A, n, n);
genMatrix(h_B, n, n);
}
if (n <= 8) {
cout << "\nA:\n";
printMatrix(h_A, n, n);
cout << "\nB:\n";
printMatrix(h_B, n, n);
}
_DOUBLE_ *hostC;
double t_host = 0.0, gflops_h = 0.0;
if (do_host) {
// compute matrix product on the host
hostC = (_DOUBLE_ *)malloc(n2);
t_host = -getTime();
for (int r = 0; r < reps; r++)
matMulHost(hostC, h_A, h_B, n, n);
t_host += getTime();
gflops_h = gflops(n, reps, t_host);
printf("Host computation time: %f sec. [%f gflops]\n", t_host, gflops_h);
// Verify host result
if (use_bt)
verify_bt(hostC, n, "Host result");
else if (use_rand)
cout << "Verfication of host result not supported for random matrices\n";
else
verify(hostC, n, n, eps, "Host result");
if (n <= 8) {
printf("\nC:\n");
printMatrix(hostC, n, n);
}
}
// allocate device memory
_DOUBLE_ *d_A, *d_B, *d_C;
hipMalloc((void **)&d_A, n2);
checkCUDAError("Error allocating device memory for matrix A");
hipMalloc((void **)&d_B, n2);
checkCUDAError("Error allocating device memory for matrix B");
hipMalloc((void **)&d_C, n2);
checkCUDAError("Error allocating device memory for matrix C");
hipMemset((void **)d_A, -99, n2);
checkCUDAError("Error initializing device memory matrix A");
hipMemset((void **)d_B, -99, n2);
checkCUDAError("Error initializing device memory matrix B");
hipMemset((void **)d_C, 0, n2);
checkCUDAError("Error clearing device memory matrix C");
// copy host memory to device
hipMemcpy(d_A, h_A, n2, hipMemcpyHostToDevice);
checkCUDAError("Error copying matrix A to device");
hipMemcpy(d_B, h_B, n2, hipMemcpyHostToDevice);
checkCUDAError("Error copying matrix B to device");
// allocate host memory for the result
_DOUBLE_ *h_C = (_DOUBLE_ *)malloc(n2);
assert(h_C);
// If we set the preference for L1 cache, rather than
// shared memory, we may run slightly faster on devices that have the capability
hipFuncCache_t Preference;
if (prefer_l1)
Preference = hipFuncCachePreferL1;
else
Preference = hipFuncCachePreferShared;
hipFuncSetCacheConfig(matMul, Preference);
hipSharedMemConfig shmPreference;
if (use_shm_double)
shmPreference = hipSharedMemBankSizeEightByte;
else
shmPreference = hipSharedMemBankSizeFourByte;
hipFuncSetSharedMemConfig(matMul, shmPreference);
// Start the timer
#ifdef CUDA_TIMER
hipEvent_t start_event, stop_event;
hipEventCreate(&start_event);
hipEventCreate(&stop_event);
#endif
#ifdef CUDA_TIMER
hipEventRecord(start_event, 0);
float t_device;
#else
hipDeviceSynchronize();
double t_device = -getTime();
#endif
// execute the kernel
for (int r = 0; r < SCALE * reps; r++)
matMul << < grid, threads >> > (n, d_C, d_A, d_B);
#ifdef CUDA_TIMER
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&t_device, start_event, stop_event);
t_device /= 1000.0;
#else
// block until the device has finished
hipDeviceSynchronize();
// Stop the timer
t_device += getTime();
#endif
checkCUDAError("Error in matrixMul kernel");
// copy result from device to host
hipMemcpy(h_C, d_C, n2, hipMemcpyDeviceToHost);
checkCUDAError("Unable to retrieve result from device");
double gflops_d = gflops(n, SCALE * reps, t_device);
printf("Device computation time: %f sec. [%f gflops]\n", t_device, gflops_d);
perfString(n, ntx, nty, reps, t_host, gflops_h, t_device, gflops_d, do_host, prefer_l1, use_rand, use_bt, use_shm_double);
if (n <= 8) {
printf("\nC (device):\n");
printMatrix(h_C, n, n);
}
// Verify the device result
if (use_bt)
verify_bt(h_C, n, "Device result");
else if (use_rand)
verify_rand(h_A, h_B, h_C, n);
else
verify(h_C, n, n, eps, "Device result");
// But not for random matrices
if (do_host) {
// Compare host and device results
if (use_bt)
verify_bt(h_C, hostC, n, "Device vs. host");
else if (!use_rand)
verify(h_C, hostC, n, n, eps, "Device vs. host");
}
// clean up memory
free(h_A);
free(h_B);
free(h_C);
if (do_host)
free(hostC);
assert(hipSuccess == hipFree(d_A));
assert(hipSuccess == hipFree(d_B));
assert(hipSuccess == hipFree(d_C));
hipDeviceReset();
}
| 41982512be2252fd1b4353124879cf393a9e4a55.cu | /*
* Simplest matrix multiplication in CUDA
*
* Scott B. Baden, University of California, San Diego
* April 2010
*
* We compute C = A * B
*
* This code assumes that the matrices are square though there
* are hooks to facilitate extending the code to non-square matrices
*
*/
// system includes
#include <stdio.h>
#include <assert.h>
#include <iostream>
// include the kernel
#include "mmpy_kernel.cu"
#include "types.h"
#include "utils.h"
// External function definitions
void genMatrix(_DOUBLE_ *a, unsigned int m, unsigned int n);
void genMatrix_bt(_DOUBLE_ *a, _DOUBLE_ *b, unsigned int n);
void genMatrix_rand(_DOUBLE_ *a, _DOUBLE_ *b, unsigned int n);
void verify(_DOUBLE_ *c, unsigned int m, unsigned int n, _DOUBLE_ eps, const char *mesg);
void verify_bt(_DOUBLE_ *c, unsigned int n, const char *mesg);
void verify(_DOUBLE_ *c_d, _DOUBLE_ *c_h, unsigned int m, unsigned int n, _DOUBLE_ eps, const char *mesg);
void verify_bt(_DOUBLE_ *c_d, _DOUBLE_ *c_h, unsigned int n, const char *mesg);
void verify_bt(_DOUBLE_ *c_d, _DOUBLE_ *c_h, unsigned int m, unsigned int n, const char *mesg);
void verify_rand(_DOUBLE_ *a, _DOUBLE_ *b, _DOUBLE_ *c, unsigned int n);
void printMatrix(_DOUBLE_ *a, unsigned int m, unsigned int n);
void cmdLine(int argc, char *argv[], int& n, int& reps, int& ntx, int& nty, _DOUBLE_ & eps, int& do_host, int& prefer_l1, int& use_rand, int& use_bt, int& use_shm_double);
void perfString(int n, int ntx, int nty, int reps, double t_h, double gflops_h, double t_d, double gflops_d, int do_host, int prefer_l1, int use_rand, int use_bt, int use_shm_double);
// extern "C"{
double getTime();
double gflops(int n, int niter, double time);
//}
void matMulHost(_DOUBLE_ *, const _DOUBLE_ *, const _DOUBLE_ *, unsigned int, unsigned int);
void setGrid(int n, dim3 &blockDim, dim3 &gridDim);
int
main(int argc, char **argv)
{
// To improve repeatabilty of measurements taken on the device,
// we multiply the number of reps by this scale factor
// Adjust as needed
const int SCALE = 10;
// Read in the command line elements
int n, reps, ntx, nty, do_host, prefer_l1, use_rand, use_bt, use_shm_double;
_DOUBLE_ eps;
cmdLine(argc, argv, n, reps, ntx, nty, eps, do_host, prefer_l1, use_rand, use_bt, use_shm_double);
// The thread geometry must evenly divide N
/*if ((n % ntx != 0) || (n % nty != 0) )
* {
* printf("Thread geometry: %d x %d\n",ntx, nty);
* printf("The length of the thread geometry axis ");
* printf("[ %d x %d]\n",ntx, nty);
* printf(" nust divide N [%d] evenly\n",n);
* exit(-1);
* }
*/
// Total amount of storage for entries
unsigned int n2 = n * n * sizeof(_DOUBLE_);
// Report on Device Characteristics
int capability = ReportDevice();
#ifdef _DOUBLE
int major = capability / 100;
int minor = capability % 100;
if ((major == 1) && (minor < 3)) {
printf(" *** You are running on a capability %d.%d device\n", major, minor);
printf(" which does not support double precision arithmetic.\n");
printf(" Recompile with single precision.\n\n");
exit(-1);
}
#endif
// setup execution configurations
int _ntx, _nty;
#if (!defined(BLOCKDIM_X) && !defined(BLOCKDIM_Y))
_ntx = ntx;
_nty = nty;
#else
_ntx = BLOCKDIM_X;
_nty = BLOCKDIM_Y;
#endif
dim3 threads(_ntx, _nty, 1);
int numblocksX = n / _ntx;
int numblocksY = n / _nty;
if (n % _ntx != 0)
numblocksX++;
if (n % _nty != 0)
numblocksY++;
dim3 grid(numblocksX, numblocksY, 1);
setGrid(n, threads, grid);
// print configurations
printf("n: %d, tx: %d, ty: %d, gridX: %d, gridY: %d, reps: %d, epsilon: %g\n\n", n, threads.x, threads.y, grid.x, grid.y, reps, eps);
#ifndef _DOUBLE
printf("Using Single precision arithmetic\n\n");
#else
printf("Using Double precision arithmetic\n\n");
#endif
if (use_bt)
printf("Using bidiagonal inputs\n");
if (use_rand)
printf("Using random inputs\n");
if (do_host)
printf("Doing host computation for comparison\n\n");
printf("\n");
// allocate an initialize host memory for A and B matrices
_DOUBLE_ *h_A = (_DOUBLE_ *)malloc(n2);
assert(h_A);
_DOUBLE_ *h_B = (_DOUBLE_ *)malloc(n2);
assert(h_B);
if (use_bt) {
genMatrix_bt(h_A, h_B, n);
} else if (use_rand) {
genMatrix_rand(h_A, h_B, n);
} else {
genMatrix(h_A, n, n);
genMatrix(h_B, n, n);
}
if (n <= 8) {
cout << "\nA:\n";
printMatrix(h_A, n, n);
cout << "\nB:\n";
printMatrix(h_B, n, n);
}
_DOUBLE_ *hostC;
double t_host = 0.0, gflops_h = 0.0;
if (do_host) {
// compute matrix product on the host
hostC = (_DOUBLE_ *)malloc(n2);
t_host = -getTime();
for (int r = 0; r < reps; r++)
matMulHost(hostC, h_A, h_B, n, n);
t_host += getTime();
gflops_h = gflops(n, reps, t_host);
printf("Host computation time: %f sec. [%f gflops]\n", t_host, gflops_h);
// Verify host result
if (use_bt)
verify_bt(hostC, n, "Host result");
else if (use_rand)
cout << "Verfication of host result not supported for random matrices\n";
else
verify(hostC, n, n, eps, "Host result");
if (n <= 8) {
printf("\nC:\n");
printMatrix(hostC, n, n);
}
}
// allocate device memory
_DOUBLE_ *d_A, *d_B, *d_C;
cudaMalloc((void **)&d_A, n2);
checkCUDAError("Error allocating device memory for matrix A");
cudaMalloc((void **)&d_B, n2);
checkCUDAError("Error allocating device memory for matrix B");
cudaMalloc((void **)&d_C, n2);
checkCUDAError("Error allocating device memory for matrix C");
cudaMemset((void **)d_A, -99, n2);
checkCUDAError("Error initializing device memory matrix A");
cudaMemset((void **)d_B, -99, n2);
checkCUDAError("Error initializing device memory matrix B");
cudaMemset((void **)d_C, 0, n2);
checkCUDAError("Error clearing device memory matrix C");
// copy host memory to device
cudaMemcpy(d_A, h_A, n2, cudaMemcpyHostToDevice);
checkCUDAError("Error copying matrix A to device");
cudaMemcpy(d_B, h_B, n2, cudaMemcpyHostToDevice);
checkCUDAError("Error copying matrix B to device");
// allocate host memory for the result
_DOUBLE_ *h_C = (_DOUBLE_ *)malloc(n2);
assert(h_C);
// If we set the preference for L1 cache, rather than
// shared memory, we may run slightly faster on devices that have the capability
cudaFuncCache Preference;
if (prefer_l1)
Preference = cudaFuncCachePreferL1;
else
Preference = cudaFuncCachePreferShared;
cudaFuncSetCacheConfig(matMul, Preference);
cudaSharedMemConfig shmPreference;
if (use_shm_double)
shmPreference = cudaSharedMemBankSizeEightByte;
else
shmPreference = cudaSharedMemBankSizeFourByte;
cudaFuncSetSharedMemConfig(matMul, shmPreference);
// Start the timer
#ifdef CUDA_TIMER
cudaEvent_t start_event, stop_event;
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
#endif
#ifdef CUDA_TIMER
cudaEventRecord(start_event, 0);
float t_device;
#else
cudaThreadSynchronize();
double t_device = -getTime();
#endif
// execute the kernel
for (int r = 0; r < SCALE * reps; r++)
matMul << < grid, threads >> > (n, d_C, d_A, d_B);
#ifdef CUDA_TIMER
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&t_device, start_event, stop_event);
t_device /= 1000.0;
#else
// block until the device has finished
cudaThreadSynchronize();
// Stop the timer
t_device += getTime();
#endif
checkCUDAError("Error in matrixMul kernel");
// copy result from device to host
cudaMemcpy(h_C, d_C, n2, cudaMemcpyDeviceToHost);
checkCUDAError("Unable to retrieve result from device");
double gflops_d = gflops(n, SCALE * reps, t_device);
printf("Device computation time: %f sec. [%f gflops]\n", t_device, gflops_d);
perfString(n, ntx, nty, reps, t_host, gflops_h, t_device, gflops_d, do_host, prefer_l1, use_rand, use_bt, use_shm_double);
if (n <= 8) {
printf("\nC (device):\n");
printMatrix(h_C, n, n);
}
// Verify the device result
if (use_bt)
verify_bt(h_C, n, "Device result");
else if (use_rand)
verify_rand(h_A, h_B, h_C, n);
else
verify(h_C, n, n, eps, "Device result");
// But not for random matrices
if (do_host) {
// Compare host and device results
if (use_bt)
verify_bt(h_C, hostC, n, "Device vs. host");
else if (!use_rand)
verify(h_C, hostC, n, n, eps, "Device vs. host");
}
// clean up memory
free(h_A);
free(h_B);
free(h_C);
if (do_host)
free(hostC);
assert(cudaSuccess == cudaFree(d_A));
assert(cudaSuccess == cudaFree(d_B));
assert(cudaSuccess == cudaFree(d_C));
cudaThreadExit();
}
|
0320c35ec143fbeb9b1a0097616be53098a9f7e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
using namespace std;
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Element-wise vector addition: c[i] = a[i] + b[i], one element per thread.
// Intended for a single-block launch of `size` threads; there is no bounds
// guard, so the thread count must not exceed the array length.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Partial / abandoned parallel-sum kernel. It stages `input` plus two control
// slots into dynamic shared memory, but the reduction itself was never
// implemented: after the barrier it writes the placeholder value 2 to *output
// and bumps input[0].
// Shared-memory layout: shared[0 .. inputSize-1] = staged data,
// shared[inputSize] = step size, shared[inputSize + 1] = step.
// NOTE(review): every thread races on `*output = 2` (benign only because all
// lanes store the same value), and every thread does atomicAdd(&input[0], 1),
// mutating the input buffer as a side effect.
// NOTE(review): the call site launches inputSize + 2 threads; the thread with
// threadIdx.x == inputSize + 1 takes the else branch and reads
// input[inputSize + 1], one element past the end of the device buffer, and
// clobbers the `step` slot — confirm against the launch configuration.
__global__ void parasumKernel(float *input,unsigned int inputSize, float *output) {
extern __shared__ float shared[];
//initialize
float* stepSize = &shared[inputSize + 0];
float* step = &shared[inputSize + 1];
if(threadIdx.x == inputSize) {
*stepSize = 1;
*step = 1;
} else {
shared[threadIdx.x] = input[threadIdx.x];
}
// Make the staged values visible to every thread in the block.
__syncthreads();
*output = 2;
atomicAdd(&input[0], 1);
}
// Demo driver: uploads the sequence [1 .. inputSize] to the GPU, runs
// parasumKernel once, and prints the value the kernel stored in *output.
// Returns the last HIP status (0 on success).
int main() {
    const int inputSize = 100;
    float output;
    cout << "Generating input numbers from [1 to " << inputSize << "] ";
    float input[inputSize];
    for(int i = 0; i < inputSize; i++) {
        input[i] = i + 1;
    }
    // Null-initialized so the Error path can safely hipFree() both pointers
    // even when an early failure jumps there before any allocation succeeded
    // (previously they were indeterminate and hipFree saw garbage).
    float* d_input = nullptr;
    float* d_output = nullptr;
    hipError_t cudaStatus;
    cout << "OK\n Initializing GPU..";
    cudaStatus = hipSetDevice(0);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    cout << "OK\n Allocating GPU memory..";
    cudaStatus = hipMalloc((void**)&d_input, inputSize * sizeof(float));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }
    cudaStatus = hipMalloc((void**)&d_output, sizeof(float));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }
    cout << "OK\n Transferring input to GPU..";
    cudaStatus = hipMemcpy(d_input, input, inputSize * sizeof(float), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }
    cout << "OK\n Launching kernel on GPU..";
    //Release the kraken!
    // One thread per input element plus one extra (threadIdx.x == inputSize)
    // that writes both control slots. The old launch used inputSize + 2
    // threads, which made the last thread read input[inputSize + 1] out of
    // bounds. The kernel indexes shared[] up to [inputSize + 1], so it needs
    // room for inputSize + 2 floats; the old size (inputSize*sizeof(float))+2
    // added two *bytes* rather than two floats and under-allocated.
    hipLaunchKernelGGL(parasumKernel, dim3(1), dim3(inputSize + 1), (inputSize + 2) * sizeof(float), 0, d_input, inputSize, d_output);
    // Check for any errors launching the kernel
    cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess) {
        // Message previously named addKernel although parasumKernel is launched.
        fprintf(stderr, "parasumKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
        goto Error;
    }
    // hipDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching parasumKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = hipMemcpy(&output, d_output, sizeof(float), hipMemcpyDeviceToHost);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }
    cout << "OK\n Result: " << output;
Error:
    hipFree(d_input);
    hipFree(d_output);
    getchar();
    return cudaStatus;
}
// Legacy entry point: adds two fixed 5-element vectors on the GPU through
// addWithCuda() and prints the element-wise sums.
// Returns 0 on success, 1 on any HIP failure.
int oldmain()
{
    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };
    // Add vectors in parallel.
    const hipError_t status = addWithCuda(c, a, b, arraySize);
    if (status != hipSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }
    printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
           c[0], c[1], c[2], c[3], c[4]);
    // hipDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    if (hipDeviceReset() != hipSuccess) {
        fprintf(stderr, "hipDeviceReset failed!");
        return 1;
    }
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Allocates device buffers for a, b and c, copies the inputs up, launches
// addKernel with one thread per element in a single block, synchronizes, and
// copies the sums back into c. All device buffers are released on the shared
// Error path; returns the first failing HIP status, or hipSuccess.
// Preconditions: a, b, c each hold at least `size` ints; `size` must not
// exceed the device's max threads per block, since the launch uses one block.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
// Zero-initialized so hipFree() on the Error path is safe even before the
// corresponding hipMalloc has run.
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
// Shared cleanup: hipFree(nullptr-equivalent) is a no-op, so this is safe
// on every path thanks to the zero-initialization above.
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| 0320c35ec143fbeb9b1a0097616be53098a9f7e8.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
using namespace std;
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Element-wise vector addition: c[i] = a[i] + b[i], one element per thread.
// Intended for a single-block launch of `size` threads; there is no bounds
// guard, so the thread count must not exceed the array length.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Partial / abandoned parallel-sum kernel. It stages `input` plus two control
// slots into dynamic shared memory, but the reduction itself was never
// implemented: after the barrier it writes the placeholder value 2 to *output
// and bumps input[0].
// Shared-memory layout: shared[0 .. inputSize-1] = staged data,
// shared[inputSize] = step size, shared[inputSize + 1] = step.
// NOTE(review): every thread races on `*output = 2` (benign only because all
// lanes store the same value), and every thread does atomicAdd(&input[0], 1),
// mutating the input buffer as a side effect.
// NOTE(review): the call site launches inputSize + 2 threads; the thread with
// threadIdx.x == inputSize + 1 takes the else branch and reads
// input[inputSize + 1], one element past the end of the device buffer, and
// clobbers the `step` slot — confirm against the launch configuration.
__global__ void parasumKernel(float *input,unsigned int inputSize, float *output) {
extern __shared__ float shared[];
//initialize
float* stepSize = &shared[inputSize + 0];
float* step = &shared[inputSize + 1];
if(threadIdx.x == inputSize) {
*stepSize = 1;
*step = 1;
} else {
shared[threadIdx.x] = input[threadIdx.x];
}
// Make the staged values visible to every thread in the block.
__syncthreads();
*output = 2;
atomicAdd(&input[0], 1);
}
// Demo driver: uploads the sequence [1 .. inputSize] to the GPU, runs
// parasumKernel once, and prints the value the kernel stored in *output.
// Returns the last CUDA status (0 on success).
int main() {
    const int inputSize = 100;
    float output;
    cout << "Generating input numbers from [1 to " << inputSize << "] ";
    float input[inputSize];
    for(int i = 0; i < inputSize; i++) {
        input[i] = i + 1;
    }
    // Null-initialized so the Error path can safely cudaFree() both pointers
    // even when an early failure jumps there before any allocation succeeded
    // (previously they were indeterminate and cudaFree saw garbage).
    float* d_input = nullptr;
    float* d_output = nullptr;
    cudaError_t cudaStatus;
    cout << "OK\n Initializing GPU..";
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    cout << "OK\n Allocating GPU memory..";
    cudaStatus = cudaMalloc((void**)&d_input, inputSize * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&d_output, sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cout << "OK\n Transferring input to GPU..";
    cudaStatus = cudaMemcpy(d_input, input, inputSize * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cout << "OK\n Launching kernel on GPU..";
    //Release the kraken!
    // One thread per input element plus one extra (threadIdx.x == inputSize)
    // that writes both control slots. The old launch used inputSize + 2
    // threads, which made the last thread read input[inputSize + 1] out of
    // bounds. The kernel indexes shared[] up to [inputSize + 1], so it needs
    // room for inputSize + 2 floats; the old size (inputSize*sizeof(float))+2
    // added two *bytes* rather than two floats and under-allocated.
    parasumKernel<<<1, inputSize + 1, (inputSize + 2) * sizeof(float)>>>(d_input, inputSize, d_output);
    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        // Message previously named addKernel although parasumKernel is launched.
        fprintf(stderr, "parasumKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching parasumKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(&output, d_output, sizeof(float), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cout << "OK\n Result: " << output;
Error:
    cudaFree(d_input);
    cudaFree(d_output);
    getchar();
    return cudaStatus;
}
// Legacy entry point: adds two fixed 5-element vectors on the GPU through
// addWithCuda() and prints the element-wise sums.
// Returns 0 on success, 1 on any CUDA failure.
int oldmain()
{
    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };
    // Add vectors in parallel.
    const cudaError_t status = addWithCuda(c, a, b, arraySize);
    if (status != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }
    printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
           c[0], c[1], c[2], c[3], c[4]);
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    if (cudaDeviceReset() != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Allocates device buffers for a, b and c, copies the inputs up, launches
// addKernel with one thread per element in a single block, synchronizes, and
// copies the sums back into c. All device buffers are released on the shared
// Error path; returns the first failing CUDA status, or cudaSuccess.
// Preconditions: a, b, c each hold at least `size` ints; `size` must not
// exceed the device's max threads per block, since the launch uses one block.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
// Zero-initialized so cudaFree() on the Error path is safe even before the
// corresponding cudaMalloc has run.
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
// Shared cleanup: freeing a still-zero pointer is a no-op, so this is safe
// on every path thanks to the zero-initialization above.
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
94c2c436a7e5fb3b58858257d4d30597f8f139ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h> // CUDA Math.
#include <thrust/sort.h>
#include <thrust/functional.h>
#include "distance_measurer.h"
namespace radi {
// Default-construct with an empty reference cloud. The device pointers are
// explicitly nulled so the destructor's hipFree() guards (and any call
// sequence that skips setReferPointCloud) never act on indeterminate values.
DistanceMeasurer::DistanceMeasurer () : num_points_(0), dev_num_points_(nullptr), dev_points_(nullptr)
{ }
// Releases the device-side copies of the reference cloud.
// NOTE(review): the null checks assume dev_num_points_ / dev_points_ start
// out null until setReferPointCloud() allocates them; the default constructor
// visible in this file does not null them, so confirm the class declaration
// provides in-class initializers.
DistanceMeasurer::~DistanceMeasurer ()
{
if (dev_num_points_)
hipFree(dev_num_points_);
if (dev_points_)
hipFree(dev_points_);
}
// Uploads the reference point cloud to the device as a flat array of
// x,y,z triplets (3 floats per point) plus a device-resident point count.
// Safe to call repeatedly: buffers from a previous call are released first
// (the original version leaked them).
void
DistanceMeasurer::setReferPointCloud (pcl::PointCloud<pcl::PointXYZRGB>::ConstPtr refer_point_cloud)
{
  // Free any earlier upload so repeated calls do not leak device memory.
  if (dev_num_points_)
    hipFree (dev_num_points_);
  if (dev_points_)
    hipFree (dev_points_);
  num_points_ = refer_point_cloud->size ();
  hipMalloc ((void **)&dev_num_points_, sizeof(int));
  hipMemcpy (dev_num_points_, &num_points_, sizeof(int), hipMemcpyHostToDevice);
  // Pack the cloud into a host staging buffer before the single bulk copy.
  int total_size = num_points_ * 3;
  float * host_points = (float *)malloc (total_size*sizeof(float));
  for (int i = 0; i < num_points_; ++i)
  {
    host_points[i*3+0] = refer_point_cloud->points[i].x;
    host_points[i*3+1] = refer_point_cloud->points[i].y;
    host_points[i*3+2] = refer_point_cloud->points[i].z;
  }
  hipMalloc ((void **)&dev_points_, total_size*sizeof(float));
  hipMemcpy (dev_points_, host_points, total_size*sizeof(float), hipMemcpyHostToDevice);
  free (host_points);
}
// Returns the smallest Euclidean distance between `point` and the reference
// cloud previously uploaded via setReferPointCloud(). Distances are computed
// on the device (one thread per reference point); the minimum is taken on the
// host. Precondition: the reference cloud is non-empty.
float
DistanceMeasurer::calShortestDistance (const pcl::PointXYZRGB & point)
{
  // Upload the query point.
  float host_point[3];
  host_point[0] = point.x;
  host_point[1] = point.y;
  host_point[2] = point.z;
  float * dev_point;
  hipMalloc ((void **)&dev_point, 3*sizeof(float));
  hipMemcpy (dev_point, host_point, 3*sizeof(float), hipMemcpyHostToDevice);
  float * dev_distances;
  hipMalloc ((void **)&dev_distances, num_points_*sizeof(float));
  hipLaunchKernelGGL(( distPoint2Point), dim3((num_points_+255)/256), dim3(256), 0, 0, dev_point, dev_points_, dev_num_points_, dev_distances);
  float * distances = (float *) malloc (num_points_*sizeof(float));
  // Blocking copy also synchronizes with the kernel on the default stream.
  hipMemcpy (distances, dev_distances, num_points_*sizeof(float), hipMemcpyDeviceToHost);
  // A single O(n) scan replaces the previous full sort. The old code called
  // thrust::stable_sort with thrust::less_equal, which is not a strict weak
  // ordering and therefore undefined behaviour as a sort comparator.
  float min_distance = distances[0];
  for (int i = 1; i < num_points_; ++i)
    if (distances[i] < min_distance)
      min_distance = distances[i];
  free (distances);
  hipFree (dev_point);
  hipFree (dev_distances);
  return (min_distance);
}
// Computes, for each reference point, the Euclidean distance to the single
// query point `dev_point`. One thread per reference point with a bounds
// guard; dev_points is a flat x,y,z array (3 floats per point) and
// dev_num_points is a single device-resident int.
__global__ void
distPoint2Point (const float * dev_point, const float * dev_points, const int * dev_num_points, float * dev_distances)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < dev_num_points[0])
{
const float * dev_point_in_cloud = &dev_points[tid*3];
// norm3df(x, y, z) = sqrt(x*x + y*y + z*z) (CUDA math library).
dev_distances[tid] = norm3df (dev_point[0]-dev_point_in_cloud[0],
dev_point[1]-dev_point_in_cloud[1], dev_point[2]-dev_point_in_cloud[2]);
}
}
}
| 94c2c436a7e5fb3b58858257d4d30597f8f139ef.cu | #include <math.h> // CUDA Math.
#include <thrust/sort.h>
#include <thrust/functional.h>
#include "distance_measurer.h"
namespace radi {
// Default-construct with an empty reference cloud. The device pointers are
// explicitly nulled so the destructor's cudaFree() guards (and any call
// sequence that skips setReferPointCloud) never act on indeterminate values.
DistanceMeasurer::DistanceMeasurer () : num_points_(0), dev_num_points_(nullptr), dev_points_(nullptr)
{ }
// Releases the device-side copies of the reference cloud.
// NOTE(review): the null checks assume dev_num_points_ / dev_points_ start
// out null until setReferPointCloud() allocates them; the default constructor
// visible in this file does not null them, so confirm the class declaration
// provides in-class initializers.
DistanceMeasurer::~DistanceMeasurer ()
{
if (dev_num_points_)
cudaFree(dev_num_points_);
if (dev_points_)
cudaFree(dev_points_);
}
// Uploads the reference point cloud to the device as a flat array of
// x,y,z triplets (3 floats per point) plus a device-resident point count.
// Safe to call repeatedly: buffers from a previous call are released first
// (the original version leaked them).
void
DistanceMeasurer::setReferPointCloud (pcl::PointCloud<pcl::PointXYZRGB>::ConstPtr refer_point_cloud)
{
  // Free any earlier upload so repeated calls do not leak device memory.
  if (dev_num_points_)
    cudaFree (dev_num_points_);
  if (dev_points_)
    cudaFree (dev_points_);
  num_points_ = refer_point_cloud->size ();
  cudaMalloc ((void **)&dev_num_points_, sizeof(int));
  cudaMemcpy (dev_num_points_, &num_points_, sizeof(int), cudaMemcpyHostToDevice);
  // Pack the cloud into a host staging buffer before the single bulk copy.
  int total_size = num_points_ * 3;
  float * host_points = (float *)malloc (total_size*sizeof(float));
  for (int i = 0; i < num_points_; ++i)
  {
    host_points[i*3+0] = refer_point_cloud->points[i].x;
    host_points[i*3+1] = refer_point_cloud->points[i].y;
    host_points[i*3+2] = refer_point_cloud->points[i].z;
  }
  cudaMalloc (&dev_points_, total_size*sizeof(float));
  cudaMemcpy (dev_points_, host_points, total_size*sizeof(float), cudaMemcpyHostToDevice);
  free (host_points);
}
// Returns the smallest Euclidean distance between `point` and the reference
// cloud previously uploaded via setReferPointCloud(). Distances are computed
// on the device (one thread per reference point); the minimum is taken on the
// host. Precondition: the reference cloud is non-empty.
float
DistanceMeasurer::calShortestDistance (const pcl::PointXYZRGB & point)
{
  // Upload the query point.
  float host_point[3];
  host_point[0] = point.x;
  host_point[1] = point.y;
  host_point[2] = point.z;
  float * dev_point;
  cudaMalloc ((void **)&dev_point, 3*sizeof(float));
  cudaMemcpy (dev_point, host_point, 3*sizeof(float), cudaMemcpyHostToDevice);
  float * dev_distances;
  cudaMalloc ((void **)&dev_distances, num_points_*sizeof(float));
  distPoint2Point<<<(num_points_+255)/256, 256>>> (dev_point, dev_points_, dev_num_points_, dev_distances);
  float * distances = (float *) malloc (num_points_*sizeof(float));
  // Blocking copy also synchronizes with the kernel on the default stream.
  cudaMemcpy (distances, dev_distances, num_points_*sizeof(float), cudaMemcpyDeviceToHost);
  // A single O(n) scan replaces the previous full sort. The old code called
  // thrust::stable_sort with thrust::less_equal, which is not a strict weak
  // ordering and therefore undefined behaviour as a sort comparator.
  float min_distance = distances[0];
  for (int i = 1; i < num_points_; ++i)
    if (distances[i] < min_distance)
      min_distance = distances[i];
  free (distances);
  cudaFree (dev_point);
  cudaFree (dev_distances);
  return (min_distance);
}
// Computes, for each reference point, the Euclidean distance to the single
// query point `dev_point`. One thread per reference point with a bounds
// guard; dev_points is a flat x,y,z array (3 floats per point) and
// dev_num_points is a single device-resident int.
__global__ void
distPoint2Point (const float * dev_point, const float * dev_points, const int * dev_num_points, float * dev_distances)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < dev_num_points[0])
{
const float * dev_point_in_cloud = &dev_points[tid*3];
// norm3df(x, y, z) = sqrt(x*x + y*y + z*z) (CUDA math library).
dev_distances[tid] = norm3df (dev_point[0]-dev_point_in_cloud[0],
dev_point[1]-dev_point_in_cloud[1], dev_point[2]-dev_point_in_cloud[2]);
}
}
}
|
cf44b0ab1af4787a70a8fae40dcea13556030fea.hip | // !!! This is a file automatically generated by hipify!!!
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include <execution/LaunchContext.h>
#include <helpers/logger.h>
#include <exceptions/cuda_exception.h>
#include <helpers/cublasHelper.h>
#include <thread>
#include <execution/AffinityManager.h>
thread_local sd::ContextBuffers contextBuffers = sd::ContextBuffers();
namespace sd {
std::vector<std::shared_ptr<LaunchContext>> LaunchContext::_contexts = std::vector<std::shared_ptr<LaunchContext>>();
std::mutex LaunchContext::_mutex;
MAP_IMPL<int, std::mutex*> LaunchContext::_deviceMutexes;
////////////////////////////////////////////////////////////////////////
LaunchContext::LaunchContext(hipStream_t *cudaStream, hipStream_t& specialCudaStream, void* reductionPointer, void* scalarPointer, int* allocationPointer) {
//_cudaStream = cudaStream;
//_cudaSpecialStream = &specialCudaStream; // ideal is = new hipStream_t; *_cudaSpecialStream = specialCudaStream;
//_reductionPointer = reductionPointer;
//_scalarPointer = scalarPointer;
//_allocationPointer = allocationPointer;
_workspace = nullptr;
_isAllocated = false;
}
// Returns the per-device mutex for the calling thread's current device.
// NOTE(review): _deviceMutexes is accessed without holding _mutex; this looks
// safe only after defaultContext() has fully populated the map — confirm that
// callers guarantee that ordering, since map operator[] may insert.
std::mutex* LaunchContext::deviceMutex() {
auto deviceId = AffinityManager::currentDeviceId();
return _deviceMutexes[deviceId];
}
LaunchContext::~LaunchContext() {
if (_isAllocated) {
}
}
////////////////////////////////////////////////////////////////////////
LaunchContext::LaunchContext() {
// default constructor, just to make clang/ranlib happy
_workspace = nullptr;
_deviceID = 0;
_isAllocated = true;
}
LaunchContext::LaunchContext(Nd4jPointer cudaStream, Nd4jPointer reductionPointer, Nd4jPointer scalarPointer, Nd4jPointer allocationPointer) {
_isAllocated = false;
//_cudaStream = reinterpret_cast<hipStream_t*>(cudaStream);
// _cudaSpecialStream = reinterpret_cast<hipStream_t*>(cudaStream);
//_reductionPointer = reductionPointer;
//_scalarPointer = scalarPointer;
//_allocationPointer = reinterpret_cast<int *>(allocationPointer);
}
// Lazily builds one LaunchContext (and one mutex) per device on first use,
// guarded by _mutex, then returns the shared context for the caller's
// current device.
LaunchContext* LaunchContext::defaultContext() {
/**
 * This method returns LaunchContext, that has multiple entities within:
 * 1) temporary buffers. they must be per-thread
 * 2) CUDA stream. it must be either per-thread or per-device
 * 3) cuBLAS handle. it must be per-device
 */
auto deviceId = AffinityManager::currentDeviceId();
{
// we need this block synchronous, to avoid double initialization etc
std::lock_guard<std::mutex> lock(_mutex);
if (LaunchContext::_contexts.empty()) {
// create one context per device
auto numDevices = AffinityManager::numberOfDevices();
_contexts.resize(numDevices);
for (int e = 0; e < numDevices; e++) {
_deviceMutexes[e] = new std::mutex();
// Each context is constructed while its device is current.
AffinityManager::setCurrentNativeDevice(e);
LaunchContext::_contexts[e] = std::make_shared<LaunchContext>();
}
// don't forget to restore device back again
AffinityManager::setCurrentNativeDevice(deviceId);
}
}
// return context for current device
// NOTE(review): this read happens outside the lock; it is safe only because
// _contexts is sized once under the lock and never resized afterwards —
// verify no other writer exists.
return LaunchContext::_contexts[deviceId].get();
}
void* LaunchContext::getReductionPointer () const {
return contextBuffers.reductionBuffer();
};
void* LaunchContext::getScalarPointer() const {
return contextBuffers.scalarBuffer();
};
int* LaunchContext::getAllocationPointer() const {
return reinterpret_cast<int*>(contextBuffers.allocationBuffer());
};
void* LaunchContext::getCublasHandle() const {
return CublasHelper::getInstance().handle();
};
void* LaunchContext::getCusolverHandle() const {
return CublasHelper::getInstance().solver();
};
hipStream_t* LaunchContext::getCudaStream() const {
return reinterpret_cast<hipStream_t*>(contextBuffers.execStream());
};
hipStream_t* LaunchContext::getCudaSpecialStream() const {
return reinterpret_cast<hipStream_t*>(contextBuffers.specialStream());;
};
void LaunchContext::setReductionPointer (void* reductionPointer) {
contextBuffers.setReductionBuffer(reductionPointer);
};
void LaunchContext::setScalarPointer(void* scalarPointer) {
contextBuffers.setScalarBuffer(scalarPointer);
};
void LaunchContext::setAllocationPointer(int* allocationPointer) {
contextBuffers.setAllocationBuffer(allocationPointer);
};
void LaunchContext::setCudaStream(hipStream_t* cudaStream) {
//_cudaStream = cudaStream;
};
void LaunchContext::setCudaSpecialStream(hipStream_t* cudaStream) {
//_cudaSpecialStream = cudaStream;
};
void LaunchContext::setCublasHandle(void *handle) {
_cublasHandle = handle;
};
void LaunchContext::swapContextBuffers(ContextBuffers &buffers) {
contextBuffers = buffers;
};
void LaunchContext::releaseBuffers() {
//nd4j_printf("LaunchContext::releaseBuffers() was invoked\n", "");
contextBuffers.release();
}
bool LaunchContext::isInitialized() {
return contextBuffers.isInitialized();
}
void* LaunchContext::getCuDnnHandle() const {
return CublasHelper::getInstance().cudnn();
}
sd::ErrorReference* LaunchContext::errorReference() {
return contextBuffers.errorReference();
}
void* LaunchContext::engine() {
return _engine;
}
} | cf44b0ab1af4787a70a8fae40dcea13556030fea.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include <execution/LaunchContext.h>
#include <helpers/logger.h>
#include <exceptions/cuda_exception.h>
#include <helpers/cublasHelper.h>
#include <thread>
#include <execution/AffinityManager.h>
thread_local sd::ContextBuffers contextBuffers = sd::ContextBuffers();
namespace sd {
std::vector<std::shared_ptr<LaunchContext>> LaunchContext::_contexts = std::vector<std::shared_ptr<LaunchContext>>();
std::mutex LaunchContext::_mutex;
MAP_IMPL<int, std::mutex*> LaunchContext::_deviceMutexes;
////////////////////////////////////////////////////////////////////////
LaunchContext::LaunchContext(cudaStream_t *cudaStream, cudaStream_t& specialCudaStream, void* reductionPointer, void* scalarPointer, int* allocationPointer) {
//_cudaStream = cudaStream;
//_cudaSpecialStream = &specialCudaStream; // ideal is = new cudaStream_t; *_cudaSpecialStream = specialCudaStream;
//_reductionPointer = reductionPointer;
//_scalarPointer = scalarPointer;
//_allocationPointer = allocationPointer;
_workspace = nullptr;
_isAllocated = false;
}
// Returns the per-device mutex for the calling thread's current device.
// NOTE(review): _deviceMutexes is accessed without holding _mutex; this looks
// safe only after defaultContext() has fully populated the map — confirm that
// callers guarantee that ordering, since map operator[] may insert.
std::mutex* LaunchContext::deviceMutex() {
auto deviceId = AffinityManager::currentDeviceId();
return _deviceMutexes[deviceId];
}
LaunchContext::~LaunchContext() {
if (_isAllocated) {
}
}
////////////////////////////////////////////////////////////////////////
LaunchContext::LaunchContext() {
// default constructor, just to make clang/ranlib happy
_workspace = nullptr;
_deviceID = 0;
_isAllocated = true;
}
LaunchContext::LaunchContext(Nd4jPointer cudaStream, Nd4jPointer reductionPointer, Nd4jPointer scalarPointer, Nd4jPointer allocationPointer) {
_isAllocated = false;
//_cudaStream = reinterpret_cast<cudaStream_t*>(cudaStream);
// _cudaSpecialStream = reinterpret_cast<cudaStream_t*>(cudaStream);
//_reductionPointer = reductionPointer;
//_scalarPointer = scalarPointer;
//_allocationPointer = reinterpret_cast<int *>(allocationPointer);
}
// Lazily builds one LaunchContext (and one mutex) per device on first use,
// guarded by _mutex, then returns the shared context for the caller's
// current device.
LaunchContext* LaunchContext::defaultContext() {
/**
 * This method returns LaunchContext, that has multiple entities within:
 * 1) temporary buffers. they must be per-thread
 * 2) CUDA stream. it must be either per-thread or per-device
 * 3) cuBLAS handle. it must be per-device
 */
auto deviceId = AffinityManager::currentDeviceId();
{
// we need this block synchronous, to avoid double initialization etc
std::lock_guard<std::mutex> lock(_mutex);
if (LaunchContext::_contexts.empty()) {
// create one context per device
auto numDevices = AffinityManager::numberOfDevices();
_contexts.resize(numDevices);
for (int e = 0; e < numDevices; e++) {
_deviceMutexes[e] = new std::mutex();
// Each context is constructed while its device is current.
AffinityManager::setCurrentNativeDevice(e);
LaunchContext::_contexts[e] = std::make_shared<LaunchContext>();
}
// don't forget to restore device back again
AffinityManager::setCurrentNativeDevice(deviceId);
}
}
// return context for current device
// NOTE(review): this read happens outside the lock; it is safe only because
// _contexts is sized once under the lock and never resized afterwards —
// verify no other writer exists.
return LaunchContext::_contexts[deviceId].get();
}
void* LaunchContext::getReductionPointer () const {
return contextBuffers.reductionBuffer();
};
void* LaunchContext::getScalarPointer() const {
return contextBuffers.scalarBuffer();
};
int* LaunchContext::getAllocationPointer() const {
return reinterpret_cast<int*>(contextBuffers.allocationBuffer());
};
void* LaunchContext::getCublasHandle() const {
return CublasHelper::getInstance().handle();
};
void* LaunchContext::getCusolverHandle() const {
return CublasHelper::getInstance().solver();
};
cudaStream_t* LaunchContext::getCudaStream() const {
return reinterpret_cast<cudaStream_t*>(contextBuffers.execStream());
};
cudaStream_t* LaunchContext::getCudaSpecialStream() const {
return reinterpret_cast<cudaStream_t*>(contextBuffers.specialStream());;
};
void LaunchContext::setReductionPointer (void* reductionPointer) {
contextBuffers.setReductionBuffer(reductionPointer);
};
void LaunchContext::setScalarPointer(void* scalarPointer) {
contextBuffers.setScalarBuffer(scalarPointer);
};
void LaunchContext::setAllocationPointer(int* allocationPointer) {
contextBuffers.setAllocationBuffer(allocationPointer);
};
void LaunchContext::setCudaStream(cudaStream_t* cudaStream) {
//_cudaStream = cudaStream;
};
void LaunchContext::setCudaSpecialStream(cudaStream_t* cudaStream) {
//_cudaSpecialStream = cudaStream;
};
void LaunchContext::setCublasHandle(void *handle) {
_cublasHandle = handle;
};
void LaunchContext::swapContextBuffers(ContextBuffers &buffers) {
contextBuffers = buffers;
};
void LaunchContext::releaseBuffers() {
//nd4j_printf("LaunchContext::releaseBuffers() was invoked\n", "");
contextBuffers.release();
}
bool LaunchContext::isInitialized() {
return contextBuffers.isInitialized();
}
void* LaunchContext::getCuDnnHandle() const {
return CublasHelper::getInstance().cudnn();
}
sd::ErrorReference* LaunchContext::errorReference() {
return contextBuffers.errorReference();
}
void* LaunchContext::engine() {
return _engine;
}
} |
f905b69610a056eb3ec18eb6a791458f7ff34697.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lite/core/op_registry.h"
#include "lite/kernels/cuda/leaky_relu_compute.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
// Element-wise LeakyReLU: output[i] = input[i] when input[i] >= 0,
// otherwise alpha * input[i]. One thread per element; threads with
// index >= num do nothing, so any grid covering `num` elements is valid.
template <typename T>
__global__ void LeakyReluKernel(const int num,
const T alpha,
const T* input,
T* output) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < num) {
#if __CUDA_ARCH__ >= 350
// __ldg routes the loads through the read-only data cache (sm_35+).
output[index] = __ldg(input + index) >= 0 ? __ldg(input + index)
: __ldg(input + index) * alpha;
#else
output[index] = input[index] >= 0 ? input[index] : input[index] * alpha;
#endif
}
}
// Launches LeakyReluKernel over every element of the input tensor on the
// context's execution stream; the output buffer is allocated on the CUDA
// target. alpha and the tensors come from the op's parameter struct.
void LeakyReluCompute::Run() {
auto& param = this->Param<param_t>();
auto& ctx = this->ctx_->template As<CUDAContext>();
auto stream = ctx.exec_stream();
int num = static_cast<int>(param.X->numel());
float alpha = param.Leaky_relu_alpha;
auto input = param.X->data<float>();
auto output = param.Out->mutable_data<float>(TARGET(kCUDA));
// 1024 threads per block; ceil-divide so a partial tail block is covered.
int threads = 1024;
int blocks = (num + threads - 1) / threads;
hipLaunchKernelGGL(( LeakyReluKernel), dim3(blocks), dim3(threads), 0, stream, num, alpha, input, output);
// NOTE(review): launch failures are only logged at INFO severity and the op
// still reports success — consider whether this should propagate an error.
hipError_t error = hipGetLastError();
if (error != hipSuccess) LOG(INFO) << hipGetErrorString(error);
}
} // namespace cuda
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_LITE_KERNEL(leaky_relu,
kCUDA,
kFloat,
kNCHW,
paddle::lite::kernels::cuda::LeakyReluCompute,
def)
.BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA))})
.Finalize();
| f905b69610a056eb3ec18eb6a791458f7ff34697.cu | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lite/core/op_registry.h"
#include "lite/kernels/cuda/leaky_relu_compute.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
template <typename T>
__global__ void LeakyReluKernel(const int num,
const T alpha,
const T* input,
T* output) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < num) {
#if __CUDA_ARCH__ >= 350
output[index] = __ldg(input + index) >= 0 ? __ldg(input + index)
: __ldg(input + index) * alpha;
#else
output[index] = input[index] >= 0 ? input[index] : input[index] * alpha;
#endif
}
}
void LeakyReluCompute::Run() {
auto& param = this->Param<param_t>();
auto& ctx = this->ctx_->template As<CUDAContext>();
auto stream = ctx.exec_stream();
int num = static_cast<int>(param.X->numel());
float alpha = param.Leaky_relu_alpha;
auto input = param.X->data<float>();
auto output = param.Out->mutable_data<float>(TARGET(kCUDA));
int threads = 1024;
int blocks = (num + threads - 1) / threads;
LeakyReluKernel<<<blocks, threads, 0, stream>>>(num, alpha, input, output);
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) LOG(INFO) << cudaGetErrorString(error);
}
} // namespace cuda
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_LITE_KERNEL(leaky_relu,
kCUDA,
kFloat,
kNCHW,
paddle::lite::kernels::cuda::LeakyReluCompute,
def)
.BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA))})
.Finalize();
|
c646842768c89ff35c12d26ba2dbe545ea7607a8.hip | // !!! This is a file automatically generated by hipify!!!
#include <string>
#include <chrono>
#define NDEBUG 1
#include <prover_reference_functions.hpp>
#include "multiexp/reduce.cu"
// This is where all the FFTs happen
// template over the bundle of types and functions.
// Overwrites ca!
template <typename B>
typename B::vector_Fr *compute_H(size_t d, typename B::vector_Fr *ca,
typename B::vector_Fr *cb,
typename B::vector_Fr *cc) {
auto domain = B::get_evaluation_domain(d + 1);
B::domain_iFFT(domain, ca);
B::domain_iFFT(domain, cb);
B::domain_cosetFFT(domain, ca);
B::domain_cosetFFT(domain, cb);
// Use ca to store H
auto H_tmp = ca;
size_t m = B::domain_get_m(domain);
// for i in 0 to m: H_tmp[i] *= cb[i]
B::vector_Fr_muleq(H_tmp, cb, m);
B::domain_iFFT(domain, cc);
B::domain_cosetFFT(domain, cc);
m = B::domain_get_m(domain);
// for i in 0 to m: H_tmp[i] -= cc[i]
B::vector_Fr_subeq(H_tmp, cc, m);
B::domain_divide_by_Z_on_coset(domain, H_tmp);
B::domain_icosetFFT(domain, H_tmp);
m = B::domain_get_m(domain);
typename B::vector_Fr *H_res = B::vector_Fr_zeros(m + 1);
B::vector_Fr_copy_into(H_tmp, H_res, m);
return H_res;
}
static size_t read_size_t(FILE* input) {
size_t n;
fread((void *) &n, sizeof(size_t), 1, input);
return n;
}
template< typename B >
struct ec_type;
template<>
struct ec_type<mnt4753_libsnark> {
typedef ECp_MNT4 ECp;
typedef ECp2_MNT4 ECpe;
};
template<>
struct ec_type<mnt6753_libsnark> {
typedef ECp_MNT6 ECp;
typedef ECp3_MNT6 ECpe;
};
void
check_trailing(FILE *f, const char *name) {
long bytes_remaining = 0;
while (fgetc(f) != EOF)
++bytes_remaining;
if (bytes_remaining > 0)
fprintf(stderr, "!! Trailing characters in \"%s\": %ld\n", name, bytes_remaining);
}
static inline auto now() -> decltype(std::chrono::high_resolution_clock::now()) {
return std::chrono::high_resolution_clock::now();
}
template<typename T>
void
print_time(T &t1, const char *str) {
auto t2 = std::chrono::high_resolution_clock::now();
auto tim = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count();
printf("%s: %ld ms\n", str, tim);
t1 = t2;
}
template <const int R, const int C, typename B>
void prove_aux(
size_t primary_input_size,
size_t d,
size_t m,
const var *w,
decltype(std::chrono::high_resolution_clock::now()) t,
// const var *A_mults,
// var *out_A,
const var *B1_mults,
var *out_B1,
const var *B2_mults,
var *out_B2,
const var *L_mults,
var *out_L,
typename B::groth16_params *params,
typename B::groth16_input *inputs,
typename B::vector_Fr **coefficients_for_H,
typename B::vector_G1 **H,
typename B::G1 **evaluation_At,
typename B::G1 **evaluation_Bt1,
typename B::G2 **evaluation_Bt2,
typename B::G1 **evaluation_Ht,
typename B::G1 **evaluation_Lt,
typename B::G1 **scaled_Bt1,
typename B::G1 **Lt1_plus_scaled_Bt1,
typename B::G1 **final_C,
//hipStream_t &sA,
hipStream_t &sB1,
hipStream_t &sB2,
hipStream_t &sL)
{
auto t_gpu = t;
typedef typename ec_type<B>::ECp ECp;
typedef typename ec_type<B>::ECpe ECpe;
//ec_reduce_straus<ECp, C, R>(sA, out_A, A_mults, w, m + 1);
ec_reduce_straus<ECp, C, R>(sB1, out_B1, B1_mults, w, m + 1);
ec_reduce_straus<ECpe, C, 2*R>(sB2, out_B2, B2_mults, w, m + 1);
ec_reduce_straus<ECp, C, R>(sL, out_L, L_mults, w + (primary_input_size + 1) * ELT_LIMBS, m - primary_input_size);
print_time(t, "gpu launch");
(*evaluation_At) = B::multiexp_G1(B::input_w(inputs), B::params_A(params), m + 1);
//(*evaluation_Bt1) = B::multiexp_G1(B::input_w(inputs), B::params_B1(params), m + 1);
//(*evaluation_Bt2) = B::multiexp_G2(B::input_w(inputs), B::params_B2(params), m + 1);
// Do calculations relating to H on CPU after having set the GPU in
// motion
(*H) = B::params_H(params);
(*coefficients_for_H) =
compute_H<B>(d, B::input_ca(inputs), B::input_cb(inputs), B::input_cc(inputs));
(*evaluation_Ht) = B::multiexp_G1(*coefficients_for_H, *H, d);
print_time(t, "cpu 1");
hipDeviceSynchronize();
//hipStreamSynchronize(sA);
//G1 *evaluation_At = B::read_pt_ECp(out_A);
hipStreamSynchronize(sB1);
*evaluation_Bt1 = B::read_pt_ECp(out_B1);
hipStreamSynchronize(sB2);
*evaluation_Bt2 = B::read_pt_ECpe(out_B2);
hipStreamSynchronize(sL);
*evaluation_Lt = B::read_pt_ECp(out_L);
print_time(t_gpu, "gpu e2e");
*scaled_Bt1 = B::G1_scale(B::input_r(inputs), *evaluation_Bt1);
*Lt1_plus_scaled_Bt1 = B::G1_add(*evaluation_Lt, *scaled_Bt1);
*final_C = B::G1_add(*evaluation_Ht, *Lt1_plus_scaled_Bt1);
print_time(t, "cpu 2");
}
template< typename EC >
var_ptr copy_points_affine(size_t n, const void *src)
{
typedef typename EC::field_type FF;
static constexpr size_t coord_bytes = FF::DEGREE * ELT_BYTES;
static constexpr size_t aff_pt_bytes = 2 * coord_bytes;
size_t total_aff_bytes = n * aff_pt_bytes;
auto mem = allocate_memory(total_aff_bytes);
memcpy((void *)mem.get(), src, total_aff_bytes);
return mem;
}
var_ptr copy_field_elts(size_t n, const void *src)
{
static constexpr size_t field_elt_size = ELT_BYTES;
size_t total_size = n * field_elt_size;
auto mem = allocate_memory(total_size);
memcpy((void *)mem.get(), src, total_size);
return mem;
}
template <const int R, const int C, typename B>
void prove(
typename B::G1 **A_out,
typename B::G2 **B_out,
typename B::G1 **C_out,
size_t primary_input_size,
size_t d,
size_t m,
const var *w,
// const var *A_mults,
const var *B1_mults,
const var *B2_mults,
const var *L_mults,
typename B::groth16_params *params,
typename B::groth16_input *inputs)
{
typedef typename ec_type<B>::ECpe ECpe;
size_t space = ((m + 1) + R - 1) / R;
//auto out_A = allocate_memory(space * ECpe::NELTS * ELT_BYTES);
// auto A_mults_dev = copy_points_affine<ec_type<mnt4753_libsnark>::ECp>(((1U << C) - 1)*(m + 1), (void *) A_mults);
auto out_B1 = allocate_memory(space * ECpe::NELTS * ELT_BYTES);
auto B1_mults_dev = copy_points_affine<ec_type<mnt4753_libsnark>::ECp>(((1U << C) - 1)*(m + 1), (void *) B1_mults);
auto out_B2 = allocate_memory(space * ECpe::NELTS * ELT_BYTES);
auto B2_mults_dev = copy_points_affine<ec_type<mnt4753_libsnark>::ECpe>(((1U << C) - 1)*(m + 1), (void *) B2_mults);
auto out_L = allocate_memory(space * ECpe::NELTS * ELT_BYTES);
auto L_mults_dev = copy_points_affine<ec_type<mnt4753_libsnark>::ECp>(((1U << C) - 1)*(m - 1), (void *) L_mults);
auto w_dev = copy_field_elts(m + 1, (void *) w);
hipStream_t //sA,
sB1, sB2, sL;
typename B::vector_Fr *coefficients_for_H = NULL;
typename B::vector_G1 *H = NULL;
typename B::G1 *evaluation_Bt1 = NULL,
*evaluation_Ht = NULL,
*evaluation_Lt = NULL,
*scaled_Bt1 = NULL,
*Lt1_plus_scaled_Bt1 = NULL;
prove_aux<R, C, B>(
primary_input_size,
d,
m,
w_dev.get(),
now(),
// A_mults_dev.get(),
// out_A.get(),
B1_mults_dev.get(),
out_B1.get(),
B2_mults_dev.get(),
out_B2.get(),
L_mults_dev.get(),
out_L.get(),
params,
inputs,
&coefficients_for_H,
&H,
A_out,
&evaluation_Bt1,
B_out,
&evaluation_Ht,
&evaluation_Lt,
&scaled_Bt1,
&Lt1_plus_scaled_Bt1,
C_out,
//sA,
sB1,
sB2,
sL);
//hipStreamDestroy(sA);
hipStreamDestroy(sB1);
hipStreamDestroy(sB2);
hipStreamDestroy(sL);
B::delete_vector_G1(H);
B::delete_G1(evaluation_Bt1);
B::delete_G1(evaluation_Ht);
B::delete_G1(evaluation_Lt);
B::delete_G1(scaled_Bt1);
B::delete_G1(Lt1_plus_scaled_Bt1);
B::delete_vector_Fr(coefficients_for_H);
//B::groth16_output_write(evaluation_At, evaluation_Bt2, final_C, output_path);
}
void mnt4753_cuda_prove(
typename mnt4753_libsnark::G1 **A_out,
typename mnt4753_libsnark::G2 **B_out,
typename mnt4753_libsnark::G1 **C_out,
size_t primary_input_size,
size_t d,
size_t m,
const var *w,
// const var *A_mults,
const var *B1_mults,
const var *B2_mults,
const var *L_mults,
typename mnt4753_libsnark::groth16_params *params,
typename mnt4753_libsnark::groth16_input *inputs) {
static constexpr int R = 32;
static constexpr int C = 5;
prove<R, C, mnt4753_libsnark>(
A_out,
B_out,
C_out,
primary_input_size,
d,
m,
w,
// A_mults,
B1_mults,
B2_mults,
L_mults,
params,
inputs);
}
void mnt6753_cuda_prove(
typename mnt6753_libsnark::G1 **A_out,
typename mnt6753_libsnark::G2 **B_out,
typename mnt6753_libsnark::G1 **C_out,
size_t primary_input_size,
size_t d,
size_t m,
const var *w,
// const var *A_mults,
const var *B1_mults,
const var *B2_mults,
const var *L_mults,
typename mnt6753_libsnark::groth16_params *params,
typename mnt6753_libsnark::groth16_input *inputs) {
static constexpr int R = 32;
static constexpr int C = 5;
prove<R, C, mnt6753_libsnark>(
A_out,
B_out,
C_out,
primary_input_size,
d,
m,
w,
// A_mults,
B1_mults,
B2_mults,
L_mults,
params,
inputs);
}
extern "C" {
var *mnt4753_cuda_load_points_affine(size_t n, FILE *inputs) {
return load_points_affine<ec_type<mnt4753_libsnark>::ECp>(n, inputs).release();
}
var *mnt4753_cuda_load_extension_points_affine(size_t n, FILE *inputs) {
return load_points_affine<ec_type<mnt4753_libsnark>::ECpe>(n, inputs).release();
}
var *mnt6753_cuda_load_points_affine(size_t n, FILE *inputs) {
return load_points_affine<ec_type<mnt6753_libsnark>::ECp>(n, inputs).release();
}
var *mnt6753_cuda_load_extension_points_affine(size_t n, FILE *inputs) {
return load_points_affine<ec_type<mnt6753_libsnark>::ECpe>(n, inputs).release();
}
}
template <typename B>
void run_prover(
const char *params_path,
const char *input_path,
const char *output_path,
const char *preprocessed_path)
{
B::init_public_params();
size_t primary_input_size = 1;
auto beginning = now();
auto t = beginning;
FILE *params_file = fopen(params_path, "r");
size_t d = read_size_t(params_file);
size_t m = read_size_t(params_file);
rewind(params_file);
printf("d = %zu, m = %zu\n", d, m);
typedef typename ec_type<B>::ECp ECp;
typedef typename ec_type<B>::ECpe ECpe;
typedef typename B::G1 G1;
typedef typename B::G2 G2;
typedef typename B::vector_Fr vector_Fr;
typedef typename B::vector_G1 vector_G1;
static constexpr int R = 32;
static constexpr int C = 5;
FILE *preprocessed_file = fopen(preprocessed_path, "r");
size_t space = ((m + 1) + R - 1) / R;
//auto A_mults = load_points_affine<ECp>(((1U << C) - 1)*(m + 1), preprocessed_file);
//auto out_A = allocate_memory(space * ECpe::NELTS * ELT_BYTES);
auto B1_mults = load_points_affine<ECp>(((1U << C) - 1)*(m + 1), preprocessed_file);
auto out_B1 = allocate_memory(space * ECpe::NELTS * ELT_BYTES);
auto B2_mults = load_points_affine<ECpe>(((1U << C) - 1)*(m + 1), preprocessed_file);
auto out_B2 = allocate_memory(space * ECpe::NELTS * ELT_BYTES);
auto L_mults = load_points_affine<ECp>(((1U << C) - 1)*(m - 1), preprocessed_file);
auto out_L = allocate_memory(space * ECpe::NELTS * ELT_BYTES);
fclose(preprocessed_file);
print_time(t, "load preprocessing");
auto params = B::read_params(params_file, d, m);
fclose(params_file);
print_time(t, "load params");
auto t_main = t;
FILE *inputs_file = fopen(input_path, "r");
auto w_ = load_scalars(m + 1, inputs_file);
rewind(inputs_file);
auto inputs = B::read_input(inputs_file, d, m);
fclose(inputs_file);
print_time(t, "load inputs");
const var *w = w_.get();
hipStream_t //sA,
sB1, sB2, sL;
vector_Fr *coefficients_for_H = NULL;
vector_G1 *H = NULL;
G1 *evaluation_At = NULL,
*evaluation_Bt1 = NULL,
*evaluation_Ht = NULL,
*evaluation_Lt = NULL,
*scaled_Bt1 = NULL,
*Lt1_plus_scaled_Bt1 = NULL,
*final_C = NULL;
G2 *evaluation_Bt2 = NULL;
prove_aux<R, C, B>(
primary_input_size,
d,
m,
w,
t,
// A_mults.get(),
// out_A.get(),
B1_mults.get(),
out_B1.get(),
B2_mults.get(),
out_B2.get(),
L_mults.get(),
out_L.get(),
params,
inputs,
&coefficients_for_H,
&H,
&evaluation_At,
&evaluation_Bt1,
&evaluation_Bt2,
&evaluation_Ht,
&evaluation_Lt,
&scaled_Bt1,
&Lt1_plus_scaled_Bt1,
&final_C,
//sA,
sB1,
sB2,
sL);
B::groth16_output_write(evaluation_At, evaluation_Bt2, final_C, output_path);
print_time(t, "store");
print_time(t_main, "Total time from input to output: ");
//hipStreamDestroy(sA);
hipStreamDestroy(sB1);
hipStreamDestroy(sB2);
hipStreamDestroy(sL);
B::delete_vector_G1(H);
B::delete_G1(evaluation_At);
B::delete_G1(evaluation_Bt1);
B::delete_G2(evaluation_Bt2);
B::delete_G1(evaluation_Ht);
B::delete_G1(evaluation_Lt);
B::delete_G1(scaled_Bt1);
B::delete_G1(Lt1_plus_scaled_Bt1);
B::delete_vector_Fr(coefficients_for_H);
B::delete_groth16_input(inputs);
B::delete_groth16_params(params);
print_time(t, "cleanup");
print_time(beginning, "Total runtime (incl. file reads)");
}
| c646842768c89ff35c12d26ba2dbe545ea7607a8.cu | #include <string>
#include <chrono>
#define NDEBUG 1
#include <prover_reference_functions.hpp>
#include "multiexp/reduce.cu"
// This is where all the FFTs happen
// template over the bundle of types and functions.
// Overwrites ca!
template <typename B>
typename B::vector_Fr *compute_H(size_t d, typename B::vector_Fr *ca,
typename B::vector_Fr *cb,
typename B::vector_Fr *cc) {
auto domain = B::get_evaluation_domain(d + 1);
B::domain_iFFT(domain, ca);
B::domain_iFFT(domain, cb);
B::domain_cosetFFT(domain, ca);
B::domain_cosetFFT(domain, cb);
// Use ca to store H
auto H_tmp = ca;
size_t m = B::domain_get_m(domain);
// for i in 0 to m: H_tmp[i] *= cb[i]
B::vector_Fr_muleq(H_tmp, cb, m);
B::domain_iFFT(domain, cc);
B::domain_cosetFFT(domain, cc);
m = B::domain_get_m(domain);
// for i in 0 to m: H_tmp[i] -= cc[i]
B::vector_Fr_subeq(H_tmp, cc, m);
B::domain_divide_by_Z_on_coset(domain, H_tmp);
B::domain_icosetFFT(domain, H_tmp);
m = B::domain_get_m(domain);
typename B::vector_Fr *H_res = B::vector_Fr_zeros(m + 1);
B::vector_Fr_copy_into(H_tmp, H_res, m);
return H_res;
}
static size_t read_size_t(FILE* input) {
size_t n;
fread((void *) &n, sizeof(size_t), 1, input);
return n;
}
template< typename B >
struct ec_type;
template<>
struct ec_type<mnt4753_libsnark> {
typedef ECp_MNT4 ECp;
typedef ECp2_MNT4 ECpe;
};
template<>
struct ec_type<mnt6753_libsnark> {
typedef ECp_MNT6 ECp;
typedef ECp3_MNT6 ECpe;
};
void
check_trailing(FILE *f, const char *name) {
long bytes_remaining = 0;
while (fgetc(f) != EOF)
++bytes_remaining;
if (bytes_remaining > 0)
fprintf(stderr, "!! Trailing characters in \"%s\": %ld\n", name, bytes_remaining);
}
static inline auto now() -> decltype(std::chrono::high_resolution_clock::now()) {
return std::chrono::high_resolution_clock::now();
}
template<typename T>
void
print_time(T &t1, const char *str) {
auto t2 = std::chrono::high_resolution_clock::now();
auto tim = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count();
printf("%s: %ld ms\n", str, tim);
t1 = t2;
}
template <const int R, const int C, typename B>
void prove_aux(
size_t primary_input_size,
size_t d,
size_t m,
const var *w,
decltype(std::chrono::high_resolution_clock::now()) t,
// const var *A_mults,
// var *out_A,
const var *B1_mults,
var *out_B1,
const var *B2_mults,
var *out_B2,
const var *L_mults,
var *out_L,
typename B::groth16_params *params,
typename B::groth16_input *inputs,
typename B::vector_Fr **coefficients_for_H,
typename B::vector_G1 **H,
typename B::G1 **evaluation_At,
typename B::G1 **evaluation_Bt1,
typename B::G2 **evaluation_Bt2,
typename B::G1 **evaluation_Ht,
typename B::G1 **evaluation_Lt,
typename B::G1 **scaled_Bt1,
typename B::G1 **Lt1_plus_scaled_Bt1,
typename B::G1 **final_C,
//cudaStream_t &sA,
cudaStream_t &sB1,
cudaStream_t &sB2,
cudaStream_t &sL)
{
auto t_gpu = t;
typedef typename ec_type<B>::ECp ECp;
typedef typename ec_type<B>::ECpe ECpe;
//ec_reduce_straus<ECp, C, R>(sA, out_A, A_mults, w, m + 1);
ec_reduce_straus<ECp, C, R>(sB1, out_B1, B1_mults, w, m + 1);
ec_reduce_straus<ECpe, C, 2*R>(sB2, out_B2, B2_mults, w, m + 1);
ec_reduce_straus<ECp, C, R>(sL, out_L, L_mults, w + (primary_input_size + 1) * ELT_LIMBS, m - primary_input_size);
print_time(t, "gpu launch");
(*evaluation_At) = B::multiexp_G1(B::input_w(inputs), B::params_A(params), m + 1);
//(*evaluation_Bt1) = B::multiexp_G1(B::input_w(inputs), B::params_B1(params), m + 1);
//(*evaluation_Bt2) = B::multiexp_G2(B::input_w(inputs), B::params_B2(params), m + 1);
// Do calculations relating to H on CPU after having set the GPU in
// motion
(*H) = B::params_H(params);
(*coefficients_for_H) =
compute_H<B>(d, B::input_ca(inputs), B::input_cb(inputs), B::input_cc(inputs));
(*evaluation_Ht) = B::multiexp_G1(*coefficients_for_H, *H, d);
print_time(t, "cpu 1");
cudaDeviceSynchronize();
//cudaStreamSynchronize(sA);
//G1 *evaluation_At = B::read_pt_ECp(out_A);
cudaStreamSynchronize(sB1);
*evaluation_Bt1 = B::read_pt_ECp(out_B1);
cudaStreamSynchronize(sB2);
*evaluation_Bt2 = B::read_pt_ECpe(out_B2);
cudaStreamSynchronize(sL);
*evaluation_Lt = B::read_pt_ECp(out_L);
print_time(t_gpu, "gpu e2e");
*scaled_Bt1 = B::G1_scale(B::input_r(inputs), *evaluation_Bt1);
*Lt1_plus_scaled_Bt1 = B::G1_add(*evaluation_Lt, *scaled_Bt1);
*final_C = B::G1_add(*evaluation_Ht, *Lt1_plus_scaled_Bt1);
print_time(t, "cpu 2");
}
template< typename EC >
var_ptr copy_points_affine(size_t n, const void *src)
{
typedef typename EC::field_type FF;
static constexpr size_t coord_bytes = FF::DEGREE * ELT_BYTES;
static constexpr size_t aff_pt_bytes = 2 * coord_bytes;
size_t total_aff_bytes = n * aff_pt_bytes;
auto mem = allocate_memory(total_aff_bytes);
memcpy((void *)mem.get(), src, total_aff_bytes);
return mem;
}
var_ptr copy_field_elts(size_t n, const void *src)
{
static constexpr size_t field_elt_size = ELT_BYTES;
size_t total_size = n * field_elt_size;
auto mem = allocate_memory(total_size);
memcpy((void *)mem.get(), src, total_size);
return mem;
}
template <const int R, const int C, typename B>
void prove(
typename B::G1 **A_out,
typename B::G2 **B_out,
typename B::G1 **C_out,
size_t primary_input_size,
size_t d,
size_t m,
const var *w,
// const var *A_mults,
const var *B1_mults,
const var *B2_mults,
const var *L_mults,
typename B::groth16_params *params,
typename B::groth16_input *inputs)
{
typedef typename ec_type<B>::ECpe ECpe;
size_t space = ((m + 1) + R - 1) / R;
//auto out_A = allocate_memory(space * ECpe::NELTS * ELT_BYTES);
// auto A_mults_dev = copy_points_affine<ec_type<mnt4753_libsnark>::ECp>(((1U << C) - 1)*(m + 1), (void *) A_mults);
auto out_B1 = allocate_memory(space * ECpe::NELTS * ELT_BYTES);
auto B1_mults_dev = copy_points_affine<ec_type<mnt4753_libsnark>::ECp>(((1U << C) - 1)*(m + 1), (void *) B1_mults);
auto out_B2 = allocate_memory(space * ECpe::NELTS * ELT_BYTES);
auto B2_mults_dev = copy_points_affine<ec_type<mnt4753_libsnark>::ECpe>(((1U << C) - 1)*(m + 1), (void *) B2_mults);
auto out_L = allocate_memory(space * ECpe::NELTS * ELT_BYTES);
auto L_mults_dev = copy_points_affine<ec_type<mnt4753_libsnark>::ECp>(((1U << C) - 1)*(m - 1), (void *) L_mults);
auto w_dev = copy_field_elts(m + 1, (void *) w);
cudaStream_t //sA,
sB1, sB2, sL;
typename B::vector_Fr *coefficients_for_H = NULL;
typename B::vector_G1 *H = NULL;
typename B::G1 *evaluation_Bt1 = NULL,
*evaluation_Ht = NULL,
*evaluation_Lt = NULL,
*scaled_Bt1 = NULL,
*Lt1_plus_scaled_Bt1 = NULL;
prove_aux<R, C, B>(
primary_input_size,
d,
m,
w_dev.get(),
now(),
// A_mults_dev.get(),
// out_A.get(),
B1_mults_dev.get(),
out_B1.get(),
B2_mults_dev.get(),
out_B2.get(),
L_mults_dev.get(),
out_L.get(),
params,
inputs,
&coefficients_for_H,
&H,
A_out,
&evaluation_Bt1,
B_out,
&evaluation_Ht,
&evaluation_Lt,
&scaled_Bt1,
&Lt1_plus_scaled_Bt1,
C_out,
//sA,
sB1,
sB2,
sL);
//cudaStreamDestroy(sA);
cudaStreamDestroy(sB1);
cudaStreamDestroy(sB2);
cudaStreamDestroy(sL);
B::delete_vector_G1(H);
B::delete_G1(evaluation_Bt1);
B::delete_G1(evaluation_Ht);
B::delete_G1(evaluation_Lt);
B::delete_G1(scaled_Bt1);
B::delete_G1(Lt1_plus_scaled_Bt1);
B::delete_vector_Fr(coefficients_for_H);
//B::groth16_output_write(evaluation_At, evaluation_Bt2, final_C, output_path);
}
void mnt4753_cuda_prove(
typename mnt4753_libsnark::G1 **A_out,
typename mnt4753_libsnark::G2 **B_out,
typename mnt4753_libsnark::G1 **C_out,
size_t primary_input_size,
size_t d,
size_t m,
const var *w,
// const var *A_mults,
const var *B1_mults,
const var *B2_mults,
const var *L_mults,
typename mnt4753_libsnark::groth16_params *params,
typename mnt4753_libsnark::groth16_input *inputs) {
static constexpr int R = 32;
static constexpr int C = 5;
prove<R, C, mnt4753_libsnark>(
A_out,
B_out,
C_out,
primary_input_size,
d,
m,
w,
// A_mults,
B1_mults,
B2_mults,
L_mults,
params,
inputs);
}
void mnt6753_cuda_prove(
typename mnt6753_libsnark::G1 **A_out,
typename mnt6753_libsnark::G2 **B_out,
typename mnt6753_libsnark::G1 **C_out,
size_t primary_input_size,
size_t d,
size_t m,
const var *w,
// const var *A_mults,
const var *B1_mults,
const var *B2_mults,
const var *L_mults,
typename mnt6753_libsnark::groth16_params *params,
typename mnt6753_libsnark::groth16_input *inputs) {
static constexpr int R = 32;
static constexpr int C = 5;
prove<R, C, mnt6753_libsnark>(
A_out,
B_out,
C_out,
primary_input_size,
d,
m,
w,
// A_mults,
B1_mults,
B2_mults,
L_mults,
params,
inputs);
}
extern "C" {
var *mnt4753_cuda_load_points_affine(size_t n, FILE *inputs) {
return load_points_affine<ec_type<mnt4753_libsnark>::ECp>(n, inputs).release();
}
var *mnt4753_cuda_load_extension_points_affine(size_t n, FILE *inputs) {
return load_points_affine<ec_type<mnt4753_libsnark>::ECpe>(n, inputs).release();
}
var *mnt6753_cuda_load_points_affine(size_t n, FILE *inputs) {
return load_points_affine<ec_type<mnt6753_libsnark>::ECp>(n, inputs).release();
}
var *mnt6753_cuda_load_extension_points_affine(size_t n, FILE *inputs) {
return load_points_affine<ec_type<mnt6753_libsnark>::ECpe>(n, inputs).release();
}
}
template <typename B>
void run_prover(
const char *params_path,
const char *input_path,
const char *output_path,
const char *preprocessed_path)
{
B::init_public_params();
size_t primary_input_size = 1;
auto beginning = now();
auto t = beginning;
FILE *params_file = fopen(params_path, "r");
size_t d = read_size_t(params_file);
size_t m = read_size_t(params_file);
rewind(params_file);
printf("d = %zu, m = %zu\n", d, m);
typedef typename ec_type<B>::ECp ECp;
typedef typename ec_type<B>::ECpe ECpe;
typedef typename B::G1 G1;
typedef typename B::G2 G2;
typedef typename B::vector_Fr vector_Fr;
typedef typename B::vector_G1 vector_G1;
static constexpr int R = 32;
static constexpr int C = 5;
FILE *preprocessed_file = fopen(preprocessed_path, "r");
size_t space = ((m + 1) + R - 1) / R;
//auto A_mults = load_points_affine<ECp>(((1U << C) - 1)*(m + 1), preprocessed_file);
//auto out_A = allocate_memory(space * ECpe::NELTS * ELT_BYTES);
auto B1_mults = load_points_affine<ECp>(((1U << C) - 1)*(m + 1), preprocessed_file);
auto out_B1 = allocate_memory(space * ECpe::NELTS * ELT_BYTES);
auto B2_mults = load_points_affine<ECpe>(((1U << C) - 1)*(m + 1), preprocessed_file);
auto out_B2 = allocate_memory(space * ECpe::NELTS * ELT_BYTES);
auto L_mults = load_points_affine<ECp>(((1U << C) - 1)*(m - 1), preprocessed_file);
auto out_L = allocate_memory(space * ECpe::NELTS * ELT_BYTES);
fclose(preprocessed_file);
print_time(t, "load preprocessing");
auto params = B::read_params(params_file, d, m);
fclose(params_file);
print_time(t, "load params");
auto t_main = t;
FILE *inputs_file = fopen(input_path, "r");
auto w_ = load_scalars(m + 1, inputs_file);
rewind(inputs_file);
auto inputs = B::read_input(inputs_file, d, m);
fclose(inputs_file);
print_time(t, "load inputs");
const var *w = w_.get();
cudaStream_t //sA,
sB1, sB2, sL;
vector_Fr *coefficients_for_H = NULL;
vector_G1 *H = NULL;
G1 *evaluation_At = NULL,
*evaluation_Bt1 = NULL,
*evaluation_Ht = NULL,
*evaluation_Lt = NULL,
*scaled_Bt1 = NULL,
*Lt1_plus_scaled_Bt1 = NULL,
*final_C = NULL;
G2 *evaluation_Bt2 = NULL;
prove_aux<R, C, B>(
primary_input_size,
d,
m,
w,
t,
// A_mults.get(),
// out_A.get(),
B1_mults.get(),
out_B1.get(),
B2_mults.get(),
out_B2.get(),
L_mults.get(),
out_L.get(),
params,
inputs,
&coefficients_for_H,
&H,
&evaluation_At,
&evaluation_Bt1,
&evaluation_Bt2,
&evaluation_Ht,
&evaluation_Lt,
&scaled_Bt1,
&Lt1_plus_scaled_Bt1,
&final_C,
//sA,
sB1,
sB2,
sL);
B::groth16_output_write(evaluation_At, evaluation_Bt2, final_C, output_path);
print_time(t, "store");
print_time(t_main, "Total time from input to output: ");
//cudaStreamDestroy(sA);
cudaStreamDestroy(sB1);
cudaStreamDestroy(sB2);
cudaStreamDestroy(sL);
B::delete_vector_G1(H);
B::delete_G1(evaluation_At);
B::delete_G1(evaluation_Bt1);
B::delete_G2(evaluation_Bt2);
B::delete_G1(evaluation_Ht);
B::delete_G1(evaluation_Lt);
B::delete_G1(scaled_Bt1);
B::delete_G1(Lt1_plus_scaled_Bt1);
B::delete_vector_Fr(coefficients_for_H);
B::delete_groth16_input(inputs);
B::delete_groth16_params(params);
print_time(t, "cleanup");
print_time(beginning, "Total runtime (incl. file reads)");
}
|
11de65ce9da0f562cca3de69340e67c4a8df9c4f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/memory/pad.h"
#include "cudakernel/common/divmod_fast.h"
#include "cudakernel/common/memory_utils.h"
#include "cudakernel/math/math.h"
#include "ppl/nn/common/tensor_shape.h"
#include "ppl/common/retcode.h"
#include <hip/hip_fp16.h>
template <typename T>
__global__ void ppl_cukernel_range(
int64_t num_elems,
const T* start,
const T* delta,
T* output)
{
int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
output[index] = start[0] + index * delta[0];
}
template <>
__global__ void ppl_cukernel_range<half>(
int64_t num_elems,
const half* start,
const half* delta,
half* output)
{
typedef Math<half, half, half> OpMath;
int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
output[index] = OpMath::add(start[0], OpMath::mul(delta[0], __ll2half_rn(index)));
}
ppl::common::RetCode PPLCUDARangeForwardImp(
hipStream_t stream,
const void* start,
const void* delta,
ppl::nn::TensorShape* output_shape,
void* output)
{
int block_size = 256;
uint64_t num_elems = output_shape->GetElementsIncludingPadding();
int grid_size = (num_elems + block_size - 1) / block_size;
switch (output_shape->GetDataType()) {
case ppl::common::DATATYPE_FLOAT32:
hipLaunchKernelGGL(( ppl_cukernel_range<float>), dim3(grid_size), dim3(block_size), 0, stream, num_elems, (float*)start, (float*)delta, (float*)output);
break;
case ppl::common::DATATYPE_FLOAT16:
hipLaunchKernelGGL(( ppl_cukernel_range<half>), dim3(grid_size), dim3(block_size), 0, stream, num_elems, (half*)start, (half*)delta, (half*)output);
break;
case ppl::common::DATATYPE_INT64:
hipLaunchKernelGGL(( ppl_cukernel_range<int64_t>), dim3(grid_size), dim3(block_size), 0, stream, num_elems, (int64_t*)start, (int64_t*)delta, (int64_t*)output);
break;
default:
return ppl::common::RC_UNSUPPORTED;
}
// ppl_cukernel_range<<<grid_size, block_size, 0, stream>>>(num_elems, start, delta, (T*)output);
return ppl::common::RC_SUCCESS;
}
| 11de65ce9da0f562cca3de69340e67c4a8df9c4f.cu | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/memory/pad.h"
#include "cudakernel/common/divmod_fast.h"
#include "cudakernel/common/memory_utils.h"
#include "cudakernel/math/math.h"
#include "ppl/nn/common/tensor_shape.h"
#include "ppl/common/retcode.h"
#include <cuda_fp16.h>
template <typename T>
__global__ void ppl_cukernel_range(
int64_t num_elems,
const T* start,
const T* delta,
T* output)
{
int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
output[index] = start[0] + index * delta[0];
}
template <>
__global__ void ppl_cukernel_range<half>(
int64_t num_elems,
const half* start,
const half* delta,
half* output)
{
typedef Math<half, half, half> OpMath;
int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
output[index] = OpMath::add(start[0], OpMath::mul(delta[0], __ll2half_rn(index)));
}
ppl::common::RetCode PPLCUDARangeForwardImp(
cudaStream_t stream,
const void* start,
const void* delta,
ppl::nn::TensorShape* output_shape,
void* output)
{
int block_size = 256;
uint64_t num_elems = output_shape->GetElementsIncludingPadding();
int grid_size = (num_elems + block_size - 1) / block_size;
switch (output_shape->GetDataType()) {
case ppl::common::DATATYPE_FLOAT32:
ppl_cukernel_range<float><<<grid_size, block_size, 0, stream>>>(num_elems, (float*)start, (float*)delta, (float*)output);
break;
case ppl::common::DATATYPE_FLOAT16:
ppl_cukernel_range<half><<<grid_size, block_size, 0, stream>>>(num_elems, (half*)start, (half*)delta, (half*)output);
break;
case ppl::common::DATATYPE_INT64:
ppl_cukernel_range<int64_t><<<grid_size, block_size, 0, stream>>>(num_elems, (int64_t*)start, (int64_t*)delta, (int64_t*)output);
break;
default:
return ppl::common::RC_UNSUPPORTED;
}
// ppl_cukernel_range<<<grid_size, block_size, 0, stream>>>(num_elems, start, delta, (T*)output);
return ppl::common::RC_SUCCESS;
}
|
b3d1bbdefa7e6026421601ef610c70f01a59306b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2008 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
// includes, system
#include <stdio.h>
#include <assert.h>
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);
// Part 1 of 1: implement the kernel
// Reverse an array within a single thread block: thread t copies its input
// element into the mirrored slot blockDim.x - 1 - t. Only correct when the
// launch uses exactly one block that covers the whole array.
__global__ void reverseArrayBlock( int *d_output, int *d_input )
{
    int src = threadIdx.x;
    int dst = blockDim.x - 1 - src;
    d_output[dst] = d_input[src];
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
    // pointer for host memory and size
    int *h_a;
    int dimA = 256;
    // pointer for device memory
    int *d_b, *d_a;
    // grid/block size: ONE block of dimA threads — reverseArrayBlock reverses
    // within a single block, so numBlocks must remain 1 for correctness
    int numBlocks = 1;
    int numThreadsPerBlock = dimA;
    // allocate host and device memory, checking every allocation (the
    // original ignored the return codes of malloc/hipMalloc)
    size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
    h_a = (int *) malloc(memSize);
    if (h_a == NULL)
    {
        fprintf(stderr, "host malloc of %zu bytes failed\n", memSize);
        return 1;
    }
    if (hipMalloc( (void **) &d_a, memSize ) != hipSuccess ||
        hipMalloc( (void **) &d_b, memSize ) != hipSuccess)
    {
        fprintf(stderr, "device allocation failed\n");
        return 1;
    }
    // Initialize input array on host
    for (int i = 0; i < dimA; ++i)
    {
        h_a[i] = i;
    }
    // Copy host array to device array, checking the transfer
    if (hipMemcpy( d_a, h_a, memSize, hipMemcpyHostToDevice ) != hipSuccess)
    {
        fprintf(stderr, "host-to-device copy failed\n");
        return 1;
    }
    // launch kernel
    dim3 dimGrid(numBlocks);
    dim3 dimBlock(numThreadsPerBlock);
    hipLaunchKernelGGL(( reverseArrayBlock), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_b, d_a );
    // block until the device has completed
    hipDeviceSynchronize();
    // check if kernel launch/execution generated an error
    checkCUDAError("kernel invocation");
    // device to host copy, checked
    if (hipMemcpy( h_a, d_b, memSize, hipMemcpyDeviceToHost ) != hipSuccess)
    {
        fprintf(stderr, "device-to-host copy failed\n");
        return 1;
    }
    checkCUDAError("memcpy");
    // verify the data returned to the host: slot i must hold the mirrored
    // value dimA - 1 - i
    for (int i = 0; i < dimA; i++)
    {
        assert(h_a[i] == dimA - 1 - i );
    }
    // free device memory
    hipFree(d_a);
    hipFree(d_b);
    // free host memory
    free(h_a);
    // If the program makes it this far, then the results are correct and
    // there are no run-time errors. Good work!
    printf("Correct!\n");
    return 0;
}
// Fetch the runtime's last recorded error (hipGetLastError clears it) and, if
// any prior HIP call or kernel failed, print `msg` as context to stderr and
// terminate the process with EXIT_FAILURE.
void checkCUDAError(const char *msg)
{
    hipError_t err = hipGetLastError();
    if( hipSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
} | b3d1bbdefa7e6026421601ef610c70f01a59306b.cu | /*
* Copyright 1993-2008 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
// includes, system
#include <stdio.h>
#include <assert.h>
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);
// Part 1 of 1: implement the kernel
// Reverse an array within a single thread block: thread t writes its input
// element to the mirrored slot blockDim.x - 1 - t. Correct only when launched
// with exactly one block covering the whole array (as main() does here).
__global__ void reverseArrayBlock( int *d_output, int *d_input )
{
    int in_position = threadIdx.x;
    int out_position = blockDim.x - 1 - threadIdx.x;
    d_output[out_position] = d_input[in_position];
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
    // pointer for host memory and size
    int *h_a;
    int dimA = 256;
    // pointer for device memory
    int *d_b, *d_a;
    // grid/block size: ONE block of dimA threads — reverseArrayBlock reverses
    // within a single block, so numBlocks must remain 1 for correctness
    int numBlocks = 1;
    int numThreadsPerBlock = dimA;
    // allocate host and device memory, checking every allocation (the
    // original ignored the return codes of malloc/cudaMalloc)
    size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
    h_a = (int *) malloc(memSize);
    if (h_a == NULL)
    {
        fprintf(stderr, "host malloc of %zu bytes failed\n", memSize);
        return 1;
    }
    if (cudaMalloc( (void **) &d_a, memSize ) != cudaSuccess ||
        cudaMalloc( (void **) &d_b, memSize ) != cudaSuccess)
    {
        fprintf(stderr, "device allocation failed\n");
        return 1;
    }
    // Initialize input array on host
    for (int i = 0; i < dimA; ++i)
    {
        h_a[i] = i;
    }
    // Copy host array to device array, checking the transfer
    if (cudaMemcpy( d_a, h_a, memSize, cudaMemcpyHostToDevice ) != cudaSuccess)
    {
        fprintf(stderr, "host-to-device copy failed\n");
        return 1;
    }
    // launch kernel
    dim3 dimGrid(numBlocks);
    dim3 dimBlock(numThreadsPerBlock);
    reverseArrayBlock<<< dimGrid, dimBlock >>>( d_b, d_a );
    // block until the device has completed
    // (cudaThreadSynchronize is deprecated; cudaDeviceSynchronize replaces it)
    cudaDeviceSynchronize();
    // check if kernel launch/execution generated an error
    checkCUDAError("kernel invocation");
    // device to host copy, checked
    if (cudaMemcpy( h_a, d_b, memSize, cudaMemcpyDeviceToHost ) != cudaSuccess)
    {
        fprintf(stderr, "device-to-host copy failed\n");
        return 1;
    }
    checkCUDAError("memcpy");
    // verify the data returned to the host: slot i must hold the mirrored
    // value dimA - 1 - i
    for (int i = 0; i < dimA; i++)
    {
        assert(h_a[i] == dimA - 1 - i );
    }
    // free device memory
    cudaFree(d_a);
    cudaFree(d_b);
    // free host memory
    free(h_a);
    // If the program makes it this far, then the results are correct and
    // there are no run-time errors. Good work!
    printf("Correct!\n");
    return 0;
}
// Fetch the runtime's last recorded error and, if any prior CUDA call or
// kernel failed, print `msg` as context to stderr and terminate the process.
void checkCUDAError(const char *msg)
{
    cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
        exit(EXIT_FAILURE);
    }
}
73d17d7577a440191e2c6b46229226d75f71d9a8.hip | // !!! This is a file automatically generated by hipify!!!
/************************************************************************************
 * Author: Tao Rui
 * Version: V1.0 (Linux)
 * NOTE(review): the original header lost its non-ASCII (Chinese) text to an
 * encoding error. Surviving references: coefficient tables are read from the
 * "data" directory, observed field output goes to outputE_obs.txt, and the
 * grid/model constants come from global_variables.cpp — confirm against the
 * original file.
 ************************************************************************************/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#include <string.h>
#include "global_variables.cpp"
/************************************************************************************
*
************************************************************************************/
dim3 blockUHyz(nz);
dim3 gridUHyz(npml, nx - 1); //npml: blockIdx.x nx-1: blockIdx.y
// PML auxiliary term UHyz: accumulates dEz/dy inside the two y-side PML slabs
// (thickness npml) for use in the Hx update.
// Extents per the original notes (confirm against global_variables.cpp):
//   UHyz (nx+1, ny, nz); RBHyz/RAHyz (nx-1, 2*npml, nz); Ez (nx+1, ny+1, nz)
// Thread domain (nx-1) x npml x nz; lid* index the low-y slab, rid* the
// mirrored high-y slab. MATLAB reference:
//   UHyz(2:nx, [1:npml ny-npml+1:ny], :) = RBHyz .* UHyz(2:nx, [1:npml ny-npml+1:ny], :)
//     + RAHyz ./ dy .* (Ez(2:nx, [2:npml+1 ny-npml+2:ny+1], :) - Ez(2:nx, [1:npml ny-npml+1:ny], :));
__global__ void calcUHyz(float *UHyz, float *RBHyz, float *RAHyz, float *Ez, const float dy)
{
    int ix = blockIdx.y; // ix in [0, nx - 1)
    int iy = blockIdx.x; // iy in [0, npml)
    int iz = threadIdx.x; // iz in [0, nz)
    int lid0 = (ix + 1)*ny*nz + iy * nz + iz;
    int rid0 = (ix + 1)*ny*nz + (iy + ny - npml) * nz + iz;
    int lid1 = ix * (2 * npml)*nz + iy * nz + iz;
    int rid1 = ix * (2 * npml)*nz + (iy + npml) * nz + iz;
    int lid2 = lid1;
    int rid2 = rid1;
    int lid3 = (ix + 1)*(ny + 1)*nz + (iy + 1)*nz + iz;
    int rid3 = (ix + 1)*(ny + 1)*nz + (iy + ny - npml + 1)*nz + iz;
    int lid4 = (ix + 1)*(ny + 1)*nz + iy * nz + iz;
    int rid4 = (ix + 1)*(ny + 1)*nz + (iy + ny - npml)*nz + iz;
    UHyz[lid0] = UHyz[lid0] * RBHyz[lid1] + RAHyz[lid2] * (Ez[lid3] - Ez[lid4]) / dy;
    UHyz[rid0] = UHyz[rid0] * RBHyz[rid1] + RAHyz[rid2] * (Ez[rid3] - Ez[rid4]) / dy;
}
dim3 blockUHzy(npml);
dim3 gridUHzy(nx - 1, ny);
// PML auxiliary term UHzy: accumulates dEy/dz inside the two z-side PML slabs
// for use in the Hx update.
// Extents per the original notes:
//   UHzy (nx+1, ny, nz); RBHzy/RAHzy (nx-1, ny, 2*npml); Ey (nx+1, ny, nz+1)
// Thread domain (nx-1) x ny x npml; lid* = low-z slab, rid* = high-z slab.
// MATLAB reference:
//   UHzy(2:nx, :, [1:npml nz-npml+1:nz]) = RBHzy .* UHzy(2:nx, :, [1:npml nz-npml+1:nz])
//     + RAHzy ./ dz .* (Ey(2:nx, :, [2:npml+1 nz-npml+2:nz+1]) - Ey(2:nx, :, [1:npml nz-npml+1:nz]));
__global__ void calcUHzy(float *UHzy, float *RBHzy, float *RAHzy, float *Ey, const float dz)
{
    int ix = blockIdx.x; // ix in [0, nx - 1)
    int iy = blockIdx.y; // iy in [0, ny)
    int iz = threadIdx.x; // iz in [0, npml)  (was mislabeled "ix" in the original comment)
    int lid0 = (ix + 1) * ny * nz + iy * nz + iz;
    int rid0 = (ix + 1) * ny * nz + iy * nz + iz + nz - npml;
    int lid1 = ix * ny * (2 * npml) + iy * (2 * npml) + iz;
    int rid1 = ix * ny * (2 * npml) + iy * (2 * npml) + iz + npml;
    int lid2 = lid1;
    int rid2 = rid1;
    int lid4 = (ix + 1) * ny * (nz + 1) + iy * (nz + 1) + iz;
    int rid4 = (ix + 1) * ny * (nz + 1) + iy * (nz + 1) + iz + nz - npml;
    int lid3 = lid4 + 1;
    int rid3 = rid4 + 1;
    UHzy[lid0] = UHzy[lid0] * RBHzy[lid1] + RAHzy[lid2] * (Ey[lid3] - Ey[lid4]) / dz;
    UHzy[rid0] = UHzy[rid0] * RBHzy[rid1] + RAHzy[rid2] * (Ey[rid3] - Ey[rid4]) / dz;
}
dim3 blockUHzx(npml);
dim3 gridUHzx(nx, ny - 1);
// PML auxiliary term UHzx: accumulates dEx/dz inside the two z-side PML slabs
// for use in the Hy update.
// Extents per the original notes:
//   UHzx (nx, ny+1, nz); RBHzx/RAHzx (nx, ny-1, 2*npml); Ex (nx, ny+1, nz+1)
// Thread domain nx x (ny-1) x npml; lid* = low-z slab, rid* = high-z slab.
// MATLAB reference:
//   UHzx(:, 2:ny, [1:npml nz-npml+1:nz]) = RBHzx .* UHzx(:, 2:ny, [1:npml nz-npml+1:nz])
//     + RAHzx ./ dz .* (Ex(:, 2:ny, [2:npml+1 nz-npml+2:nz+1]) - Ex(:, 2:ny, [1:npml nz-npml+1:nz]));
__global__ void calcUHzx(float *UHzx, float *RBHzx, float *RAHzx, float *Ex, const float dz)
{
    int ix = blockIdx.x; // ix in [0, nx)
    int iy = blockIdx.y; // iy in [0, ny - 1)
    int iz = threadIdx.x; // iz in [0, npml)
    int lid0 = ix * (ny + 1) * nz + (iy + 1) * nz + iz;
    int rid0 = ix * (ny + 1) * nz + (iy + 1) * nz + iz + nz - npml;
    int lid1 = ix * (ny - 1) * (2 * npml) + iy * (2 * npml) + iz;
    int rid1 = ix * (ny - 1) * (2 * npml) + iy * (2 * npml) + iz + npml;
    int lid2 = lid1;
    int rid2 = rid1;
    int lid4 = ix * (ny + 1) * (nz + 1) + (iy + 1) * (nz + 1) + iz;
    int rid4 = ix * (ny + 1) * (nz + 1) + (iy + 1) * (nz + 1) + iz + nz - npml;
    int lid3 = lid4 + 1;
    int rid3 = rid4 + 1;
    UHzx[lid0] = UHzx[lid0] * RBHzx[lid1] + RAHzx[lid2] * (Ex[lid3] - Ex[lid4]) / dz;
    UHzx[rid0] = UHzx[rid0] * RBHzx[rid1] + RAHzx[rid2] * (Ex[rid3] - Ex[rid4]) / dz;
}
dim3 blockUHxz(nz);
dim3 gridUHxz(npml, ny - 1);
// PML auxiliary term UHxz: accumulates dEz/dx inside the two x-side PML slabs
// for use in the Hy update.
// Extents per the original notes:
//   UHxz (nx, ny+1, nz); RBHxz/RAHxz (2*npml, ny-1, nz); Ez (nx+1, ny+1, nz)
// Thread domain npml x (ny-1) x nz; lid* = low-x slab, rid* = high-x slab.
// MATLAB reference:
//   UHxz([1:npml nx-npml+1:nx], 2:ny, :) = RBHxz .* UHxz([1:npml nx-npml+1:nx], 2:ny, :)
//     + RAHxz ./ dx .* (Ez([2:npml+1 nx-npml+2:nx+1], 2:ny, :) - Ez([1:npml nx-npml+1:nx], 2:ny, :));
__global__ void calcUHxz(float *UHxz, float *RBHxz, float *RAHxz, float *Ez, const float dx)
{
    int ix = blockIdx.x; // ix in [0, npml)
    int iy = blockIdx.y; // iy in [0, ny - 1)
    int iz = threadIdx.x; // iz in [0, nz)
    int lid0 = ix * (ny + 1) * nz + (iy + 1) * nz + iz;
    int rid0 = (ix + nx - npml) * (ny + 1) * nz + (iy + 1) * nz + iz;
    int lid1 = ix * (ny - 1) * nz + iy * nz + iz;
    int rid1 = (ix + npml) * (ny - 1) * nz + iy * nz + iz;
    int lid2 = lid1;
    int rid2 = rid1;
    int lid4 = ix * (ny + 1) * nz + (iy + 1) * nz + iz;
    int rid4 = (ix + nx - npml) * (ny + 1) * nz + (iy + 1) * nz + iz;
    int lid3 = lid4 + (ny + 1) * nz;
    int rid3 = rid4 + (ny + 1) * nz;
    UHxz[lid0] = UHxz[lid0] * RBHxz[lid1] + RAHxz[lid2] * (Ez[lid3] - Ez[lid4]) / dx;
    UHxz[rid0] = UHxz[rid0] * RBHxz[rid1] + RAHxz[rid2] * (Ez[rid3] - Ez[rid4]) / dx;
}
dim3 blockUHxy(nz - 1);
dim3 gridUHxy(npml, ny);
// PML auxiliary term UHxy: accumulates dEy/dx inside the two x-side PML slabs
// for use in the Hz update.
// Extents per the original notes:
//   UHxy (nx, ny, nz+1); RBHxy/RAHxy (2*npml, ny, nz-1); Ey (nx+1, ny, nz+1)
// Thread domain npml x ny x (nz-1); lid* = low-x slab, rid* = high-x slab.
// MATLAB reference:
//   UHxy([1:npml nx-npml+1:nx], :, 2:nz) = RBHxy .* UHxy([1:npml nx-npml+1:nx], :, 2:nz)
//     + RAHxy ./ dx .* (Ey([2:npml+1 nx-npml+2:nx+1], :, 2:nz) - Ey([1:npml nx-npml+1:nx], :, 2:nz));
__global__ void calcUHxy(float *UHxy, float *RBHxy, float *RAHxy, float *Ey, const float dx)
{
    int ix = blockIdx.x; // ix in [0, npml)
    int iy = blockIdx.y; // iy in [0, ny)
    int iz = threadIdx.x; // iz in [0, nz - 1)
    int lid0 = ix * ny * (nz + 1) + iy * (nz + 1) + iz + 1;
    int rid0 = (ix + nx - npml) * ny * (nz + 1) + iy * (nz + 1) + iz + 1;
    int lid1 = ix * ny * (nz - 1) + iy * (nz - 1) + iz;
    int rid1 = (ix + npml) * ny * (nz - 1) + iy * (nz - 1) + iz;
    int lid2 = lid1;
    int rid2 = rid1;
    int lid4 = ix * ny * (nz + 1) + iy * (nz + 1) + iz + 1;
    int rid4 = (ix + nx - npml) * ny * (nz + 1) + iy * (nz + 1) + iz + 1;
    int lid3 = lid4 + ny * (nz + 1);
    int rid3 = rid4 + ny * (nz + 1);
    UHxy[lid0] = UHxy[lid0] * RBHxy[lid1] + RAHxy[lid2] * (Ey[lid3] - Ey[lid4]) / dx;
    UHxy[rid0] = UHxy[rid0] * RBHxy[rid1] + RAHxy[rid2] * (Ey[rid3] - Ey[rid4]) / dx;
}
dim3 blockUHyx(nz - 1);
dim3 gridUHyx(npml, nx);
// PML auxiliary term UHyx: accumulates dEx/dy inside the two y-side PML slabs
// for use in the Hz update.
// Extents per the original notes:
//   UHyx (nx, ny, nz+1); RBHyx/RAHyx (nx, 2*npml, nz-1); Ex (nx, ny+1, nz+1)
// Thread domain nx x npml x (nz-1); lid* = low-y slab, rid* = high-y slab.
// MATLAB reference:
//   UHyx(:, [1:npml ny-npml+1:ny], 2:nz) = RBHyx .* UHyx(:, [1:npml ny-npml+1:ny], 2:nz)
//     + RAHyx ./ dy .* (Ex(:, [2:npml+1 ny-npml+2:ny+1], 2:nz) - Ex(:, [1:npml ny-npml+1:ny], 2:nz));
__global__ void calcUHyx(float *UHyx, float *RBHyx, float *RAHyx, float *Ex, const float dy)
{
    int ix = blockIdx.y; // ix in [0, nx)
    int iy = blockIdx.x; // iy in [0, npml)
    int iz = threadIdx.x; // iz in [0, nz - 1)
    int lid0 = ix * ny * (nz + 1) + iy * (nz + 1) + iz + 1;
    int rid0 = ix * ny * (nz + 1) + (iy + ny - npml) * (nz + 1) + iz + 1;
    int lid1 = ix * (2 * npml) * (nz - 1) + iy * (nz - 1) + iz;
    int rid1 = ix * (2 * npml) * (nz - 1) + (iy + npml) * (nz - 1) + iz;
    int lid2 = lid1;
    int rid2 = rid1;
    int lid4 = ix * (ny + 1) * (nz + 1) + iy * (nz + 1) + iz + 1;
    int rid4 = ix * (ny + 1) * (nz + 1) + (iy + ny - npml) * (nz + 1) + iz + 1;
    int lid3 = lid4 + (nz + 1);
    int rid3 = rid4 + (nz + 1);
    UHyx[lid0] = UHyx[lid0] * RBHyx[lid1] + RAHyx[lid2] * (Ex[lid3] - Ex[lid4]) / dy;
    UHyx[rid0] = UHyx[rid0] * RBHyx[rid1] + RAHyx[rid2] * (Ex[rid3] - Ex[rid4]) / dy;
}
dim3 blockHx(nz);
dim3 gridHx(nx - 1, ny);
// Hx update over the interior Hx(2:nx, :, :); thread domain (nx-1) x ny x nz.
// Curl terms dEz/dy and dEy/dz are scaled by the stretch factors ky_Hx/kz_Hx,
// and the PML accumulators UHyz/UHzy (same (nx+1, ny, nz) layout as Hx) are
// folded in with the CQHx coefficient.
__global__ void calcHx(float *Hx, float *CPHx, float *CQHx, float *ky_Hx, float *kz_Hx, float *Ez, float *Ey, float *UHyz, float *UHzy, const float dy, const float dz)
{
    int ix = blockIdx.x + 1; // ix in [1, nx)
    int iy = blockIdx.y;     // iy in [0, ny)
    int iz = threadIdx.x;    // iz in [0, nz)
    int idx = ix * ny * nz + iy * nz + iz;
    int idxEz = ix * (ny + 1)*nz + iy * nz + iz;
    int idxEy = ix * ny * (nz + 1) + iy * (nz + 1) + iz;
    int deltaEz = nz; // +1 step in y within Ez's (nx+1, ny+1, nz) layout
    int deltaEy = 1;  // +1 step in z within Ey's (nx+1, ny, nz+1) layout
    float CQH = CQHx[idx];
    Hx[idx] = Hx[idx] * CPHx[idx]
        - CQH / ky_Hx[idx] * (Ez[idxEz + deltaEz] - Ez[idxEz]) / dy
        + CQH / kz_Hx[idx] * (Ey[idxEy + deltaEy] - Ey[idxEy]) / dz
        - CQH * UHyz[idx]
        + CQH * UHzy[idx];
}
dim3 blockHy(nz);
dim3 gridHy(nx, ny - 1);
// Hy update over the interior Hy(:, 2:ny, :); thread domain nx x (ny-1) x nz.
// Curl terms dEx/dz and dEz/dx are scaled by kz_Hy/kx_Hy, and the PML
// accumulators UHzx/UHxz (same (nx, ny+1, nz) layout as Hy) are folded in.
__global__ void calcHy(float *Hy, float *CPHy, float *CQHy, float *kz_Hy, float *kx_Hy, float *Ex, float *Ez, float *UHzx, float *UHxz, const float dz, const float dx)
{
    int ix = blockIdx.x;     // ix in [0, nx)
    int iy = blockIdx.y + 1; // iy in [1, ny)
    int iz = threadIdx.x;    // iz in [0, nz)
    int idx = ix * (ny + 1)*nz + iy * nz + iz;
    int idxEx = ix * (ny + 1)*(nz + 1) + iy * (nz + 1) + iz;
    int idxEz = ix * (ny + 1)*nz + iy * nz + iz;
    int deltaEx = 1;             // +1 step in z within Ex's layout
    int deltaEz = (ny + 1)*nz;   // +1 step in x within Ez's layout
    float CQH = CQHy[idx];
    Hy[idx] = Hy[idx] * CPHy[idx]
        - CQH / kz_Hy[idx] * (Ex[idxEx + deltaEx] - Ex[idxEx]) / dz
        + CQH / kx_Hy[idx] * (Ez[idxEz + deltaEz] - Ez[idxEz]) / dx
        - CQH * UHzx[idx]
        + CQH * UHxz[idx];
}
dim3 blockHz(nz - 1);
dim3 gridHz(nx, ny);
// Hz update over the interior Hz(:, :, 2:nz); thread domain nx x ny x (nz-1).
// Hz is stored with extent (nx, ny, nz+1) per the original notes. Curl terms
// dEy/dx and dEx/dy are scaled by kx_Hz/ky_Hz, and the PML accumulators
// UHxy/UHyx (same layout as Hz) are folded in.
__global__ void calcHz(float *Hz, float *CPHz, float *CQHz, float *kx_Hz, float *ky_Hz, float *Ey, float *Ex, float *UHxy, float *UHyx, const float dx, const float dy)
{
    int ix = blockIdx.x;      // ix in [0, nx)
    int iy = blockIdx.y;      // iy in [0, ny)
    int iz = threadIdx.x + 1; // iz in [1, nz)
    int idx = ix * ny * (nz + 1) + iy * (nz + 1) + iz;
    int idxEy = ix * ny * (nz + 1) + iy * (nz + 1) + iz;
    int idxEx = ix * (ny + 1) * (nz + 1) + iy * (nz + 1) + iz;
    int deltaEy = ny * (nz + 1); // +1 step in x within Ey's layout
    int deltaEx = nz + 1;        // +1 step in y within Ex's layout
    float CQH = CQHz[idx];
    Hz[idx] = Hz[idx] * CPHz[idx]
        - CQH / kx_Hz[idx] * (Ey[idxEy + deltaEy] - Ey[idxEy]) / dx
        + CQH / ky_Hz[idx] * (Ex[idxEx + deltaEx] - Ex[idxEx]) / dy
        - CQH * UHxy[idx]
        + CQH * UHyx[idx];
}
dim3 blockUEyz(nz - 1);
dim3 gridUEyz(npml - 1, nx);
// PML auxiliary term UEyz: accumulates dHz/dy in the two y-side PML slabs
// (interior thickness npml-1) for the Ex update.
// Extents per the original notes:
//   UEyz (nx, ny+1, nz+1); RBEyz/RAEyz (nx, 2*(npml-1), nz-1); Hz (nx, ny, nz+1)
// Thread domain nx x (npml-1) x (nz-1); lid* = low-y slab, rid* = high-y slab.
// MATLAB reference:
//   UEyz(:, [2:npml ny-npml+2:ny], 2:nz) = RBEyz .* UEyz(:, [2:npml ny-npml+2:ny], 2:nz)
//     + RAEyz ./ dy .* (Hz(:, [2:npml ny-npml+2:ny], 2:nz) - Hz(:, [1:npml-1 ny-npml+1:ny-1], 2:nz));
__global__ void calcUEyz(float *UEyz, float *RBEyz, float *RAEyz, float *Hz, const float dy)
{
    int ix = blockIdx.y; // ix in [0, nx)
    int iy = blockIdx.x; // iy in [0, npml - 1)
    int iz = threadIdx.x; // iz in [0, nz - 1)
    int lid0 = ix * (ny + 1) * (nz + 1) + (iy + 1) * (nz + 1) + (iz + 1);
    int rid0 = ix * (ny + 1) * (nz + 1) + (iy + 1 + ny - npml) * (nz + 1) + (iz + 1);
    int lid1 = ix * (2 * (npml - 1)) * (nz - 1) + iy * (nz - 1) + iz;
    int rid1 = ix * (2 * (npml - 1)) * (nz - 1) + (iy + npml - 1) * (nz - 1) + iz;
    int lid2 = lid1;
    int rid2 = rid1;
    int lid4 = ix * ny * (nz + 1) + iy * (nz + 1) + (iz + 1);
    int rid4 = ix * ny * (nz + 1) + (iy + ny - npml) * (nz + 1) + (iz + 1);
    int lid3 = lid4 + (nz + 1);
    int rid3 = rid4 + (nz + 1);
    UEyz[lid0] = UEyz[lid0] * RBEyz[lid1] + RAEyz[lid2] * (Hz[lid3] - Hz[lid4]) / dy;
    UEyz[rid0] = UEyz[rid0] * RBEyz[rid1] + RAEyz[rid2] * (Hz[rid3] - Hz[rid4]) / dy;
}
dim3 blockUEyx(nz);
dim3 gridUEyx(npml - 1, nx - 1);
// PML auxiliary term UEyx: accumulates dHx/dy in the two y-side PML slabs for
// the Ez update.
// Extents (per original notes; RBEyx/RAEyx confirmed (nx-1, 2*(npml-1), nz)
// by readAllData):
//   UEyx (nx+1, ny+1, nz); RBEyx/RAEyx (nx-1, 2*(npml-1), nz); Hx (nx+1, ny, nz)
// MATLAB reference (element domain (nx-1) x (npml-1) x nz):
//   UEyx(2:nx, [2:npml ny-npml+2:ny], :) = RBEyx .* UEyx(2:nx, [2:npml ny-npml+2:ny], :)
//     + RAEyx ./ dy .* (Hx(2:nx, [2:npml ny-npml+2:ny], :) - Hx(2:nx, [1:npml-1 ny-npml+1:ny-1], :));
// BUGFIX: the launch shape was grid(npml-1, nx) x block(nz-1), which read
// RBEyx/RAEyx one x-row past the end (their first extent is nx-1), updated the
// extra plane x = nx that the MATLAB reference never touches, and skipped the
// last z index. Corrected to grid(npml-1, nx-1) x block(nz) to match the
// element domain above.
__global__ void calcUEyx(float *UEyx, float *RBEyx, float *RAEyx, float *Hx, const float dy)
{
    int ix = blockIdx.y; // ix in [0, nx - 1)
    int iy = blockIdx.x; // iy in [0, npml - 1)
    int iz = threadIdx.x; // iz in [0, nz)
    int lid0 = (ix + 1) * (ny + 1) * nz + (iy + 1) * nz + iz;
    int rid0 = (ix + 1) * (ny + 1) * nz + (iy + 1 + ny - npml) * nz + iz;
    int lid1 = ix * (2 * (npml - 1)) * nz + iy * nz + iz;
    int rid1 = ix * (2 * (npml - 1)) * nz + (iy + npml - 1) * nz + iz;
    int lid2 = lid1;
    int rid2 = rid1;
    int lid4 = (ix + 1) * ny * nz + iy * nz + iz;
    int rid4 = (ix + 1) * ny * nz + (iy + ny - npml) * nz + iz;
    int lid3 = lid4 + nz;
    int rid3 = rid4 + nz;
    UEyx[lid0] = UEyx[lid0] * RBEyx[lid1] + RAEyx[lid2] * (Hx[lid3] - Hx[lid4]) / dy;
    UEyx[rid0] = UEyx[rid0] * RBEyx[rid1] + RAEyx[rid2] * (Hx[rid3] - Hx[rid4]) / dy;
}
dim3 blockUExy(nz);
dim3 gridUExy(npml - 1, ny - 1);
// PML auxiliary term UExy: accumulates dHy/dx in the two x-side PML slabs for
// the Ez update.
// Extents per the original notes:
//   UExy (nx+1, ny+1, nz); RBExy/RAExy (2*(npml-1), ny-1, nz); Hy (nx, ny+1, nz)
// Thread domain (npml-1) x (ny-1) x nz; lid* = low-x slab, rid* = high-x slab.
// MATLAB reference:
//   UExy([2:npml nx-npml+2:nx], 2:ny, :) = RBExy .* UExy([2:npml nx-npml+2:nx], 2:ny, :)
//     + RAExy ./ dx .* (Hy([2:npml nx-npml+2:nx], 2:ny, :) - Hy([1:npml-1 nx-npml+1:nx-1], 2:ny, :));
__global__ void calcUExy(float *UExy, float *RBExy, float *RAExy, float *Hy, const float dx)
{
    int ix = blockIdx.x; // ix in [0, npml - 1)
    int iy = blockIdx.y; // iy in [0, ny - 1)
    int iz = threadIdx.x; // iz in [0, nz)
    int lid0 = (ix + 1) * (ny + 1) * nz + (iy + 1) * nz + iz;
    int rid0 = (ix + 1 + nx - npml) * (ny + 1) * nz + (iy + 1) * nz + iz;
    int lid1 = ix * (ny - 1) * nz + iy * nz + iz;
    int rid1 = (ix + npml - 1) * (ny - 1) * nz + iy * nz + iz;
    int lid2 = lid1;
    int rid2 = rid1;
    int lid4 = ix * (ny + 1) * nz + (iy + 1) * nz + iz;
    int rid4 = (ix + nx - npml) * (ny + 1) * nz + (iy + 1) * nz + iz;
    int lid3 = lid4 + (ny + 1) * nz;
    int rid3 = rid4 + (ny + 1) * nz;
    UExy[lid0] = UExy[lid0] * RBExy[lid1] + RAExy[lid2] * (Hy[lid3] - Hy[lid4]) / dx;
    UExy[rid0] = UExy[rid0] * RBExy[rid1] + RAExy[rid2] * (Hy[rid3] - Hy[rid4]) / dx;
}
dim3 blockUExz(nz - 1);
dim3 gridUExz(npml - 1, ny);
// PML auxiliary term UExz: accumulates dHz/dx in the two x-side PML slabs for
// the Ey update.
// Extents per the original notes:
//   UExz (nx+1, ny, nz+1); RBExz/RAExz (2*(npml-1), ny, nz-1); Hz (nx, ny, nz+1)
// Thread domain (npml-1) x ny x (nz-1); lid* = low-x slab, rid* = high-x slab.
// MATLAB reference:
//   UExz([2:npml nx-npml+2:nx], :, 2:nz) = RBExz .* UExz([2:npml nx-npml+2:nx], :, 2:nz)
//     + RAExz ./ dx .* (Hz([2:npml nx-npml+2:nx], :, 2:nz) - Hz([1:npml-1 nx-npml+1:nx-1], :, 2:nz));
__global__ void calcUExz(float *UExz, float *RBExz, float *RAExz, float *Hz, const float dx)
{
    int ix = blockIdx.x; // ix in [0, npml - 1)
    int iy = blockIdx.y; // iy in [0, ny)
    int iz = threadIdx.x; // iz in [0, nz - 1)
    int lid0 = (ix + 1) * ny * (nz + 1) + iy * (nz + 1) + (iz + 1);
    int rid0 = (ix + 1 + nx - npml) * ny * (nz + 1) + iy * (nz + 1) + (iz + 1);
    int lid1 = ix * ny * (nz - 1) + iy * (nz - 1) + iz;
    int rid1 = (ix + npml - 1) * ny * (nz - 1) + iy * (nz - 1) + iz;
    int lid2 = lid1;
    int rid2 = rid1;
    int lid4 = ix * ny * (nz + 1) + iy * (nz + 1) + (iz + 1);
    int rid4 = (ix + nx - npml) * ny * (nz + 1) + iy * (nz + 1) + (iz + 1);
    int lid3 = lid4 + ny * (nz + 1);
    int rid3 = rid4 + ny * (nz + 1);
    UExz[lid0] = UExz[lid0] * RBExz[lid1] + RAExz[lid2] * (Hz[lid3] - Hz[lid4]) / dx;
    UExz[rid0] = UExz[rid0] * RBExz[rid1] + RAExz[rid2] * (Hz[rid3] - Hz[rid4]) / dx;
}
dim3 blockUEzx(npml - 1);
dim3 gridUEzx(nx - 1, ny);
// PML auxiliary term UEzx: accumulates dHx/dz in the two z-side PML slabs for
// the Ey update.
// Extents per the original notes:
//   UEzx (nx+1, ny, nz+1); RBEzx/RAEzx (nx-1, ny, 2*(npml-1)); Hx (nx+1, ny, nz)
// Thread domain (nx-1) x ny x (npml-1); lid* = low-z slab, rid* = high-z slab.
// MATLAB reference:
//   UEzx(2:nx, :, [2:npml nz-npml+2:nz]) = RBEzx .* UEzx(2:nx, :, [2:npml nz-npml+2:nz])
//     + RAEzx ./ dz .* (Hx(2:nx, :, [2:npml nz-npml+2:nz]) - Hx(2:nx, :, [1:npml-1 nz-npml+1:nz-1]));
__global__ void calcUEzx(float *UEzx, float *RBEzx, float *RAEzx, float *Hx, const float dz)
{
    // NOTE: the original range comments here were wrong; corrected to match
    // gridUEzx(nx - 1, ny) and blockUEzx(npml - 1).
    int ix = blockIdx.x; // ix in [0, nx - 1)
    int iy = blockIdx.y; // iy in [0, ny)
    int iz = threadIdx.x; // iz in [0, npml - 1)
    int lid0 = (ix + 1) * ny * (nz + 1) + iy * (nz + 1) + (iz + 1);
    int rid0 = (ix + 1) * ny * (nz + 1) + iy * (nz + 1) + (iz + 1 + nz - npml);
    int lid1 = ix * ny * (2 * (npml - 1)) + iy * (2 * (npml - 1)) + iz;
    int rid1 = ix * ny * (2 * (npml - 1)) + iy * (2 * (npml - 1)) + (iz + npml - 1);
    int lid2 = lid1;
    int rid2 = rid1;
    int lid4 = (ix + 1) * ny * nz + iy * nz + iz;
    int rid4 = (ix + 1) * ny * nz + iy * nz + (iz + nz - npml);
    int lid3 = lid4 + 1;
    int rid3 = rid4 + 1;
    UEzx[lid0] = UEzx[lid0] * RBEzx[lid1] + RAEzx[lid2] * (Hx[lid3] - Hx[lid4]) / dz;
    UEzx[rid0] = UEzx[rid0] * RBEzx[rid1] + RAEzx[rid2] * (Hx[rid3] - Hx[rid4]) / dz;
}
dim3 blockUEzy(npml - 1);
dim3 gridUEzy(nx, ny - 1);
// PML auxiliary term UEzy: accumulates dHy/dz in the two z-side PML slabs for
// the Ex update.
// Extents per the original notes:
//   UEzy (nx, ny+1, nz+1); RBEzy/RAEzy (nx, ny-1, 2*(npml-1)); Hy (nx, ny+1, nz)
// Thread domain nx x (ny-1) x (npml-1); lid* = low-z slab, rid* = high-z slab.
// MATLAB reference:
//   UEzy(:, 2:ny, [2:npml nz-npml+2:nz]) = RBEzy .* UEzy(:, 2:ny, [2:npml nz-npml+2:nz])
//     + RAEzy ./ dz .* (Hy(:, 2:ny, [2:npml nz-npml+2:nz]) - Hy(:, 2:ny, [1:npml-1 nz-npml+1:nz-1]));
__global__ void calcUEzy(float *UEzy, float *RBEzy, float *RAEzy, float *Hy, const float dz)
{
    // NOTE: the original range comments here were wrong; corrected to match
    // gridUEzy(nx, ny - 1) and blockUEzy(npml - 1).
    int ix = blockIdx.x; // ix in [0, nx)
    int iy = blockIdx.y; // iy in [0, ny - 1)
    int iz = threadIdx.x; // iz in [0, npml - 1)
    int lid0 = ix * (ny + 1) * (nz + 1) + (iy + 1) * (nz + 1) + (iz + 1);
    int rid0 = ix * (ny + 1) * (nz + 1) + (iy + 1) * (nz + 1) + (iz + 1 + nz - npml);
    int lid1 = ix * (ny - 1) * (2 * (npml - 1)) + iy * (2 * (npml - 1)) + iz;
    int rid1 = ix * (ny - 1) * (2 * (npml - 1)) + iy * (2 * (npml - 1)) + (iz + npml - 1);
    int lid2 = lid1;
    int rid2 = rid1;
    int lid4 = ix * (ny + 1) * nz + (iy + 1) * nz + iz;
    int rid4 = ix * (ny + 1) * nz + (iy + 1) * nz + (iz + nz - npml);
    int lid3 = lid4 + 1;
    int rid3 = rid4 + 1;
    UEzy[lid0] = UEzy[lid0] * RBEzy[lid1] + RAEzy[lid2] * (Hy[lid3] - Hy[lid4]) / dz;
    UEzy[rid0] = UEzy[rid0] * RBEzy[rid1] + RAEzy[rid2] * (Hy[rid3] - Hy[rid4]) / dz;
}
dim3 blockEx(nz - 1);
dim3 gridEx(nx, ny - 1);
// Ex update over the interior Ex(:, 2:ny, 2:nz); thread domain
// nx x (ny-1) x (nz-1). Backward differences of Hz (in y) and Hy (in z) are
// scaled by ky_Ex/kz_Ex, and the PML accumulators UEyz/UEzy (same
// (nx, ny+1, nz+1) layout as Ex) are folded in with CBEx.
__global__ void calcEx(float *Ex, float *CAEx, float *CBEx, float *ky_Ex, float *kz_Ex, float *Hz, float *Hy, float *UEyz, float *UEzy, const float dy, const float dz)
{
    int ix = blockIdx.x; // ix in [0, nx)
    int iy = blockIdx.y + 1; // iy in [1, ny)
    int iz = threadIdx.x + 1; // iz in [1, nz)
    int idx = ix * (ny + 1) * (nz + 1) + iy * (nz + 1) + iz;
    int idxHz = ix * ny * (nz + 1) + iy * (nz + 1) + iz;
    int idxHy = ix * (ny + 1)*nz + iy * nz + iz;
    int deltaHz = nz + 1; // -1 step in y within Hz's layout
    int deltaHy = 1;      // -1 step in z within Hy's layout
    float CBE = CBEx[idx];
    Ex[idx] = Ex[idx] * CAEx[idx]
        + CBE / ky_Ex[idx] * (Hz[idxHz] - Hz[idxHz - deltaHz]) / dy
        - CBE / kz_Ex[idx] * (Hy[idxHy] - Hy[idxHy - deltaHy]) / dz
        + CBE * UEyz[idx]
        - CBE * UEzy[idx];
}
dim3 blockEy(nz - 1);
dim3 gridEy(nx - 1, ny);
// Ey update over the interior Ey(2:nx, :, 2:nz); thread domain
// (nx-1) x ny x (nz-1). Backward differences of Hx (in z) and Hz (in x) are
// scaled by kz_Ey/kx_Ey, and the PML accumulators UEzx/UExz (same
// (nx+1, ny, nz+1) layout as Ey) are folded in with CBEy.
__global__ void calcEy(float *Ey, float *CAEy, float *CBEy, float *kz_Ey, float *kx_Ey, float *Hx, float *Hz, float *UEzx, float *UExz, const float dz, const float dx)
{
    int ix = blockIdx.x + 1; // ix in [1, nx)
    int iy = blockIdx.y; // iy in [0, ny)
    int iz = threadIdx.x + 1; // iz in [1, nz)
    int idx = ix * ny * (nz + 1) + iy * (nz + 1) + iz;
    int idxHx = ix * ny * nz + iy * nz + iz;
    int idxHz = ix * ny * (nz + 1) + iy * (nz + 1) + iz;
    int deltaHx = 1;             // -1 step in z within Hx's layout
    int deltaHz = ny * (nz + 1); // -1 step in x within Hz's layout
    float CBE = CBEy[idx];
    Ey[idx] = Ey[idx] * CAEy[idx]
        + CBE / kz_Ey[idx] * (Hx[idxHx] - Hx[idxHx - deltaHx]) / dz
        - CBE / kx_Ey[idx] * (Hz[idxHz] - Hz[idxHz - deltaHz]) / dx
        + CBE * UEzx[idx]
        - CBE * UExz[idx];
}
dim3 blockEz(nz);
dim3 gridEz(nx - 1, ny - 1);
// Ez update over the interior Ez(2:nx, 2:ny, :); thread domain
// (nx-1) x (ny-1) x nz. Ez is stored with extent (nx+1, ny+1, nz) per the
// original notes. Backward differences of Hy (in x) and Hx (in y) are scaled
// by kx_Ez/ky_Ez, and the PML accumulators UExy/UEyx (same layout as Ez) are
// folded in with CBEz.
__global__ void calcEz(float *Ez, float *CAEz, float *CBEz, float *kx_Ez, float *ky_Ez, float *Hy, float *Hx, float *UExy, float *UEyx, const float dx, const float dy)
{
    int ix = blockIdx.x + 1; // ix in [1, nx)
    int iy = blockIdx.y + 1; // iy in [1, ny)
    int iz = threadIdx.x; // iz in [0, nz)
    int idx = ix * (ny + 1) * nz + iy * nz + iz;
    int idxHy = ix * (ny + 1) * nz + iy * nz + iz;
    int idxHx = ix * ny * nz + iy * nz + iz;
    int deltaHy = (ny + 1) * nz; // -1 step in x within Hy's layout
    int deltaHx = nz;            // -1 step in y within Hx's layout
    float CBE = CBEz[idx];
    Ez[idx] = Ez[idx] * CAEz[idx]
        + CBE / kx_Ez[idx] * (Hy[idxHy] - Hy[idxHy - deltaHy]) / dx
        - CBE / ky_Ez[idx] * (Hx[idxHx] - Hx[idxHx - deltaHx]) / dy
        + CBE * UExy[idx]
        - CBE * UEyx[idx];
}
// Debug helper: print one element of a device-resident 3-D array stored
// row-major as A[x][y][z] with extents (xdim, ydim, zdim). Intended for a
// single-thread launch; device printf is debugging-only. (xdim is unused by
// the flat-index computation but kept for a uniform signature.)
__global__ void print_dev_matrix(float *A, int i, int j, int k, int xdim, int ydim, int zdim)
{
    int flat = (i * ydim + j) * zdim + k;
    printf("dev_Matrix[%d][%d][%d] = %8f\n", i, j, k, A[flat]);
}
// Read n1*n2*n3 whitespace-separated integers from text file `name` into `a`,
// stored as a[i][j][k] = a[i*n2*n3 + j*n3 + k]. The file is consumed in
// (i, k, j) nesting order, i.e. for each (i, k) pair the j values come
// consecutively — presumably matching a MATLAB column-major export (TODO:
// confirm against the data generator). Prints progress/errors to stdout and
// returns early on open failure or malformed/truncated input (the original
// ignored the fscanf return value and would silently leave garbage in `a`).
void readInteger(const char *name, int *a, int n1, int n2, int n3)
{
    FILE *fp = fopen(name, "r");
    if (fp == NULL) // open failed: report and bail out
    {
        printf("fopen %s error! \n", name);
        return;
    }
    printf("fopen %s ok! \n", name);
    for (int i = 0; i < n1; i++)
    {
        for (int k = 0; k < n3; k++)
        {
            for (int j = 0; j < n2; j++)
            {
                // a[i][j][k]; abort on bad/short input instead of reading junk
                if (fscanf(fp, "%d", &a[i * n2*n3 + j * n3 + k]) != 1)
                {
                    printf("read %s error: bad or truncated data! \n", name);
                    fclose(fp);
                    return;
                }
            }
        }
    }
    printf("read %s OK\n", name);
    fclose(fp);
    return;
}
// Read n1*n2*n3 whitespace-separated floats from text file `name` into `a`,
// stored as a[i][j][k] = a[i*n2*n3 + j*n3 + k]. Same (i, k, j) consumption
// order and error behavior as readInteger: returns early on open failure or
// malformed/truncated input (the original ignored the fscanf return value and
// would silently leave garbage in `a`).
void readFloat(const char *name, float *a, int n1, int n2, int n3)
{
    FILE *fp = fopen(name, "r");
    if (fp == NULL) // open failed: report and bail out
    {
        printf("fopen %s error! \n", name);
        return;
    }
    printf("fopen %s ok! \n", name);
    for (int i = 0; i < n1; i++)
    {
        for (int k = 0; k < n3; k++)
        {
            for (int j = 0; j < n2; j++)
            {
                // a[i][j][k]; abort on bad/short input instead of reading junk
                if (fscanf(fp, "%f", a + i * n2*n3 + j * n3 + k) != 1)
                {
                    printf("read %s error: bad or truncated data! \n", name);
                    fclose(fp);
                    return;
                }
            }
        }
    }
    printf("read %s OK\n", name);
    fclose(fp);
    return;
}
// Load every precomputed host array from text files under data/.
// The dimensions passed here define the authoritative size of each global
// array (update coefficients CA*/CB*/CP*/CQ*, CPML recursion coefficients
// RA*/RB*, CPML stretch factors k*_*, source/receiver index lists, and the
// source waveform). File layout must match readFloat/readInteger ordering.
void readAllData()
{
// E-field update coefficients and CPML E recursion coefficients
readFloat("data/CAEx.txt", (float*)CAEx, nx, ny + 1, nz + 1);
readFloat("data/CBEx.txt", (float*)CBEx, nx, ny + 1, nz + 1);
readFloat("data/RAEyz.txt", (float*)RAEyz, nx, 2 * (npml - 1), nz - 1);
readFloat("data/RBEyz.txt", (float*)RBEyz, nx, 2 * (npml - 1), nz - 1);
readFloat("data/RAEzy.txt", (float*)RAEzy, nx, ny - 1, 2 * (npml - 1));
readFloat("data/RBEzy.txt", (float*)RBEzy, nx, ny - 1, 2 * (npml - 1));
readFloat("data/CAEy.txt", (float*)CAEy, nx + 1, ny, nz + 1);
readFloat("data/CBEy.txt", (float*)CBEy, nx + 1, ny, nz + 1);
readFloat("data/RAEzx.txt", (float*)RAEzx, nx - 1, ny, 2 * (npml - 1));
readFloat("data/RBEzx.txt", (float*)RBEzx, nx - 1, ny, 2 * (npml - 1));
readFloat("data/RAExz.txt", (float*)RAExz, 2 * (npml - 1), ny, nz - 1);
readFloat("data/RBExz.txt", (float*)RBExz, 2 * (npml - 1), ny, nz - 1);
readFloat("data/CAEz.txt", (float*)CAEz, nx + 1, ny + 1, nz);
readFloat("data/CBEz.txt", (float*)CBEz, nx + 1, ny + 1, nz);
readFloat("data/RAExy.txt", (float*)RAExy, 2 * (npml - 1), ny - 1, nz);
readFloat("data/RBExy.txt", (float*)RBExy, 2 * (npml - 1), ny - 1, nz);
readFloat("data/RAEyx.txt", (float*)RAEyx, nx - 1, 2 * (npml - 1), nz);
readFloat("data/RBEyx.txt", (float*)RBEyx, nx - 1, 2 * (npml - 1), nz);
// H-field update coefficients and CPML H recursion coefficients
readFloat("data/CPHx.txt", (float*)CPHx, nx + 1, ny, nz);
readFloat("data/CQHx.txt", (float*)CQHx, nx + 1, ny, nz);
readFloat("data/RAHyz.txt", (float*)RAHyz, nx - 1, 2 * npml, nz);
readFloat("data/RBHyz.txt", (float*)RBHyz, nx - 1, 2 * npml, nz);
readFloat("data/RAHzy.txt", (float*)RAHzy, nx - 1, ny, 2 * npml);
readFloat("data/RBHzy.txt", (float*)RBHzy, nx - 1, ny, 2 * npml);
readFloat("data/CPHy.txt", (float*)CPHy, nx, ny + 1, nz);
readFloat("data/CQHy.txt", (float*)CQHy, nx, ny + 1, nz);
readFloat("data/RAHzx.txt", (float*)RAHzx, nx, ny - 1, 2 * npml);
readFloat("data/RBHzx.txt", (float*)RBHzx, nx, ny - 1, 2 * npml);
readFloat("data/RAHxz.txt", (float*)RAHxz, 2 * npml, ny - 1, nz);
readFloat("data/RBHxz.txt", (float*)RBHxz, 2 * npml, ny - 1, nz);
readFloat("data/CPHz.txt", (float*)CPHz, nx, ny, nz + 1);
readFloat("data/CQHz.txt", (float*)CQHz, nx, ny, nz + 1);
readFloat("data/RAHxy.txt", (float*)RAHxy, 2 * npml, ny, nz - 1);
readFloat("data/RBHxy.txt", (float*)RBHxy, 2 * npml, ny, nz - 1);
readFloat("data/RAHyx.txt", (float*)RAHyx, nx, 2 * npml, nz - 1);
readFloat("data/RBHyx.txt", (float*)RBHyx, nx, 2 * npml, nz - 1);
// CPML coordinate-stretching factors
readFloat("data/kx_Ey.txt", (float*)kx_Ey, nx + 1, ny, nz + 1);
readFloat("data/kx_Ez.txt", (float*)kx_Ez, nx + 1, ny + 1, nz);
readFloat("data/ky_Ex.txt", (float*)ky_Ex, nx, ny + 1, nz + 1);
readFloat("data/ky_Ez.txt", (float*)ky_Ez, nx + 1, ny + 1, nz);
readFloat("data/kz_Ex.txt", (float*)kz_Ex, nx, ny + 1, nz + 1);
readFloat("data/kz_Ey.txt", (float*)kz_Ey, nx + 1, ny, nz + 1);
readFloat("data/kx_Hy.txt", (float*)kx_Hy, nx, ny + 1, nz);
readFloat("data/kx_Hz.txt", (float*)kx_Hz, nx, ny, nz + 1);
readFloat("data/ky_Hx.txt", (float*)ky_Hx, nx + 1, ny, nz);
readFloat("data/ky_Hz.txt", (float*)ky_Hz, nx, ny, nz + 1);
readFloat("data/kz_Hx.txt", (float*)kz_Hx, nx + 1, ny, nz);
readFloat("data/kz_Hy.txt", (float*)kz_Hy, nx, ny + 1, nz);
// Source (fswz*) and receiver (jswz*) grid indices, 1-based (MATLAB export)
readInteger("data/fswzx.txt", (int*)fswzx, 1, 1, szfsw);
readInteger("data/fswzy.txt", (int*)fswzy, 1, 1, szfsw);
readInteger("data/fswzz.txt", (int*)fswzz, 1, 1, szfsw);
readInteger("data/jswzx.txt", (int*)jswzx, 1, 1, szfsw);
readInteger("data/jswzy.txt", (int*)jswzy, 1, 1, szfsw);
readInteger("data/jswzz.txt", (int*)jswzz, 1, 1, szfsw);
// Source time waveform, one sample per time step
readFloat("data/source.txt", (float*)source, 1, 1, it);
}
// Write the recorded observations E_obs (it time steps x szfsw traces) to
// output/E_obs.txt: a header line, the two sizes, then one line per trace.
// FIX: the original printed an error on fopen failure but then fell
// through and called fprintf on a NULL FILE*; we now return early.
void printE_obs()
{
    const char *name = "output/E_obs.txt";
    FILE *fp = fopen(name, "w+");
    if (fp == NULL)
    {
        printf("fopen %s error! \n", name);
        return; // BUG FIX: previously fell through and wrote to a NULL stream
    }
    printf("print fopen %s ok! \n", name);
    fprintf(fp, "E_obs[%d][%d]\n", it, szfsw);
    fprintf(fp, " %d %d \n", szfsw, it);
    for (int i = 0; i < szfsw; i++)
    {
        // one output line per trace; E_obs is stored time-major, so the
        // inner loop strides across rows
        for (int j = 0; j < it; j++)
        {
            fprintf(fp, "%8f ", E_obs[j][i]);
        }
        fprintf(fp, "\n");
    }
    printf("print %s OK\n", name);
    fclose(fp);
    return;
}
// Allocate every device buffer used by the solver.
// The coefficient/CPML host globals are true arrays, so sizeof(name)
// yields the full byte count of each one. The field components and their
// CPML auxiliary (U*) buffers are sized explicitly below.
// FIX: hipMalloc return codes were previously ignored; each failed
// allocation is now reported with the buffer's name.
void gpu_memory_malloc()
{
#define DEV_MALLOC(ptr, bytes) \
    do { \
        if (hipMalloc((void**)&(ptr), (bytes)) != hipSuccess) \
            printf("hipMalloc failed for " #ptr "\n"); \
    } while (0)
    // update coefficients and CPML recursion coefficients (sized like host arrays)
    DEV_MALLOC(dev_CAEx, sizeof(CAEx));
    DEV_MALLOC(dev_CBEx, sizeof(CBEx));
    DEV_MALLOC(dev_RAEyz, sizeof(RAEyz));
    DEV_MALLOC(dev_RBEyz, sizeof(RBEyz));
    DEV_MALLOC(dev_RAEzy, sizeof(RAEzy));
    DEV_MALLOC(dev_RBEzy, sizeof(RBEzy));
    DEV_MALLOC(dev_CAEy, sizeof(CAEy));
    DEV_MALLOC(dev_CBEy, sizeof(CBEy));
    DEV_MALLOC(dev_RAExz, sizeof(RAExz));
    DEV_MALLOC(dev_RBExz, sizeof(RBExz));
    DEV_MALLOC(dev_RAEzx, sizeof(RAEzx));
    DEV_MALLOC(dev_RBEzx, sizeof(RBEzx));
    DEV_MALLOC(dev_CAEz, sizeof(CAEz));
    DEV_MALLOC(dev_CBEz, sizeof(CBEz));
    DEV_MALLOC(dev_RAExy, sizeof(RAExy));
    DEV_MALLOC(dev_RBExy, sizeof(RBExy));
    DEV_MALLOC(dev_RAEyx, sizeof(RAEyx));
    DEV_MALLOC(dev_RBEyx, sizeof(RBEyx));
    DEV_MALLOC(dev_CPHx, sizeof(CPHx));
    DEV_MALLOC(dev_CQHx, sizeof(CQHx));
    DEV_MALLOC(dev_RAHyz, sizeof(RAHyz));
    DEV_MALLOC(dev_RBHyz, sizeof(RBHyz));
    DEV_MALLOC(dev_RAHzy, sizeof(RAHzy));
    DEV_MALLOC(dev_RBHzy, sizeof(RBHzy));
    DEV_MALLOC(dev_CPHy, sizeof(CPHy));
    DEV_MALLOC(dev_CQHy, sizeof(CQHy));
    DEV_MALLOC(dev_RAHxz, sizeof(RAHxz));
    DEV_MALLOC(dev_RBHxz, sizeof(RBHxz));
    DEV_MALLOC(dev_RAHzx, sizeof(RAHzx));
    DEV_MALLOC(dev_RBHzx, sizeof(RBHzx));
    DEV_MALLOC(dev_CPHz, sizeof(CPHz));
    DEV_MALLOC(dev_CQHz, sizeof(CQHz));
    DEV_MALLOC(dev_RAHxy, sizeof(RAHxy));
    DEV_MALLOC(dev_RBHxy, sizeof(RBHxy));
    DEV_MALLOC(dev_RAHyx, sizeof(RAHyx));
    DEV_MALLOC(dev_RBHyx, sizeof(RBHyx));
    // CPML coordinate-stretching factors
    DEV_MALLOC(dev_kx_Ey, sizeof(kx_Ey));
    DEV_MALLOC(dev_kx_Ez, sizeof(kx_Ez));
    DEV_MALLOC(dev_ky_Ex, sizeof(ky_Ex));
    DEV_MALLOC(dev_ky_Ez, sizeof(ky_Ez));
    DEV_MALLOC(dev_kz_Ex, sizeof(kz_Ex));
    DEV_MALLOC(dev_kz_Ey, sizeof(kz_Ey));
    DEV_MALLOC(dev_kx_Hy, sizeof(kx_Hy));
    DEV_MALLOC(dev_kx_Hz, sizeof(kx_Hz));
    DEV_MALLOC(dev_ky_Hx, sizeof(ky_Hx));
    DEV_MALLOC(dev_ky_Hz, sizeof(ky_Hz));
    DEV_MALLOC(dev_kz_Hx, sizeof(kz_Hx));
    DEV_MALLOC(dev_kz_Hy, sizeof(kz_Hy));
    // field components and CPML auxiliary fields (GPU-only, zeroed later)
    int szEx = nx * (ny + 1)*(nz + 1);
    int szEy = (nx + 1)*ny*(nz + 1);
    int szEz = (nx + 1)*(ny + 1)*nz;
    int szHx = (nx + 1)*ny*nz;
    int szHy = nx * (ny + 1)*nz;
    int szHz = nx * ny*(nz + 1);
    DEV_MALLOC(dev_Ex, szEx * sizeof(float));
    DEV_MALLOC(dev_UEyz, szEx * sizeof(float));
    DEV_MALLOC(dev_UEzy, szEx * sizeof(float));
    DEV_MALLOC(dev_Ey, szEy * sizeof(float));
    DEV_MALLOC(dev_UEzx, szEy * sizeof(float));
    DEV_MALLOC(dev_UExz, szEy * sizeof(float));
    DEV_MALLOC(dev_Ez, szEz * sizeof(float));
    DEV_MALLOC(dev_UExy, szEz * sizeof(float));
    DEV_MALLOC(dev_UEyx, szEz * sizeof(float));
    DEV_MALLOC(dev_Hx, szHx * sizeof(float));
    DEV_MALLOC(dev_UHyz, szHx * sizeof(float));
    DEV_MALLOC(dev_UHzy, szHx * sizeof(float));
    DEV_MALLOC(dev_Hy, szHy * sizeof(float));
    DEV_MALLOC(dev_UHzx, szHy * sizeof(float));
    DEV_MALLOC(dev_UHxz, szHy * sizeof(float));
    DEV_MALLOC(dev_Hz, szHz * sizeof(float));
    DEV_MALLOC(dev_UHxy, szHz * sizeof(float));
    DEV_MALLOC(dev_UHyx, szHz * sizeof(float));
    // recorded trace, observation matrix, and source waveform
    DEV_MALLOC(dev_V, sizeof(V));
    DEV_MALLOC(dev_E_obs, sizeof(E_obs));
    DEV_MALLOC(dev_source, sizeof(source));
#undef DEV_MALLOC
}
// Zero all device field components and CPML auxiliary (U*) buffers, plus
// the trace/observation buffers. Called once per source position in
// calcWithCuda so each shot starts from quiescent fields.
// The sizes must mirror gpu_memory_malloc exactly.
void gpu_memory_set_zero()
{
int szEx = nx * (ny + 1)*(nz + 1);
int szEy = (nx + 1)*ny*(nz + 1);
int szEz = (nx + 1)*(ny + 1)*nz;
int szHx = (nx + 1)*ny*nz;
int szHy = nx * (ny + 1)*nz;
int szHz = nx * ny*(nz + 1);
// hipMemset is byte-wise, which is fine here since the fill value is 0
hipMemset(dev_Ex, 0, szEx * sizeof(float));
hipMemset(dev_UEyz, 0, szEx * sizeof(float));
hipMemset(dev_UEzy, 0, szEx * sizeof(float));
hipMemset(dev_Ey, 0, szEy * sizeof(float));
hipMemset(dev_UEzx, 0, szEy * sizeof(float));
hipMemset(dev_UExz, 0, szEy * sizeof(float));
hipMemset(dev_Ez, 0, szEz * sizeof(float));
hipMemset(dev_UExy, 0, szEz * sizeof(float));
hipMemset(dev_UEyx, 0, szEz * sizeof(float));
hipMemset(dev_Hx, 0, szHx * sizeof(float));
hipMemset(dev_UHyz, 0, szHx * sizeof(float));
hipMemset(dev_UHzy, 0, szHx * sizeof(float));
hipMemset(dev_Hy, 0, szHy * sizeof(float));
hipMemset(dev_UHzx, 0, szHy * sizeof(float));
hipMemset(dev_UHxz, 0, szHy * sizeof(float));
hipMemset(dev_Hz, 0, szHz * sizeof(float));
hipMemset(dev_UHxy, 0, szHz * sizeof(float));
hipMemset(dev_UHyx, 0, szHz * sizeof(float));
hipMemset(dev_V, 0, sizeof(V));
hipMemset(dev_E_obs, 0, sizeof(E_obs));
}
// Copy all host coefficient/CPML/stretch-factor arrays (and the source
// waveform) to their device counterparts. Stops at the first failure.
// FIX: the original printed the same anonymous "cudaMemcpy failed!" for
// every one of the 49 copies; the macro below stringizes the array name
// and includes the HIP error string so failures are identifiable.
void gpu_memory_copy()
{
    hipError_t cudaStatus;
#define COPY_H2D(dst, src) \
    do { \
        cudaStatus = hipMemcpy(dst, src, sizeof(src), hipMemcpyHostToDevice); \
        if (cudaStatus != hipSuccess) { \
            printf("hipMemcpy of %s failed: %s\n", #src, hipGetErrorString(cudaStatus)); \
            goto Error; \
        } \
    } while (0)
    COPY_H2D(dev_CAEx, CAEx);
    COPY_H2D(dev_CBEx, CBEx);
    COPY_H2D(dev_RAEyz, RAEyz);
    COPY_H2D(dev_RBEyz, RBEyz);
    COPY_H2D(dev_RAEzy, RAEzy);
    COPY_H2D(dev_RBEzy, RBEzy);
    COPY_H2D(dev_CAEy, CAEy);
    COPY_H2D(dev_CBEy, CBEy);
    COPY_H2D(dev_RAExz, RAExz);
    COPY_H2D(dev_RBExz, RBExz);
    COPY_H2D(dev_RAEzx, RAEzx);
    COPY_H2D(dev_RBEzx, RBEzx);
    COPY_H2D(dev_CAEz, CAEz);
    COPY_H2D(dev_CBEz, CBEz);
    COPY_H2D(dev_RAExy, RAExy);
    COPY_H2D(dev_RBExy, RBExy);
    COPY_H2D(dev_RAEyx, RAEyx);
    COPY_H2D(dev_RBEyx, RBEyx);
    COPY_H2D(dev_CPHx, CPHx);
    COPY_H2D(dev_CQHx, CQHx);
    COPY_H2D(dev_RAHyz, RAHyz);
    COPY_H2D(dev_RBHyz, RBHyz);
    COPY_H2D(dev_RAHzy, RAHzy);
    COPY_H2D(dev_RBHzy, RBHzy);
    COPY_H2D(dev_CPHy, CPHy);
    COPY_H2D(dev_CQHy, CQHy);
    COPY_H2D(dev_RAHxz, RAHxz);
    COPY_H2D(dev_RBHxz, RBHxz);
    COPY_H2D(dev_RAHzx, RAHzx);
    COPY_H2D(dev_RBHzx, RBHzx);
    COPY_H2D(dev_CPHz, CPHz);
    COPY_H2D(dev_CQHz, CQHz);
    COPY_H2D(dev_RAHxy, RAHxy);
    COPY_H2D(dev_RBHxy, RBHxy);
    COPY_H2D(dev_RAHyx, RAHyx);
    COPY_H2D(dev_RBHyx, RBHyx);
    COPY_H2D(dev_kx_Ey, kx_Ey);
    COPY_H2D(dev_kx_Ez, kx_Ez);
    COPY_H2D(dev_ky_Ex, ky_Ex);
    COPY_H2D(dev_ky_Ez, ky_Ez);
    COPY_H2D(dev_kz_Ex, kz_Ex);
    COPY_H2D(dev_kz_Ey, kz_Ey);
    COPY_H2D(dev_kx_Hy, kx_Hy);
    COPY_H2D(dev_kx_Hz, kx_Hz);
    COPY_H2D(dev_ky_Hx, ky_Hx);
    COPY_H2D(dev_ky_Hz, ky_Hz);
    COPY_H2D(dev_kz_Hx, kz_Hx);
    COPY_H2D(dev_kz_Hy, kz_Hy);
    COPY_H2D(dev_source, source);
#undef COPY_H2D
Error:
    return;
}
// Release every device buffer allocated by gpu_memory_malloc.
// hipFree(NULL) is a no-op, so this is safe even if some allocations
// failed earlier.
void gpu_memory_free()
{
// coefficient and CPML recursion arrays
hipFree(dev_CAEx);
hipFree(dev_CBEx);
hipFree(dev_RAEyz);
hipFree(dev_RBEyz);
hipFree(dev_RAEzy);
hipFree(dev_RBEzy);
hipFree(dev_CAEy);
hipFree(dev_CBEy);
hipFree(dev_RAExz);
hipFree(dev_RBExz);
hipFree(dev_RAEzx);
hipFree(dev_RBEzx);
hipFree(dev_CAEz);
hipFree(dev_CBEz);
hipFree(dev_RAExy);
hipFree(dev_RBExy);
hipFree(dev_RAEyx);
hipFree(dev_RBEyx);
hipFree(dev_CPHx);
hipFree(dev_CQHx);
hipFree(dev_RAHyz);
hipFree(dev_RBHyz);
hipFree(dev_RAHzy);
hipFree(dev_RBHzy);
hipFree(dev_CPHy);
hipFree(dev_CQHy);
hipFree(dev_RAHxz);
hipFree(dev_RBHxz);
hipFree(dev_RAHzx);
hipFree(dev_RBHzx);
hipFree(dev_CPHz);
hipFree(dev_CQHz);
hipFree(dev_RAHxy);
hipFree(dev_RBHxy);
hipFree(dev_RAHyx);
hipFree(dev_RBHyx);
// CPML coordinate-stretching factors
hipFree(dev_kx_Ey);
hipFree(dev_kx_Ez);
hipFree(dev_ky_Ex);
hipFree(dev_ky_Ez);
hipFree(dev_kz_Ex);
hipFree(dev_kz_Ey);
hipFree(dev_kx_Hy);
hipFree(dev_kx_Hz);
hipFree(dev_ky_Hx);
hipFree(dev_ky_Hz);
hipFree(dev_kz_Hx);
hipFree(dev_kz_Hy);
// field components and CPML auxiliary fields
hipFree(dev_Ex);
hipFree(dev_UEyz);
hipFree(dev_UEzy);
hipFree(dev_Ey);
hipFree(dev_UEzx);
hipFree(dev_UExz);
hipFree(dev_Ez);
hipFree(dev_UExy);
hipFree(dev_UEyx);
hipFree(dev_Hx);
hipFree(dev_UHyz);
hipFree(dev_UHzy);
hipFree(dev_Hy);
hipFree(dev_UHzx);
hipFree(dev_UHxz);
hipFree(dev_Hz);
hipFree(dev_UHxy);
hipFree(dev_UHyx);
// trace, observation matrix, and source waveform
hipFree(dev_V);
hipFree(dev_E_obs);
hipFree(dev_source);
}
// Run the full FDTD forward simulation on the GPU.
// For each of the szfsw source positions: zero the device fields, then
// time-step `it` iterations. Each step injects source[j] into Ex at the
// source cell, updates the CPML auxiliary terms, the H fields, then the
// E fields, and records Ex at the receiver cell into E_obs(j, i).
// Returns the last HIP status observed (hipSuccess on full completion).
// FIXES: cudaStatus is initialized (the original returned an
// uninitialized value when szfsw == 0), and the source-injection memcpy
// result — previously assigned but never checked — is now verified.
hipError_t calcWithCuda()
{
    hipError_t cudaStatus = hipSuccess; // also the return value when szfsw == 0
    int i, j;
    for (i = 0; i < szfsw; i++)
    {
        gpu_memory_set_zero(); // quiescent fields for each new source position
        for (j = 0; j < it; j++)
        {
            if (j % 200 == 0)
            {
                printf("i = %3d / %d, j = %4d / %d\n", i, szfsw, j, it);
            }
            // MATLAB: Ex(fswzx(i), fswzy(i), fswzz(i)) = source(j)  (1-based indices)
            int fidx = (fswzx[i] - 1)*(ny + 1)*(nz + 1) + (fswzy[i] - 1)*(nz + 1) + fswzz[i] - 1;
            cudaStatus = hipMemcpy(&(dev_Ex[fidx]), &(dev_source[j]), sizeof(float), hipMemcpyDeviceToDevice);
            if (cudaStatus != hipSuccess) { printf("source hipMemcpy failed: %s\n", hipGetErrorString(cudaStatus)); return cudaStatus; }
            // CPML auxiliary terms for the H-field update
            calcUHyz << < gridUHyz, blockUHyz >> > (dev_UHyz, dev_RBHyz, dev_RAHyz, dev_Ez, dy);
            calcUHzy << < gridUHzy, blockUHzy >> > (dev_UHzy, dev_RBHzy, dev_RAHzy, dev_Ey, dz);
            calcUHxy << < gridUHxy, blockUHxy >> > (dev_UHxy, dev_RBHxy, dev_RAHxy, dev_Ey, dx);
            calcUHxz << < gridUHxz, blockUHxz >> > (dev_UHxz, dev_RBHxz, dev_RAHxz, dev_Ez, dx);
            calcUHyx << < gridUHyx, blockUHyx >> > (dev_UHyx, dev_RBHyx, dev_RAHyx, dev_Ex, dy);
            calcUHzx << < gridUHzx, blockUHzx >> > (dev_UHzx, dev_RBHzx, dev_RAHzx, dev_Ex, dz);
            // H-field half step
            calcHx << < gridHx, blockHx >> > (dev_Hx, dev_CPHx, dev_CQHx, dev_ky_Hx, dev_kz_Hx, dev_Ez, dev_Ey, dev_UHyz, dev_UHzy, dy, dz);
            calcHy << < gridHy, blockHy >> > (dev_Hy, dev_CPHy, dev_CQHy, dev_kz_Hy, dev_kx_Hy, dev_Ex, dev_Ez, dev_UHzx, dev_UHxz, dz, dx);
            calcHz << < gridHz, blockHz >> > (dev_Hz, dev_CPHz, dev_CQHz, dev_kx_Hz, dev_ky_Hz, dev_Ey, dev_Ex, dev_UHxy, dev_UHyx, dx, dy);
            // CPML auxiliary terms for the E-field update
            calcUExy << < gridUExy, blockUExy >> > (dev_UExy, dev_RBExy, dev_RAExy, dev_Hy, dx);
            calcUExz << < gridUExz, blockUExz >> > (dev_UExz, dev_RBExz, dev_RAExz, dev_Hz, dx);
            calcUEyx << < gridUEyx, blockUEyx >> > (dev_UEyx, dev_RBEyx, dev_RAEyx, dev_Hx, dy);
            calcUEyz << < gridUEyz, blockUEyz >> > (dev_UEyz, dev_RBEyz, dev_RAEyz, dev_Hz, dy);
            calcUEzx << < gridUEzx, blockUEzx >> > (dev_UEzx, dev_RBEzx, dev_RAEzx, dev_Hx, dz);
            calcUEzy << < gridUEzy, blockUEzy >> > (dev_UEzy, dev_RBEzy, dev_RAEzy, dev_Hy, dz);
            // E-field half step
            calcEx << < gridEx, blockEx >> > (dev_Ex, dev_CAEx, dev_CBEx, dev_ky_Ex, dev_kz_Ex, dev_Hz, dev_Hy, dev_UEyz, dev_UEzy, dy, dz);
            calcEy << < gridEy, blockEy >> > (dev_Ey, dev_CAEy, dev_CBEy, dev_kz_Ey, dev_kx_Ey, dev_Hx, dev_Hz, dev_UEzx, dev_UExz, dz, dx);
            calcEz << < gridEz, blockEz >> > (dev_Ez, dev_CAEz, dev_CBEz, dev_kx_Ez, dev_ky_Ez, dev_Hy, dev_Hx, dev_UExy, dev_UEyx, dx, dy);
            // surface launch-configuration errors from the kernels above
            cudaStatus = hipGetLastError();
            if (cudaStatus != hipSuccess) { printf("calc failed: %s\n", hipGetErrorString(cudaStatus)); return cudaStatus; }
            // MATLAB: V(j) = Ex(jswzx(i), jswzy(i), jswzz(i))
            int jidx = (jswzx[i] - 1)*(ny + 1)*(nz + 1) + (jswzy[i] - 1)*(nz + 1) + jswzz[i] - 1;
            cudaStatus = hipMemcpy(&(dev_V[j]), &(dev_Ex[jidx]), sizeof(float), hipMemcpyDeviceToDevice);
            if (cudaStatus != hipSuccess) { printf("V hipMemcpy failed: %s\n", hipGetErrorString(cudaStatus)); return cudaStatus; };
            // MATLAB: E_obs(:, i) = V  — blocking D2H copy, so it also
            // synchronizes with the kernels launched this iteration
            cudaStatus = hipMemcpy(&(E_obs[j][i]), &(dev_V[j]), sizeof(float), hipMemcpyDeviceToHost);
            if (cudaStatus != hipSuccess) { printf("V hipMemcpy failed: %s\n", hipGetErrorString(cudaStatus)); return cudaStatus; };
        }
    }
    printf("finish calc !\n");
    hipDeviceSynchronize();
    return cudaStatus;
}
/************************************************************************************
* Main entry point: load data, run the GPU simulation, write E_obs.
************************************************************************************/
// Entry point: load the precomputed FDTD data from data/, run the forward
// simulation on GPU 0, and write the observations to output/E_obs.txt.
// FIXES: the startup log printed dt under the label "c"; the success log
// was missing a space ("calcWithCudasuccess!"); device memory is now
// freed on the calcWithCuda failure path before returning.
int main()
{
    printf("dt = %e\n", dt); // time step, computed in global_variables.cpp
    readAllData();
    printf("Read All Data OK ! \n");
    // select GPU 0 before any device allocation
    hipError_t cudaStatus = hipSetDevice(0);
    if (cudaStatus != hipSuccess) { printf("hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); return 1; }
    else { printf("hipSetDevice success!\n"); }
    gpu_memory_malloc();
    gpu_memory_copy();
    // run the simulation for every source position
    cudaStatus = calcWithCuda();
    if (cudaStatus != hipSuccess) { printf("calcWithCuda failed!"); gpu_memory_free(); return 1; }
    else { printf("calcWithCuda success!\n"); }
    gpu_memory_free();
    // reset the device so tracing/profiling tools see a clean exit
    cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess) { printf("hipDeviceReset failed!"); return 1; }
    // results were already copied to host E_obs inside calcWithCuda
    printE_obs();
    return 0;
}
* Author: Tao Rui
* 版本: V1.0 单卡,Linux版
* 说明:
* 数据从从当前目录下的data文件夹读入
* 结果输出到当前目录下的output文件夹中E_obs.txt
* 参数在global_variables.cpp中修改
************************************************************************************/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#include <string.h>
#include "global_variables.cpp"
/************************************************************************************
* 函数定义
************************************************************************************/
dim3 blockUHyz(nz);
dim3 gridUHyz(npml, nx - 1); //npml: blockIdx.x的变化范围, nx-1就是: blockIdx.y的变化范围
__global__ void calcUHyz(float *UHyz, float *RBHyz, float *RAHyz, float *Ez, const float dy)
{
/*
in0 UHyz nx+1 ny nz
in1 RBHyz nx-1 2*npml nz
in2 RAHyz nx-1 2*npml nz
in3 Ez nx+1 ny+1 nz
UHyz = UHyz * RBHyz + RAHyz * (Ez - Ez) / dy
运算块大小 nx-1 * npml * nz
UHyz由5个矩阵相乘或相加得来。
y维分为了两块
UHyz(2:nx, [1:npml ny-npml+1:ny], :)=RBHyz .* UHyz(2:nx, [1:npml ny-npml+1:ny], :)...
+RAHyz ./ dy .* (Ez(2:nx, [2:npml+1 ny-npml+2:ny+1], :) - Ez(2:nx, [1:npml ny-npml+1:ny], :));
*/
int ix = blockIdx.y; // ix in [0, nx - 1)
int iy = blockIdx.x; // iy in [0, npml)
int iz = threadIdx.x; // iz in [0, nz)
int lid0 = (ix + 1)*ny*nz + iy * nz + iz; // checked!
int rid0 = (ix + 1)*ny*nz + (iy + ny - npml) * nz + iz; //checked!
int lid1 = ix * (2 * npml)*nz + iy * nz + iz; // checked!
int rid1 = ix * (2 * npml)*nz + (iy + npml) * nz + iz; // checked!
int lid2 = lid1; // checked!
int rid2 = rid1; // checked!
int lid3 = (ix + 1)*(ny + 1)*nz + (iy + 1)*nz + iz; // checked!
int rid3 = (ix + 1)*(ny + 1)*nz + (iy + ny - npml + 1)*nz + iz; // checked!
int lid4 = (ix + 1)*(ny + 1)*nz + iy * nz + iz; // checked!
int rid4 = (ix + 1)*(ny + 1)*nz + (iy + ny - npml)*nz + iz; // checked!
UHyz[lid0] = UHyz[lid0] * RBHyz[lid1] + RAHyz[lid2] * (Ez[lid3] - Ez[lid4]) / dy;
UHyz[rid0] = UHyz[rid0] * RBHyz[rid1] + RAHyz[rid2] * (Ez[rid3] - Ez[rid4]) / dy;
}
dim3 blockUHzy(npml);
dim3 gridUHzy(nx - 1, ny);
__global__ void calcUHzy(float *UHzy, float *RBHzy, float *RAHzy, float *Ey, const float dz)
{
/*
in0 UHzy --size-- nx+1 ny nz
in1 RBHzy --size-- nx-1 ny 2*npml
in2 RAHzy --size-- nx-1 ny 2*npml
in3 Ey --size-- nx+1 ny nz+1
UHyz = UHyz * RBHyz + RAHyz * (Ez - Ez) / dy
运算块大小 nx-1 * ny * (5 *npml)
UHyz由5个矩阵相乘或相加得来。
z维分为了两块
UHzy(2:nx, :, [1:npml nz-npml+1:nz])=RBHzy.*UHzy(2:nx, :, [1:npml nz-npml+1:nz])
+RAHzy./dz.*(Ey(2:nx, :, [2:npml+1 nz-npml+2:nz+1])-Ey(2:nx, :, [1:npml nz-npml+1:nz]));
*/
int ix = blockIdx.x; // ix in [0, nx - 1)
int iy = blockIdx.y; // iy in [0, ny)
int iz = threadIdx.x; // ix in [0, npml)
int lid0 = (ix + 1) * ny * nz + iy * nz + iz; //checked!
int rid0 = (ix + 1) * ny * nz + iy * nz + iz + nz - npml; //checked!
int lid1 = ix * ny * (2 * npml) + iy * (2 * npml) + iz; //checked!
int rid1 = ix * ny * (2 * npml) + iy * (2 * npml) + iz + npml; //checked!
int lid2 = lid1;
int rid2 = rid1;
int lid4 = (ix + 1) * ny * (nz + 1) + iy * (nz + 1) + iz; //checked!
int rid4 = (ix + 1) * ny * (nz + 1) + iy * (nz + 1) + iz + nz - npml; //checked!
int lid3 = lid4 + 1;
int rid3 = rid4 + 1;
UHzy[lid0] = UHzy[lid0] * RBHzy[lid1] + RAHzy[lid2] * (Ey[lid3] - Ey[lid4]) / dz;
UHzy[rid0] = UHzy[rid0] * RBHzy[rid1] + RAHzy[rid2] * (Ey[rid3] - Ey[rid4]) / dz;
}
dim3 blockUHzx(npml);
dim3 gridUHzx(nx, ny - 1);
__global__ void calcUHzx(float *UHzx, float *RBHzx, float *RAHzx, float *Ex, const float dz)
{
/*
in0 UHzx --size-- nx ny + 1 nz
in1 RBHzx --size-- nx ny - 1 2 * npml
in2 RAHzx --size-- nx ny - 1 2 * npml
in3 Ex --size-- nx ny + 1 nz + 1
UHzx = UHzx * RBHzx + RAHzx * (Ez - Ez) / dy
运算块大小 nx * ny - 1 * npml
UHzx由5个矩阵相乘或相加得来。
z维分为了两块 1:npml -npml:0
UHzx(:, 2:ny, [1:npml nz - npml + 1:nz])=RBHzx. * UHzx(:, 2:ny, [1:npml nz - npml + 1:nz])
+RAHzx./dz.*(Ex(:, 2:ny, [2:npml + 1 nz - npml + 2:nz + 1]) - Ex(:, 2:ny, [1:npml nz - npml + 1:nz]));
*/
int ix = blockIdx.x; // ix in [0, nx)
int iy = blockIdx.y; // iy in [0, ny - 1)
int iz = threadIdx.x; // iz in [0, npml)
int lid0 = ix * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
int rid0 = ix * (ny + 1) * nz + (iy + 1) * nz + iz + nz - npml; // checked!
int lid1 = ix * (ny - 1) * (2 * npml) + iy * (2 * npml) + iz; // checked!
int rid1 = ix * (ny - 1) * (2 * npml) + iy * (2 * npml) + iz + npml; // checked!
int lid2 = lid1;
int rid2 = rid1;
int lid4 = ix * (ny + 1) * (nz + 1) + (iy + 1) * (nz + 1) + iz; // checked!
int rid4 = ix * (ny + 1) * (nz + 1) + (iy + 1) * (nz + 1) + iz + nz - npml; // checked!
int lid3 = lid4 + 1;
int rid3 = rid4 + 1;
UHzx[lid0] = UHzx[lid0] * RBHzx[lid1] + RAHzx[lid2] * (Ex[lid3] - Ex[lid4]) / dz;
UHzx[rid0] = UHzx[rid0] * RBHzx[rid1] + RAHzx[rid2] * (Ex[rid3] - Ex[rid4]) / dz;
}
dim3 blockUHxz(nz);
dim3 gridUHxz(npml, ny - 1);
__global__ void calcUHxz(float *UHxz, float *RBHxz, float *RAHxz, float *Ez, const float dx)
{
/*
in0 UHxz --size-- nx ny + 1 nz
in1 RBHxz --size-- 2*npml ny - 1 nz
in2 RAHxz --size-- 2*npml ny - 1 nz
in3 Ez --size-- nx + 1 ny + 1 nz
UHxz = UHxz * RBHxz + RAHxz * (Ez - Ez) / dx
运算块大小 npml * ny - 1 * nz
UHxz由5个矩阵相乘或相加得来。
x维分为了两块 1:npml -npml:0
UHxz([1:npml nx-npml+1:nx], 2:ny, :)=RBHxz.*UHxz([1:npml nx-npml+1:nx], 2:ny, :)...
+RAHxz./dx.*(Ez([2:npml+1 nx-npml+2:nx+1], 2:ny, :)-Ez([1:npml nx-npml+1:nx], 2:ny, :));
*/
int ix = blockIdx.x; // ix in [0, npml)
int iy = blockIdx.y; // iy in [0, ny - 1)
int iz = threadIdx.x; // iz in [0, nz)
int lid0 = ix * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
int rid0 = (ix + nx - npml) * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
int lid1 = ix * (ny - 1) * nz + iy * nz + iz; // checked!
int rid1 = (ix + npml) * (ny - 1) * nz + iy * nz + iz; // checked!
int lid2 = lid1;
int rid2 = rid1;
int lid4 = ix * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
int rid4 = (ix + nx - npml) * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
int lid3 = lid4 + (ny + 1) * nz;
int rid3 = rid4 + (ny + 1) * nz;
UHxz[lid0] = UHxz[lid0] * RBHxz[lid1] + RAHxz[lid2] * (Ez[lid3] - Ez[lid4]) / dx;
UHxz[rid0] = UHxz[rid0] * RBHxz[rid1] + RAHxz[rid2] * (Ez[rid3] - Ez[rid4]) / dx;
}
dim3 blockUHxy(nz - 1);
dim3 gridUHxy(npml, ny);
__global__ void calcUHxy(float *UHxy, float *RBHxy, float *RAHxy, float *Ey, const float dx)
{
/*
in0 UHxy --size-- nx ny nz + 1
in1 RBHxy --size-- 2*npml ny nz - 1
in2 RAHxy --size-- 2*npml ny nz - 1
in3 EY --size-- nx + 1 ny nz + 1
UHxy = UHxy * RBHxy + RAHxy * (Ez - Ez) / dx
运算块大小 npml * ny * nz - 1
UHxy由5个矩阵相乘或相加得来。
x维分为了两块 1:npml -npml:0
UHxy([1:npml nx-npml+1:nx], :, 2:nz)=RBHxy.*UHxy([1:npml nx-npml+1:nx], :, 2:nz)...
+RAHxy./dx.*(Ey([2:npml+1 nx-npml+2:nx+1], :, 2:nz)-Ey([1:npml nx-npml+1:nx], :, 2:nz));
*/
int ix = blockIdx.x; // ix in [0, npml)
int iy = blockIdx.y; // iy in [0, ny)
int iz = threadIdx.x; // iz in [0, nz - 1)
int lid0 = ix * ny * (nz + 1) + iy * (nz + 1) + iz + 1; // checked!
int rid0 = (ix + nx - npml) * ny * (nz + 1) + iy * (nz + 1) + iz + 1; //checked
int lid1 = ix * ny * (nz - 1) + iy * (nz - 1) + iz; // checked!
int rid1 = (ix + npml) * ny * (nz - 1) + iy * (nz - 1) + iz; // checked!
int lid2 = lid1;
int rid2 = rid1;
int lid4 = ix * ny * (nz + 1) + iy * (nz + 1) + iz + 1; // checked!
int rid4 = (ix + nx - npml) * ny * (nz + 1) + iy * (nz + 1) + iz + 1; // checked!
int lid3 = lid4 + ny * (nz + 1);
int rid3 = rid4 + ny * (nz + 1);
UHxy[lid0] = UHxy[lid0] * RBHxy[lid1] + RAHxy[lid2] * (Ey[lid3] - Ey[lid4]) / dx;
UHxy[rid0] = UHxy[rid0] * RBHxy[rid1] + RAHxy[rid2] * (Ey[rid3] - Ey[rid4]) / dx;
}
dim3 blockUHyx(nz - 1);
dim3 gridUHyx(npml, nx);
__global__ void calcUHyx(float *UHyx, float *RBHyx, float *RAHyx, float *Ex, const float dy)
{
/*
in0 UHyx nx ny nz + 1
in1 RBHyx nx 2*npml nz - 1
in2 RAHyx nx 2*npml nz - 1
in3 Ex nx ny + 1 nz + 1
UHyx = UHyx * RBHyx + RAHyx * (Ex - Ex) / dy
运算块大小 nx * npml * nz - 1
UHyx由5个矩阵相乘或相加得来。
y维分为了两块
UHyx(:, [1:npml ny-npml+1:ny], 2:nz)=RBHyx.*UHyx(:, [1:npml ny-npml+1:ny], 2:nz)...
+RAHyx./dy.*(Ex(:, [2:npml+1 ny-npml+2:ny+1], 2:nz)-Ex(:, [1:npml ny-npml+1:ny], 2:nz));
*/
int ix = blockIdx.y; // ix in [0, nx)
int iy = blockIdx.x; // iy in [0, npml)
int iz = threadIdx.x; // iz in [0, nz - 1)
int lid0 = ix * ny * (nz + 1) + iy * (nz + 1) + iz + 1; // checked!
int rid0 = ix * ny * (nz + 1) + (iy + ny - npml) * (nz + 1) + iz + 1; //checked!
int lid1 = ix * (2 * npml) * (nz - 1) + iy * (nz - 1) + iz; // checked!
int rid1 = ix * (2 * npml) * (nz - 1) + (iy + npml) * (nz - 1) + iz; // checked!
int lid2 = lid1; // checked!
int rid2 = rid1; // checked!
int lid4 = ix * (ny + 1) * (nz + 1) + iy * (nz + 1) + iz + 1; // checked!
int rid4 = ix * (ny + 1) * (nz + 1) + (iy + ny - npml) * (nz + 1) + iz + 1; // checked!
int lid3 = lid4 + (nz + 1); // checked!
int rid3 = rid4 + (nz + 1); // checked!
UHyx[lid0] = UHyx[lid0] * RBHyx[lid1] + RAHyx[lid2] * (Ex[lid3] - Ex[lid4]) / dy;
UHyx[rid0] = UHyx[rid0] * RBHyx[rid1] + RAHyx[rid2] * (Ex[rid3] - Ex[rid4]) / dy;
}
dim3 blockHx(nz);
dim3 gridHx(nx - 1, ny);
__global__ void calcHx(float *Hx, float *CPHx, float *CQHx, float *ky_Hx, float *kz_Hx, float *Ez, float *Ey, float *UHyz, float *UHzy, const float dy, const float dz)
{
//
// * 运算块大小 nx - 1 * ny * nz
// * Hx(2:nx,:,:)
//
int ix = blockIdx.x + 1;
int iy = blockIdx.y;
int iz = threadIdx.x;
int idx = ix * ny * nz + iy * nz + iz;
int idxEz = ix * (ny + 1)*nz + iy * nz + iz;
int idxEy = ix * ny * (nz + 1) + iy * (nz + 1) + iz;
int deltaEz = nz;
int deltaEy = 1;
float CQH = CQHx[idx];
Hx[idx] = Hx[idx] * CPHx[idx]
- CQH / ky_Hx[idx] * (Ez[idxEz + deltaEz] - Ez[idxEz]) / dy
+ CQH / kz_Hx[idx] * (Ey[idxEy + deltaEy] - Ey[idxEy]) / dz
- CQH * UHyz[idx]
+ CQH * UHzy[idx];
}
dim3 blockHy(nz);
dim3 gridHy(nx, ny - 1);
__global__ void calcHy(float *Hy, float *CPHy, float *CQHy, float *kz_Hy, float *kx_Hy, float *Ex, float *Ez, float *UHzx, float *UHxz, const float dz, const float dx)
{
//
// * 运算块大小 nx * ny -1 * nz
// * Hy(:,2:ny,:)
//
int ix = blockIdx.x;
int iy = blockIdx.y + 1;
int iz = threadIdx.x;
int idx = ix * (ny + 1)*nz + iy * nz + iz;
int idxEx = ix * (ny + 1)*(nz + 1) + iy * (nz + 1) + iz;
int idxEz = ix * (ny + 1)*nz + iy * nz + iz;
int deltaEx = 1;
int deltaEz = (ny + 1)*nz;
float CQH = CQHy[idx];
Hy[idx] = Hy[idx] * CPHy[idx]
- CQH / kz_Hy[idx] * (Ex[idxEx + deltaEx] - Ex[idxEx]) / dz
+ CQH / kx_Hy[idx] * (Ez[idxEz + deltaEz] - Ez[idxEz]) / dx
- CQH * UHzx[idx]
+ CQH * UHxz[idx];
}
dim3 blockHz(nz - 1);
dim3 gridHz(nx, ny);
// Updates Hz from the curl of Ey/Ex plus the CPML correction terms UHxy/UHyx.
__global__ void calcHz(float *Hz, float *CPHz, float *CQHz, float *kx_Hz, float *ky_Hz, float *Ey, float *Ex, float *UHxy, float *UHyx, const float dx, const float dy)
{
    //
    // * compute volume: nx * ny * (nz - 1)
    // * updates Hz(:, :, 2:nz)   (MATLAB 1-based notation)
    // * Hz is stored as nx * ny * (nz + 1)
    //
    int ix = blockIdx.x;       // ix in [0, nx)
    int iy = blockIdx.y;       // iy in [0, ny)
    int iz = threadIdx.x + 1;  // iz in [1, nz)
    int idx = ix * ny * (nz + 1) + iy * (nz + 1) + iz;              // Hz strides match (nx, ny, nz+1) layout (see readAllData: CPHz)
    int idxEy = ix * ny * (nz + 1) + iy * (nz + 1) + iz;            // Ey strides match (nx+1, ny, nz+1) layout
    int idxEx = ix * (ny + 1) * (nz + 1) + iy * (nz + 1) + iz;      // Ex strides match (nx, ny+1, nz+1) layout
    int deltaEy = ny * (nz + 1);  // one step along x in Ey
    int deltaEx = nz + 1;         // one step along y in Ex
    float CQH = CQHz[idx];
    Hz[idx] = Hz[idx] * CPHz[idx]
    - CQH / kx_Hz[idx] * (Ey[idxEy + deltaEy] - Ey[idxEy]) / dx
    + CQH / ky_Hz[idx] * (Ex[idxEx + deltaEx] - Ex[idxEx]) / dy
    - CQH * UHxy[idx]
    + CQH * UHyx[idx];
}
dim3 blockUEyz(nz - 1);
dim3 gridUEyz(npml - 1, nx);
// CPML auxiliary update for the y-derivative of Hz used by the Ex update.
// Each thread updates one cell in the low-side (lid*) and one in the
// high-side (rid*) y-PML slab.
__global__ void calcUEyz(float *UEyz, float *RBEyz, float *RAEyz, float *Hz, const float dy)
{
    /*
    dim3 blockUEyz(nz - 1);
    dim3 gridUEyz(npml - 1, nx);
    in0 UEyz   nx          ny + 1        nz + 1
    in1 RBEyz  nx          2*(npml-1)    nz - 1
    in2 RAEyz  nx          2*(npml-1)    nz - 1
    in3 Hz     nx          ny            nz + 1
    compute volume: nx * (npml - 1) * (nz - 1)
    MATLAB reference:
    UEyz(:, [2:npml ny-npml+2:ny], 2:nz)=RBEyz .* UEyz(:, [2:npml ny-npml+2:ny], 2:nz)...
    +RAEyz ./ dy .* (Hz(:, [2:npml ny-npml+2:ny], 2:nz) - Hz(:, [1:npml-1 ny-npml+1:ny-1], 2:nz));
    */
    int ix = blockIdx.y;  // ix in [0, nx)
    int iy = blockIdx.x;  // iy in [0, npml - 1)
    int iz = threadIdx.x; // iz in [0, nz - 1)
    int lid0 = ix * (ny + 1) * (nz + 1) + (iy + 1) * (nz + 1) + (iz + 1); // UEyz, low-side y slab
    int rid0 = ix * (ny + 1) * (nz + 1) + (iy + 1 + ny - npml) * (nz + 1) + (iz + 1); // UEyz, high-side y slab
    int lid1 = ix * (2 * (npml - 1)) * (nz - 1) + iy * (nz - 1) + iz; // RBEyz, first npml-1 rows
    int rid1 = ix * (2 * (npml - 1)) * (nz - 1) + (iy + npml - 1) * (nz - 1) + iz; // RBEyz, last npml-1 rows
    int lid2 = lid1; // RAEyz shares RBEyz's layout
    int rid2 = rid1;
    int lid4 = ix * ny * (nz + 1) + iy * (nz + 1) + (iz + 1); // Hz at y = iy
    int rid4 = ix * ny * (nz + 1) + (iy + ny - npml) * (nz + 1) + (iz + 1);
    int lid3 = lid4 + (nz + 1); // Hz at y = iy + 1 (forward y difference)
    int rid3 = rid4 + (nz + 1);
    UEyz[lid0] = UEyz[lid0] * RBEyz[lid1] + RAEyz[lid2] * (Hz[lid3] - Hz[lid4]) / dy;
    UEyz[rid0] = UEyz[rid0] * RBEyz[rid1] + RAEyz[rid2] * (Hz[rid3] - Hz[rid4]) / dy;
}
// BUG FIX: the original launch configuration was blockUEyx(nz - 1) and
// gridUEyx(npml - 1, nx).  That (a) skipped the last z slice — the MATLAB
// reference updates UEyx(2:nx, ..., :) over the full z range and RAEyx/RBEyx
// are read with a z extent of nz (see readAllData) — and (b) let ix reach
// nx - 1, so lid1/rid1 indexed one row past RBEyx/RAEyx, whose first
// dimension is nx - 1.  Compare calcUExy (full-nz block) and calcUEzx
// (nx - 1 grid extent), which follow the corrected pattern.
dim3 blockUEyx(nz);
dim3 gridUEyx(npml - 1, nx - 1);
// CPML auxiliary update for the y-derivative of Hx used by the Ez update.
__global__ void calcUEyx(float *UEyx, float *RBEyx, float *RAEyx, float *Hx, const float dy)
{
    /*
    dim3 blockUEyx(nz);
    dim3 gridUEyx(npml - 1, nx - 1);
    in0 UEyx   nx + 1      ny + 1        nz
    in1 RBEyx  nx - 1      2*(npml-1)    nz
    in2 RAEyx  nx - 1      2*(npml-1)    nz
    in3 Hx     nx + 1      ny            nz
    compute volume: (nx - 1) * (npml - 1) * nz
    MATLAB reference:
    UEyx(2:nx, [2:npml ny-npml+2:ny], :)=RBEyx .* UEyx(2:nx, [2:npml ny-npml+2:ny], :)...
    +RAEyx ./ dy .* (Hx(2:nx, [2:npml ny-npml+2:ny], :) - Hx(2:nx, [1:npml-1 ny-npml+1:ny-1], :));
    */
    int ix = blockIdx.y;  // ix in [0, nx - 1)
    int iy = blockIdx.x;  // iy in [0, npml - 1)
    int iz = threadIdx.x; // iz in [0, nz)
    int lid0 = (ix + 1) * (ny + 1) * nz + (iy + 1) * nz + iz; // UEyx, low-side y slab
    int rid0 = (ix + 1) * (ny + 1) * nz + (iy + 1 + ny - npml) * nz + iz; // UEyx, high-side y slab
    int lid1 = ix * (2 * (npml - 1)) * nz + iy * nz + iz; // RBEyx, first npml-1 rows
    int rid1 = ix * (2 * (npml - 1)) * nz + (iy + npml - 1) * nz + iz; // RBEyx, last npml-1 rows
    int lid2 = lid1; // RAEyx shares RBEyx's layout
    int rid2 = rid1;
    int lid4 = (ix + 1) * ny * nz + iy * nz + iz; // Hx at y = iy
    int rid4 = (ix + 1) * ny * nz + (iy + ny - npml) * nz + iz;
    int lid3 = lid4 + nz; // Hx at y = iy + 1 (forward y difference)
    int rid3 = rid4 + nz;
    UEyx[lid0] = UEyx[lid0] * RBEyx[lid1] + RAEyx[lid2] * (Hx[lid3] - Hx[lid4]) / dy;
    UEyx[rid0] = UEyx[rid0] * RBEyx[rid1] + RAEyx[rid2] * (Hx[rid3] - Hx[rid4]) / dy;
}
dim3 blockUExy(nz);
dim3 gridUExy(npml - 1, ny - 1);
// CPML auxiliary update for the x-derivative of Hy used by the Ez update.
__global__ void calcUExy(float *UExy, float *RBExy, float *RAExy, float *Hy, const float dx)
{
    /*
    dim3 blockUExy(nz);
    dim3 gridUExy(npml - 1, ny - 1);
    in0 UExy   nx + 1        ny + 1    nz
    in1 RBExy  2*(npml-1)    ny - 1    nz
    in2 RAExy  2*(npml-1)    ny - 1    nz
    in3 Hy     nx            ny + 1    nz
    compute volume: (npml - 1) * (ny - 1) * nz
    MATLAB reference:
    UExy([2:npml nx-npml+2:nx], 2:ny, :)=RBExy .* UExy([2:npml nx-npml+2:nx], 2:ny, :)...
    +RAExy ./ dx .* (Hy([2:npml nx-npml+2:nx], 2:ny, :) - Hy([1:npml-1 nx-npml+1:nx-1], 2:ny, :));
    */
    int ix = blockIdx.x;  // ix in [0, npml - 1)
    int iy = blockIdx.y;  // iy in [0, ny - 1)
    int iz = threadIdx.x; // iz in [0, nz)
    int lid0 = (ix + 1) * (ny + 1) * nz + (iy + 1) * nz + iz; // UExy, low-side x slab
    int rid0 = (ix + 1 + nx - npml) * (ny + 1) * nz + (iy + 1) * nz + iz; // UExy, high-side x slab
    int lid1 = ix * (ny - 1) * nz + iy * nz + iz; // RBExy, first npml-1 planes
    int rid1 = (ix + npml - 1) * (ny - 1) * nz + iy * nz + iz; // RBExy, last npml-1 planes
    int lid2 = lid1; // RAExy shares RBExy's layout
    int rid2 = rid1;
    int lid4 = ix * (ny + 1) * nz + (iy + 1) * nz + iz; // Hy at x = ix
    int rid4 = (ix + nx - npml) * (ny + 1) * nz + (iy + 1) * nz + iz;
    int lid3 = lid4 + (ny + 1) * nz; // Hy at x = ix + 1 (forward x difference)
    int rid3 = rid4 + (ny + 1) * nz;
    UExy[lid0] = UExy[lid0] * RBExy[lid1] + RAExy[lid2] * (Hy[lid3] - Hy[lid4]) / dx;
    UExy[rid0] = UExy[rid0] * RBExy[rid1] + RAExy[rid2] * (Hy[rid3] - Hy[rid4]) / dx;
}
dim3 blockUExz(nz - 1);
dim3 gridUExz(npml - 1, ny);
// CPML auxiliary update for the x-derivative of Hz used by the Ey update.
__global__ void calcUExz(float *UExz, float *RBExz, float *RAExz, float *Hz, const float dx)
{
    /*
    dim3 blockUExz(nz - 1);
    dim3 gridUExz(npml - 1, ny);
    in0 UExz   nx + 1        ny    nz + 1
    in1 RBExz  2*(npml-1)    ny    nz - 1
    in2 RAExz  2*(npml-1)    ny    nz - 1
    in3 Hz     nx            ny    nz + 1
    compute volume: (npml - 1) * ny * (nz - 1)
    MATLAB reference:
    UExz([2:npml nx-npml+2:nx], :, 2:nz)=RBExz .* UExz([2:npml nx-npml+2:nx], :, 2:nz)...
    +RAExz ./ dx .* (Hz([2:npml nx-npml+2:nx], :, 2:nz) - Hz([1:npml-1 nx-npml+1:nx-1], :, 2:nz));
    */
    int ix = blockIdx.x;  // ix in [0, npml - 1)
    int iy = blockIdx.y;  // iy in [0, ny)
    int iz = threadIdx.x; // iz in [0, nz - 1)
    int lid0 = (ix + 1) * ny * (nz + 1) + iy * (nz + 1) + (iz + 1); // UExz, low-side x slab
    int rid0 = (ix + 1 + nx - npml) * ny * (nz + 1) + iy * (nz + 1) + (iz + 1); // UExz, high-side x slab
    int lid1 = ix * ny * (nz - 1) + iy * (nz - 1) + iz; // RBExz, first npml-1 planes
    int rid1 = (ix + npml - 1) * ny * (nz - 1) + iy * (nz - 1) + iz; // RBExz, last npml-1 planes
    int lid2 = lid1; // RAExz shares RBExz's layout
    int rid2 = rid1;
    int lid4 = ix * ny * (nz + 1) + iy * (nz + 1) + (iz + 1); // Hz at x = ix
    int rid4 = (ix + nx - npml) * ny * (nz + 1) + iy * (nz + 1) + (iz + 1);
    int lid3 = lid4 + ny * (nz + 1); // Hz at x = ix + 1 (forward x difference)
    int rid3 = rid4 + ny * (nz + 1);
    UExz[lid0] = UExz[lid0] * RBExz[lid1] + RAExz[lid2] * (Hz[lid3] - Hz[lid4]) / dx;
    UExz[rid0] = UExz[rid0] * RBExz[rid1] + RAExz[rid2] * (Hz[rid3] - Hz[rid4]) / dx;
}
dim3 blockUEzx(npml - 1);
dim3 gridUEzx(nx - 1, ny);
// CPML auxiliary update for the z-derivative of Hx used by the Ey update.
// NOTE: the original range comments on ix/iy/iz were scrambled; corrected
// below from the launch configuration above.
__global__ void calcUEzx(float *UEzx, float *RBEzx, float *RAEzx, float *Hx, const float dz)
{
    /*
    dim3 blockUEzx(npml - 1);
    dim3 gridUEzx(nx - 1, ny);
    in0 UEzx   nx + 1    ny    nz + 1
    in1 RBEzx  nx - 1    ny    2*(npml-1)
    in2 RAEzx  nx - 1    ny    2*(npml-1)
    in3 Hx     nx + 1    ny    nz
    compute volume: (nx - 1) * ny * (npml - 1)
    MATLAB reference:
    UEzx(2:nx, :, [2:npml nz-npml+2:nz])=RBEzx .* UEzx(2:nx, :, [2:npml nz-npml+2:nz])...
    +RAEzx ./ dz .* (Hx(2:nx, :, [2:npml nz-npml+2:nz]) - Hx(2:nx, :, [1:npml-1 nz-npml+1:nz-1]));
    */
    int ix = blockIdx.x;  // ix in [0, nx - 1)
    int iy = blockIdx.y;  // iy in [0, ny)
    int iz = threadIdx.x; // iz in [0, npml - 1)
    int lid0 = (ix + 1) * ny * (nz + 1) + iy * (nz + 1) + (iz + 1); // UEzx, low-side z slab
    int rid0 = (ix + 1) * ny * (nz + 1) + iy * (nz + 1) + (iz + 1 + nz - npml); // UEzx, high-side z slab
    int lid1 = ix * ny * (2 * (npml - 1)) + iy * (2 * (npml - 1)) + iz; // RBEzx, first npml-1 columns
    int rid1 = ix * ny * (2 * (npml - 1)) + iy * (2 * (npml - 1)) + (iz + npml - 1); // RBEzx, last npml-1 columns
    int lid2 = lid1; // RAEzx shares RBEzx's layout
    int rid2 = rid1;
    int lid4 = (ix + 1) * ny * nz + iy * nz + iz; // Hx at z = iz
    int rid4 = (ix + 1) * ny * nz + iy * nz + (iz + nz - npml);
    int lid3 = lid4 + 1; // Hx at z = iz + 1 (forward z difference)
    int rid3 = rid4 + 1;
    UEzx[lid0] = UEzx[lid0] * RBEzx[lid1] + RAEzx[lid2] * (Hx[lid3] - Hx[lid4]) / dz;
    UEzx[rid0] = UEzx[rid0] * RBEzx[rid1] + RAEzx[rid2] * (Hx[rid3] - Hx[rid4]) / dz;
}
dim3 blockUEzy(npml - 1);
dim3 gridUEzy(nx, ny - 1);
// CPML auxiliary update for the z-derivative of Hy used by the Ex update.
// NOTE: the original range comments on iy/iz were scrambled; corrected
// below from the launch configuration above.
__global__ void calcUEzy(float *UEzy, float *RBEzy, float *RAEzy, float *Hy, const float dz)
{
    /*
    dim3 blockUEzy(npml - 1);
    dim3 gridUEzy(nx, ny - 1);
    in0 UEzy   nx    ny + 1    nz + 1
    in1 RBEzy  nx    ny - 1    2*(npml-1)
    in2 RAEzy  nx    ny - 1    2*(npml-1)
    in3 Hy     nx    ny + 1    nz
    compute volume: nx * (ny - 1) * (npml - 1)
    MATLAB reference:
    UEzy(:, 2:ny, [2:npml nz-npml+2:nz])=RBEzy.*UEzy(:, 2:ny, [2:npml nz-npml+2:nz])...
    +RAEzy./dz.*(Hy(:, 2:ny, [2:npml nz-npml+2:nz])-Hy(:, 2:ny, [1:npml-1 nz-npml+1:nz-1]));
    */
    int ix = blockIdx.x;  // ix in [0, nx)
    int iy = blockIdx.y;  // iy in [0, ny - 1)
    int iz = threadIdx.x; // iz in [0, npml - 1)
    int lid0 = ix * (ny + 1) * (nz + 1) + (iy + 1) * (nz + 1) + (iz + 1); // UEzy, low-side z slab
    int rid0 = ix * (ny + 1) * (nz + 1) + (iy + 1) * (nz + 1) + (iz + 1 + nz - npml); // UEzy, high-side z slab
    int lid1 = ix * (ny - 1) * (2 * (npml - 1)) + iy * (2 * (npml - 1)) + iz; // RBEzy, first npml-1 columns
    int rid1 = ix * (ny - 1) * (2 * (npml - 1)) + iy * (2 * (npml - 1)) + (iz + npml - 1); // RBEzy, last npml-1 columns
    int lid2 = lid1; // RAEzy shares RBEzy's layout
    int rid2 = rid1;
    int lid4 = ix * (ny + 1) * nz + (iy + 1) * nz + iz; // Hy at z = iz
    int rid4 = ix * (ny + 1) * nz + (iy + 1) * nz + (iz + nz - npml);
    int lid3 = lid4 + 1; // Hy at z = iz + 1 (forward z difference)
    int rid3 = rid4 + 1;
    UEzy[lid0] = UEzy[lid0] * RBEzy[lid1] + RAEzy[lid2] * (Hy[lid3] - Hy[lid4]) / dz;
    UEzy[rid0] = UEzy[rid0] * RBEzy[rid1] + RAEzy[rid2] * (Hy[rid3] - Hy[rid4]) / dz;
}
dim3 blockEx(nz - 1);
dim3 gridEx(nx, ny - 1);
// Updates Ex from the curl of Hz/Hy plus the CPML correction terms UEyz/UEzy.
__global__ void calcEx(float *Ex, float *CAEx, float *CBEx, float *ky_Ex, float *kz_Ex, float *Hz, float *Hy, float *UEyz, float *UEzy, const float dy, const float dz)
{
    //
    // * dim3 blockEx(nz-1);
    // * dim3 gridEx(nx, ny-1);
    // * compute volume: nx * (ny - 1) * (nz - 1)
    // * updates Ex(:, 2:ny, 2:nz)   (MATLAB 1-based notation)
    //
    int ix = blockIdx.x;      // ix in [0, nx)
    int iy = blockIdx.y + 1;  // iy in [1, ny)
    int iz = threadIdx.x + 1; // iz in [1, nz)
    int idx = ix * (ny + 1) * (nz + 1) + iy * (nz + 1) + iz;  // Ex strides match (nx, ny+1, nz+1) layout (see readAllData: CAEx)
    int idxHz = ix * ny * (nz + 1) + iy * (nz + 1) + iz;      // Hz strides match (nx, ny, nz+1) layout
    int idxHy = ix * (ny + 1)*nz + iy * nz + iz;              // Hy strides match (nx, ny+1, nz) layout
    int deltaHz = nz + 1;  // one step along y in Hz (backward difference below)
    int deltaHy = 1;       // one step along z in Hy
    float CBE = CBEx[idx];
    Ex[idx] = Ex[idx] * CAEx[idx]
    + CBE / ky_Ex[idx] * (Hz[idxHz] - Hz[idxHz - deltaHz]) / dy
    - CBE / kz_Ex[idx] * (Hy[idxHy] - Hy[idxHy - deltaHy]) / dz
    + CBE * UEyz[idx]
    - CBE * UEzy[idx];
}
dim3 blockEy(nz - 1);
dim3 gridEy(nx - 1, ny);
// Updates Ey from the curl of Hx/Hz plus the CPML correction terms UEzx/UExz.
__global__ void calcEy(float *Ey, float *CAEy, float *CBEy, float *kz_Ey, float *kx_Ey, float *Hx, float *Hz, float *UEzx, float *UExz, const float dz, const float dx)
{
    //
    // * dim3 blockEy(nz-1);
    // * dim3 gridEy(nx-1, ny);
    // * compute volume: (nx - 1) * ny * (nz - 1)
    // * updates Ey(2:nx, :, 2:nz)   (MATLAB 1-based notation)
    //
    int ix = blockIdx.x + 1;  // ix in [1, nx)
    int iy = blockIdx.y;      // iy in [0, ny)
    int iz = threadIdx.x + 1; // iz in [1, nz)
    int idx = ix * ny * (nz + 1) + iy * (nz + 1) + iz;    // Ey strides match (nx+1, ny, nz+1) layout (see readAllData: CAEy)
    int idxHx = ix * ny * nz + iy * nz + iz;              // Hx strides match (nx+1, ny, nz) layout
    int idxHz = ix * ny * (nz + 1) + iy * (nz + 1) + iz;  // Hz strides match (nx, ny, nz+1) layout
    int deltaHx = 1;              // one step along z in Hx (backward difference below)
    int deltaHz = ny * (nz + 1);  // one step along x in Hz
    float CBE = CBEy[idx];
    Ey[idx] = Ey[idx] * CAEy[idx]
    + CBE / kz_Ey[idx] * (Hx[idxHx] - Hx[idxHx - deltaHx]) / dz
    - CBE / kx_Ey[idx] * (Hz[idxHz] - Hz[idxHz - deltaHz]) / dx
    + CBE * UEzx[idx]
    - CBE * UExz[idx];
}
dim3 blockEz(nz);
dim3 gridEz(nx - 1, ny - 1);
// Updates Ez from the curl of Hy/Hx plus the CPML correction terms UExy/UEyx.
__global__ void calcEz(float *Ez, float *CAEz, float *CBEz, float *kx_Ez, float *ky_Ez, float *Hy, float *Hx, float *UExy, float *UEyx, const float dx, const float dy)
{
    //
    // * dim3 blockEz(nz);
    // * dim3 gridEz(nx-1, ny-1);
    // * compute volume: (nx - 1) * (ny - 1) * nz
    // * updates Ez(2:nx, 2:ny, :)   (MATLAB 1-based notation)
    // * Ez is stored as (nx+1) * (ny+1) * nz
    //
    int ix = blockIdx.x + 1;  // ix in [1, nx)
    int iy = blockIdx.y + 1;  // iy in [1, ny)
    int iz = threadIdx.x;     // iz in [0, nz)
    int idx = ix * (ny + 1) * nz + iy * nz + iz;    // Ez strides match (nx+1, ny+1, nz) layout (see readAllData: CAEz)
    int idxHy = ix * (ny + 1) * nz + iy * nz + iz;  // Hy strides match (nx, ny+1, nz) layout
    int idxHx = ix * ny * nz + iy * nz + iz;        // Hx strides match (nx+1, ny, nz) layout
    int deltaHy = (ny + 1) * nz;  // one step along x in Hy (backward difference below)
    int deltaHx = nz;             // one step along y in Hx
    float CBE = CBEz[idx];
    Ez[idx] = Ez[idx] * CAEz[idx]
    + CBE / kx_Ez[idx] * (Hy[idxHy] - Hy[idxHy - deltaHy]) / dx
    - CBE / ky_Ez[idx] * (Hx[idxHx] - Hx[idxHx - deltaHx]) / dy
    + CBE * UExy[idx]
    - CBE * UEyx[idx];
}
// Debug kernel: prints a single element of a device 3-D array stored in
// row-major (x, y, z) order.  xdim is unused but kept for a stable signature.
__global__ void print_dev_matrix(float *A, int i, int j, int k, int xdim, int ydim, int zdim)
{
    int offset = (i * ydim + j) * zdim + k; // same flat index as i*ydim*zdim + j*zdim + k
    printf("dev_Matrix[%d][%d][%d] = %8f\n", i, j, k, A[offset]);
}
// Reads n1*n2*n3 whitespace-separated integers from a text file.
// The file is ordered (i, k, j) — k varies before j — while the value is
// stored at a[i*n2*n3 + j*n3 + k], i.e. the (j, k) plane is transposed
// relative to the file order.
// FIX: the fscanf return value was previously ignored, so a short or
// malformed file silently left part of the array uninitialized; now the
// read aborts with a diagnostic instead.
void readInteger(const char *name, int *a, int n1, int n2, int n3)
{
    FILE *fp = fopen(name, "r");
    if (fp == NULL) // was the file opened successfully?
    {
        printf("fopen %s error! \n", name);
        return;
    }
    printf("fopen %s ok! \n", name);
    for (int i = 0; i < n1; i++)
    {
        for (int k = 0; k < n3; k++)
        {
            for (int j = 0; j < n2; j++)
            {
                // read a[i][j][k]; stop on the first conversion failure
                if (fscanf(fp, "%d", &a[i * n2*n3 + j * n3 + k]) != 1)
                {
                    printf("read %s failed at (%d,%d,%d)\n", name, i, j, k);
                    fclose(fp);
                    return;
                }
            }
        }
    }
    printf("read %s OK\n", name);
    fclose(fp);
    return;
}
// Reads n1*n2*n3 whitespace-separated floats from a text file.
// The file is ordered (i, k, j) — k varies before j — while the value is
// stored at a[i*n2*n3 + j*n3 + k], i.e. the (j, k) plane is transposed
// relative to the file order (mirrors readInteger).
// FIX: the fscanf return value was previously ignored, so a short or
// malformed file silently left part of the array uninitialized; now the
// read aborts with a diagnostic instead.
void readFloat(const char *name, float *a, int n1, int n2, int n3)
{
    FILE *fp = fopen(name, "r");
    if (fp == NULL) // was the file opened successfully?
    {
        printf("fopen %s error! \n", name);
        return;
    }
    printf("fopen %s ok! \n", name);
    for (int i = 0; i < n1; i++)
    {
        for (int k = 0; k < n3; k++)
        {
            for (int j = 0; j < n2; j++)
            {
                // read a[i][j][k]; stop on the first conversion failure
                if (fscanf(fp, "%f", a + i * n2*n3 + j * n3 + k) != 1)
                {
                    printf("read %s failed at (%d,%d,%d)\n", name, i, j, k);
                    fclose(fp);
                    return;
                }
            }
        }
    }
    printf("read %s OK\n", name);
    fclose(fp);
    return;
}
// Loads every coefficient array, CPML R-array, kappa array, source/receiver
// position list, and the source waveform from the data/ directory.
// The dimension triples passed here define the memory layout assumed by all
// kernels above (E-field R-arrays use 2*(npml-1) PML cells, H-field R-arrays
// use 2*npml — taken as-is from the launch/indexing code; TODO confirm
// against the generating MATLAB script).
void readAllData()
{
    // E-field update coefficients and y/z-PML R-arrays for Ex
    readFloat("data/CAEx.txt", (float*)CAEx, nx, ny + 1, nz + 1);
    readFloat("data/CBEx.txt", (float*)CBEx, nx, ny + 1, nz + 1);
    readFloat("data/RAEyz.txt", (float*)RAEyz, nx, 2 * (npml - 1), nz - 1);
    readFloat("data/RBEyz.txt", (float*)RBEyz, nx, 2 * (npml - 1), nz - 1);
    readFloat("data/RAEzy.txt", (float*)RAEzy, nx, ny - 1, 2 * (npml - 1));
    readFloat("data/RBEzy.txt", (float*)RBEzy, nx, ny - 1, 2 * (npml - 1));
    // Ey coefficients and its PML R-arrays
    readFloat("data/CAEy.txt", (float*)CAEy, nx + 1, ny, nz + 1);
    readFloat("data/CBEy.txt", (float*)CBEy, nx + 1, ny, nz + 1);
    readFloat("data/RAEzx.txt", (float*)RAEzx, nx - 1, ny, 2 * (npml - 1));
    readFloat("data/RBEzx.txt", (float*)RBEzx, nx - 1, ny, 2 * (npml - 1));
    readFloat("data/RAExz.txt", (float*)RAExz, 2 * (npml - 1), ny, nz - 1);
    readFloat("data/RBExz.txt", (float*)RBExz, 2 * (npml - 1), ny, nz - 1);
    // Ez coefficients and its PML R-arrays
    readFloat("data/CAEz.txt", (float*)CAEz, nx + 1, ny + 1, nz);
    readFloat("data/CBEz.txt", (float*)CBEz, nx + 1, ny + 1, nz);
    readFloat("data/RAExy.txt", (float*)RAExy, 2 * (npml - 1), ny - 1, nz);
    readFloat("data/RBExy.txt", (float*)RBExy, 2 * (npml - 1), ny - 1, nz);
    readFloat("data/RAEyx.txt", (float*)RAEyx, nx - 1, 2 * (npml - 1), nz);
    readFloat("data/RBEyx.txt", (float*)RBEyx, nx - 1, 2 * (npml - 1), nz);
    // H-field update coefficients and PML R-arrays (note 2*npml PML cells)
    readFloat("data/CPHx.txt", (float*)CPHx, nx + 1, ny, nz);
    readFloat("data/CQHx.txt", (float*)CQHx, nx + 1, ny, nz);
    readFloat("data/RAHyz.txt", (float*)RAHyz, nx - 1, 2 * npml, nz);
    readFloat("data/RBHyz.txt", (float*)RBHyz, nx - 1, 2 * npml, nz);
    readFloat("data/RAHzy.txt", (float*)RAHzy, nx - 1, ny, 2 * npml);
    readFloat("data/RBHzy.txt", (float*)RBHzy, nx - 1, ny, 2 * npml);
    readFloat("data/CPHy.txt", (float*)CPHy, nx, ny + 1, nz);
    readFloat("data/CQHy.txt", (float*)CQHy, nx, ny + 1, nz);
    readFloat("data/RAHzx.txt", (float*)RAHzx, nx, ny - 1, 2 * npml);
    readFloat("data/RBHzx.txt", (float*)RBHzx, nx, ny - 1, 2 * npml);
    readFloat("data/RAHxz.txt", (float*)RAHxz, 2 * npml, ny - 1, nz);
    readFloat("data/RBHxz.txt", (float*)RBHxz, 2 * npml, ny - 1, nz);
    readFloat("data/CPHz.txt", (float*)CPHz, nx, ny, nz + 1);
    readFloat("data/CQHz.txt", (float*)CQHz, nx, ny, nz + 1);
    readFloat("data/RAHxy.txt", (float*)RAHxy, 2 * npml, ny, nz - 1);
    readFloat("data/RBHxy.txt", (float*)RBHxy, 2 * npml, ny, nz - 1);
    readFloat("data/RAHyx.txt", (float*)RAHyx, nx, 2 * npml, nz - 1);
    readFloat("data/RBHyx.txt", (float*)RBHyx, nx, 2 * npml, nz - 1);
    // CPML kappa (stretching) arrays, one per field/derivative direction
    readFloat("data/kx_Ey.txt", (float*)kx_Ey, nx + 1, ny, nz + 1);
    readFloat("data/kx_Ez.txt", (float*)kx_Ez, nx + 1, ny + 1, nz);
    readFloat("data/ky_Ex.txt", (float*)ky_Ex, nx, ny + 1, nz + 1);
    readFloat("data/ky_Ez.txt", (float*)ky_Ez, nx + 1, ny + 1, nz);
    readFloat("data/kz_Ex.txt", (float*)kz_Ex, nx, ny + 1, nz + 1);
    readFloat("data/kz_Ey.txt", (float*)kz_Ey, nx + 1, ny, nz + 1);
    readFloat("data/kx_Hy.txt", (float*)kx_Hy, nx, ny + 1, nz);
    readFloat("data/kx_Hz.txt", (float*)kx_Hz, nx, ny, nz + 1);
    readFloat("data/ky_Hx.txt", (float*)ky_Hx, nx + 1, ny, nz);
    readFloat("data/ky_Hz.txt", (float*)ky_Hz, nx, ny, nz + 1);
    readFloat("data/kz_Hx.txt", (float*)kz_Hx, nx + 1, ny, nz);
    readFloat("data/kz_Hy.txt", (float*)kz_Hy, nx, ny + 1, nz);
    // 1-based source (fswz*) and receiver (jswz*) grid positions, and the
    // time-domain source waveform (it samples)
    readInteger("data/fswzx.txt", (int*)fswzx, 1, 1, szfsw);
    readInteger("data/fswzy.txt", (int*)fswzy, 1, 1, szfsw);
    readInteger("data/fswzz.txt", (int*)fswzz, 1, 1, szfsw);
    readInteger("data/jswzx.txt", (int*)jswzx, 1, 1, szfsw);
    readInteger("data/jswzy.txt", (int*)jswzy, 1, 1, szfsw);
    readInteger("data/jswzz.txt", (int*)jswzz, 1, 1, szfsw);
    readFloat("data/source.txt", (float*)source, 1, 1, it);
}
// Writes the observed field matrix E_obs (it time steps x szfsw receivers)
// to output/E_obs.txt, one receiver per row.
// FIX: the original only printed a message when fopen failed and then went
// on to fprintf through the NULL FILE*, which is undefined behavior (crash);
// it now returns early.
void printE_obs()
{
    const char *name = "output/E_obs.txt";
    FILE *fp = fopen(name, "w+");
    if (fp == NULL) // was the file opened successfully?
    {
        printf("fopen %s error! \n", name);
        return; // bail out — writing through a NULL FILE* is undefined behavior
    }
    printf("print fopen %s ok! \n", name);
    fprintf(fp, "输出E_obs[%d][%d]\n", it, szfsw);
    fprintf(fp, "共有 %d 行 %d 列 \n", szfsw, it);
    for (int i = 0; i < szfsw; i++)
    {
        for (int j = 0; j < it; j++)
        {
            fprintf(fp, "%8f ", E_obs[j][i]);
        }
        fprintf(fp, "\n");
    }
    printf("print %s OK\n", name);
    fclose(fp);
    return;
}
// Allocates every device buffer used by the simulation.
// Mirrors of host arrays are sized with sizeof(host array) — valid because
// those are statically sized globals (TODO confirm none are pointers).
// FIX: the original ignored every cudaMalloc return value; a single failed
// check of the runtime's last-error state is now done at the end so an
// out-of-memory condition is at least reported instead of surfacing later
// as a mysterious kernel failure.
void gpu_memory_malloc()
{
    // Device mirrors of host-resident coefficient/kappa arrays
    cudaMalloc((void**)&dev_CAEx, sizeof(CAEx));
    cudaMalloc((void**)&dev_CBEx, sizeof(CBEx));
    cudaMalloc((void**)&dev_RAEyz, sizeof(RAEyz));
    cudaMalloc((void**)&dev_RBEyz, sizeof(RBEyz));
    cudaMalloc((void**)&dev_RAEzy, sizeof(RAEzy));
    cudaMalloc((void**)&dev_RBEzy, sizeof(RBEzy));
    cudaMalloc((void**)&dev_CAEy, sizeof(CAEy));
    cudaMalloc((void**)&dev_CBEy, sizeof(CBEy));
    cudaMalloc((void**)&dev_RAExz, sizeof(RAExz));
    cudaMalloc((void**)&dev_RBExz, sizeof(RBExz));
    cudaMalloc((void**)&dev_RAEzx, sizeof(RAEzx));
    cudaMalloc((void**)&dev_RBEzx, sizeof(RBEzx));
    cudaMalloc((void**)&dev_CAEz, sizeof(CAEz));
    cudaMalloc((void**)&dev_CBEz, sizeof(CBEz));
    cudaMalloc((void**)&dev_RAExy, sizeof(RAExy));
    cudaMalloc((void**)&dev_RBExy, sizeof(RBExy));
    cudaMalloc((void**)&dev_RAEyx, sizeof(RAEyx));
    cudaMalloc((void**)&dev_RBEyx, sizeof(RBEyx));
    cudaMalloc((void**)&dev_CPHx, sizeof(CPHx));
    cudaMalloc((void**)&dev_CQHx, sizeof(CQHx));
    cudaMalloc((void**)&dev_RAHyz, sizeof(RAHyz));
    cudaMalloc((void**)&dev_RBHyz, sizeof(RBHyz));
    cudaMalloc((void**)&dev_RAHzy, sizeof(RAHzy));
    cudaMalloc((void**)&dev_RBHzy, sizeof(RBHzy));
    cudaMalloc((void**)&dev_CPHy, sizeof(CPHy));
    cudaMalloc((void**)&dev_CQHy, sizeof(CQHy));
    cudaMalloc((void**)&dev_RAHxz, sizeof(RAHxz));
    cudaMalloc((void**)&dev_RBHxz, sizeof(RBHxz));
    cudaMalloc((void**)&dev_RAHzx, sizeof(RAHzx));
    cudaMalloc((void**)&dev_RBHzx, sizeof(RBHzx));
    cudaMalloc((void**)&dev_CPHz, sizeof(CPHz));
    cudaMalloc((void**)&dev_CQHz, sizeof(CQHz));
    cudaMalloc((void**)&dev_RAHxy, sizeof(RAHxy));
    cudaMalloc((void**)&dev_RBHxy, sizeof(RBHxy));
    cudaMalloc((void**)&dev_RAHyx, sizeof(RAHyx));
    cudaMalloc((void**)&dev_RBHyx, sizeof(RBHyx));
    cudaMalloc((void**)&dev_kx_Ey, sizeof(kx_Ey));
    cudaMalloc((void**)&dev_kx_Ez, sizeof(kx_Ez));
    cudaMalloc((void**)&dev_ky_Ex, sizeof(ky_Ex));
    cudaMalloc((void**)&dev_ky_Ez, sizeof(ky_Ez));
    cudaMalloc((void**)&dev_kz_Ex, sizeof(kz_Ex));
    cudaMalloc((void**)&dev_kz_Ey, sizeof(kz_Ey));
    cudaMalloc((void**)&dev_kx_Hy, sizeof(kx_Hy));
    cudaMalloc((void**)&dev_kx_Hz, sizeof(kx_Hz));
    cudaMalloc((void**)&dev_ky_Hx, sizeof(ky_Hx));
    cudaMalloc((void**)&dev_ky_Hz, sizeof(ky_Hz));
    cudaMalloc((void**)&dev_kz_Hx, sizeof(kz_Hx));
    cudaMalloc((void**)&dev_kz_Hy, sizeof(kz_Hy));
    // Device-only field and CPML accumulator arrays (no host counterpart)
    int szEx = nx * (ny + 1)*(nz + 1);
    int szEy = (nx + 1)*ny*(nz + 1);
    int szEz = (nx + 1)*(ny + 1)*nz;
    int szHx = (nx + 1)*ny*nz;
    int szHy = nx * (ny + 1)*nz;
    int szHz = nx * ny*(nz + 1);
    cudaMalloc((void**)&dev_Ex, szEx * sizeof(float));
    cudaMalloc((void**)&dev_UEyz, szEx * sizeof(float));
    cudaMalloc((void**)&dev_UEzy, szEx * sizeof(float));
    cudaMalloc((void**)&dev_Ey, szEy * sizeof(float));
    cudaMalloc((void**)&dev_UEzx, szEy * sizeof(float));
    cudaMalloc((void**)&dev_UExz, szEy * sizeof(float));
    cudaMalloc((void**)&dev_Ez, szEz * sizeof(float));
    cudaMalloc((void**)&dev_UExy, szEz * sizeof(float));
    cudaMalloc((void**)&dev_UEyx, szEz * sizeof(float));
    cudaMalloc((void**)&dev_Hx, szHx * sizeof(float));
    cudaMalloc((void**)&dev_UHyz, szHx * sizeof(float));
    cudaMalloc((void**)&dev_UHzy, szHx * sizeof(float));
    cudaMalloc((void**)&dev_Hy, szHy * sizeof(float));
    cudaMalloc((void**)&dev_UHzx, szHy * sizeof(float));
    cudaMalloc((void**)&dev_UHxz, szHy * sizeof(float));
    cudaMalloc((void**)&dev_Hz, szHz * sizeof(float));
    cudaMalloc((void**)&dev_UHxy, szHz * sizeof(float));
    cudaMalloc((void**)&dev_UHyx, szHz * sizeof(float));
    cudaMalloc((void**)&dev_V, sizeof(V));
    cudaMalloc((void**)&dev_E_obs, sizeof(E_obs));
    cudaMalloc((void**)&dev_source, sizeof(source));
    // Report (once) if any allocation above failed
    cudaError_t st = cudaGetLastError();
    if (st != cudaSuccess)
    {
        printf("gpu_memory_malloc failed: %s\n", cudaGetErrorString(st));
    }
}
// Zero-initializes every device-only field / CPML accumulator buffer plus
// dev_V and dev_E_obs.  Called once per source position in calcWithCuda so
// each shot starts from a quiescent field.
// FIX: the original ignored every cudaMemset return value; the runtime's
// last-error state is now checked once at the end.
void gpu_memory_set_zero()
{
    int szEx = nx * (ny + 1)*(nz + 1);
    int szEy = (nx + 1)*ny*(nz + 1);
    int szEz = (nx + 1)*(ny + 1)*nz;
    int szHx = (nx + 1)*ny*nz;
    int szHy = nx * (ny + 1)*nz;
    int szHz = nx * ny*(nz + 1);
    // Device-only arrays (no host counterpart); byte-wise zero fill is
    // exact for float 0.0f
    cudaMemset(dev_Ex, 0, szEx * sizeof(float));
    cudaMemset(dev_UEyz, 0, szEx * sizeof(float));
    cudaMemset(dev_UEzy, 0, szEx * sizeof(float));
    cudaMemset(dev_Ey, 0, szEy * sizeof(float));
    cudaMemset(dev_UEzx, 0, szEy * sizeof(float));
    cudaMemset(dev_UExz, 0, szEy * sizeof(float));
    cudaMemset(dev_Ez, 0, szEz * sizeof(float));
    cudaMemset(dev_UExy, 0, szEz * sizeof(float));
    cudaMemset(dev_UEyx, 0, szEz * sizeof(float));
    cudaMemset(dev_Hx, 0, szHx * sizeof(float));
    cudaMemset(dev_UHyz, 0, szHx * sizeof(float));
    cudaMemset(dev_UHzy, 0, szHx * sizeof(float));
    cudaMemset(dev_Hy, 0, szHy * sizeof(float));
    cudaMemset(dev_UHzx, 0, szHy * sizeof(float));
    cudaMemset(dev_UHxz, 0, szHy * sizeof(float));
    cudaMemset(dev_Hz, 0, szHz * sizeof(float));
    cudaMemset(dev_UHxy, 0, szHz * sizeof(float));
    cudaMemset(dev_UHyx, 0, szHz * sizeof(float));
    cudaMemset(dev_V, 0, sizeof(V));
    cudaMemset(dev_E_obs, 0, sizeof(E_obs));
    // Report (once) if any memset above failed
    cudaError_t st = cudaGetLastError();
    if (st != cudaSuccess)
    {
        printf("gpu_memory_set_zero failed: %s\n", cudaGetErrorString(st));
    }
}
// Copies one host array to its device mirror; on failure logs the array
// name and the CUDA error string (the original only printed a generic
// "cudaMemcpy failed!" with no indication of which copy or why).
static bool copyH2D(void *dst, const void *src, size_t bytes, const char *name)
{
    cudaError_t st = cudaMemcpy(dst, src, bytes, cudaMemcpyHostToDevice);
    if (st != cudaSuccess)
    {
        printf("cudaMemcpy %s failed: %s\n", name, cudaGetErrorString(st));
        return false;
    }
    return true;
}
// Uploads every host-resident input array to its device mirror, stopping at
// the first failure (same early-exit behavior as the original goto Error).
void gpu_memory_copy()
{
    if (!copyH2D(dev_CAEx, CAEx, sizeof(CAEx), "CAEx")) return;
    if (!copyH2D(dev_CBEx, CBEx, sizeof(CBEx), "CBEx")) return;
    if (!copyH2D(dev_RAEyz, RAEyz, sizeof(RAEyz), "RAEyz")) return;
    if (!copyH2D(dev_RBEyz, RBEyz, sizeof(RBEyz), "RBEyz")) return;
    if (!copyH2D(dev_RAEzy, RAEzy, sizeof(RAEzy), "RAEzy")) return;
    if (!copyH2D(dev_RBEzy, RBEzy, sizeof(RBEzy), "RBEzy")) return;
    if (!copyH2D(dev_CAEy, CAEy, sizeof(CAEy), "CAEy")) return;
    if (!copyH2D(dev_CBEy, CBEy, sizeof(CBEy), "CBEy")) return;
    if (!copyH2D(dev_RAExz, RAExz, sizeof(RAExz), "RAExz")) return;
    if (!copyH2D(dev_RBExz, RBExz, sizeof(RBExz), "RBExz")) return;
    if (!copyH2D(dev_RAEzx, RAEzx, sizeof(RAEzx), "RAEzx")) return;
    if (!copyH2D(dev_RBEzx, RBEzx, sizeof(RBEzx), "RBEzx")) return;
    if (!copyH2D(dev_CAEz, CAEz, sizeof(CAEz), "CAEz")) return;
    if (!copyH2D(dev_CBEz, CBEz, sizeof(CBEz), "CBEz")) return;
    if (!copyH2D(dev_RAExy, RAExy, sizeof(RAExy), "RAExy")) return;
    if (!copyH2D(dev_RBExy, RBExy, sizeof(RBExy), "RBExy")) return;
    if (!copyH2D(dev_RAEyx, RAEyx, sizeof(RAEyx), "RAEyx")) return;
    if (!copyH2D(dev_RBEyx, RBEyx, sizeof(RBEyx), "RBEyx")) return;
    if (!copyH2D(dev_CPHx, CPHx, sizeof(CPHx), "CPHx")) return;
    if (!copyH2D(dev_CQHx, CQHx, sizeof(CQHx), "CQHx")) return;
    if (!copyH2D(dev_RAHyz, RAHyz, sizeof(RAHyz), "RAHyz")) return;
    if (!copyH2D(dev_RBHyz, RBHyz, sizeof(RBHyz), "RBHyz")) return;
    if (!copyH2D(dev_RAHzy, RAHzy, sizeof(RAHzy), "RAHzy")) return;
    if (!copyH2D(dev_RBHzy, RBHzy, sizeof(RBHzy), "RBHzy")) return;
    if (!copyH2D(dev_CPHy, CPHy, sizeof(CPHy), "CPHy")) return;
    if (!copyH2D(dev_CQHy, CQHy, sizeof(CQHy), "CQHy")) return;
    if (!copyH2D(dev_RAHxz, RAHxz, sizeof(RAHxz), "RAHxz")) return;
    if (!copyH2D(dev_RBHxz, RBHxz, sizeof(RBHxz), "RBHxz")) return;
    if (!copyH2D(dev_RAHzx, RAHzx, sizeof(RAHzx), "RAHzx")) return;
    if (!copyH2D(dev_RBHzx, RBHzx, sizeof(RBHzx), "RBHzx")) return;
    if (!copyH2D(dev_CPHz, CPHz, sizeof(CPHz), "CPHz")) return;
    if (!copyH2D(dev_CQHz, CQHz, sizeof(CQHz), "CQHz")) return;
    if (!copyH2D(dev_RAHxy, RAHxy, sizeof(RAHxy), "RAHxy")) return;
    if (!copyH2D(dev_RBHxy, RBHxy, sizeof(RBHxy), "RBHxy")) return;
    if (!copyH2D(dev_RAHyx, RAHyx, sizeof(RAHyx), "RAHyx")) return;
    if (!copyH2D(dev_RBHyx, RBHyx, sizeof(RBHyx), "RBHyx")) return;
    if (!copyH2D(dev_kx_Ey, kx_Ey, sizeof(kx_Ey), "kx_Ey")) return;
    if (!copyH2D(dev_kx_Ez, kx_Ez, sizeof(kx_Ez), "kx_Ez")) return;
    if (!copyH2D(dev_ky_Ex, ky_Ex, sizeof(ky_Ex), "ky_Ex")) return;
    if (!copyH2D(dev_ky_Ez, ky_Ez, sizeof(ky_Ez), "ky_Ez")) return;
    if (!copyH2D(dev_kz_Ex, kz_Ex, sizeof(kz_Ex), "kz_Ex")) return;
    if (!copyH2D(dev_kz_Ey, kz_Ey, sizeof(kz_Ey), "kz_Ey")) return;
    if (!copyH2D(dev_kx_Hy, kx_Hy, sizeof(kx_Hy), "kx_Hy")) return;
    if (!copyH2D(dev_kx_Hz, kx_Hz, sizeof(kx_Hz), "kx_Hz")) return;
    if (!copyH2D(dev_ky_Hx, ky_Hx, sizeof(ky_Hx), "ky_Hx")) return;
    if (!copyH2D(dev_ky_Hz, ky_Hz, sizeof(ky_Hz), "ky_Hz")) return;
    if (!copyH2D(dev_kz_Hx, kz_Hx, sizeof(kz_Hx), "kz_Hx")) return;
    if (!copyH2D(dev_kz_Hy, kz_Hy, sizeof(kz_Hy), "kz_Hy")) return;
    if (!copyH2D(dev_source, source, sizeof(source), "source")) return;
}
// Releases every device buffer allocated in gpu_memory_malloc(), in the
// same order as the original call list.
void gpu_memory_free()
{
    void *bufs[] = {
        dev_CAEx, dev_CBEx, dev_RAEyz, dev_RBEyz, dev_RAEzy, dev_RBEzy,
        dev_CAEy, dev_CBEy, dev_RAExz, dev_RBExz, dev_RAEzx, dev_RBEzx,
        dev_CAEz, dev_CBEz, dev_RAExy, dev_RBExy, dev_RAEyx, dev_RBEyx,
        dev_CPHx, dev_CQHx, dev_RAHyz, dev_RBHyz, dev_RAHzy, dev_RBHzy,
        dev_CPHy, dev_CQHy, dev_RAHxz, dev_RBHxz, dev_RAHzx, dev_RBHzx,
        dev_CPHz, dev_CQHz, dev_RAHxy, dev_RBHxy, dev_RAHyx, dev_RBHyx,
        dev_kx_Ey, dev_kx_Ez, dev_ky_Ex, dev_ky_Ez, dev_kz_Ex, dev_kz_Ey,
        dev_kx_Hy, dev_kx_Hz, dev_ky_Hx, dev_ky_Hz, dev_kz_Hx, dev_kz_Hy,
        dev_Ex, dev_UEyz, dev_UEzy,
        dev_Ey, dev_UEzx, dev_UExz,
        dev_Ez, dev_UExy, dev_UEyx,
        dev_Hx, dev_UHyz, dev_UHzy,
        dev_Hy, dev_UHzx, dev_UHxz,
        dev_Hz, dev_UHxy, dev_UHyx,
        dev_V, dev_E_obs, dev_source
    };
    for (size_t i = 0; i < sizeof(bufs) / sizeof(bufs[0]); i++)
    {
        cudaFree(bufs[i]);
    }
}
// Runs the full FDTD time loop once per source position:
// inject source -> update H-side CPML accumulators -> update H fields ->
// update E-side CPML accumulators -> update E fields -> record receiver.
// Returns the last CUDA status observed.
// FIXES: cudaStatus was previously uninitialized (undefined return value if
// szfsw or it is 0), and the source-injection cudaMemcpy status was never
// checked.
cudaError_t calcWithCuda()
{
    cudaError_t cudaStatus = cudaSuccess;
    int i, j;
    for (i = 0; i < szfsw; i++)
    {
        gpu_memory_set_zero(); // fresh fields for each shot
        for (j = 0; j < it; j++)
        {
            if (j % 200 == 0)
            {
                printf("i = %3d / %d, j = %4d / %d\n", i, szfsw, j, it);
            }
            // Equivalent of MATLAB's Ex(fswzx(i), fswzy(i), fswzz(i)) = source(j)
            // (fswz* positions are 1-based, hence the -1)
            int fidx = (fswzx[i] - 1)*(ny + 1)*(nz + 1) + (fswzy[i] - 1)*(nz + 1) + fswzz[i] - 1;
            cudaStatus = cudaMemcpy(&(dev_Ex[fidx]), &(dev_source[j]), sizeof(float), cudaMemcpyDeviceToDevice);
            if (cudaStatus != cudaSuccess) { printf("source cudaMemcpy failed: %s\n", cudaGetErrorString(cudaStatus)); return cudaStatus; }
            // H-side CPML accumulators, then H fields
            calcUHyz << < gridUHyz, blockUHyz >> > (dev_UHyz, dev_RBHyz, dev_RAHyz, dev_Ez, dy);
            calcUHzy << < gridUHzy, blockUHzy >> > (dev_UHzy, dev_RBHzy, dev_RAHzy, dev_Ey, dz);
            calcUHxy << < gridUHxy, blockUHxy >> > (dev_UHxy, dev_RBHxy, dev_RAHxy, dev_Ey, dx);
            calcUHxz << < gridUHxz, blockUHxz >> > (dev_UHxz, dev_RBHxz, dev_RAHxz, dev_Ez, dx);
            calcUHyx << < gridUHyx, blockUHyx >> > (dev_UHyx, dev_RBHyx, dev_RAHyx, dev_Ex, dy);
            calcUHzx << < gridUHzx, blockUHzx >> > (dev_UHzx, dev_RBHzx, dev_RAHzx, dev_Ex, dz);
            calcHx << < gridHx, blockHx >> > (dev_Hx, dev_CPHx, dev_CQHx, dev_ky_Hx, dev_kz_Hx, dev_Ez, dev_Ey, dev_UHyz, dev_UHzy, dy, dz);
            calcHy << < gridHy, blockHy >> > (dev_Hy, dev_CPHy, dev_CQHy, dev_kz_Hy, dev_kx_Hy, dev_Ex, dev_Ez, dev_UHzx, dev_UHxz, dz, dx);
            calcHz << < gridHz, blockHz >> > (dev_Hz, dev_CPHz, dev_CQHz, dev_kx_Hz, dev_ky_Hz, dev_Ey, dev_Ex, dev_UHxy, dev_UHyx, dx, dy);
            // E-side CPML accumulators, then E fields
            calcUExy << < gridUExy, blockUExy >> > (dev_UExy, dev_RBExy, dev_RAExy, dev_Hy, dx);
            calcUExz << < gridUExz, blockUExz >> > (dev_UExz, dev_RBExz, dev_RAExz, dev_Hz, dx);
            calcUEyx << < gridUEyx, blockUEyx >> > (dev_UEyx, dev_RBEyx, dev_RAEyx, dev_Hx, dy);
            calcUEyz << < gridUEyz, blockUEyz >> > (dev_UEyz, dev_RBEyz, dev_RAEyz, dev_Hz, dy);
            calcUEzx << < gridUEzx, blockUEzx >> > (dev_UEzx, dev_RBEzx, dev_RAEzx, dev_Hx, dz);
            calcUEzy << < gridUEzy, blockUEzy >> > (dev_UEzy, dev_RBEzy, dev_RAEzy, dev_Hy, dz);
            calcEx << < gridEx, blockEx >> > (dev_Ex, dev_CAEx, dev_CBEx, dev_ky_Ex, dev_kz_Ex, dev_Hz, dev_Hy, dev_UEyz, dev_UEzy, dy, dz);
            calcEy << < gridEy, blockEy >> > (dev_Ey, dev_CAEy, dev_CBEy, dev_kz_Ey, dev_kx_Ey, dev_Hx, dev_Hz, dev_UEzx, dev_UExz, dz, dx);
            calcEz << < gridEz, blockEz >> > (dev_Ez, dev_CAEz, dev_CBEz, dev_kx_Ez, dev_ky_Ez, dev_Hy, dev_Hx, dev_UExy, dev_UEyx, dx, dy);
            // Did any kernel launch fail?
            cudaStatus = cudaGetLastError();
            if (cudaStatus != cudaSuccess) { printf("calc failed: %s\n", cudaGetErrorString(cudaStatus)); return cudaStatus; }
            // Equivalent of MATLAB's V(j) = Ex(jswzx(i), jswzy(i), jswzz(i))
            int jidx = (jswzx[i] - 1)*(ny + 1)*(nz + 1) + (jswzy[i] - 1)*(nz + 1) + jswzz[i] - 1;
            cudaStatus = cudaMemcpy(&(dev_V[j]), &(dev_Ex[jidx]), sizeof(float), cudaMemcpyDeviceToDevice);
            if (cudaStatus != cudaSuccess) { printf("V cudaMemcpy failed: %s\n", cudaGetErrorString(cudaStatus)); return cudaStatus; };
            // Equivalent of MATLAB's E_obs(:, i) = V — one sample per step.
            // (The blocking D2H copy also serializes each time step.)
            cudaStatus = cudaMemcpy(&(E_obs[j][i]), &(dev_V[j]), sizeof(float), cudaMemcpyDeviceToHost);
            if (cudaStatus != cudaSuccess) { printf("V cudaMemcpy failed: %s\n", cudaGetErrorString(cudaStatus)); return cudaStatus; };
        }
    }
    printf("finish calc !\n");
    cudaDeviceSynchronize();
    return cudaStatus;
}
/************************************************************************************
* 主函数
************************************************************************************/
int main()
{
printf("c = %e\n", dt);
readAllData();
printf("Read All Data OK ! \n");
// 选择运算使用的GPU
cudaError_t cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) { printf("cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); return 1; }
else { printf("cudaSetDevice success!\n"); }
gpu_memory_malloc();
gpu_memory_copy();
// 调用gpu运算
cudaStatus = calcWithCuda();
if (cudaStatus != cudaSuccess) { printf("calcWithCuda failed!"); return 1; }
else { printf("calcWithCudasuccess!\n"); }
gpu_memory_free();
// 重置GPU
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) { printf("cudaDeviceReset failed!"); return 1; }
// 输出结果
printE_obs();
return 0;
} |
24123db5293d7ea7bffd314f16cc8a79401ea31e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <gptl_cuda.h>
__global__ void sleep (float seconds, int outerlooplen)
{
int ret;
int blockId;
int n;
blockId = blockIdx.x
+ blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
n = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x;
if (n < outerlooplen) {
ret = GPTLmy_sleep (seconds);
}
}
| 24123db5293d7ea7bffd314f16cc8a79401ea31e.cu | #include <cuda.h>
#include <gptl_cuda.h>
__global__ void sleep (float seconds, int outerlooplen)
{
int ret;
int blockId;
int n;
blockId = blockIdx.x
+ blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
n = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x;
if (n < outerlooplen) {
ret = GPTLmy_sleep (seconds);
}
}
|
801f623fc1834551601d4f9a3225e97eeb0b2265.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <time.h>
#define BLOCKS 1024
#define THREADS 1024
#define SIZE BLOCKS*THREADS*16
__global__
void testgpu(int *memInt, int times){
int i;
for(i = 0; i < times; i++)
*memInt += (*memInt)*i;
}
int main(int argc, char *argv[]){
int *gpuInt;
dim3 block(1024, 1);
dim3 grid(1024, 1024);
printf("A %lf\n", clock() / (double) CLOCKS_PER_SEC);
hipMalloc( (void **) &gpuInt, sizeof(int));
printf("B %lf\n", clock() / (double) CLOCKS_PER_SEC);
// printf("Test 1\n");
// testgpu<<<grid, block>>>(gpuInt, 800000);
printf("C %lf\n", clock() / (double) CLOCKS_PER_SEC);
hipLaunchKernelGGL(( testgpu), dim3(8), dim3(16), 0, 0, gpuInt, 1024 * 1024 * 1024);
printf("D %lf\n", clock() / (double) CLOCKS_PER_SEC);
hipFree(gpuInt);
printf("E %lf\n", clock() / (double) CLOCKS_PER_SEC);
return 0;
}
| 801f623fc1834551601d4f9a3225e97eeb0b2265.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <time.h>
#define BLOCKS 1024
#define THREADS 1024
#define SIZE BLOCKS*THREADS*16
__global__
void testgpu(int *memInt, int times){
int i;
for(i = 0; i < times; i++)
*memInt += (*memInt)*i;
}
int main(int argc, char *argv[]){
int *gpuInt;
dim3 block(1024, 1);
dim3 grid(1024, 1024);
printf("A %lf\n", clock() / (double) CLOCKS_PER_SEC);
cudaMalloc( (void **) &gpuInt, sizeof(int));
printf("B %lf\n", clock() / (double) CLOCKS_PER_SEC);
// printf("Test 1\n");
// testgpu<<<grid, block>>>(gpuInt, 800000);
printf("C %lf\n", clock() / (double) CLOCKS_PER_SEC);
testgpu<<<8, 16>>>(gpuInt, 1024 * 1024 * 1024);
printf("D %lf\n", clock() / (double) CLOCKS_PER_SEC);
cudaFree(gpuInt);
printf("E %lf\n", clock() / (double) CLOCKS_PER_SEC);
return 0;
}
|
1f65fdd6e65fd1bb828f978a3bb9a3d49cddbc21.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_scalarAdd.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
double x = 1;
double *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
vec_scalarAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vec_scalarAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vec_scalarAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 1f65fdd6e65fd1bb828f978a3bb9a3d49cddbc21.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_scalarAdd.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
double x = 1;
double *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_scalarAdd<<<gridBlock,threadBlock>>>(n,result,x,y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_scalarAdd<<<gridBlock,threadBlock>>>(n,result,x,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_scalarAdd<<<gridBlock,threadBlock>>>(n,result,x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d76d20cb129e14ce829e81a948ac91b45e9abd23.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* \brief A helper class for {@link MultiStageMeanfieldLayer} class, which is the Caffe layer that implements the
* CRF-RNN described in the paper: Conditional Random Fields as Recurrent Neural Networks. IEEE ICCV 2015.
*
* This class itself is not a proper Caffe layer although it behaves like one to some degree.
*
* \authors Sadeep Jayasumana, Bernardino Romera-Paredes, Shuai Zheng, Zhizhong Su.
* \version 1.0
* \date 2015
* \copyright Torr Vision Group, University of Oxford.
* \details If you use this code, please consider citing the paper:
* Shuai Zheng, Sadeep Jayasumana, Bernardino Romera-Paredes, Vibhav Vineet, Zhizhong Su, Dalong Du,
* Chang Huang, Philip H. S. Torr. Conditional Random Fields as Recurrent Neural Networks. IEEE ICCV 2015.
*
* For more information about CRF-RNN, please visit the project website http://crfasrnn.torr.vision.
*/
#include <vector>
#include <math.h>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/loss_layer.hpp"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/res_block_layers/multi_input_pooling_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void pooling_forward_kernel(int nthreads, Dtype** bottom, Dtype* top, int bottomN) {
CUDA_KERNEL_LOOP(index, nthreads){
Dtype max_value = 1e-8;
for(int i=0; i<bottomN; i++)
{
Dtype temp_value = bottom[i][index];
if(temp_value > max_value) max_value = temp_value;
}
top[index] = max_value;
}
}
template <typename Dtype>
__global__ void pooling_backward_kernel(int nthreads,const Dtype* top_diff,
Dtype ** bottom_data,
Dtype ** bottom_diff,int bottomN) {
CUDA_KERNEL_LOOP(index, nthreads){
Dtype max_value = 1e-8;
int max_Index = 0;
for(int i=0; i<bottomN; i++)
{
Dtype temp_value = bottom_data[i][index];
if(temp_value > max_value) {
max_value = temp_value;
max_Index = i;
}
}
bottom_diff[max_Index][index] = top_diff[index];
}
}
template <typename Dtype>
void MultiInputPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
Dtype ** bottom_data = (Dtype **)malloc(sizeof(Dtype*)*bottom.size());
for(int i=0;i<bottom.size(); i++)
{
bottom_data[i] = (Dtype *) bottom[i]->gpu_data();
}
Dtype * top_data=top[0]->mutable_gpu_data();
int count = bottom[0]->count();
hipLaunchKernelGGL(( pooling_forward_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data, bottom.size());
free(bottom_data);
}
template <typename Dtype>
void MultiInputPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom)
{
Dtype ** bottom_data = (Dtype **)malloc(sizeof(Dtype*)*bottom.size());
for(int i=0;i<bottom.size(); i++)
{
bottom_data[i] = (Dtype *) bottom[i]->gpu_data();
}
Dtype ** bottom_diff = (Dtype **)malloc(sizeof(Dtype*)*bottom.size());
for(int i=0;i<bottom.size(); i++)
{
bottom_diff[i] = bottom[i]->mutable_gpu_diff();
}
Dtype * top_diff=(Dtype *) top[0]->gpu_diff();
int count = bottom[0]->count();
hipLaunchKernelGGL(( pooling_backward_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_data, bottom_diff, bottom.size());
free(bottom_data);
free(bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(MultiInputPoolingLayer);
} // namespace caffe
| d76d20cb129e14ce829e81a948ac91b45e9abd23.cu | /*!
* \brief A helper class for {@link MultiStageMeanfieldLayer} class, which is the Caffe layer that implements the
* CRF-RNN described in the paper: Conditional Random Fields as Recurrent Neural Networks. IEEE ICCV 2015.
*
* This class itself is not a proper Caffe layer although it behaves like one to some degree.
*
* \authors Sadeep Jayasumana, Bernardino Romera-Paredes, Shuai Zheng, Zhizhong Su.
* \version 1.0
* \date 2015
* \copyright Torr Vision Group, University of Oxford.
* \details If you use this code, please consider citing the paper:
* Shuai Zheng, Sadeep Jayasumana, Bernardino Romera-Paredes, Vibhav Vineet, Zhizhong Su, Dalong Du,
* Chang Huang, Philip H. S. Torr. Conditional Random Fields as Recurrent Neural Networks. IEEE ICCV 2015.
*
* For more information about CRF-RNN, please visit the project website http://crfasrnn.torr.vision.
*/
#include <vector>
#include <math.h>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/loss_layer.hpp"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/res_block_layers/multi_input_pooling_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void pooling_forward_kernel(int nthreads, Dtype** bottom, Dtype* top, int bottomN) {
CUDA_KERNEL_LOOP(index, nthreads){
Dtype max_value = 1e-8;
for(int i=0; i<bottomN; i++)
{
Dtype temp_value = bottom[i][index];
if(temp_value > max_value) max_value = temp_value;
}
top[index] = max_value;
}
}
template <typename Dtype>
__global__ void pooling_backward_kernel(int nthreads,const Dtype* top_diff,
Dtype ** bottom_data,
Dtype ** bottom_diff,int bottomN) {
CUDA_KERNEL_LOOP(index, nthreads){
Dtype max_value = 1e-8;
int max_Index = 0;
for(int i=0; i<bottomN; i++)
{
Dtype temp_value = bottom_data[i][index];
if(temp_value > max_value) {
max_value = temp_value;
max_Index = i;
}
}
bottom_diff[max_Index][index] = top_diff[index];
}
}
template <typename Dtype>
void MultiInputPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
Dtype ** bottom_data = (Dtype **)malloc(sizeof(Dtype*)*bottom.size());
for(int i=0;i<bottom.size(); i++)
{
bottom_data[i] = (Dtype *) bottom[i]->gpu_data();
}
Dtype * top_data=top[0]->mutable_gpu_data();
int count = bottom[0]->count();
pooling_forward_kernel<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>
(count, bottom_data, top_data, bottom.size());
free(bottom_data);
}
template <typename Dtype>
void MultiInputPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom)
{
Dtype ** bottom_data = (Dtype **)malloc(sizeof(Dtype*)*bottom.size());
for(int i=0;i<bottom.size(); i++)
{
bottom_data[i] = (Dtype *) bottom[i]->gpu_data();
}
Dtype ** bottom_diff = (Dtype **)malloc(sizeof(Dtype*)*bottom.size());
for(int i=0;i<bottom.size(); i++)
{
bottom_diff[i] = bottom[i]->mutable_gpu_diff();
}
Dtype * top_diff=(Dtype *) top[0]->gpu_diff();
int count = bottom[0]->count();
pooling_backward_kernel<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>
(count, top_diff, bottom_data, bottom_diff, bottom.size());
free(bottom_data);
free(bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(MultiInputPoolingLayer);
} // namespace caffe
|
51e6e5602cac51216e873ca3d91d9a33a190c7d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../common/book.h"
#include "../common/cpu_bitmap.h"
#define DIM 1000
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
};
__device__ int julia( int x, int y ) {
const float scale = 1.5;
float jx = scale * (float)(DIM/2 - x)/(DIM/2);
float jy = scale * (float)(DIM/2 - y)/(DIM/2);
hipComplex c(-0.8, 0.156);
hipComplex a(jx, jy);
int i = 0;
for (i=0; i<200; i++) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void kernel( unsigned char *ptr ) {
// map from blockIdx to pixel position
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
// now calculate the value at that position
int juliaValue = julia( x, y );
ptr[offset*4 + 0] = 255 * juliaValue;
ptr[offset*4 + 1] = 0;
ptr[offset*4 + 2] = 0;
ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
};
int main( void ) {
DataBlock data;
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
HANDLE_ERROR( hipMalloc( (void**)&dev_bitmap, bitmap.image_size() ) );
data.dev_bitmap = dev_bitmap;
dim3 grid(DIM,DIM);
hipLaunchKernelGGL(( kernel), dim3(grid),dim3(1), 0, 0, dev_bitmap );
HANDLE_ERROR( hipMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipFree( dev_bitmap ) );
bitmap.display_and_exit();
}
| 51e6e5602cac51216e873ca3d91d9a33a190c7d2.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../common/book.h"
#include "../common/cpu_bitmap.h"
#define DIM 1000
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
};
__device__ int julia( int x, int y ) {
const float scale = 1.5;
float jx = scale * (float)(DIM/2 - x)/(DIM/2);
float jy = scale * (float)(DIM/2 - y)/(DIM/2);
cuComplex c(-0.8, 0.156);
cuComplex a(jx, jy);
int i = 0;
for (i=0; i<200; i++) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void kernel( unsigned char *ptr ) {
// map from blockIdx to pixel position
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
// now calculate the value at that position
int juliaValue = julia( x, y );
ptr[offset*4 + 0] = 255 * juliaValue;
ptr[offset*4 + 1] = 0;
ptr[offset*4 + 2] = 0;
ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
};
int main( void ) {
DataBlock data;
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
HANDLE_ERROR( cudaMalloc( (void**)&dev_bitmap, bitmap.image_size() ) );
data.dev_bitmap = dev_bitmap;
dim3 grid(DIM,DIM);
kernel<<<grid,1>>>( dev_bitmap );
HANDLE_ERROR( cudaMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaFree( dev_bitmap ) );
bitmap.display_and_exit();
}
|
aaf3d9015be9b962e5dc070021d30a6708e52107.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <limits>
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/no_repeat_ngram_impl.cuh"
#include "include/cuda_runtime.h"
#include "include/hip/hip_fp16.h"
template <typename T>
__device__ __forceinline__ void max_val_init(T *init_val) {
*init_val = std::numeric_limits<T>::max();
}
// Handle fp16 differently for assignment
template <>
__device__ __forceinline__ void max_val_init(half *init_val) {
*init_val = __int2half_rd(65504); // Max value for Half
}
template <typename StateType, typename LogProbType>
__global__ void reassign_probability(StateType* __restrict__ tokens,
LogProbType* __restrict__ lprobs,
LogProbType* __restrict__ output,
int batch_mul_beam_size,
int vocab_size,
int no_repeat_ngram_size,
int total_blocks) {
extern __shared__ int32_t shared_mem[];
LogProbType pad_value = 0.0;
max_val_init(&pad_value);
for (size_t batch_index = blockIdx.x; batch_index < total_blocks; batch_index += gridDim.x) {
// This requires the thread id
auto position_id_in_one_batch = threadIdx.x;
auto indexed_token = batch_index * batch_mul_beam_size + position_id_in_one_batch;
auto last_ngram_tokens = blockDim.x - 1;
auto lprob_start = batch_index * vocab_size;
shared_mem[position_id_in_one_batch] = tokens[indexed_token];
if (position_id_in_one_batch == last_ngram_tokens) {
for (int i = 0; i < no_repeat_ngram_size; ++i) {
if (position_id_in_one_batch + i < batch_mul_beam_size) {
shared_mem[position_id_in_one_batch + i ] = tokens[indexed_token + i];
}
}
}
__syncthreads();
bool should_modify = true;
for (int i = 0; i < no_repeat_ngram_size - 1; ++i) {
if (shared_mem[position_id_in_one_batch + i] != shared_mem[last_ngram_tokens + i + 1]) {
should_modify = false;
break;
}
}
if (should_modify) {
// reset probability
auto id = shared_mem[position_id_in_one_batch + no_repeat_ngram_size - 1];
output[lprob_start + id] = -pad_value;
}
}
}
template <typename StateType, typename LogProbType>
__global__ void reassign_probability_no_shared(StateType* __restrict__ tokens,
LogProbType* __restrict__ lprobs,
LogProbType* __restrict__ output,
int batch_mul_beam_size,
int vocab_size,
int no_repeat_ngram_size,
int total_blocks,
int total_threads) {
extern __shared__ int32_t shared_mem[];
LogProbType pad_value = 0.0;
max_val_init(&pad_value);
for (size_t batch_index = blockIdx.x; batch_index < total_blocks; batch_index += gridDim.x) {
for (size_t thread_index = threadIdx.x; thread_index < total_threads; thread_index += blockDim.x) {
auto offsets = batch_index * batch_mul_beam_size;
auto indexed_token = offsets + thread_index;
auto last_ngram_tokens = offsets + total_threads - 1;
auto lprob_start = batch_index * vocab_size;
bool should_modify = true;
for (int i = 0; i < no_repeat_ngram_size - 1; ++i) {
if (tokens[indexed_token + i] != tokens[last_ngram_tokens + i + 1]) {
should_modify = false;
break;
}
}
if (should_modify) {
// reset probability
auto id = tokens[indexed_token + no_repeat_ngram_size - 1];
output[lprob_start + id] = -pad_value;
}
}
}
}
template <typename StateType, typename LogProbType>
void CalculateNoRepeatNGram(const StateType *tokens,
LogProbType *lprobs,
LogProbType *output,
int batch_mul_beam_size,
int no_repeat_ngram_size,
const uint32_t &device_id,
int vocab_size,
int blocks,
int shared_mem_size,
hipStream_t cuda_stream) {
int threads = batch_mul_beam_size - no_repeat_ngram_size + 2 - 1;
if (threads <= 0) return;
auto cuda_threads = CUDA_THREADS(device_id);
if (cuda_threads >= threads) {
hipLaunchKernelGGL(( reassign_probability), dim3(CUDA_BLOCKS(device_id, blocks)), dim3(threads), shared_mem_size, cuda_stream,
tokens, lprobs, output, batch_mul_beam_size, vocab_size, no_repeat_ngram_size, blocks);
} else {
hipLaunchKernelGGL(( reassign_probability_no_shared), dim3(CUDA_BLOCKS(device_id, blocks)), dim3(cuda_threads), shared_mem_size, cuda_stream,
tokens, lprobs, output, batch_mul_beam_size, vocab_size, no_repeat_ngram_size, blocks, threads);
}
}
template CUDA_LIB_EXPORT void CalculateNoRepeatNGram<int32_t, half>(const int32_t *tokens,
half *lprobs,
half *outpout,
int step,
int no_repeat_ngram_size,
const uint32_t &device_id,
int vocab_size_,
int blocks,
int shared_mem_size,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalculateNoRepeatNGram<int32_t, float>(const int32_t *tokens,
float *lprobs,
float *output,
int step,
int no_repeat_ngram_size,
const uint32_t &device_id,
int vocab_size_,
int blocks,
int shared_mem_size,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalculateNoRepeatNGram<int32_t, double>(const int32_t *tokens,
double *lprobs,
double *output,
int step,
int no_repeat_ngram_size,
const uint32_t &device_id,
int vocab_size_,
int blocks,
int shared_mem_size,
hipStream_t cuda_stream);
| aaf3d9015be9b962e5dc070021d30a6708e52107.cu | /**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <limits>
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/no_repeat_ngram_impl.cuh"
#include "include/cuda_runtime.h"
#include "include/cuda_fp16.h"
template <typename T>
__device__ __forceinline__ void max_val_init(T *init_val) {
*init_val = std::numeric_limits<T>::max();
}
// Handle fp16 differently for assignment
template <>
__device__ __forceinline__ void max_val_init(half *init_val) {
*init_val = __int2half_rd(65504); // Max value for Half
}
template <typename StateType, typename LogProbType>
__global__ void reassign_probability(StateType* __restrict__ tokens,
LogProbType* __restrict__ lprobs,
LogProbType* __restrict__ output,
int batch_mul_beam_size,
int vocab_size,
int no_repeat_ngram_size,
int total_blocks) {
extern __shared__ int32_t shared_mem[];
LogProbType pad_value = 0.0;
max_val_init(&pad_value);
for (size_t batch_index = blockIdx.x; batch_index < total_blocks; batch_index += gridDim.x) {
// This requires the thread id
auto position_id_in_one_batch = threadIdx.x;
auto indexed_token = batch_index * batch_mul_beam_size + position_id_in_one_batch;
auto last_ngram_tokens = blockDim.x - 1;
auto lprob_start = batch_index * vocab_size;
shared_mem[position_id_in_one_batch] = tokens[indexed_token];
if (position_id_in_one_batch == last_ngram_tokens) {
for (int i = 0; i < no_repeat_ngram_size; ++i) {
if (position_id_in_one_batch + i < batch_mul_beam_size) {
shared_mem[position_id_in_one_batch + i ] = tokens[indexed_token + i];
}
}
}
__syncthreads();
bool should_modify = true;
for (int i = 0; i < no_repeat_ngram_size - 1; ++i) {
if (shared_mem[position_id_in_one_batch + i] != shared_mem[last_ngram_tokens + i + 1]) {
should_modify = false;
break;
}
}
if (should_modify) {
// reset probability
auto id = shared_mem[position_id_in_one_batch + no_repeat_ngram_size - 1];
output[lprob_start + id] = -pad_value;
}
}
}
template <typename StateType, typename LogProbType>
__global__ void reassign_probability_no_shared(StateType* __restrict__ tokens,
LogProbType* __restrict__ lprobs,
LogProbType* __restrict__ output,
int batch_mul_beam_size,
int vocab_size,
int no_repeat_ngram_size,
int total_blocks,
int total_threads) {
extern __shared__ int32_t shared_mem[];
LogProbType pad_value = 0.0;
max_val_init(&pad_value);
for (size_t batch_index = blockIdx.x; batch_index < total_blocks; batch_index += gridDim.x) {
for (size_t thread_index = threadIdx.x; thread_index < total_threads; thread_index += blockDim.x) {
auto offsets = batch_index * batch_mul_beam_size;
auto indexed_token = offsets + thread_index;
auto last_ngram_tokens = offsets + total_threads - 1;
auto lprob_start = batch_index * vocab_size;
bool should_modify = true;
for (int i = 0; i < no_repeat_ngram_size - 1; ++i) {
if (tokens[indexed_token + i] != tokens[last_ngram_tokens + i + 1]) {
should_modify = false;
break;
}
}
if (should_modify) {
// reset probability
auto id = tokens[indexed_token + no_repeat_ngram_size - 1];
output[lprob_start + id] = -pad_value;
}
}
}
}
template <typename StateType, typename LogProbType>
void CalculateNoRepeatNGram(const StateType *tokens,
LogProbType *lprobs,
LogProbType *output,
int batch_mul_beam_size,
int no_repeat_ngram_size,
const uint32_t &device_id,
int vocab_size,
int blocks,
int shared_mem_size,
cudaStream_t cuda_stream) {
int threads = batch_mul_beam_size - no_repeat_ngram_size + 2 - 1;
if (threads <= 0) return;
auto cuda_threads = CUDA_THREADS(device_id);
if (cuda_threads >= threads) {
reassign_probability<<<CUDA_BLOCKS(device_id, blocks), threads, shared_mem_size, cuda_stream>>>(
tokens, lprobs, output, batch_mul_beam_size, vocab_size, no_repeat_ngram_size, blocks);
} else {
reassign_probability_no_shared<<<CUDA_BLOCKS(device_id, blocks), cuda_threads, shared_mem_size, cuda_stream>>>(
tokens, lprobs, output, batch_mul_beam_size, vocab_size, no_repeat_ngram_size, blocks, threads);
}
}
template CUDA_LIB_EXPORT void CalculateNoRepeatNGram<int32_t, half>(const int32_t *tokens,
half *lprobs,
half *outpout,
int step,
int no_repeat_ngram_size,
const uint32_t &device_id,
int vocab_size_,
int blocks,
int shared_mem_size,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalculateNoRepeatNGram<int32_t, float>(const int32_t *tokens,
float *lprobs,
float *output,
int step,
int no_repeat_ngram_size,
const uint32_t &device_id,
int vocab_size_,
int blocks,
int shared_mem_size,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalculateNoRepeatNGram<int32_t, double>(const int32_t *tokens,
double *lprobs,
double *output,
int step,
int no_repeat_ngram_size,
const uint32_t &device_id,
int vocab_size_,
int blocks,
int shared_mem_size,
cudaStream_t cuda_stream);
|
cc5ea8b8c13d66371eb08774662e6e34ea284abe.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_cooperative_groups.h>
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include <iostream>
using namespace cooperative_groups;
// Basic reduction code found in the presentation; going to test on a variety of
// thread groups
__device__ void coalesced_thread_ids(coalesced_group threads)
{
printf("Thread ID: %d Block ID: %d Coalesced Thread Rank: %d \n", threadIdx.x, blockIdx.x, threads.thread_rank());
}
// use this kernel to get appropriate threads ??
__global__ void descriminator_kernel()
{
// first block and first warp in block
if (threadIdx.x % 5 == 0)
{
coalesced_group active_threads = coalesced_threads();
coalesced_thread_ids(active_threads);
}
}
int main(void)
{
// Setup timers to test different configurations
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start); // start timer
// run the kernel
hipLaunchKernelGGL(( descriminator_kernel), dim3(2), dim3(64), 0, 0, );
hipEventRecord(stop); // stop timing
// get the runtime
hipEventSynchronize(stop);
float milliseconds = 0.0;
hipEventElapsedTime(&milliseconds, start, stop);
// print the runtime
std::cout << milliseconds << " milliseconds for parallel run" << std::endl;
return 0;
}
| cc5ea8b8c13d66371eb08774662e6e34ea284abe.cu | #include <cooperative_groups.h>
#include <stdio.h>
#include "cuda.h"
#include "cuda_runtime.h"
#include <iostream>
using namespace cooperative_groups;
// Basic reduction code found in the presentation; going to test on a variety of
// thread groups
// Device helper: print this thread's block-local ID, its block ID, and its
// rank inside the given coalesced group (rank is relative to the threads
// that were active when the group was created).
__device__ void coalesced_thread_ids(coalesced_group threads)
{
printf("Thread ID: %d Block ID: %d Coalesced Thread Rank: %d \n", threadIdx.x, blockIdx.x, threads.thread_rank());
}
// use this kernel to get appropriate threads ??
// Kernel demonstrating coalesced_threads(): only a subset of each warp takes
// the branch, and the resulting coalesced group contains exactly those lanes.
__global__ void descriminator_kernel()
{
// every thread whose block-local index is a multiple of 5 enters the branch;
// together they form the coalesced group whose ranks are printed
if (threadIdx.x % 5 == 0)
{
coalesced_group active_threads = coalesced_threads();
coalesced_thread_ids(active_threads);
}
}
/**
 * Launches descriminator_kernel on 2 blocks of 64 threads and reports the
 * elapsed kernel time measured with CUDA events.
 *
 * @return 0 on success, 1 if the kernel launch failed
 */
int main(void)
{
    // Setup timers to test different configurations
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start); // start timer
    // run the kernel: 2 blocks of 64 threads each
    descriminator_kernel<<<2, 64>>>();
    cudaEventRecord(stop); // stop timing
    // BUGFIX: surface launch-configuration errors instead of ignoring them
    if (cudaGetLastError() != cudaSuccess)
    {
        std::cout << "kernel launch failed" << std::endl;
        return 1;
    }
    // wait until the kernel and the stop event have completed
    cudaEventSynchronize(stop);
    float milliseconds = 0.0f;
    cudaEventElapsedTime(&milliseconds, start, stop);
    // print the runtime
    std::cout << milliseconds << " milliseconds for parallel run" << std::endl;
    // BUGFIX: destroy the events (previously leaked)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
4d3e095a2935310e6b021f13e319e0edbd514424.hip | // !!! This is a file automatically generated by hipify!!!
/*
mallocMC: Memory Allocator for Many Core Architectures.
https://www.hzdr.de/crp
Copyright 2014 Institute of Radiation Physics,
Helmholtz-Zentrum Dresden - Rossendorf
Author(s): Carlchristian Eckert - c.eckert ( at ) hzdr.de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
// get a CUDA error and print it nicely
#define CUDA_CHECK(cmd) {hipError_t error = cmd; \
if(error!=hipSuccess){\
printf("<%s>:%i ",__FILE__,__LINE__);\
printf("[CUDA] Error: %s\n", hipGetErrorString(error));}}
// start kernel, wait for finish and check errors
#define CUDA_CHECK_KERNEL_SYNC(...) __VA_ARGS__;CUDA_CHECK(hipDeviceSynchronize())
// each pointer in the datastructure will point to this many
// elements of type allocElem_t
#define ELEMS_PER_SLOT 750
#include <hip/hip_runtime.h>
#include <iostream>
#include <cstdio>
#include <typeinfo>
#include <vector>
//include the Heap with the arguments given in the config
#include "src/include/mallocMC/mallocMC_utils.hpp"
#include "verify_heap_config.hpp"
// global variable for verbosity, might change due to user input '--verbose'
bool verbose = false;
// the type of the elements to allocate
typedef unsigned long long allocElem_t;
bool run_heap_verification(const size_t, const unsigned, const unsigned, const bool);
void parse_cmdline(const int, char**, size_t*, unsigned*, unsigned*, bool*);
void print_help(char**);
// used to create an empty stream for non-verbose output
struct nullstream : std::ostream {
nullstream() : std::ostream(0) { }
};
// uses global verbosity to switch between std::cout and a NULL-output
std::ostream& dout() {
static nullstream n;
return verbose ? std::cout : n;
}
// define some defaults
BOOST_STATIC_CONSTEXPR unsigned threads_default = 128;
BOOST_STATIC_CONSTEXPR unsigned blocks_default = 64;
BOOST_STATIC_CONSTEXPR size_t heapInMB_default = 1024; // 1GB
/**
* will do a basic verification of scatterAlloc.
*
* @param argv if -q or --quiet is supplied as a
* command line argument, verbosity will be reduced
*
* @return will return 0 if the verification was successful,
* otherwise returns 1
*/
int main(int argc, char** argv){
  bool correct = false;
  bool machine_readable = false;
  size_t heapInMB = heapInMB_default;
  unsigned threads = threads_default;
  unsigned blocks = blocks_default;
  parse_cmdline(argc, argv, &heapInMB, &threads, &blocks, &machine_readable);
  // the scatter allocator relies on features of Compute Capability >= 2.0
  hipDeviceProp_t deviceProp;
  hipGetDeviceProperties(&deviceProp, 0);
  if( deviceProp.major < 2 ) {
    std::cerr << "Error: Compute Capability >= 2.0 required. (is ";
    std::cerr << deviceProp.major << "."<< deviceProp.minor << ")" << std::endl;
    return 1;
  }
  hipSetDevice(0);
  // BUGFIX: run_heap_verification's signature is (heapMB, blocks, threads, ...);
  // the arguments were previously passed as (heapInMB, threads, blocks, ...),
  // swapping the grid and block dimensions.
  correct = run_heap_verification(heapInMB, blocks, threads, machine_readable);
  hipDeviceReset();
  if(!machine_readable || verbose){
    if(correct){
      std::cout << "\033[0;32mverification successful \033[0m" << std::endl;
      return 0;
    }else{
      std::cerr << "\033[0;31mverification failed\033[0m" << std::endl;
      return 1;
    }
  }
  // BUGFIX: in machine-readable quiet mode control used to fall off the end
  // of main (implicit return 0) even when verification failed; propagate the
  // verification result instead.
  return correct ? 0 : 1;
}
/**
* will parse command line arguments
*
* for more details, see print_help()
*
* @param argc argc from main()
* @param argv argv from main()
* @param heapInMB will be filled with the heapsize, if given as a parameter
* @param threads will be filled with number of threads, if given as a parameter
* @param blocks will be filled with number of blocks, if given as a parameter
*/
/**
 * Parses the command line into option settings.
 *
 * Each argument is split at its first '=' into a key and an optional value
 * (tokens without '=' get an empty value). Recognized keys update the output
 * parameters; "-h"/"--help" prints usage and exits; unknown keys are ignored.
 *
 * @param argc argument count from main()
 * @param argv argument vector from main() (modified in place by strtok)
 * @param heapInMB receives the heap size in MB if "--heapsize=N" was given
 * @param threads receives the thread count if "--threads=N" was given
 * @param blocks receives the block count if "--blocks=N" was given
 * @param machine_readable set to true if "-m"/"--machine_readable" was given
 */
void parse_cmdline(
const int argc,
char**argv,
size_t *heapInMB,
unsigned *threads,
unsigned *blocks,
bool *machine_readable
){
  typedef std::pair<std::string, std::string> Option;
  std::vector<Option> options;
  // split every argument of the form KEY=VALUE (or bare KEY) into a pair
  for (int argIdx = 1; argIdx < argc; ++argIdx) {
    char* key = strtok(argv[argIdx], "=");
    Option opt(std::string(key), std::string(""));
    char* value = strtok(NULL, "=");
    if (value != NULL) {
      opt.second = std::string(value);
    }
    options.push_back(opt);
  }
  // apply each recognized option (keys are mutually exclusive, so else-if)
  for (unsigned optIdx = 0; optIdx < options.size(); ++optIdx) {
    const std::string& key = options.at(optIdx).first;
    const std::string& value = options.at(optIdx).second;
    if (key == "-v" || key == "--verbose") {
      verbose = true;
    } else if (key == "--threads") {
      *threads = atoi(value.c_str());
    } else if (key == "--blocks") {
      *blocks = atoi(value.c_str());
    } else if (key == "--heapsize") {
      *heapInMB = size_t(atoi(value.c_str()));
    } else if (key == "-h" || key == "--help") {
      print_help(argv);
      exit(0);
    } else if (key == "-m" || key == "--machine_readable") {
      *machine_readable = true;
    }
  }
}
/**
* prints a helpful message about program use
*
* @param argv the argv-parameter from main, used to find the program name
*/
void print_help(char** argv){
  // build the whole usage message in a stream, then emit it in one write
  std::stringstream s;
  s << "SYNOPSIS:" << std::endl;
  s << argv[0] << " [OPTIONS]" << std::endl;
  s << "" << std::endl;
  s << "OPTIONS:" << std::endl;
  s << " -h, --help" << std::endl;
  s << " Print this help message and exit" << std::endl;
  s << "" << std::endl;
  s << " -v, --verbose" << std::endl;
  s << " Print information about parameters and progress" << std::endl;
  s << "" << std::endl;
  s << " -m, --machine_readable" << std::endl;
  s << " Print all relevant parameters as CSV. This will" << std::endl;
  s << " suppress all other output unless explicitly" << std::endl;
  s << " requested with --verbose or -v" << std::endl;
  s << "" << std::endl;
  s << " --threads=N" << std::endl;
  s << " Set the number of threads per block (default " ;
  // BUGFIX: a stray "128" literal used to follow the default value,
  // printing e.g. "(default 128128)"
  s << threads_default << ")" << std::endl;
  s << "" << std::endl;
  s << " --blocks=N" << std::endl;
  s << " Set the number of blocks in the grid (default " ;
  s << blocks_default << ")" << std::endl;
  s << "" << std::endl;
  s << " --heapsize=N" << std::endl;
  s << " Set the heapsize to N Megabyte (default " ;
  // BUGFIX: same stray "1024" literal removed here
  s << heapInMB_default << ")" << std::endl;
  std::cout << s.str();
}
/**
* checks validity of memory for each single cell
*
* checks on a per thread basis, if the values written during
* allocation are still the same. Also calculates the sum over
* all allocated values for a more in-depth verification that
* could be done on the host
*
* @param data the data to verify
* @param counter should be initialized with 0 and will
* be used to count how many verifications were
* already done
* @param globalSum will be filled with the sum over all
* allocated values in the structure
* @param nSlots the size of the datastructure
* @param correct should be initialized with 1.
* Will change to 0, if there was a value that didn't match
*/
__global__ void check_content(
allocElem_t** data,
unsigned long long *counter,
unsigned long long* globalSum,
const size_t nSlots,
int* correct
){
// per-thread partial sum over every verified value
unsigned long long sum=0;
while(true){
// claim the next unverified slot; the shared counter serializes claims
size_t pos = atomicAdd(counter,1);
if(pos >= nSlots){break;}
const size_t offset = pos*ELEMS_PER_SLOT;
for(size_t i=0;i<ELEMS_PER_SLOT;++i){
// each cell must still hold the value written during allocation,
// which is its global element index (offset+i)
if (static_cast<allocElem_t>(data[pos][i]) != static_cast<allocElem_t>(offset+i)){
//printf("\nError in Kernel: data[%llu][%llu] is %#010x (should be %#010x)\n",
// pos,i,static_cast<allocElem_t>(data[pos][i]),allocElem_t(offset+i));
atomicAnd(correct,0);
}
sum += static_cast<unsigned long long>(data[pos][i]);
}
}
// fold the partial sum into the global sum for the optional host-side check
atomicAdd(globalSum,sum);
}
/**
* checks validity of memory for each single cell
*
* checks on a per thread basis, if the values written during
* allocation are still the same.
*
* @param data the data to verify
* @param counter should be initialized with 0 and will
* be used to count how many verifications were
* already done
* @param nSlots the size of the datastructure
* @param correct should be initialized with 1.
* Will change to 0, if there was a value that didn't match
*/
__global__ void check_content_fast(
allocElem_t** data,
unsigned long long *counter,
const size_t nSlots,
int* correct
){
// per-thread verdict; dropped to 0 on the first mismatch seen by this thread
int c = 1;
while(true){
// claim the next unverified slot; the shared counter serializes claims
size_t pos = atomicAdd(counter,1);
if(pos >= nSlots){break;}
const size_t offset = pos*ELEMS_PER_SLOT;
for(size_t i=0;i<ELEMS_PER_SLOT;++i){
if (static_cast<allocElem_t>(data[pos][i]) != static_cast<allocElem_t>(offset+i)){
c=0;
}
}
}
// single atomic per thread: only a thread that saw a mismatch clears *correct
atomicAnd(correct,c);
}
/**
* allocate a lot of small arrays and fill them
*
* Each array has the size ELEMS_PER_SLOT and the type allocElem_t.
* Each element will be filled with a number that is related to its
* position in the datastructure.
*
* @param data the datastructure to allocate
* @param counter should be initialized with 0 and will
* hold, how many allocations were done
* @param globalSum will hold the sum of all values over all
* allocated structures (for verification purposes)
*/
__global__ void allocAll(
allocElem_t** data,
unsigned long long* counter,
unsigned long long* globalSum,
ScatterAllocator::AllocatorHandle mMC
){
// per-thread partial sum of every value written
unsigned long long sum=0;
while(true){
// keep allocating slots until the heap is exhausted (malloc returns NULL)
allocElem_t* p = (allocElem_t*) mMC.malloc(sizeof(allocElem_t) * ELEMS_PER_SLOT);
if(p == NULL) break;
// claim a unique slot index for this allocation
size_t pos = atomicAdd(counter,1);
const size_t offset = pos*ELEMS_PER_SLOT;
// fill each cell with its global element index so it can be verified later
for(size_t i=0;i<ELEMS_PER_SLOT;++i){
p[i] = static_cast<allocElem_t>(offset + i);
sum += static_cast<unsigned long long>(p[i]);
}
data[pos] = p;
}
// fold the partial sum into the global sum for the optional host-side check
atomicAdd(globalSum,sum);
}
/**
* free all the values again
*
* @param data the datastructure to free
* @param counter should be an empty space on device memory,
* counts how many elements were freed
* @param nSlots the maximum number of elements to free
*/
__global__ void deallocAll(
allocElem_t** data,
unsigned long long* counter,
const size_t nSlots,
ScatterAllocator::AllocatorHandle mMC
){
// threads cooperatively claim slot indices through the shared counter and
// free the corresponding allocation until all nSlots slots are released
while(true){
size_t pos = atomicAdd(counter,1);
if(pos >= nSlots) break;
mMC.free(data[pos]);
}
}
/**
* damages one element in the data
*
* With help of this function, you can verify that
* the checks actually work as expected and can find
* an error, if one should exist
*
* @param data the datastructure to damage
*/
__global__ void damageElement(allocElem_t** data){
// overwrite one cell with a deliberately wrong value (slot 1 holds values
// starting at ELEMS_PER_SLOT, so 5*ELEMS_PER_SLOT-1 cannot be correct);
// a subsequent verification pass is then expected to fail
data[1][0] = static_cast<allocElem_t>(5*ELEMS_PER_SLOT - 1);
}
/**
* wrapper function to allocate memory on device
*
* allocates memory with mallocMC. Returns the number of
* created elements as well as the sum of these elements
*
* @param d_testData the datastructure which will hold
* pointers to the created elements
* @param h_nSlots will be filled with the number of elements
* that were allocated
* @param h_sum will be filled with the sum of all elements created
* @param blocks the size of the CUDA grid
* @param threads the number of CUDA threads per block
*/
void allocate(
allocElem_t** d_testData,
unsigned long long* h_nSlots,
unsigned long long* h_sum,
const unsigned blocks,
const unsigned threads,
// NOTE(review): the allocator is taken by value; presumably copying a
// ScatterAllocator is a cheap handle copy -- confirm against mallocMC docs
ScatterAllocator mMC
){
dout() << "allocating on device...";
unsigned long long zero = 0;
// device-side counters: number of allocated slots and sum of all values
unsigned long long *d_sum;
unsigned long long *d_nSlots;
MALLOCMC_CUDA_CHECKED_CALL(hipMalloc((void**) &d_sum,sizeof(unsigned long long)));
MALLOCMC_CUDA_CHECKED_CALL(hipMalloc((void**) &d_nSlots, sizeof(unsigned long long)));
MALLOCMC_CUDA_CHECKED_CALL(hipMemcpy(d_sum,&zero,sizeof(unsigned long long),hipMemcpyHostToDevice));
MALLOCMC_CUDA_CHECKED_CALL(hipMemcpy(d_nSlots,&zero,sizeof(unsigned long long),hipMemcpyHostToDevice));
// launch the allocation kernel; the macro synchronizes and checks errors
hipLaunchKernelGGL(( CUDA_CHECK_KERNEL_SYNC(allocAll), dim3(blocks),dim3(threads), 0, 0, d_testData, d_nSlots, d_sum, mMC ));
// copy the results back so the caller can evaluate them on the host
MALLOCMC_CUDA_CHECKED_CALL(hipMemcpy(h_sum,d_sum,sizeof(unsigned long long),hipMemcpyDeviceToHost));
MALLOCMC_CUDA_CHECKED_CALL(hipMemcpy(h_nSlots,d_nSlots,sizeof(unsigned long long),hipMemcpyDeviceToHost));
hipFree(d_sum);
hipFree(d_nSlots);
dout() << "done" << std::endl;
}
/**
* Wrapper function to verify allocation on device
*
* Generates the same number that was written into each position of
* the datastructure during allocation and compares the values.
*
* @param d_testData the datastructure which holds
* pointers to the elements you want to verify
* @param nSlots the size of d_testData
* @param blocks the size of the CUDA grid
* @param threads the number of CUDA threads per block
* @return true if the verification was successful, false otherwise
*/
bool verify(
allocElem_t **d_testData,
const unsigned long long nSlots,
const unsigned blocks,
const unsigned threads
){
dout() << "verifying on device... ";
const unsigned long long zero = 0;
// h_correct starts at 1; the kernel atomically ANDs it to 0 on any mismatch
int h_correct = 1;
int* d_correct;
unsigned long long *d_sum;
unsigned long long *d_counter;
MALLOCMC_CUDA_CHECKED_CALL(hipMalloc((void**) &d_sum, sizeof(unsigned long long)));
MALLOCMC_CUDA_CHECKED_CALL(hipMalloc((void**) &d_counter, sizeof(unsigned long long)));
MALLOCMC_CUDA_CHECKED_CALL(hipMalloc((void**) &d_correct, sizeof(int)));
MALLOCMC_CUDA_CHECKED_CALL(hipMemcpy(d_sum,&zero,sizeof(unsigned long long),hipMemcpyHostToDevice));
MALLOCMC_CUDA_CHECKED_CALL(hipMemcpy(d_counter,&zero,sizeof(unsigned long long),hipMemcpyHostToDevice));
MALLOCMC_CUDA_CHECKED_CALL(hipMemcpy(d_correct,&h_correct,sizeof(int),hipMemcpyHostToDevice));
// can be replaced by a call to check_content_fast,
// if the gaussian sum (see below) is not used and you
// want to be a bit faster
hipLaunchKernelGGL(( CUDA_CHECK_KERNEL_SYNC(check_content), dim3(blocks),dim3(threads), 0, 0,
d_testData,
d_counter,
d_sum,
static_cast<size_t>(nSlots),
d_correct
));
MALLOCMC_CUDA_CHECKED_CALL(hipMemcpy(&h_correct,d_correct,sizeof(int),hipMemcpyDeviceToHost));
// This only works, if the type "allocElem_t"
// can hold all the IDs (usually unsigned long long)
/*
dout() << "verifying on host...";
unsigned long long h_sum, h_counter;
unsigned long long gaussian_sum = (ELEMS_PER_SLOT*nSlots * (ELEMS_PER_SLOT*nSlots-1))/2;
MALLOCMC_CUDA_CHECKED_CALL(hipMemcpy(&h_sum,d_sum,sizeof(unsigned long long),hipMemcpyDeviceToHost));
MALLOCMC_CUDA_CHECKED_CALL(hipMemcpy(&h_counter,d_counter,sizeof(unsigned long long),hipMemcpyDeviceToHost));
if(gaussian_sum != h_sum){
dout() << "\nGaussian Sum doesn't match: is " << h_sum;
dout() << " (should be " << gaussian_sum << ")" << std::endl;
h_correct=false;
}
if(nSlots != h_counter-(blocks*threads)){
dout() << "\nallocated number of elements doesn't match: is " << h_counter;
dout() << " (should be " << nSlots << ")" << std::endl;
h_correct=false;
}
*/
if(h_correct){
dout() << "done" << std::endl;
}else{
dout() << "failed" << std::endl;
}
// release the device-side scratch variables
hipFree(d_correct);
hipFree(d_sum);
hipFree(d_counter);
return static_cast<bool>(h_correct);
}
/**
* prints all parameters machine readable
*
* for params, see run_heap_verification-internal parameters
*/
void print_machine_readable(
    const unsigned pagesize,
    const unsigned accessblocks,
    const unsigned regionsize,
    const unsigned wastefactor,
    const bool resetfreedpages,
    const unsigned blocks,
    const unsigned threads,
    const unsigned elemsPerSlot,
    const size_t allocElemSize,
    const size_t heapSize,
    const size_t maxSpace,
    const size_t maxSlots,
    const unsigned long long usedSlots,
    const float allocFrac,
    const size_t wasted,
    const bool correct
){
  // accumulate a header row (h) and a value row (v), then print both as CSV
  std::string sep = ",";
  std::stringstream h;
  std::stringstream v;
  h << "PagesizeByte" << sep;
  v << pagesize << sep;
  h << "Accessblocks" << sep;
  v << accessblocks << sep;
  h << "Regionsize" << sep;
  v << regionsize << sep;
  h << "Wastefactor" << sep;
  // BUGFIX: this column previously printed the 'wasted' byte count instead
  // of the configured wastefactor
  v << wastefactor << sep;
  h << "ResetFreedPage" << sep;
  v << resetfreedpages << sep;
  h << "Gridsize" << sep;
  v << blocks << sep;
  h << "Blocksize" << sep;
  v << threads << sep;
  h << "ELEMS_PER_SLOT" << sep;
  v << elemsPerSlot << sep;
  h << "allocElemByte" << sep;
  v << allocElemSize << sep;
  h << "heapsizeByte" << sep;
  v << heapSize << sep;
  h << "maxSpaceByte" << sep;
  v << maxSpace << sep;
  h << "maxSlots" << sep;
  v << maxSlots << sep;
  h << "usedSlots" << sep;
  v << usedSlots << sep;
  h << "allocFraction" << sep;
  v << allocFrac << sep;
  h << "wastedBytes" << sep;
  v << wasted << sep;
  // last column: no trailing separator
  h << "correct" ;
  v << correct ;
  std::cout << h.str() << std::endl;
  std::cout << v.str() << std::endl;
}
/**
* Verify the heap allocation of mallocMC
*
* Allocates as much memory as the heap allows. Make sure that allocated
* memory actually holds the correct values without corrupting them. Will
* fill the datastructure with values that are relative to the index and
* later evalute, if the values inside stayed the same after allocating all
* memory.
* Datastructure: Array that holds up to nPointers pointers to arrays of size
* ELEMS_PER_SLOT, each being of type allocElem_t.
*
* @return true if the verification was successful,
* false otherwise
*/
bool run_heap_verification(
const size_t heapMB,
const unsigned blocks,
const unsigned threads,
const bool machine_readable
){
hipSetDeviceFlags(hipDeviceMapHost);
// slot geometry: each slot is one array of ELEMS_PER_SLOT elements
const size_t heapSize = size_t(1024U*1024U) * heapMB;
const size_t slotSize = sizeof(allocElem_t)*ELEMS_PER_SLOT;
const size_t nPointers = ceil(static_cast<float>(heapSize) / slotSize);
const size_t maxSlots = heapSize/slotSize;
const size_t maxSpace = maxSlots*slotSize + nPointers*sizeof(allocElem_t*);
bool correct = true;
const unsigned long long zero = 0;
// print the compile-time allocator configuration and run parameters
dout() << "CreationPolicy Arguments:" << std::endl;
dout() << "Pagesize: " << ScatterConfig::pagesize::value << std::endl;
dout() << "Accessblocks: " << ScatterConfig::accessblocks::value << std::endl;
dout() << "Regionsize: " << ScatterConfig::regionsize::value << std::endl;
dout() << "Wastefactor: " << ScatterConfig::wastefactor::value << std::endl;
dout() << "ResetFreedPages " << ScatterConfig::resetfreedpages::value << std::endl;
dout() << "" << std::endl;
dout() << "Gridsize: " << blocks << std::endl;
dout() << "Blocksize: " << threads << std::endl;
dout() << "Allocated elements: " << ELEMS_PER_SLOT << " x " << sizeof(allocElem_t);
dout() << " Byte (" << slotSize << " Byte)" << std::endl;
dout() << "Heap: " << heapSize << " Byte";
dout() << " (" << heapSize/pow(1024,2) << " MByte)" << std::endl;
dout() << "max space w/ pointers: " << maxSpace << " Byte";
dout() << " (" << maxSpace/pow(1024,2) << " MByte)" << std::endl;
dout() << "maximum of elements: " << maxSlots << std::endl;
// initializing the heap
ScatterAllocator mMC(heapSize);
allocElem_t** d_testData;
MALLOCMC_CUDA_CHECKED_CALL(hipMalloc((void**) &d_testData, nPointers*sizeof(allocElem_t*)));
// allocating with mallocMC until the heap is full
unsigned long long usedSlots = 0;
unsigned long long sumAllocElems = 0;
allocate(d_testData, &usedSlots, &sumAllocElems, blocks, threads, mMC);
const float allocFrac = static_cast<float>(usedSlots)*100/maxSlots;
const size_t wasted = heapSize - static_cast<size_t>(usedSlots) * slotSize;
dout() << "allocated elements: " << usedSlots;
dout() << " (" << allocFrac << "%)" << std::endl;
dout() << "wasted heap space: " << wasted << " Byte";
dout() << " (" << wasted/pow(1024,2) << " MByte)" << std::endl;
// verifying on device: must succeed on untouched data
correct = correct && verify(d_testData,usedSlots,blocks,threads);
// damaging one cell
dout() << "damaging of element... ";
hipLaunchKernelGGL(( CUDA_CHECK_KERNEL_SYNC(damageElement), dim3(1),dim3(1), 0, 0, d_testData));
dout() << "done" << std::endl;
// verifying on device
// THIS SHOULD FAIL (damage was done before!). Therefore, we must inverse the logic
correct = correct && !verify(d_testData,usedSlots,blocks,threads);
// release all memory
dout() << "deallocation... ";
unsigned long long* d_dealloc_counter;
MALLOCMC_CUDA_CHECKED_CALL(hipMalloc((void**) &d_dealloc_counter, sizeof(unsigned long long)));
MALLOCMC_CUDA_CHECKED_CALL(hipMemcpy(d_dealloc_counter,&zero,sizeof(unsigned long long),hipMemcpyHostToDevice));
hipLaunchKernelGGL(( CUDA_CHECK_KERNEL_SYNC(deallocAll), dim3(blocks),dim3(threads), 0, 0, d_testData,d_dealloc_counter,static_cast<size_t>(usedSlots), mMC ));
hipFree(d_dealloc_counter);
hipFree(d_testData);
mMC.finalizeHeap();
dout() << "done "<< std::endl;
// optionally emit all parameters and results as CSV
if(machine_readable){
print_machine_readable(
ScatterConfig::pagesize::value,
ScatterConfig::accessblocks::value,
ScatterConfig::regionsize::value,
ScatterConfig::wastefactor::value,
ScatterConfig::resetfreedpages::value,
blocks,
threads,
ELEMS_PER_SLOT,
sizeof(allocElem_t),
heapSize,
maxSpace,
maxSlots,
usedSlots,
allocFrac,
wasted,
correct
);
}
return correct;
}
| 4d3e095a2935310e6b021f13e319e0edbd514424.cu | /*
mallocMC: Memory Allocator for Many Core Architectures.
https://www.hzdr.de/crp
Copyright 2014 Institute of Radiation Physics,
Helmholtz-Zentrum Dresden - Rossendorf
Author(s): Carlchristian Eckert - c.eckert ( at ) hzdr.de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
// get a CUDA error and print it nicely
#define CUDA_CHECK(cmd) {cudaError_t error = cmd; \
if(error!=cudaSuccess){\
printf("<%s>:%i ",__FILE__,__LINE__);\
printf("[CUDA] Error: %s\n", cudaGetErrorString(error));}}
// start kernel, wait for finish and check errors
#define CUDA_CHECK_KERNEL_SYNC(...) __VA_ARGS__;CUDA_CHECK(cudaDeviceSynchronize())
// each pointer in the datastructure will point to this many
// elements of type allocElem_t
#define ELEMS_PER_SLOT 750
#include <cuda.h>
#include <iostream>
#include <cstdio>
#include <typeinfo>
#include <vector>
//include the Heap with the arguments given in the config
#include "src/include/mallocMC/mallocMC_utils.hpp"
#include "verify_heap_config.hpp"
// global variable for verbosity, might change due to user input '--verbose'
bool verbose = false;
// the type of the elements to allocate
typedef unsigned long long allocElem_t;
bool run_heap_verification(const size_t, const unsigned, const unsigned, const bool);
void parse_cmdline(const int, char**, size_t*, unsigned*, unsigned*, bool*);
void print_help(char**);
// used to create an empty stream for non-verbose output
struct nullstream : std::ostream {
nullstream() : std::ostream(0) { }
};
// uses global verbosity to switch between std::cout and a NULL-output
std::ostream& dout() {
static nullstream n;
return verbose ? std::cout : n;
}
// define some defaults
BOOST_STATIC_CONSTEXPR unsigned threads_default = 128;
BOOST_STATIC_CONSTEXPR unsigned blocks_default = 64;
BOOST_STATIC_CONSTEXPR size_t heapInMB_default = 1024; // 1GB
/**
* will do a basic verification of scatterAlloc.
*
* @param argv if -q or --quiet is supplied as a
* command line argument, verbosity will be reduced
*
* @return will return 0 if the verification was successful,
* otherwise returns 1
*/
int main(int argc, char** argv){
  bool correct = false;
  bool machine_readable = false;
  size_t heapInMB = heapInMB_default;
  unsigned threads = threads_default;
  unsigned blocks = blocks_default;
  parse_cmdline(argc, argv, &heapInMB, &threads, &blocks, &machine_readable);
  // the scatter allocator relies on features of Compute Capability >= 2.0
  cudaDeviceProp deviceProp;
  cudaGetDeviceProperties(&deviceProp, 0);
  if( deviceProp.major < 2 ) {
    std::cerr << "Error: Compute Capability >= 2.0 required. (is ";
    std::cerr << deviceProp.major << "."<< deviceProp.minor << ")" << std::endl;
    return 1;
  }
  cudaSetDevice(0);
  // BUGFIX: run_heap_verification's signature is (heapMB, blocks, threads, ...);
  // the arguments were previously passed as (heapInMB, threads, blocks, ...),
  // swapping the grid and block dimensions.
  correct = run_heap_verification(heapInMB, blocks, threads, machine_readable);
  cudaDeviceReset();
  if(!machine_readable || verbose){
    if(correct){
      std::cout << "\033[0;32mverification successful ✔\033[0m" << std::endl;
      return 0;
    }else{
      std::cerr << "\033[0;31mverification failed\033[0m" << std::endl;
      return 1;
    }
  }
  // BUGFIX: in machine-readable quiet mode control used to fall off the end
  // of main (implicit return 0) even when verification failed; propagate the
  // verification result instead.
  return correct ? 0 : 1;
}
/**
* will parse command line arguments
*
* for more details, see print_help()
*
* @param argc argc from main()
* @param argv argv from main()
* @param heapInMB will be filled with the heapsize, if given as a parameter
* @param threads will be filled with number of threads, if given as a parameter
* @param blocks will be filled with number of blocks, if given as a parameter
*/
/**
 * Parses the command line into option settings.
 *
 * Each argument is split at its first '=' into a key and an optional value
 * (tokens without '=' get an empty value). Recognized keys update the output
 * parameters; "-h"/"--help" prints usage and exits; unknown keys are ignored.
 *
 * @param argc argument count from main()
 * @param argv argument vector from main() (modified in place by strtok)
 * @param heapInMB receives the heap size in MB if "--heapsize=N" was given
 * @param threads receives the thread count if "--threads=N" was given
 * @param blocks receives the block count if "--blocks=N" was given
 * @param machine_readable set to true if "-m"/"--machine_readable" was given
 */
void parse_cmdline(
const int argc,
char**argv,
size_t *heapInMB,
unsigned *threads,
unsigned *blocks,
bool *machine_readable
){
  typedef std::pair<std::string, std::string> Option;
  std::vector<Option> options;
  // split every argument of the form KEY=VALUE (or bare KEY) into a pair
  for (int argIdx = 1; argIdx < argc; ++argIdx) {
    char* key = strtok(argv[argIdx], "=");
    Option opt(std::string(key), std::string(""));
    char* value = strtok(NULL, "=");
    if (value != NULL) {
      opt.second = std::string(value);
    }
    options.push_back(opt);
  }
  // apply each recognized option (keys are mutually exclusive, so else-if)
  for (unsigned optIdx = 0; optIdx < options.size(); ++optIdx) {
    const std::string& key = options.at(optIdx).first;
    const std::string& value = options.at(optIdx).second;
    if (key == "-v" || key == "--verbose") {
      verbose = true;
    } else if (key == "--threads") {
      *threads = atoi(value.c_str());
    } else if (key == "--blocks") {
      *blocks = atoi(value.c_str());
    } else if (key == "--heapsize") {
      *heapInMB = size_t(atoi(value.c_str()));
    } else if (key == "-h" || key == "--help") {
      print_help(argv);
      exit(0);
    } else if (key == "-m" || key == "--machine_readable") {
      *machine_readable = true;
    }
  }
}
/**
* prints a helpful message about program use
*
* @param argv the argv-parameter from main, used to find the program name
*/
void print_help(char** argv){
  // build the whole usage message in a stream, then emit it in one write
  std::stringstream s;
  s << "SYNOPSIS:" << std::endl;
  s << argv[0] << " [OPTIONS]" << std::endl;
  s << "" << std::endl;
  s << "OPTIONS:" << std::endl;
  s << " -h, --help" << std::endl;
  s << " Print this help message and exit" << std::endl;
  s << "" << std::endl;
  s << " -v, --verbose" << std::endl;
  s << " Print information about parameters and progress" << std::endl;
  s << "" << std::endl;
  s << " -m, --machine_readable" << std::endl;
  s << " Print all relevant parameters as CSV. This will" << std::endl;
  s << " suppress all other output unless explicitly" << std::endl;
  s << " requested with --verbose or -v" << std::endl;
  s << "" << std::endl;
  s << " --threads=N" << std::endl;
  s << " Set the number of threads per block (default " ;
  // BUGFIX: a stray "128" literal used to follow the default value,
  // printing e.g. "(default 128128)"
  s << threads_default << ")" << std::endl;
  s << "" << std::endl;
  s << " --blocks=N" << std::endl;
  s << " Set the number of blocks in the grid (default " ;
  s << blocks_default << ")" << std::endl;
  s << "" << std::endl;
  s << " --heapsize=N" << std::endl;
  s << " Set the heapsize to N Megabyte (default " ;
  // BUGFIX: same stray "1024" literal removed here
  s << heapInMB_default << ")" << std::endl;
  std::cout << s.str();
}
/**
* checks validity of memory for each single cell
*
* checks on a per thread basis, if the values written during
* allocation are still the same. Also calculates the sum over
* all allocated values for a more in-depth verification that
* could be done on the host
*
* @param data the data to verify
* @param counter should be initialized with 0 and will
* be used to count how many verifications were
* already done
* @param globalSum will be filled with the sum over all
* allocated values in the structure
* @param nSlots the size of the datastructure
* @param correct should be initialized with 1.
* Will change to 0, if there was a value that didn't match
*/
__global__ void check_content(
allocElem_t** data,
unsigned long long *counter,
unsigned long long* globalSum,
const size_t nSlots,
int* correct
){
// per-thread partial sum over every verified value
unsigned long long sum=0;
while(true){
// claim the next unverified slot; the shared counter serializes claims
size_t pos = atomicAdd(counter,1);
if(pos >= nSlots){break;}
const size_t offset = pos*ELEMS_PER_SLOT;
for(size_t i=0;i<ELEMS_PER_SLOT;++i){
// each cell must still hold the value written during allocation,
// which is its global element index (offset+i)
if (static_cast<allocElem_t>(data[pos][i]) != static_cast<allocElem_t>(offset+i)){
//printf("\nError in Kernel: data[%llu][%llu] is %#010x (should be %#010x)\n",
// pos,i,static_cast<allocElem_t>(data[pos][i]),allocElem_t(offset+i));
atomicAnd(correct,0);
}
sum += static_cast<unsigned long long>(data[pos][i]);
}
}
// fold the partial sum into the global sum for the optional host-side check
atomicAdd(globalSum,sum);
}
/**
* checks validity of memory for each single cell
*
* checks on a per thread basis, if the values written during
* allocation are still the same.
*
* @param data the data to verify
* @param counter should be initialized with 0 and will
* be used to count how many verifications were
* already done
* @param nSlots the size of the datastructure
* @param correct should be initialized with 1.
* Will change to 0, if there was a value that didn't match
*/
__global__ void check_content_fast(
allocElem_t** data,
unsigned long long *counter,
const size_t nSlots,
int* correct
){
// per-thread verdict; dropped to 0 on the first mismatch seen by this thread
int c = 1;
while(true){
// claim the next unverified slot; the shared counter serializes claims
size_t pos = atomicAdd(counter,1);
if(pos >= nSlots){break;}
const size_t offset = pos*ELEMS_PER_SLOT;
for(size_t i=0;i<ELEMS_PER_SLOT;++i){
if (static_cast<allocElem_t>(data[pos][i]) != static_cast<allocElem_t>(offset+i)){
c=0;
}
}
}
// single atomic per thread: only a thread that saw a mismatch clears *correct
atomicAnd(correct,c);
}
/**
* allocate a lot of small arrays and fill them
*
* Each array has the size ELEMS_PER_SLOT and the type allocElem_t.
* Each element will be filled with a number that is related to its
* position in the datastructure.
*
* @param data the datastructure to allocate
* @param counter should be initialized with 0 and will
* hold, how many allocations were done
* @param globalSum will hold the sum of all values over all
* allocated structures (for verification purposes)
*/
__global__ void allocAll(
allocElem_t** data,
unsigned long long* counter,
unsigned long long* globalSum,
ScatterAllocator::AllocatorHandle mMC
){
// per-thread partial sum of every value written
unsigned long long sum=0;
while(true){
// keep allocating slots until the heap is exhausted (malloc returns NULL)
allocElem_t* p = (allocElem_t*) mMC.malloc(sizeof(allocElem_t) * ELEMS_PER_SLOT);
if(p == NULL) break;
// claim a unique slot index for this allocation
size_t pos = atomicAdd(counter,1);
const size_t offset = pos*ELEMS_PER_SLOT;
// fill each cell with its global element index so it can be verified later
for(size_t i=0;i<ELEMS_PER_SLOT;++i){
p[i] = static_cast<allocElem_t>(offset + i);
sum += static_cast<unsigned long long>(p[i]);
}
data[pos] = p;
}
// fold the partial sum into the global sum for the optional host-side check
atomicAdd(globalSum,sum);
}
/**
* free all the values again
*
* @param data the datastructure to free
* @param counter should be an empty space on device memory,
* counts how many elements were freed
* @param nSlots the maximum number of elements to free
*/
__global__ void deallocAll(
allocElem_t** data,
unsigned long long* counter,
const size_t nSlots,
ScatterAllocator::AllocatorHandle mMC
){
// threads cooperatively claim slot indices through the shared counter and
// free the corresponding allocation until all nSlots slots are released
while(true){
size_t pos = atomicAdd(counter,1);
if(pos >= nSlots) break;
mMC.free(data[pos]);
}
}
/**
* damages one element in the data
*
* With help of this function, you can verify that
* the checks actually work as expected and can find
* an error, if one should exist
*
* @param data the datastructure to damage
*/
// Kernel (single thread is enough): intentionally corrupts element [1][0]
// so the next verification pass is expected to fail.
__global__ void damageElement(allocElem_t** data){
// expected id at [1][0] is 1*ELEMS_PER_SLOT + 0; this differs from it
// (assuming no overflow wrap in allocElem_t -- TODO confirm)
data[1][0] = static_cast<allocElem_t>(5*ELEMS_PER_SLOT - 1);
}
/**
* wrapper function to allocate memory on device
*
* allocates memory with mallocMC. Returns the number of
* created elements as well as the sum of these elements
*
* @param d_testData the datastructure which will hold
* pointers to the created elements
* @param h_nSlots will be filled with the number of elements
* that were allocated
* @param h_sum will be filled with the sum of all elements created
* @param blocks the size of the CUDA grid
* @param threads the number of CUDA threads per block
*/
// Host wrapper around the allocAll kernel: zero-initializes device-side
// counters, launches the kernel, and copies the results (#allocated slots
// and value checksum) back to the caller.
// NOTE(review): mMC is taken by value -- presumably the allocator handle is
// cheap to copy; verify against ScatterAllocator's semantics.
void allocate(
allocElem_t** d_testData,
unsigned long long* h_nSlots,
unsigned long long* h_sum,
const unsigned blocks,
const unsigned threads,
ScatterAllocator mMC
){
dout() << "allocating on device...";
unsigned long long zero = 0;
unsigned long long *d_sum;
unsigned long long *d_nSlots;
// device-side accumulators, zeroed before the kernel runs
MALLOCMC_CUDA_CHECKED_CALL(cudaMalloc((void**) &d_sum,sizeof(unsigned long long)));
MALLOCMC_CUDA_CHECKED_CALL(cudaMalloc((void**) &d_nSlots, sizeof(unsigned long long)));
MALLOCMC_CUDA_CHECKED_CALL(cudaMemcpy(d_sum,&zero,sizeof(unsigned long long),cudaMemcpyHostToDevice));
MALLOCMC_CUDA_CHECKED_CALL(cudaMemcpy(d_nSlots,&zero,sizeof(unsigned long long),cudaMemcpyHostToDevice));
// fills d_testData until the mallocMC heap is exhausted
CUDA_CHECK_KERNEL_SYNC(allocAll<<<blocks,threads>>>(d_testData, d_nSlots, d_sum, mMC ))
MALLOCMC_CUDA_CHECKED_CALL(cudaMemcpy(h_sum,d_sum,sizeof(unsigned long long),cudaMemcpyDeviceToHost));
MALLOCMC_CUDA_CHECKED_CALL(cudaMemcpy(h_nSlots,d_nSlots,sizeof(unsigned long long),cudaMemcpyDeviceToHost));
cudaFree(d_sum);
cudaFree(d_nSlots);
dout() << "done" << std::endl;
}
/**
* Wrapper function to verify allocation on device
*
* Generates the same number that was written into each position of
* the datastructure during allocation and compares the values.
*
* @param d_testData the datastructure which holds
* pointers to the elements you want to verify
* @param nSlots the size of d_testData
* @param blocks the size of the CUDA grid
* @param threads the number of CUDA threads per block
* @return true if the verification was successful, false otherwise
*/
// Host wrapper around the check_content kernel: verifies on the device that
// every element still holds its position-derived value. Returns true when
// all values matched.
bool verify(
allocElem_t **d_testData,
const unsigned long long nSlots,
const unsigned blocks,
const unsigned threads
){
dout() << "verifying on device... ";
const unsigned long long zero = 0;
int h_correct = 1;
int* d_correct;
unsigned long long *d_sum;
unsigned long long *d_counter;
// NOTE(review): d_sum is allocated and zeroed but its result is only read
// by the commented-out host-side gaussian-sum check below.
MALLOCMC_CUDA_CHECKED_CALL(cudaMalloc((void**) &d_sum, sizeof(unsigned long long)));
MALLOCMC_CUDA_CHECKED_CALL(cudaMalloc((void**) &d_counter, sizeof(unsigned long long)));
MALLOCMC_CUDA_CHECKED_CALL(cudaMalloc((void**) &d_correct, sizeof(int)));
MALLOCMC_CUDA_CHECKED_CALL(cudaMemcpy(d_sum,&zero,sizeof(unsigned long long),cudaMemcpyHostToDevice));
MALLOCMC_CUDA_CHECKED_CALL(cudaMemcpy(d_counter,&zero,sizeof(unsigned long long),cudaMemcpyHostToDevice));
MALLOCMC_CUDA_CHECKED_CALL(cudaMemcpy(d_correct,&h_correct,sizeof(int),cudaMemcpyHostToDevice));
// can be replaced by a call to check_content_fast,
// if the gaussian sum (see below) is not used and you
// want to be a bit faster
CUDA_CHECK_KERNEL_SYNC(check_content<<<blocks,threads>>>(
d_testData,
d_counter,
d_sum,
static_cast<size_t>(nSlots),
d_correct
));
// the kernel atomically ANDs each thread's verdict into d_correct
MALLOCMC_CUDA_CHECKED_CALL(cudaMemcpy(&h_correct,d_correct,sizeof(int),cudaMemcpyDeviceToHost));
// This only works, if the type "allocElem_t"
// can hold all the IDs (usually unsigned long long)
/*
dout() << "verifying on host...";
unsigned long long h_sum, h_counter;
unsigned long long gaussian_sum = (ELEMS_PER_SLOT*nSlots * (ELEMS_PER_SLOT*nSlots-1))/2;
MALLOCMC_CUDA_CHECKED_CALL(cudaMemcpy(&h_sum,d_sum,sizeof(unsigned long long),cudaMemcpyDeviceToHost));
MALLOCMC_CUDA_CHECKED_CALL(cudaMemcpy(&h_counter,d_counter,sizeof(unsigned long long),cudaMemcpyDeviceToHost));
if(gaussian_sum != h_sum){
dout() << "\nGaussian Sum doesn't match: is " << h_sum;
dout() << " (should be " << gaussian_sum << ")" << std::endl;
h_correct=false;
}
if(nSlots != h_counter-(blocks*threads)){
dout() << "\nallocated number of elements doesn't match: is " << h_counter;
dout() << " (should be " << nSlots << ")" << std::endl;
h_correct=false;
}
*/
if(h_correct){
dout() << "done" << std::endl;
}else{
dout() << "failed" << std::endl;
}
cudaFree(d_correct);
cudaFree(d_sum);
cudaFree(d_counter);
return static_cast<bool>(h_correct);
}
/**
* prints all parameters machine readable
*
* for params, see run_heap_verification-internal parameters
*/
/**
 * prints all parameters machine readable
 *
 * Emits two CSV lines on stdout: a header line with the column names and a
 * value line with the corresponding values, comma-separated and in matching
 * order.
 *
 * for params, see run_heap_verification-internal parameters
 */
void print_machine_readable(
const unsigned pagesize,
const unsigned accessblocks,
const unsigned regionsize,
const unsigned wastefactor,
const bool resetfreedpages,
const unsigned blocks,
const unsigned threads,
const unsigned elemsPerSlot,
const size_t allocElemSize,
const size_t heapSize,
const size_t maxSpace,
const size_t maxSlots,
const unsigned long long usedSlots,
const float allocFrac,
const size_t wasted,
const bool correct
){
std::string sep = ",";
std::stringstream h;
std::stringstream v;
h << "PagesizeByte" << sep;
v << pagesize << sep;
h << "Accessblocks" << sep;
v << accessblocks << sep;
h << "Regionsize" << sep;
v << regionsize << sep;
h << "Wastefactor" << sep;
// BUGFIX: this column previously printed `wasted`, duplicating the
// wastedBytes column and never reporting the configured wastefactor.
v << wastefactor << sep;
h << "ResetFreedPage" << sep;
v << resetfreedpages << sep;
h << "Gridsize" << sep;
v << blocks << sep;
h << "Blocksize" << sep;
v << threads << sep;
h << "ELEMS_PER_SLOT" << sep;
v << elemsPerSlot << sep;
h << "allocElemByte" << sep;
v << allocElemSize << sep;
h << "heapsizeByte" << sep;
v << heapSize << sep;
h << "maxSpaceByte" << sep;
v << maxSpace << sep;
h << "maxSlots" << sep;
v << maxSlots << sep;
h << "usedSlots" << sep;
v << usedSlots << sep;
h << "allocFraction" << sep;
v << allocFrac << sep;
h << "wastedBytes" << sep;
v << wasted << sep;
h << "correct" ;
v << correct ;
std::cout << h.str() << std::endl;
std::cout << v.str() << std::endl;
}
/**
* Verify the heap allocation of mallocMC
*
* Allocates as much memory as the heap allows. Make sure that allocated
* memory actually holds the correct values without corrupting them. Will
* fill the datastructure with values that are relative to the index and
later evaluate whether the values inside stayed the same after allocating all
* memory.
* Datastructure: Array that holds up to nPointers pointers to arrays of size
* ELEMS_PER_SLOT, each being of type allocElem_t.
*
* @return true if the verification was successful,
* false otherwise
*/
// Full benchmark driver: fills the mallocMC heap, verifies contents, damages
// one element (expecting verification to then fail), frees everything, and
// optionally prints a machine-readable summary. Returns overall success.
bool run_heap_verification(
const size_t heapMB,
const unsigned blocks,
const unsigned threads,
const bool machine_readable
){
cudaSetDeviceFlags(cudaDeviceMapHost);
const size_t heapSize = size_t(1024U*1024U) * heapMB;
const size_t slotSize = sizeof(allocElem_t)*ELEMS_PER_SLOT;
// NOTE(review): float-based ceil can lose precision for very large heaps;
// nPointers >= maxSlots is the intent -- verify for multi-GB heap sizes.
const size_t nPointers = ceil(static_cast<float>(heapSize) / slotSize);
const size_t maxSlots = heapSize/slotSize;
const size_t maxSpace = maxSlots*slotSize + nPointers*sizeof(allocElem_t*);
bool correct = true;
const unsigned long long zero = 0;
dout() << "CreationPolicy Arguments:" << std::endl;
dout() << "Pagesize: " << ScatterConfig::pagesize::value << std::endl;
dout() << "Accessblocks: " << ScatterConfig::accessblocks::value << std::endl;
dout() << "Regionsize: " << ScatterConfig::regionsize::value << std::endl;
dout() << "Wastefactor: " << ScatterConfig::wastefactor::value << std::endl;
dout() << "ResetFreedPages " << ScatterConfig::resetfreedpages::value << std::endl;
dout() << "" << std::endl;
dout() << "Gridsize: " << blocks << std::endl;
dout() << "Blocksize: " << threads << std::endl;
dout() << "Allocated elements: " << ELEMS_PER_SLOT << " x " << sizeof(allocElem_t);
dout() << " Byte (" << slotSize << " Byte)" << std::endl;
dout() << "Heap: " << heapSize << " Byte";
dout() << " (" << heapSize/pow(1024,2) << " MByte)" << std::endl;
dout() << "max space w/ pointers: " << maxSpace << " Byte";
dout() << " (" << maxSpace/pow(1024,2) << " MByte)" << std::endl;
dout() << "maximum of elements: " << maxSlots << std::endl;
// initializing the heap
ScatterAllocator mMC(heapSize);
// pointer table: one entry per potentially allocated slot
allocElem_t** d_testData;
MALLOCMC_CUDA_CHECKED_CALL(cudaMalloc((void**) &d_testData, nPointers*sizeof(allocElem_t*)));
// allocating with mallocMC
unsigned long long usedSlots = 0;
unsigned long long sumAllocElems = 0;
allocate(d_testData, &usedSlots, &sumAllocElems, blocks, threads, mMC);
const float allocFrac = static_cast<float>(usedSlots)*100/maxSlots;
const size_t wasted = heapSize - static_cast<size_t>(usedSlots) * slotSize;
dout() << "allocated elements: " << usedSlots;
dout() << " (" << allocFrac << "%)" << std::endl;
dout() << "wasted heap space: " << wasted << " Byte";
dout() << " (" << wasted/pow(1024,2) << " MByte)" << std::endl;
// verifying on device
correct = correct && verify(d_testData,usedSlots,blocks,threads);
// damaging one cell
dout() << "damaging of element... ";
CUDA_CHECK_KERNEL_SYNC(damageElement<<<1,1>>>(d_testData))
dout() << "done" << std::endl;
// verifying on device
// THIS SHOULD FAIL (damage was done before!). Therefore, we must inverse the logic
correct = correct && !verify(d_testData,usedSlots,blocks,threads);
// release all memory
dout() << "deallocation... ";
unsigned long long* d_dealloc_counter;
MALLOCMC_CUDA_CHECKED_CALL(cudaMalloc((void**) &d_dealloc_counter, sizeof(unsigned long long)));
MALLOCMC_CUDA_CHECKED_CALL(cudaMemcpy(d_dealloc_counter,&zero,sizeof(unsigned long long),cudaMemcpyHostToDevice));
CUDA_CHECK_KERNEL_SYNC(deallocAll<<<blocks,threads>>>(d_testData,d_dealloc_counter,static_cast<size_t>(usedSlots), mMC ))
cudaFree(d_dealloc_counter);
cudaFree(d_testData);
mMC.finalizeHeap();
dout() << "done "<< std::endl;
if(machine_readable){
print_machine_readable(
ScatterConfig::pagesize::value,
ScatterConfig::accessblocks::value,
ScatterConfig::regionsize::value,
ScatterConfig::wastefactor::value,
ScatterConfig::resetfreedpages::value,
blocks,
threads,
ELEMS_PER_SLOT,
sizeof(allocElem_t),
heapSize,
maxSpace,
maxSlots,
usedSlots,
allocFrac,
wasted,
correct
);
}
return correct;
}
|
c188d9b0d8ca98cb849136c6a0c07ff449057ebe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
// Note: the initial source code is taken from opencv.
// Opencv license
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "saiga/cuda/device_helper.h"
#include "saiga/cuda/imageProcessing/imageProcessing.h"
namespace Saiga
{
namespace CUDA
{
__constant__ float d_Kernel[SAIGA_MAX_KERNEL_SIZE];
// Separable-convolution row pass. Each block covers BLOCK_DIM_Y rows; each
// thread produces PATCH_PER_BLOCK output pixels of one row. The covered row
// segment plus HALO_SIZE*BLOCK_DIM_X halo pixels on each side is staged in
// shared memory before the KSIZE-tap filter (weights in constant memory
// d_Kernel) is applied. Out-of-image reads are clamped to the edge pixel.
// NOTE(review): assumes anchor <= HALO_SIZE * BLOCK_DIM_X so the shared-mem
// index never underflows -- confirm against callers.
template <int KSIZE>
__global__ void linearRowFilter(ImageView<float> src, ImageView<float> dst, const int anchor)
{
const int BLOCK_DIM_X = 32;
const int BLOCK_DIM_Y = 8;
const int PATCH_PER_BLOCK = 4;
const int HALO_SIZE = 1;
using sum_t = float;
__shared__ sum_t smem[BLOCK_DIM_Y][(PATCH_PER_BLOCK + 2 * HALO_SIZE) * BLOCK_DIM_X];
const int y = blockIdx.y * BLOCK_DIM_Y + threadIdx.y;
if (y >= src.height) return;
const float* src_row = src.rowPtr(y);
const int xStart = blockIdx.x * (PATCH_PER_BLOCK * BLOCK_DIM_X) + threadIdx.x;
// interior blocks: left halo is guaranteed in-bounds, no clamping needed
if (blockIdx.x > 0)
{
// Load left halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + j * BLOCK_DIM_X] = src_row[xStart - (HALO_SIZE - j) * BLOCK_DIM_X];
}
else
{
// Load left halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
// smem[threadIdx.y][threadIdx.x + j * BLOCK_DIM_X] = brd.at_low(xStart - (HALO_SIZE - j) *
// BLOCK_DIM_X, src_row);
smem[threadIdx.y][threadIdx.x + j * BLOCK_DIM_X] = src_row[max(xStart - (HALO_SIZE - j) * BLOCK_DIM_X, 0)];
}
// blocks at least two from the right edge can load without clamping
if (blockIdx.x + 2 < gridDim.x)
{
// Load main data
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X] =
src_row[xStart + j * BLOCK_DIM_X];
// Load right halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + (PATCH_PER_BLOCK + HALO_SIZE) * BLOCK_DIM_X + j * BLOCK_DIM_X] =
src_row[xStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_X];
}
else
{
// Load main data
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
// smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X] =
// brd.at_high(xStart + j * BLOCK_DIM_X, src_row);
smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X] =
src_row[min(xStart + j * BLOCK_DIM_X, src.width - 1)];
// Load right halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
// smem[threadIdx.y][threadIdx.x + (PATCH_PER_BLOCK + HALO_SIZE) * BLOCK_DIM_X + j * BLOCK_DIM_X]
// = brd.at_high(xStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_X, src_row);
smem[threadIdx.y][threadIdx.x + (PATCH_PER_BLOCK + HALO_SIZE) * BLOCK_DIM_X + j * BLOCK_DIM_X] =
src_row[min(xStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_X, src.width - 1)];
}
// all shared-memory tiles must be populated before any thread convolves
__syncthreads();
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
{
const int x = xStart + j * BLOCK_DIM_X;
if (x < src.width)
{
sum_t sum = 0;
#pragma unroll
for (int k = 0; k < KSIZE; ++k)
sum = sum + smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X - anchor + k] *
d_Kernel[k];
dst(y, x) = sum;
}
}
}
// Instantiates and launches the row-filter kernel for a compile-time kernel
// size (ksize = 2*RADIUS + 1); anchor selects the center tap.
// NOTE(review): the template parameter T is unused -- the kernel is
// float-only.
template <typename T, int RADIUS>
static void convolveRow(ImageView<float> src, ImageView<float> dst)
{
const int BLOCK_W = 32;
const int BLOCK_H = 8;
const int PATCH_PER_BLOCK = 4;
const dim3 block(BLOCK_W, BLOCK_H);
// each block covers BLOCK_W*PATCH_PER_BLOCK columns and BLOCK_H rows
const dim3 grid(iDivUp(src.width, BLOCK_W * PATCH_PER_BLOCK), iDivUp(src.height, BLOCK_H));
const int ksize = RADIUS * 2 + 1;
int anchor = ksize >> 1;
hipLaunchKernelGGL(( linearRowFilter<ksize>), dim3(grid), dim3(block), 0, 0, src, dst, anchor);
}
// Runtime entry point: uploads the filter weights into the constant-memory
// buffer d_Kernel, then dispatches to the compile-time-sized implementation.
// Supported radii: 1..24 (asserts otherwise). The DeviceToDevice copy kind
// implies kernel.data() must already be a device pointer.
void convolveRow(ImageView<float> src, ImageView<float> dst, Saiga::ArrayView<float> kernel, int radius)
{
SAIGA_ASSERT(kernel.size() > 0 && kernel.size() <= SAIGA_MAX_KERNEL_SIZE);
CHECK_CUDA_ERROR(
hipMemcpyToSymbol(d_Kernel, kernel.data(), kernel.size() * sizeof(float), 0, hipMemcpyDeviceToDevice));
// radius is only known at runtime; map it onto the template instantiations
switch (radius)
{
case 1:
convolveRow<float, 1>(src, dst);
break;
case 2:
convolveRow<float, 2>(src, dst);
break;
case 3:
convolveRow<float, 3>(src, dst);
break;
case 4:
convolveRow<float, 4>(src, dst);
break;
case 5:
convolveRow<float, 5>(src, dst);
break;
case 6:
convolveRow<float, 6>(src, dst);
break;
case 7:
convolveRow<float, 7>(src, dst);
break;
case 8:
convolveRow<float, 8>(src, dst);
break;
case 9:
convolveRow<float, 9>(src, dst);
break;
case 10:
convolveRow<float, 10>(src, dst);
break;
case 11:
convolveRow<float, 11>(src, dst);
break;
case 12:
convolveRow<float, 12>(src, dst);
break;
case 13:
convolveRow<float, 13>(src, dst);
break;
case 14:
convolveRow<float, 14>(src, dst);
break;
case 15:
convolveRow<float, 15>(src, dst);
break;
case 16:
convolveRow<float, 16>(src, dst);
break;
case 17:
convolveRow<float, 17>(src, dst);
break;
case 18:
convolveRow<float, 18>(src, dst);
break;
case 19:
convolveRow<float, 19>(src, dst);
break;
case 20:
convolveRow<float, 20>(src, dst);
break;
case 21:
convolveRow<float, 21>(src, dst);
break;
case 22:
convolveRow<float, 22>(src, dst);
break;
case 23:
convolveRow<float, 23>(src, dst);
break;
case 24:
convolveRow<float, 24>(src, dst);
break;
default:
SAIGA_ASSERT(0);
}
}
} // namespace CUDA
} // namespace Saiga
| c188d9b0d8ca98cb849136c6a0c07ff449057ebe.cu | /**
* Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
// Note: the intial source code ist taken from opencv.
// Opencv license
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "saiga/cuda/device_helper.h"
#include "saiga/cuda/imageProcessing/imageProcessing.h"
namespace Saiga
{
namespace CUDA
{
__constant__ float d_Kernel[SAIGA_MAX_KERNEL_SIZE];
template <int KSIZE>
__global__ void linearRowFilter(ImageView<float> src, ImageView<float> dst, const int anchor)
{
const int BLOCK_DIM_X = 32;
const int BLOCK_DIM_Y = 8;
const int PATCH_PER_BLOCK = 4;
const int HALO_SIZE = 1;
using sum_t = float;
__shared__ sum_t smem[BLOCK_DIM_Y][(PATCH_PER_BLOCK + 2 * HALO_SIZE) * BLOCK_DIM_X];
const int y = blockIdx.y * BLOCK_DIM_Y + threadIdx.y;
if (y >= src.height) return;
const float* src_row = src.rowPtr(y);
const int xStart = blockIdx.x * (PATCH_PER_BLOCK * BLOCK_DIM_X) + threadIdx.x;
if (blockIdx.x > 0)
{
// Load left halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + j * BLOCK_DIM_X] = src_row[xStart - (HALO_SIZE - j) * BLOCK_DIM_X];
}
else
{
// Load left halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
// smem[threadIdx.y][threadIdx.x + j * BLOCK_DIM_X] = brd.at_low(xStart - (HALO_SIZE - j) *
// BLOCK_DIM_X, src_row);
smem[threadIdx.y][threadIdx.x + j * BLOCK_DIM_X] = src_row[max(xStart - (HALO_SIZE - j) * BLOCK_DIM_X, 0)];
}
if (blockIdx.x + 2 < gridDim.x)
{
// Load main data
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X] =
src_row[xStart + j * BLOCK_DIM_X];
// Load right halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + (PATCH_PER_BLOCK + HALO_SIZE) * BLOCK_DIM_X + j * BLOCK_DIM_X] =
src_row[xStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_X];
}
else
{
// Load main data
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
// smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X] =
// brd.at_high(xStart + j * BLOCK_DIM_X, src_row);
smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X] =
src_row[min(xStart + j * BLOCK_DIM_X, src.width - 1)];
// Load right halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
// smem[threadIdx.y][threadIdx.x + (PATCH_PER_BLOCK + HALO_SIZE) * BLOCK_DIM_X + j * BLOCK_DIM_X]
// = brd.at_high(xStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_X, src_row);
smem[threadIdx.y][threadIdx.x + (PATCH_PER_BLOCK + HALO_SIZE) * BLOCK_DIM_X + j * BLOCK_DIM_X] =
src_row[min(xStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_X, src.width - 1)];
}
__syncthreads();
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
{
const int x = xStart + j * BLOCK_DIM_X;
if (x < src.width)
{
sum_t sum = 0;
#pragma unroll
for (int k = 0; k < KSIZE; ++k)
sum = sum + smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X - anchor + k] *
d_Kernel[k];
dst(y, x) = sum;
}
}
}
// Instantiates and launches the row-filter kernel for a compile-time kernel
// size (ksize = 2*RADIUS + 1); anchor selects the center tap.
// NOTE(review): the template parameter T is unused -- the kernel is
// float-only.
template <typename T, int RADIUS>
static void convolveRow(ImageView<float> src, ImageView<float> dst)
{
const int BLOCK_W = 32;
const int BLOCK_H = 8;
const int PATCH_PER_BLOCK = 4;
const dim3 block(BLOCK_W, BLOCK_H);
// each block covers BLOCK_W*PATCH_PER_BLOCK columns and BLOCK_H rows
const dim3 grid(iDivUp(src.width, BLOCK_W * PATCH_PER_BLOCK), iDivUp(src.height, BLOCK_H));
const int ksize = RADIUS * 2 + 1;
int anchor = ksize >> 1;
linearRowFilter<ksize><<<grid, block>>>(src, dst, anchor);
}
// Runtime entry point: uploads the filter weights into the constant-memory
// buffer d_Kernel, then dispatches to the compile-time-sized implementation.
// Supported radii: 1..24 (asserts otherwise). The DeviceToDevice copy kind
// implies kernel.data() must already be a device pointer.
void convolveRow(ImageView<float> src, ImageView<float> dst, Saiga::ArrayView<float> kernel, int radius)
{
SAIGA_ASSERT(kernel.size() > 0 && kernel.size() <= SAIGA_MAX_KERNEL_SIZE);
CHECK_CUDA_ERROR(
cudaMemcpyToSymbol(d_Kernel, kernel.data(), kernel.size() * sizeof(float), 0, cudaMemcpyDeviceToDevice));
// radius is only known at runtime; map it onto the template instantiations
switch (radius)
{
case 1:
convolveRow<float, 1>(src, dst);
break;
case 2:
convolveRow<float, 2>(src, dst);
break;
case 3:
convolveRow<float, 3>(src, dst);
break;
case 4:
convolveRow<float, 4>(src, dst);
break;
case 5:
convolveRow<float, 5>(src, dst);
break;
case 6:
convolveRow<float, 6>(src, dst);
break;
case 7:
convolveRow<float, 7>(src, dst);
break;
case 8:
convolveRow<float, 8>(src, dst);
break;
case 9:
convolveRow<float, 9>(src, dst);
break;
case 10:
convolveRow<float, 10>(src, dst);
break;
case 11:
convolveRow<float, 11>(src, dst);
break;
case 12:
convolveRow<float, 12>(src, dst);
break;
case 13:
convolveRow<float, 13>(src, dst);
break;
case 14:
convolveRow<float, 14>(src, dst);
break;
case 15:
convolveRow<float, 15>(src, dst);
break;
case 16:
convolveRow<float, 16>(src, dst);
break;
case 17:
convolveRow<float, 17>(src, dst);
break;
case 18:
convolveRow<float, 18>(src, dst);
break;
case 19:
convolveRow<float, 19>(src, dst);
break;
case 20:
convolveRow<float, 20>(src, dst);
break;
case 21:
convolveRow<float, 21>(src, dst);
break;
case 22:
convolveRow<float, 22>(src, dst);
break;
case 23:
convolveRow<float, 23>(src, dst);
break;
case 24:
convolveRow<float, 24>(src, dst);
break;
default:
SAIGA_ASSERT(0);
}
}
} // namespace CUDA
} // namespace Saiga
|
16c48cb56bc3e92fdade19c944f11cf36ef4ab08.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/utilities/device_atomics.cuh>
#include <cudf/null_mask.hpp>
#include <cudf/reduction.hpp>
namespace cudf {
namespace detail {
/**
* @brief Dispatcher for running Scan operation on input column
* Dispatches scan operation on `Op` and creates output column
*
* @tparam Op device binary operator
*/
template <typename Op>
struct ScanDispatcher {
private:
// strings only support min/max scans (lexicographic ordering)
template <typename T>
static constexpr bool is_string_supported()
{
return std::is_same<T, string_view>::value &&
(std::is_same<Op, cudf::DeviceMin>::value || std::is_same<Op, cudf::DeviceMax>::value);
}
// return true if T is arithmetic type (including bool)
template <typename T>
static constexpr bool is_supported()
{
return std::is_arithmetic<T>::value || is_string_supported<T>();
}
// for arithmetic types
// Exclusive scan: nulls are replaced with the operator identity before
// scanning; with null_policy::EXCLUDE the input null mask is copied to the
// output. NOTE(review): with null_policy::INCLUDE no mask is produced for
// the exclusive case -- confirm this is intended.
template <typename T, std::enable_if_t<std::is_arithmetic<T>::value, T>* = nullptr>
auto exclusive_scan(const column_view& input_view,
null_policy null_handling,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
const size_type size = input_view.size();
auto output_column =
detail::allocate_like(input_view, size, mask_allocation_policy::NEVER, mr, stream)
if (null_handling == null_policy::EXCLUDE) {
output_column->set_null_mask(copy_bitmask(input_view, stream, mr), input_view.null_count());
}
mutable_column_view output = output_column->mutable_view();
auto d_input = column_device_view::create(input_view, stream);
if (input_view.has_nulls()) {
auto input = make_null_replacement_iterator(*d_input, Op::template identity<T>());
thrust::exclusive_scan(rmm::exec_policy(stream)->on(stream),
input,
input + size,
output.data<T>(),
Op::template identity<T>(),
Op{});
} else {
auto input = d_input->begin<T>();
thrust::exclusive_scan(rmm::exec_policy(stream)->on(stream),
input,
input + size,
output.data<T>(),
Op::template identity<T>(),
Op{});
}
CHECK_CUDA(stream);
return output_column;
}
// for string type
template <typename T, std::enable_if_t<is_string_supported<T>(), T>* = nullptr>
std::unique_ptr<column> exclusive_scan(const column_view& input_view,
null_policy null_handling,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_FAIL("String types supports only inclusive min/max for `cudf::scan`");
}
// Builds the output mask for an inclusive scan under null_policy::INCLUDE:
// rows before the first null stay valid, every row from the first null
// onward is invalid (nulls propagate through the running result).
rmm::device_buffer mask_inclusive_scan(const column_view& input_view,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
rmm::device_buffer mask =
create_null_mask(input_view.size(), mask_state::UNINITIALIZED, stream, mr);
auto d_input = column_device_view::create(input_view, stream);
auto v = detail::make_validity_iterator(*d_input);
// index of the first invalid row (== size when there is no null)
auto first_null_position =
thrust::find_if_not(
rmm::exec_policy(stream)->on(stream), v, v + input_view.size(), thrust::identity<bool>{}) -
v;
cudf::set_null_mask(
static_cast<cudf::bitmask_type*>(mask.data()), 0, first_null_position, true);
cudf::set_null_mask(
static_cast<cudf::bitmask_type*>(mask.data()), first_null_position, input_view.size(), false);
return mask;
}
// for arithmetic types
// Inclusive scan: nulls replaced with the operator identity; output mask is
// either a copy of the input mask (EXCLUDE) or the null-propagating mask
// from mask_inclusive_scan (INCLUDE).
template <typename T, std::enable_if_t<std::is_arithmetic<T>::value, T>* = nullptr>
auto inclusive_scan(const column_view& input_view,
null_policy null_handling,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
const size_type size = input_view.size();
auto output_column =
detail::allocate_like(input_view, size, mask_allocation_policy::NEVER, mr, stream)
if (null_handling == null_policy::EXCLUDE) {
output_column->set_null_mask(copy_bitmask(input_view, stream, mr), input_view.null_count());
} else {
if (input_view.nullable()) {
output_column->set_null_mask(mask_inclusive_scan(input_view, mr, stream),
cudf::UNKNOWN_NULL_COUNT);
}
}
auto d_input = column_device_view::create(input_view, stream);
mutable_column_view output = output_column->mutable_view();
if (input_view.has_nulls()) {
auto input = make_null_replacement_iterator(*d_input, Op::template identity<T>());
thrust::inclusive_scan(
rmm::exec_policy(stream)->on(stream), input, input + size, output.data<T>(), Op{});
} else {
auto input = d_input->begin<T>();
thrust::inclusive_scan(
rmm::exec_policy(stream)->on(stream), input, input + size, output.data<T>(), Op{});
}
CHECK_CUDA(stream);
return output_column;
}
// for string type
// Scans string_view values into a temporary device vector, then materializes
// a strings column from it; mask handling mirrors the arithmetic overload.
template <typename T, std::enable_if_t<is_string_supported<T>(), T>* = nullptr>
std::unique_ptr<column> inclusive_scan(const column_view& input_view,
null_policy null_handling,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
const size_type size = input_view.size();
rmm::device_vector<T> result(size);
auto d_input = column_device_view::create(input_view, stream);
if (input_view.has_nulls()) {
auto input = make_null_replacement_iterator(*d_input, Op::template identity<T>());
thrust::inclusive_scan(
rmm::exec_policy(stream)->on(stream), input, input + size, result.data().get(), Op{});
} else {
auto input = d_input->begin<T>();
thrust::inclusive_scan(
rmm::exec_policy(stream)->on(stream), input, input + size, result.data().get(), Op{});
}
CHECK_CUDA(stream);
auto output_column = make_strings_column(result, Op::template identity<T>(), stream, mr);
if (null_handling == null_policy::EXCLUDE) {
output_column->set_null_mask(copy_bitmask(input_view, stream, mr), input_view.null_count());
} else {
if (input_view.nullable()) {
output_column->set_null_mask(mask_inclusive_scan(input_view, mr, stream),
cudf::UNKNOWN_NULL_COUNT);
}
}
return output_column;
}
public:
/**
* @brief creates new column from input column by applying scan operation
*
* @param input input column view
* @param inclusive inclusive or exclusive scan
* @param mr Device memory resource used to allocate the returned column's device memory
* @param stream CUDA stream used for device memory operations and kernel launches.
* @return the scanned column
*
* @tparam T type of input column
*/
template <typename T, typename std::enable_if_t<is_supported<T>(), T>* = nullptr>
std::unique_ptr<column> operator()(const column_view& input,
scan_type inclusive,
null_policy null_handling,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
std::unique_ptr<column> output;
if (inclusive == scan_type::INCLUSIVE)
output = inclusive_scan<T>(input, null_handling, mr, stream);
else
output = exclusive_scan<T>(input, null_handling, mr, stream);
// sanity check: EXCLUDE must preserve the input's null count exactly
if (null_handling == null_policy::EXCLUDE) {
CUDF_EXPECTS(input.null_count() == output->null_count(),
"Input / output column null count mismatch");
}
return output;
}
// fallback for unsupported element types
template <typename T, typename std::enable_if_t<!is_supported<T>(), T>* = nullptr>
std::unique_ptr<column> operator()(const column_view& input,
scan_type inclusive,
null_policy null_handling,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_FAIL("Non-arithmetic types not supported for `cudf::scan`");
}
};
// Maps the requested aggregation kind (SUM/MIN/MAX/PRODUCT) onto the
// corresponding device operator and type-dispatches into ScanDispatcher.
std::unique_ptr<column> scan(
const column_view& input,
std::unique_ptr<aggregation> const& agg,
scan_type inclusive,
null_policy null_handling,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(),
hipStream_t stream = 0)
{
CUDF_EXPECTS(is_numeric(input.type()) || is_compound(input.type()),
"Unexpected non-numeric or non-string type.");
switch (agg->kind) {
case aggregation::SUM:
return cudf::type_dispatcher(input.type(),
ScanDispatcher<cudf::DeviceSum>(),
input,
inclusive,
null_handling,
mr,
stream);
case aggregation::MIN:
return cudf::type_dispatcher(input.type(),
ScanDispatcher<cudf::DeviceMin>(),
input,
inclusive,
null_handling,
mr,
stream);
case aggregation::MAX:
return cudf::type_dispatcher(input.type(),
ScanDispatcher<cudf::DeviceMax>(),
input,
inclusive,
null_handling,
mr,
stream);
case aggregation::PRODUCT:
return cudf::type_dispatcher(input.type(),
ScanDispatcher<cudf::DeviceProduct>(),
input,
inclusive,
null_handling,
mr,
stream);
default: CUDF_FAIL("Unsupported aggregation operator for scan");
}
}
} // namespace detail
std::unique_ptr<column> scan(const column_view& input,
std::unique_ptr<aggregation> const& agg,
scan_type inclusive,
null_policy null_handling,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::scan(input, agg, inclusive, null_handling, mr);
}
} // namespace cudf
| 16c48cb56bc3e92fdade19c944f11cf36ef4ab08.cu | #include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/utilities/device_atomics.cuh>
#include <cudf/null_mask.hpp>
#include <cudf/reduction.hpp>
namespace cudf {
namespace detail {
/**
* @brief Dispatcher for running Scan operation on input column
* Dispatches scan operation on `Op` and creates output column
*
* @tparam Op device binary operator
*/
template <typename Op>
struct ScanDispatcher {
private:
template <typename T>
static constexpr bool is_string_supported()
{
return std::is_same<T, string_view>::value &&
(std::is_same<Op, cudf::DeviceMin>::value || std::is_same<Op, cudf::DeviceMax>::value);
}
// return true if T is arithmetic type (including bool)
template <typename T>
static constexpr bool is_supported()
{
return std::is_arithmetic<T>::value || is_string_supported<T>();
}
// for arithmetic types
template <typename T, std::enable_if_t<std::is_arithmetic<T>::value, T>* = nullptr>
auto exclusive_scan(const column_view& input_view,
null_policy null_handling,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
const size_type size = input_view.size();
auto output_column =
detail::allocate_like(input_view, size, mask_allocation_policy::NEVER, mr, stream);
if (null_handling == null_policy::EXCLUDE) {
output_column->set_null_mask(copy_bitmask(input_view, stream, mr), input_view.null_count());
}
mutable_column_view output = output_column->mutable_view();
auto d_input = column_device_view::create(input_view, stream);
if (input_view.has_nulls()) {
auto input = make_null_replacement_iterator(*d_input, Op::template identity<T>());
thrust::exclusive_scan(rmm::exec_policy(stream)->on(stream),
input,
input + size,
output.data<T>(),
Op::template identity<T>(),
Op{});
} else {
auto input = d_input->begin<T>();
thrust::exclusive_scan(rmm::exec_policy(stream)->on(stream),
input,
input + size,
output.data<T>(),
Op::template identity<T>(),
Op{});
}
CHECK_CUDA(stream);
return output_column;
}
// for string type
template <typename T, std::enable_if_t<is_string_supported<T>(), T>* = nullptr>
std::unique_ptr<column> exclusive_scan(const column_view& input_view,
null_policy null_handling,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_FAIL("String types supports only inclusive min/max for `cudf::scan`");
}
rmm::device_buffer mask_inclusive_scan(const column_view& input_view,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
rmm::device_buffer mask =
create_null_mask(input_view.size(), mask_state::UNINITIALIZED, stream, mr);
auto d_input = column_device_view::create(input_view, stream);
auto v = detail::make_validity_iterator(*d_input);
auto first_null_position =
thrust::find_if_not(
rmm::exec_policy(stream)->on(stream), v, v + input_view.size(), thrust::identity<bool>{}) -
v;
cudf::set_null_mask(
static_cast<cudf::bitmask_type*>(mask.data()), 0, first_null_position, true);
cudf::set_null_mask(
static_cast<cudf::bitmask_type*>(mask.data()), first_null_position, input_view.size(), false);
return mask;
}
// for arithmetic types
template <typename T, std::enable_if_t<std::is_arithmetic<T>::value, T>* = nullptr>
auto inclusive_scan(const column_view& input_view,
null_policy null_handling,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
const size_type size = input_view.size();
auto output_column =
detail::allocate_like(input_view, size, mask_allocation_policy::NEVER, mr, stream);
if (null_handling == null_policy::EXCLUDE) {
output_column->set_null_mask(copy_bitmask(input_view, stream, mr), input_view.null_count());
} else {
if (input_view.nullable()) {
output_column->set_null_mask(mask_inclusive_scan(input_view, mr, stream),
cudf::UNKNOWN_NULL_COUNT);
}
}
auto d_input = column_device_view::create(input_view, stream);
mutable_column_view output = output_column->mutable_view();
if (input_view.has_nulls()) {
auto input = make_null_replacement_iterator(*d_input, Op::template identity<T>());
thrust::inclusive_scan(
rmm::exec_policy(stream)->on(stream), input, input + size, output.data<T>(), Op{});
} else {
auto input = d_input->begin<T>();
thrust::inclusive_scan(
rmm::exec_policy(stream)->on(stream), input, input + size, output.data<T>(), Op{});
}
CHECK_CUDA(stream);
return output_column;
}
// for string type
template <typename T, std::enable_if_t<is_string_supported<T>(), T>* = nullptr>
std::unique_ptr<column> inclusive_scan(const column_view& input_view,
null_policy null_handling,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
const size_type size = input_view.size();
rmm::device_vector<T> result(size);
auto d_input = column_device_view::create(input_view, stream);
if (input_view.has_nulls()) {
auto input = make_null_replacement_iterator(*d_input, Op::template identity<T>());
thrust::inclusive_scan(
rmm::exec_policy(stream)->on(stream), input, input + size, result.data().get(), Op{});
} else {
auto input = d_input->begin<T>();
thrust::inclusive_scan(
rmm::exec_policy(stream)->on(stream), input, input + size, result.data().get(), Op{});
}
CHECK_CUDA(stream);
auto output_column = make_strings_column(result, Op::template identity<T>(), stream, mr);
if (null_handling == null_policy::EXCLUDE) {
output_column->set_null_mask(copy_bitmask(input_view, stream, mr), input_view.null_count());
} else {
if (input_view.nullable()) {
output_column->set_null_mask(mask_inclusive_scan(input_view, mr, stream),
cudf::UNKNOWN_NULL_COUNT);
}
}
return output_column;
}
public:
/**
* @brief creates new column from input column by applying scan operation
*
* @param input input column view
* @param inclusive inclusive or exclusive scan
* @param mr Device memory resource used to allocate the returned column's device memory
* @param stream CUDA stream used for device memory operations and kernel launches.
* @return
*
* @tparam T type of input column
*/
template <typename T, typename std::enable_if_t<is_supported<T>(), T>* = nullptr>
std::unique_ptr<column> operator()(const column_view& input,
scan_type inclusive,
null_policy null_handling,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
std::unique_ptr<column> output;
if (inclusive == scan_type::INCLUSIVE)
output = inclusive_scan<T>(input, null_handling, mr, stream);
else
output = exclusive_scan<T>(input, null_handling, mr, stream);
if (null_handling == null_policy::EXCLUDE) {
CUDF_EXPECTS(input.null_count() == output->null_count(),
"Input / output column null count mismatch");
}
return output;
}
template <typename T, typename std::enable_if_t<!is_supported<T>(), T>* = nullptr>
std::unique_ptr<column> operator()(const column_view& input,
scan_type inclusive,
null_policy null_handling,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_FAIL("Non-arithmetic types not supported for `cudf::scan`");
}
};
std::unique_ptr<column> scan(
const column_view& input,
std::unique_ptr<aggregation> const& agg,
scan_type inclusive,
null_policy null_handling,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(),
cudaStream_t stream = 0)
{
CUDF_EXPECTS(is_numeric(input.type()) || is_compound(input.type()),
"Unexpected non-numeric or non-string type.");
switch (agg->kind) {
case aggregation::SUM:
return cudf::type_dispatcher(input.type(),
ScanDispatcher<cudf::DeviceSum>(),
input,
inclusive,
null_handling,
mr,
stream);
case aggregation::MIN:
return cudf::type_dispatcher(input.type(),
ScanDispatcher<cudf::DeviceMin>(),
input,
inclusive,
null_handling,
mr,
stream);
case aggregation::MAX:
return cudf::type_dispatcher(input.type(),
ScanDispatcher<cudf::DeviceMax>(),
input,
inclusive,
null_handling,
mr,
stream);
case aggregation::PRODUCT:
return cudf::type_dispatcher(input.type(),
ScanDispatcher<cudf::DeviceProduct>(),
input,
inclusive,
null_handling,
mr,
stream);
default: CUDF_FAIL("Unsupported aggregation operator for scan");
}
}
} // namespace detail
std::unique_ptr<column> scan(const column_view& input,
std::unique_ptr<aggregation> const& agg,
scan_type inclusive,
null_policy null_handling,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::scan(input, agg, inclusive, null_handling, mr);
}
} // namespace cudf
|
28b5936b44fbee1a89fea349c165ca3c1fc61e3d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kTranspose(float* a, float* dest, int width, int height) {
const int bx = blockIdx.x * blockDim.x;
const int by = blockIdx.y * blockDim.y;
const int tx = bx + threadIdx.x;
const int ty = by + threadIdx.y;
// unsigned int idx = ty * width + tx;
__shared__
float smem[ADD_BLOCK_SIZE][ADD_BLOCK_SIZE + 1];
if (tx < width && ty < height) {
smem[threadIdx.y][threadIdx.x] = a[ty * width + tx];
}
__syncthreads();
if (by + threadIdx.x < height && threadIdx.y + bx < width) {
// idx = height * (blockIdx.x * blockDim.x + threadIdx.y) + blockIdx.y * blockDim.y + threadIdx.x;
dest[(bx + threadIdx.y) * height + by + threadIdx.x] = smem[threadIdx.x][threadIdx.y];
}
} | 28b5936b44fbee1a89fea349c165ca3c1fc61e3d.cu | #include "includes.h"
__global__ void kTranspose(float* a, float* dest, int width, int height) {
const int bx = blockIdx.x * blockDim.x;
const int by = blockIdx.y * blockDim.y;
const int tx = bx + threadIdx.x;
const int ty = by + threadIdx.y;
// unsigned int idx = ty * width + tx;
__shared__
float smem[ADD_BLOCK_SIZE][ADD_BLOCK_SIZE + 1];
if (tx < width && ty < height) {
smem[threadIdx.y][threadIdx.x] = a[ty * width + tx];
}
__syncthreads();
if (by + threadIdx.x < height && threadIdx.y + bx < width) {
// idx = height * (blockIdx.x * blockDim.x + threadIdx.y) + blockIdx.y * blockDim.y + threadIdx.x;
dest[(bx + threadIdx.y) * height + by + threadIdx.x] = smem[threadIdx.x][threadIdx.y];
}
} |
b0a74b9f233bde1a3c27700a9677c2d1bdcfc856.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <include/labwork.h>
#include <hip/hip_runtime_api.h>
#include <omp.h>
#define ACTIVE_THREADS 4
int main(int argc, char **argv) {
printf("USTH ICT Master 2018, Advanced Programming for HPC.\n");
if (argc < 2) {
printf("Usage: labwork <lwNum> <inputImage>\n");
printf(" lwNum labwork number\n");
printf(" inputImage the input file name, in JPEG format\n");
return 0;
}
int lwNum = atoi(argv[1]);
std::string inputFilename;
// pre-initialize CUDA to avoid incorrect profiling
printf("Warming up...\n");
char *temp;
hipMalloc(&temp, 1024);
Labwork labwork;
if (lwNum != 2 ) {
inputFilename = std::string(argv[2]);
labwork.loadInputImage(inputFilename);
}
printf("Starting labwork %d\n", lwNum);
Timer timer;
timer.start();
switch (lwNum) {
case 1:
labwork.labwork1_CPU();
labwork.saveOutputImage("labwork2-cpu-out.jpg");
printf("labwork 1 CPU ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
timer.start();
labwork.labwork1_OpenMP();
labwork.saveOutputImage("labwork2-openmp-out.jpg");
printf("labwork 1 OpenMP ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
timer.start();
break;
case 2:
labwork.labwork2_GPU();
break;
case 3:
labwork.labwork3_GPU();
labwork.saveOutputImage("labwork3-gpu-out.jpg");
break;
case 4:
labwork.labwork4_GPU();
labwork.saveOutputImage("labwork4-gpu-out.jpg");
break;
case 5:
labwork.labwork5_CPU();
labwork.saveOutputImage("labwork5-cpu-out.jpg");
labwork.labwork5_GPU();
labwork.saveOutputImage("labwork5-gpu-out.jpg");
break;
case 6:
labwork.labwork6_GPU();
labwork.saveOutputImage("labwork6-gpu-out.jpg");
break;
case 7:
labwork.labwork7_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork7-gpu-out.jpg");
break;
case 8:
labwork.labwork8_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork8-gpu-out.jpg");
break;
case 9:
labwork.labwork9_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork9-gpu-out.jpg");
break;
case 10:
labwork.labwork10_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork10-gpu-out.jpg");
break;
}
printf("labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
}
void Labwork::loadInputImage(std::string inputFileName) {
inputImage = jpegLoader.load(inputFileName);
}
void Labwork::saveOutputImage(std::string outputFileName) {
jpegLoader.save(outputFileName, outputImage, inputImage->width, inputImage->height, 90);
}
void Labwork::labwork1_CPU() {
int pixelCount = inputImage->width * inputImage->height;
outputImage = static_cast<char *>(malloc(pixelCount * 3));
for (int j = 0; j < 100; j++) { // let's do it 100 times, otherwise it's too fast!
for (int i = 0; i < pixelCount; i++) {
outputImage[i * 3] = (char) (((int) inputImage->buffer[i * 3] + (int) inputImage->buffer[i * 3 + 1] +
(int) inputImage->buffer[i * 3 + 2]) / 3);
outputImage[i * 3 + 1] = outputImage[i * 3];
outputImage[i * 3 + 2] = outputImage[i * 3];
}
}
}
void Labwork::labwork1_OpenMP() {
int pixelCount = inputImage->width * inputImage->height;
outputImage = static_cast<char *>(malloc(pixelCount * 3));
#pragma omp parallel dynamic for
{
for (int j = 0; j < 100; j++) {
for (int i = 0; i < pixelCount; i++) {
outputImage[i * 3] = (char) (((int) inputImage->buffer[i * 3] + (int) inputImage->buffer[i * 3 + 1] +
(int) inputImage->buffer[i * 3 + 2]) / 3);
outputImage[i * 3 + 1] = outputImage[i * 3];
outputImage[i * 3 + 2] = outputImage[i * 3];
}
}
}
}
int getSPcores(hipDeviceProp_t devProp) {
int cores = 0;
int mp = devProp.multiProcessorCount;
switch (devProp.major) {
case 2: // Fermi
if (devProp.minor == 1) cores = mp * 48;
else cores = mp * 32;
break;
case 3: // Kepler
cores = mp * 192;
break;
case 5: // Maxwell
cores = mp * 128;
break;
case 6: // Pascal
if (devProp.minor == 1) cores = mp * 128;
else if (devProp.minor == 0) cores = mp * 64;
else printf("Unknown device type\n");
break;
default:
printf("Unknown device type\n");
break;
}
return cores;
}
void Labwork::labwork2_GPU() {
int nDevices = 0;
// get all devices
hipGetDeviceCount(&nDevices);
printf("Number total of GPU : %d\n\n", nDevices);
for (int i = 0; i < nDevices; i++){
// get informations from individual device
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
// something more here
}
}
void Labwork::labwork3_GPU() {
// Calculate number of pixels
// Allocate CUDA memory
// Copy CUDA Memory from CPU to GPU
// Processing
// Copy CUDA Memory from GPU to CPU
// Cleaning
}
void Labwork::labwork4_GPU() {
}
void Labwork::labwork5_GPU(bool shared) {
}
void Labwork::labwork6_GPU() {
}
void Labwork::labwork7_GPU() {
}
void Labwork::labwork8_GPU() {
}
void Labwork::labwork9_GPU() {
}
void Labwork::labwork10_GPU(){
}
| b0a74b9f233bde1a3c27700a9677c2d1bdcfc856.cu | #include <stdio.h>
#include <include/labwork.h>
#include <cuda_runtime_api.h>
#include <omp.h>
#define ACTIVE_THREADS 4
int main(int argc, char **argv) {
printf("USTH ICT Master 2018, Advanced Programming for HPC.\n");
if (argc < 2) {
printf("Usage: labwork <lwNum> <inputImage>\n");
printf(" lwNum labwork number\n");
printf(" inputImage the input file name, in JPEG format\n");
return 0;
}
int lwNum = atoi(argv[1]);
std::string inputFilename;
// pre-initialize CUDA to avoid incorrect profiling
printf("Warming up...\n");
char *temp;
cudaMalloc(&temp, 1024);
Labwork labwork;
if (lwNum != 2 ) {
inputFilename = std::string(argv[2]);
labwork.loadInputImage(inputFilename);
}
printf("Starting labwork %d\n", lwNum);
Timer timer;
timer.start();
switch (lwNum) {
case 1:
labwork.labwork1_CPU();
labwork.saveOutputImage("labwork2-cpu-out.jpg");
printf("labwork 1 CPU ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
timer.start();
labwork.labwork1_OpenMP();
labwork.saveOutputImage("labwork2-openmp-out.jpg");
printf("labwork 1 OpenMP ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
timer.start();
break;
case 2:
labwork.labwork2_GPU();
break;
case 3:
labwork.labwork3_GPU();
labwork.saveOutputImage("labwork3-gpu-out.jpg");
break;
case 4:
labwork.labwork4_GPU();
labwork.saveOutputImage("labwork4-gpu-out.jpg");
break;
case 5:
labwork.labwork5_CPU();
labwork.saveOutputImage("labwork5-cpu-out.jpg");
labwork.labwork5_GPU();
labwork.saveOutputImage("labwork5-gpu-out.jpg");
break;
case 6:
labwork.labwork6_GPU();
labwork.saveOutputImage("labwork6-gpu-out.jpg");
break;
case 7:
labwork.labwork7_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork7-gpu-out.jpg");
break;
case 8:
labwork.labwork8_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork8-gpu-out.jpg");
break;
case 9:
labwork.labwork9_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork9-gpu-out.jpg");
break;
case 10:
labwork.labwork10_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork10-gpu-out.jpg");
break;
}
printf("labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
}
void Labwork::loadInputImage(std::string inputFileName) {
inputImage = jpegLoader.load(inputFileName);
}
void Labwork::saveOutputImage(std::string outputFileName) {
jpegLoader.save(outputFileName, outputImage, inputImage->width, inputImage->height, 90);
}
void Labwork::labwork1_CPU() {
int pixelCount = inputImage->width * inputImage->height;
outputImage = static_cast<char *>(malloc(pixelCount * 3));
for (int j = 0; j < 100; j++) { // let's do it 100 times, otherwise it's too fast!
for (int i = 0; i < pixelCount; i++) {
outputImage[i * 3] = (char) (((int) inputImage->buffer[i * 3] + (int) inputImage->buffer[i * 3 + 1] +
(int) inputImage->buffer[i * 3 + 2]) / 3);
outputImage[i * 3 + 1] = outputImage[i * 3];
outputImage[i * 3 + 2] = outputImage[i * 3];
}
}
}
void Labwork::labwork1_OpenMP() {
int pixelCount = inputImage->width * inputImage->height;
outputImage = static_cast<char *>(malloc(pixelCount * 3));
#pragma omp parallel dynamic for
{
for (int j = 0; j < 100; j++) {
for (int i = 0; i < pixelCount; i++) {
outputImage[i * 3] = (char) (((int) inputImage->buffer[i * 3] + (int) inputImage->buffer[i * 3 + 1] +
(int) inputImage->buffer[i * 3 + 2]) / 3);
outputImage[i * 3 + 1] = outputImage[i * 3];
outputImage[i * 3 + 2] = outputImage[i * 3];
}
}
}
}
int getSPcores(cudaDeviceProp devProp) {
int cores = 0;
int mp = devProp.multiProcessorCount;
switch (devProp.major) {
case 2: // Fermi
if (devProp.minor == 1) cores = mp * 48;
else cores = mp * 32;
break;
case 3: // Kepler
cores = mp * 192;
break;
case 5: // Maxwell
cores = mp * 128;
break;
case 6: // Pascal
if (devProp.minor == 1) cores = mp * 128;
else if (devProp.minor == 0) cores = mp * 64;
else printf("Unknown device type\n");
break;
default:
printf("Unknown device type\n");
break;
}
return cores;
}
void Labwork::labwork2_GPU() {
int nDevices = 0;
// get all devices
cudaGetDeviceCount(&nDevices);
printf("Number total of GPU : %d\n\n", nDevices);
for (int i = 0; i < nDevices; i++){
// get informations from individual device
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
// something more here
}
}
void Labwork::labwork3_GPU() {
// Calculate number of pixels
// Allocate CUDA memory
// Copy CUDA Memory from CPU to GPU
// Processing
// Copy CUDA Memory from GPU to CPU
// Cleaning
}
void Labwork::labwork4_GPU() {
}
void Labwork::labwork5_GPU(bool shared) {
}
void Labwork::labwork6_GPU() {
}
void Labwork::labwork7_GPU() {
}
void Labwork::labwork8_GPU() {
}
void Labwork::labwork9_GPU() {
}
void Labwork::labwork10_GPU(){
}
|
f5788e4c29e69e3565c70ea71ca2e5a251a0c8e3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
int main() {
FILE *outfile;
int nDevices;
//output file pointer
outfile = fopen("ee16b068_1.txt", "w");
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
//
printf("Device Number: %d\n", i);
//
printf(" Device name: %s\n", prop.name);
//
printf(" Memory Clock Rate (KHz): %d\n",prop.memoryClockRate);
//
printf(" Memory Bus Width (bits): %d\n",prop.memoryBusWidth);
//
printf(" Is L1 Cache supported globally :(0/1) %d\n",prop.globalL1CacheSupported);
fprintf(outfile,"%d\n",prop.globalL1CacheSupported);
//
printf(" Is L1 Cache supported locally :(0/1) %d\n",prop.localL1CacheSupported);
fprintf(outfile,"%d\n",prop.localL1CacheSupported);
//
printf(" L2 Cache Size (bytes) : %d\n",prop.l2CacheSize);
fprintf(outfile,"%d\n",prop.l2CacheSize);
//
printf(" Max no of threads per block : %d\n",prop.maxThreadsPerBlock);
fprintf(outfile,"%d\n",prop.maxThreadsPerBlock);
//
printf(" No of registers available in a block : %d\n",prop.regsPerBlock);
fprintf(outfile,"%d\n",prop.regsPerBlock);
//
printf(" No of registers available in a streaming multiprocessor : %d\n",prop.regsPerMultiprocessor);
fprintf(outfile,"%d\n",prop.regsPerMultiprocessor);
//
printf(" Warp Size :(bytes) %d\n",prop.warpSize);
fprintf(outfile,"%d\n",prop.warpSize);
//
printf(" Grid Size :(bytes) %ld\n",prop.maxGridSize);
//
printf(" Total memory :(bytes) %ld\n",prop.totalGlobalMem);
fprintf(outfile,"%ld\n",prop.totalGlobalMem);
//
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
| f5788e4c29e69e3565c70ea71ca2e5a251a0c8e3.cu | #include <stdio.h>
int main() {
FILE *outfile;
int nDevices;
//output file pointer
outfile = fopen("ee16b068_1.txt", "w");
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
//
printf("Device Number: %d\n", i);
//
printf(" Device name: %s\n", prop.name);
//
printf(" Memory Clock Rate (KHz): %d\n",prop.memoryClockRate);
//
printf(" Memory Bus Width (bits): %d\n",prop.memoryBusWidth);
//
printf(" Is L1 Cache supported globally :(0/1) %d\n",prop.globalL1CacheSupported);
fprintf(outfile,"%d\n",prop.globalL1CacheSupported);
//
printf(" Is L1 Cache supported locally :(0/1) %d\n",prop.localL1CacheSupported);
fprintf(outfile,"%d\n",prop.localL1CacheSupported);
//
printf(" L2 Cache Size (bytes) : %d\n",prop.l2CacheSize);
fprintf(outfile,"%d\n",prop.l2CacheSize);
//
printf(" Max no of threads per block : %d\n",prop.maxThreadsPerBlock);
fprintf(outfile,"%d\n",prop.maxThreadsPerBlock);
//
printf(" No of registers available in a block : %d\n",prop.regsPerBlock);
fprintf(outfile,"%d\n",prop.regsPerBlock);
//
printf(" No of registers available in a streaming multiprocessor : %d\n",prop.regsPerMultiprocessor);
fprintf(outfile,"%d\n",prop.regsPerMultiprocessor);
//
printf(" Warp Size :(bytes) %d\n",prop.warpSize);
fprintf(outfile,"%d\n",prop.warpSize);
//
printf(" Grid Size :(bytes) %ld\n",prop.maxGridSize);
//
printf(" Total memory :(bytes) %ld\n",prop.totalGlobalMem);
fprintf(outfile,"%ld\n",prop.totalGlobalMem);
//
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
|
7487c12d46f507f1edea82103c8487d130e917e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/unpooling_layer.hpp"
#include "caffe/util/gpu_util.cuh"
#include "caffe/util/math_functions.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void MaxUnpoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype* mask, const Dtype* argmax_count,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int pool_channel_offset = (n * channels + c) * pooled_height * pooled_width;
int pool_index = pool_channel_offset + ph * pooled_width + pw;
int channel_offset = (n * channels + c) * height * width;
const int top_index = channel_offset + static_cast<int>(mask[pool_index]);
if (argmax_count) {
const Dtype unpooled_act =
bottom_data[pool_index] / argmax_count[top_index];
caffe_gpu_atomic_add(unpooled_act, top_data + top_index);
} else {
top_data[top_index] = bottom_data[pool_index];
}
}
}
template <typename Dtype>
__global__ void AveUnpoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype* pool_count, const int num, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, const int pad, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int w = index % width + pad;
int h = (index / width) % height + pad;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_size) ? 0 : (h - kernel_size) / stride + 1;
int phend = min(h / stride + 1, pooled_height);
int pwstart = (w < kernel_size) ? 0 : (w - kernel_size) / stride + 1;
int pwend = min(w / stride + 1, pooled_width);
Dtype top_datum = 0;
bottom_data += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
top_datum += bottom_data[ph * pooled_width + pw];
}
}
top_data[index] = top_datum / pool_count[h * width + w];
}
}
template <typename Dtype>
void UnpoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const bool overlapping = (stride_ < kernel_size_);
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
const Dtype* mask;
const Dtype* pool_count;
const Dtype* argmax_count = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
mask = bottom[1]->gpu_data();
if (overlapping) {
argmax_count = bottom[2]->gpu_data();
}
caffe_gpu_set(count, Dtype(0), top_data);
count = bottom[0]->count();
MaxUnpoolForward<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
, dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, mask, argmax_count, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
top_data);
break;
case PoolingParameter_PoolMethod_AVE:
pool_count = pool_count_.gpu_data();
AveUnpoolForward<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
, dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, pool_count, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
pad_, top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
NOT_IMPLEMENTED;
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void MaxUnpoolBackward(const int nthreads,
const Dtype* top_diff, const Dtype* argmax_count,
const Dtype* mask, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_size, const int stride,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int top_offset = (n * channels + c) * height * width;
int top_index = top_offset + static_cast<int>(mask[index]);
int num_top_use = argmax_count ? argmax_count[top_index] : 1;
assert(num_top_use != 0);
bottom_diff[index] = top_diff[top_index] / num_top_use;
}
}
template <typename Dtype>
__global__ void AveUnpoolBackward(const int nthreads, const Dtype* top_diff,
const Dtype* pool_count, const int num, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
const int top_offset = (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += top_diff[top_offset + h * width + w] /
pool_count[h * width + w];
}
}
bottom_diff[index] = aveval;
}
}
template <typename Dtype>
void UnpoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) { return; }
const bool overlapping = (stride_ < kernel_size_);
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
const Dtype* mask;
const Dtype* argmax_count = NULL;
const Dtype* pool_count;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
mask = bottom[1]->gpu_data();
if (overlapping) {
argmax_count = bottom[2]->gpu_data();
}
MaxUnpoolBackward<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
, dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, argmax_count, mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_size_, stride_, bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
pool_count = pool_count_.gpu_data();
AveUnpoolBackward<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
, dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, pool_count, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_,
kernel_size_, stride_, stride_, pad_, pad_, bottom_diff);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
NOT_IMPLEMENTED;
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(UnpoolingLayer);
} // namespace caffe
| 7487c12d46f507f1edea82103c8487d130e917e8.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/unpooling_layer.hpp"
#include "caffe/util/gpu_util.cuh"
#include "caffe/util/math_functions.hpp"
using std::max;
using std::min;
namespace caffe {
// Forward max-unpooling: one thread per pooled (bottom) element. Each pooled
// value is written to the location recorded in `mask` inside the unpooled top
// map. When pooling windows overlap (argmax_count != NULL), several pooled
// cells may target the same top location, so each contribution is divided by
// the usage count and accumulated atomically.
template <typename Dtype>
__global__ void MaxUnpoolForward(const int nthreads, const Dtype* bottom_data,
    const Dtype* mask, const Dtype* argmax_count,
    const int num, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const int kernel_size, const int stride, Dtype* top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Decompose the flat index into (n, c, ph, pw) over the pooled map.
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    int pool_channel_offset = (n * channels + c) * pooled_height * pooled_width;
    int pool_index = pool_channel_offset + ph * pooled_width + pw;
    int channel_offset = (n * channels + c) * height * width;
    // `mask` holds the within-channel argmax position chosen at pooling time.
    const int top_index = channel_offset + static_cast<int>(mask[pool_index]);
    if (argmax_count) {
      // Overlapping windows: split the activation across every window that
      // selected this location, then sum atomically (multiple writers).
      const Dtype unpooled_act =
          bottom_data[pool_index] / argmax_count[top_index];
      caffe_gpu_atomic_add(unpooled_act, top_data + top_index);
    } else {
      // Non-overlapping windows: at most one writer per top location.
      top_data[top_index] = bottom_data[pool_index];
    }
  }
}
// Forward average-unpooling: one thread per top (unpooled) element. Each top
// cell gathers the pooled values of every window covering it and divides by
// the precomputed per-location window count, so the gather is race-free.
//
// BUGFIX: the original body advanced the `bottom_data` parameter itself
// inside CUDA_KERNEL_LOOP ("bottom_data += ..."). CUDA_KERNEL_LOOP is a
// grid-stride loop, so any thread that processes more than one element would
// accumulate the channel offset across iterations and read from the wrong
// addresses. A per-iteration offset is used instead; with the usual
// CAFFE_GET_BLOCKS launch (one element per thread) the results are identical.
template <typename Dtype>
__global__ void AveUnpoolForward(const int nthreads, const Dtype* bottom_data,
    const Dtype* pool_count, const int num, const int channels,
    const int height, const int width,
    const int pooled_height, const int pooled_width,
    const int kernel_size, const int stride, const int pad, Dtype* top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Decompose the flat index into padded (n, c, h, w) coordinates.
    int w = index % width + pad;
    int h = (index / width) % height + pad;
    int c = (index / width / height) % channels;
    int n = index / width / height / channels;
    // Range of pooled windows whose receptive field contains (h, w).
    int phstart = (h < kernel_size) ? 0 : (h - kernel_size) / stride + 1;
    int phend = min(h / stride + 1, pooled_height);
    int pwstart = (w < kernel_size) ? 0 : (w - kernel_size) / stride + 1;
    int pwend = min(w / stride + 1, pooled_width);
    Dtype top_datum = 0;
    // Base of this (n, c) plane in the pooled input; recomputed every
    // iteration instead of mutating the pointer parameter.
    const int bottom_offset = (n * channels + c) * pooled_height * pooled_width;
    for (int ph = phstart; ph < phend; ++ph) {
      for (int pw = pwstart; pw < pwend; ++pw) {
        top_datum += bottom_data[bottom_offset + ph * pooled_width + pw];
      }
    }
    // Normalize by how many windows cover this location.
    top_data[index] = top_datum / pool_count[h * width + w];
  }
}
template <typename Dtype>
void UnpoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Dispatches the unpooling forward pass to the kernel matching the
  // configured pooling method.
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const bool windows_overlap = (stride_ < kernel_size_);
  int num_kernels = top[0]->count();
  const Dtype* mask = NULL;
  const Dtype* argmax_count = NULL;
  const Dtype* pool_count = NULL;
  switch (this->layer_param_.pooling_param().pool()) {
  case PoolingParameter_PoolMethod_MAX:
    mask = bottom[1]->gpu_data();
    if (windows_overlap) {
      argmax_count = bottom[2]->gpu_data();
    }
    // Max unpooling scatters, so clear the whole top blob first, then launch
    // one thread per *bottom* element.
    caffe_gpu_set(num_kernels, Dtype(0), top_data);
    num_kernels = bottom[0]->count();
    // NOLINT_NEXT_LINE(whitespace/operators)
    MaxUnpoolForward<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                              CAFFE_CUDA_NUM_THREADS>>>(
        num_kernels, bottom_data, mask, argmax_count, bottom[0]->num(),
        channels_, height_, width_, pooled_height_, pooled_width_,
        kernel_size_, stride_, top_data);
    break;
  case PoolingParameter_PoolMethod_AVE:
    pool_count = pool_count_.gpu_data();
    // Average unpooling gathers: one thread per *top* element.
    // NOLINT_NEXT_LINE(whitespace/operators)
    AveUnpoolForward<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                              CAFFE_CUDA_NUM_THREADS>>>(
        num_kernels, bottom_data, pool_count, bottom[0]->num(), channels_,
        height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
        pad_, top_data);
    break;
  case PoolingParameter_PoolMethod_STOCHASTIC:
    NOT_IMPLEMENTED;
    break;
  default:
    LOG(FATAL) << "Unknown pooling method.";
  }
  CUDA_POST_KERNEL_CHECK;
}
// Backward max-unpooling: one thread per pooled (bottom) element. The
// gradient is read back from the top location recorded in `mask`; when
// windows overlap, it is divided by the number of windows that selected that
// location, mirroring the forward averaging.
template <typename Dtype>
__global__ void MaxUnpoolBackward(const int nthreads,
    const Dtype* top_diff, const Dtype* argmax_count,
    const Dtype* mask, const int num, const int channels,
    const int height, const int width, const int pooled_height,
    const int pooled_width, const int kernel_size, const int stride,
    Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Only (n, c) are needed here: the spatial target comes from the mask.
    const int chan = (index / pooled_width / pooled_height) % channels;
    const int img = index / pooled_width / pooled_height / channels;
    const int plane_offset = (img * channels + chan) * height * width;
    const int src = plane_offset + static_cast<int>(mask[index]);
    const int uses = argmax_count ? argmax_count[src] : 1;
    assert(uses != 0);  // the forward pass must have counted this location
    bottom_diff[index] = top_diff[src] / uses;
  }
}
// Backward average-unpooling: one thread per pooled (bottom) element. Each
// thread gathers the count-normalized top gradients over its pooling window
// and sums them; no atomics are needed since every bottom cell has exactly
// one writer.
template <typename Dtype>
__global__ void AveUnpoolBackward(const int nthreads, const Dtype* top_diff,
    const Dtype* pool_count, const int num, const int channels,
    const int height, const int width,
    const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Decompose the flat index into (n, c, ph, pw) over the pooled map.
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    // Window extent in the unpooled map, then clipped to the valid region.
    int hstart = ph * stride_h - pad_h;
    int wstart = pw * stride_w - pad_w;
    int hend = min(hstart + kernel_h, height + pad_h);
    int wend = min(wstart + kernel_w, width + pad_w);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    hend = min(hend, height);
    wend = min(wend, width);
    Dtype aveval = 0;
    const int top_offset = (n * channels + c) * height * width;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        // Each top gradient is shared among the pool_count windows covering it.
        aveval += top_diff[top_offset + h * width + w] /
            pool_count[h * width + w];
      }
    }
    bottom_diff[index] = aveval;
  }
}
template <typename Dtype>
void UnpoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  // Routes the unpooling backward pass to the kernel matching the configured
  // pooling method.
  if (!propagate_down[0]) { return; }
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  const bool windows_overlap = (stride_ < kernel_size_);
  const int num_kernels = bottom[0]->count();  // one thread per pooled cell
  const Dtype* mask = NULL;
  const Dtype* argmax_count = NULL;
  const Dtype* pool_count = NULL;
  switch (this->layer_param_.pooling_param().pool()) {
  case PoolingParameter_PoolMethod_MAX:
    mask = bottom[1]->gpu_data();
    if (windows_overlap) {
      argmax_count = bottom[2]->gpu_data();
    }
    // NOLINT_NEXT_LINE(whitespace/operators)
    MaxUnpoolBackward<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                               CAFFE_CUDA_NUM_THREADS>>>(
        num_kernels, top_diff, argmax_count, mask, top[0]->num(), channels_,
        height_, width_, pooled_height_, pooled_width_,
        kernel_size_, stride_, bottom_diff);
    break;
  case PoolingParameter_PoolMethod_AVE:
    pool_count = pool_count_.gpu_data();
    // NOLINT_NEXT_LINE(whitespace/operators)
    AveUnpoolBackward<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                               CAFFE_CUDA_NUM_THREADS>>>(
        num_kernels, top_diff, pool_count, top[0]->num(), channels_,
        height_, width_, pooled_height_, pooled_width_, kernel_size_,
        kernel_size_, stride_, stride_, pad_, pad_, bottom_diff);
    break;
  case PoolingParameter_PoolMethod_STOCHASTIC:
    NOT_IMPLEMENTED;
    break;
  default:
    LOG(FATAL) << "Unknown pooling method.";
  }
  CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(UnpoolingLayer);
} // namespace caffe
|
39d41e6925d0aedcbfb03c67ef4c428f625ffe89.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <thrust/reduce.h>
#include <thrust/device_vector.h>
#include "Utilities.cuh"
#include "InputOutput.h"
#include "TimingGPU.cuh"
// --- Problem size along one size. The computational domain is squared.
#define NUM 1024
#define BLOCKSIZEX 16
#define BLOCKSIZEY 16
// --- Double precision
//#define DOUBLE
#ifdef DOUBLE
#define Real double
#define ZERO 0.0
#define ONE 1.0
#define TWO 2.0
#define FOUR 4.0
// --- SOR relaxation parameter
const Real omega = 1.85;
#else
#define Real float
#define ZERO 0.0f
#define ONE 1.0f
#define TWO 2.0f
#define FOUR 4.0f
// --- SOR relaxation parameter
const Real omega = 1.85f;
#endif
// --- Split temperature into red and black arrays
//#define MEMORY_OPTIMIZATION
// --- Use texture memory
//#define TEXTURE
#ifdef TEXTURE
#ifdef DOUBLE
texture<int2, 1> t_aP;
texture<int2, 1> t_aW;
texture<int2, 1> t_aE;
texture<int2, 1> t_aS;
texture<int2, 1> t_aN;
texture<int2, 1> t_b;
// Fetches a double from a 1D texture bound to int2 data: textures cannot
// hold 64-bit doubles directly, so the two 32-bit halves are recombined.
static __inline__ __device__ double texFetch(texture<int2, 1> tex, int i)
{
    int2 v = tex1Dfetch(tex, i);
    return __hiloint2double(v.y, v.x);
}
#else
texture<float> t_aP;
texture<float> t_aW;
texture<float> t_aE;
texture<float> t_aS;
texture<float> t_aN;
texture<float> t_b;
// Fetches a float from a 1D texture (trivial wrapper kept so single- and
// double-precision builds share the same texFetch call sites).
static __inline__ __device__ float texFetch(texture<float> tex, int i)
{
    return tex1Dfetch(tex, i);
}
#endif
#endif
/*****************************/
/* SET EQUATION COEFFICIENTS */
/*****************************/
// Fills the 5-point stencil coefficients and the right-hand side for an
// Nrows x Ncols grid (column-major: index = col * Nrows + row). Links towards
// each boundary are cut (coefficient zero); only the northern boundary
// contributes a non-zero RHS term (temperature TN).
void setEquationCoefficients(const int Nrows, const int Ncols, const Real TN, Real * __restrict h_aP, Real * __restrict h_aW, Real * __restrict h_aE,
    Real * __restrict h_aS, Real * __restrict h_aN, Real * __restrict h_b)
{
    for (int col = 0; col < Ncols; ++col) {
        for (int row = 0; row < Nrows; ++row) {
            const int idx = col * Nrows + row;
            // --- Neighbor links: zeroed on the matching boundary, one inside.
            h_aW[idx] = (col == 0)         ? ZERO : ONE;  // west wall
            h_aE[idx] = (col == Ncols - 1) ? ZERO : ONE;  // east wall
            h_aS[idx] = (row == 0)         ? ZERO : ONE;  // south wall
            h_aN[idx] = (row == Nrows - 1) ? ZERO : ONE;  // north wall
            // --- RHS: the northern boundary imposes temperature TN.
            h_b[idx] = (row == Nrows - 1) ? TN : ZERO;
            // --- Diagonal coefficient of the 5-point stencil.
            h_aP[idx] = FOUR;
        }
    }
}
/********************************/
/* RED KERNEL - NO OPTIMIZATION */
/********************************/
// One "red" checkerboard half-sweep of SOR, baseline version: both colors
// live interleaved in the same (NUM + 2) x (NUM + 2) array, so threads that
// land on a black cell simply exit. Also records the squared update per cell
// for the host-side L2 convergence norm.
template<class T>
__global__ void redKernelNoOptimization(const T * __restrict__ d_aP, const T * __restrict__ d_aW, const T * __restrict__ d_aE,
    const T * __restrict__ d_aS, const T * __restrict__ d_aN, const T * __restrict__ d_b,
    const T * __restrict__ d_tempBlack, T * __restrict__ d_tempRed, const T omega,
    T * __restrict__ d_L2DifferenceArray)
{
    // --- Addressing the interior of the (NUM + 2) x (NUM + 2) region
    const int tidx = blockIdx.x * blockDim.x + threadIdx.x + 1;
    const int tidy = blockIdx.y * blockDim.y + threadIdx.y + 1;
    if ((tidx + tidy) % 2 != 0) return;  // --- If we are not on a "red" pixel, then exit.
    int ind_red = ( tidy * (NUM + 2)) + tidx;  // --- Index for the "red" image
    int ind = ((tidy - 1) * NUM) + tidx - 1;   // --- Index for the coefficients (interior-only layout)
    T temp_old = d_tempRed[ind_red];
    // --- Source term plus the weighted sum of the four opposite-color neighbors.
    T res = d_b[ind]
        + (d_aW[ind] * d_tempBlack[tidx + (tidy - 1) * (NUM + 2)]
        + d_aE[ind] * d_tempBlack[tidx + (tidy + 1) * (NUM + 2)]
        + d_aS[ind] * d_tempBlack[tidx - 1 + tidy * (NUM + 2)]
        + d_aN[ind] * d_tempBlack[tidx + 1 + tidy * (NUM + 2)]);
    // --- Successive over-relaxation update.
    T temp_new = temp_old * (ONE - omega) + omega * (res / d_aP[ind]);
    d_tempRed[ind_red] = temp_new;
    res = temp_new - temp_old;
    d_L2DifferenceArray[ind_red] = res * res;  // --- Squared residual for the L2 norm
}
/**********************************/
/* BLACK KERNEL - NO OPTIMIZATION */
/**********************************/
// One "black" checkerboard half-sweep of SOR, baseline version; mirror image
// of redKernelNoOptimization (only the parity test and the source/target
// arrays are swapped).
template<class T>
__global__ void blackKernelNoOptimization(const T * __restrict__ d_aP, const T * __restrict__ d_aW, const T * __restrict__ d_aE,
    const T * __restrict__ d_aS, const T * __restrict__ d_aN, const T * __restrict__ d_b,
    const T * __restrict__ d_tempRed, T * __restrict__ d_tempBlack, const T omega,
    T * __restrict__ d_L2DifferenceArray)
{
    // --- Addressing the interior of the (NUM + 2) x (NUM + 2) region
    const int tidx = blockIdx.x * blockDim.x + threadIdx.x + 1;
    const int tidy = blockIdx.y * blockDim.y + threadIdx.y + 1;
    if ((tidx + tidy) % 2 == 0) return;  // --- If we are not on a "black" pixel, then exit.
    int ind_black = (tidy * (NUM + 2)) + tidx;  // --- Index for the "black" image
    int ind = ((tidy - 1) * NUM) + tidx - 1;    // --- Index for the coefficients (interior-only layout)
    T temp_old = d_tempBlack[ind_black];
    // --- Source term plus the weighted sum of the four opposite-color neighbors.
    T res = d_b[ind]
        + (d_aW[ind] * d_tempRed[tidx + (tidy - 1) * (NUM + 2)]
        + d_aE[ind] * d_tempRed[tidx + (tidy + 1) * (NUM + 2)]
        + d_aS[ind] * d_tempRed[tidx - 1 + tidy * (NUM + 2)]
        + d_aN[ind] * d_tempRed[tidx + 1 + tidy * (NUM + 2)]);
    // --- Successive over-relaxation update.
    T temp_new = temp_old * (ONE - omega) + omega * (res / d_aP[ind]);
    d_tempBlack[ind_black] = temp_new;
    res = temp_new - temp_old;
    d_L2DifferenceArray[ind_black] = res * res;  // --- Squared residual for the L2 norm
}
/************************/
/* RED KERNEL - TEXTURE */
/************************/
#ifdef TEXTURE
// "Red" SOR half-sweep, texture variant: identical to
// redKernelNoOptimization except the read-only coefficient arrays are fetched
// through the bound textures (t_aP..t_b) instead of global-memory pointers.
template<class T>
__global__ void redKernelTexture(const T * __restrict__ d_tempBlack, T * __restrict__ d_tempRed, const T omega, T * __restrict__ d_L2DifferenceArray)
{
    // --- Addressing the interior of the (NUM + 2) x (NUM + 2) region
    const int tidx = blockIdx.x * blockDim.x + threadIdx.x + 1;
    const int tidy = blockIdx.y * blockDim.y + threadIdx.y + 1;
    if ((tidx + tidy) % 2 != 0) return;  // --- If we are not on a "red" pixel, then exit.
    int ind_red = ( tidy * (NUM + 2)) + tidx;  // --- Index for the "red" image
    int ind = ((tidy - 1) * NUM) + tidx - 1;   // --- Index for the coefficients
    T temp_old = d_tempRed[ind_red];
    // --- Source term plus weighted opposite-color neighbors (coefficients via texture).
    T res = texFetch(t_b, ind)
        + (texFetch(t_aW, ind) * d_tempBlack[tidx + (tidy - 1) * (NUM + 2)]
        + texFetch(t_aE, ind) * d_tempBlack[tidx + (tidy + 1) * (NUM + 2)]
        + texFetch(t_aS, ind) * d_tempBlack[tidx - 1 + tidy * (NUM + 2)]
        + texFetch(t_aN, ind) * d_tempBlack[tidx + 1 + tidy * (NUM + 2)]);
    // --- Successive over-relaxation update.
    T temp_new = temp_old * (ONE - omega) + omega * (res / texFetch(t_aP, ind));
    d_tempRed[ind_red] = temp_new;
    res = temp_new - temp_old;
    d_L2DifferenceArray[ind_red] = res * res;  // --- Squared residual for the L2 norm
}
#endif
/**************************/
/* BLACK KERNEL - TEXTURE */
/**************************/
#ifdef TEXTURE
// "Black" SOR half-sweep, texture variant; mirror image of redKernelTexture.
template<class T>
__global__ void blackKernelTexture(const T * __restrict__ d_tempRed, T * __restrict__ d_tempBlack, const T omega, T * __restrict__ d_L2DifferenceArray)
{
    // --- Addressing the interior of the (NUM + 2) x (NUM + 2) region
    const int tidx = blockIdx.x * blockDim.x + threadIdx.x + 1;
    const int tidy = blockIdx.y * blockDim.y + threadIdx.y + 1;
    if ((tidx + tidy) % 2 == 0) return;  // --- If we are not on a "black" pixel, then exit.
    int ind_black = (tidy * (NUM + 2)) + tidx;  // --- Index for the "black" image
    int ind = ((tidy - 1) * NUM) + tidx - 1;    // --- Index for the coefficients
    T temp_old = d_tempBlack[ind_black];
    // --- Source term plus weighted opposite-color neighbors (coefficients via texture).
    T res = texFetch(t_b, ind)
        + (texFetch(t_aW, ind) * d_tempRed[tidx + (tidy - 1) * (NUM + 2)]
        + texFetch(t_aE, ind) * d_tempRed[tidx + (tidy + 1) * (NUM + 2)]
        + texFetch(t_aS, ind) * d_tempRed[tidx - 1 + tidy * (NUM + 2)]
        + texFetch(t_aN, ind) * d_tempRed[tidx + 1 + tidy * (NUM + 2)]);
    // --- Successive over-relaxation update.
    T temp_new = temp_old * (ONE - omega) + omega * (res / texFetch(t_aP, ind));
    d_tempBlack[ind_black] = temp_new;
    res = temp_new - temp_old;
    d_L2DifferenceArray[ind_black] = res * res;  // --- Squared residual for the L2 norm
}
#endif
/************************************/
/* RED KERNEL - MEMORY OPTIMIZATION */
/************************************/
// "Red" SOR half-sweep, compact-storage variant: red and black cells are kept
// in separate half-width arrays with row pitch ((NUM >> 1) + 2), so no thread
// is wasted on the opposite color. The parity terms (tidy & 1) map the
// compact coordinates back to the full-grid coefficient index and select the
// correct left/right neighbor in the opposite-color array.
template<class T>
__global__ void redKernelMemoryOptimization(const T * __restrict__ d_aP, const T * __restrict__ d_aW, const T * __restrict__ d_aE,
    const T * __restrict__ d_aS, const T * __restrict__ d_aN, const T * __restrict__ d_b,
    const T * __restrict__ d_tempBlack, T * __restrict__ d_tempRed, const T omega,
    T * __restrict__ d_L2DifferenceArray)
{
    // --- Addressing the interior of the (NUM + 2) x (NUM + 2) region
    const int tidx = blockIdx.x * blockDim.x + threadIdx.x + 1;
    const int tidy = blockIdx.y * blockDim.y + threadIdx.y + 1;
    int ind_red = tidy * ((NUM >> 1) + 2) + tidx;           // --- Local (red) index; problemSize of the matrix is ((NUM >> 1) + 2) x ((NUM >> 1) + 2)
    int ind = 2 * tidx - (tidy & 1) - 1 + NUM * (tidy - 1); // --- Global index into the full-grid coefficient arrays
    T temp_old = d_tempRed[ind_red];
    // --- Source term plus the four black neighbors (row parity picks the side neighbors).
    T res = d_b[ind]
        + (d_aW[ind] * d_tempBlack[tidx + (tidy - 1) * ((NUM >> 1) + 2)]
        + d_aE[ind] * d_tempBlack[tidx + (tidy + 1) * ((NUM >> 1) + 2)]
        + d_aS[ind] * d_tempBlack[tidx - (tidy & 1) + tidy * ((NUM >> 1) + 2)]
        + d_aN[ind] * d_tempBlack[tidx + ((tidy + 1) & 1) + tidy * ((NUM >> 1) + 2)]);
    // --- Successive over-relaxation update.
    T temp_new = temp_old * (ONE - omega) + omega * (res / d_aP[ind]);
    d_tempRed[ind_red] = temp_new;
    res = temp_new - temp_old;
    d_L2DifferenceArray[ind_red] = res * res;  // --- Squared residual for the L2 norm
}
/**************************************/
/* BLACK KERNEL - MEMORY OPTIMIZATION */
/**************************************/
// "Black" SOR half-sweep, compact-storage variant; mirror image of
// redKernelMemoryOptimization with the row-parity terms complemented.
template<class T>
__global__ void blackKernelMemoryOptimization(const T * __restrict__ d_aP, const T * __restrict__ d_aW, const T * __restrict__ d_aE,
    const T * __restrict__ d_aS, const T * __restrict__ d_aN, const T * __restrict__ d_b,
    const T * __restrict__ d_tempRed, T * __restrict__ d_tempBlack, const T omega,
    T * __restrict__ d_L2DifferenceArray)
{
    // --- Addressing the interior of the (NUM + 2) x (NUM + 2) region
    const int tidx = blockIdx.x * blockDim.x + threadIdx.x + 1;
    const int tidy = blockIdx.y * blockDim.y + threadIdx.y + 1;
    int ind_black = tidy * ((NUM >> 1) + 2) + tidx;               // --- Local (black) index; row pitch is ((NUM >> 1) + 2)
    int ind = 2 * tidx - ((tidy + 1) & 1) - 1 + NUM * (tidy - 1); // --- Global index into the full-grid coefficient arrays
    T temp_old = d_tempBlack[ind_black];
    // --- Source term plus the four red neighbors (row parity picks the side neighbors).
    T res = d_b[ind]
        + (d_aW[ind] * d_tempRed[tidx + (tidy - 1) * ((NUM >> 1) + 2)]
        + d_aE[ind] * d_tempRed[tidx + (tidy + 1) * ((NUM >> 1) + 2)]
        + d_aS[ind] * d_tempRed[tidx - ((tidy + 1) & 1) + tidy * ((NUM >> 1) + 2)]
        + d_aN[ind] * d_tempRed[tidx + (tidy & 1) + tidy * ((NUM >> 1) + 2)]);
    // --- Successive over-relaxation update.
    T temp_new = temp_old * (ONE - omega) + omega * (res / d_aP[ind]);
    d_tempBlack[ind_black] = temp_new;
    res = temp_new - temp_old;
    d_L2DifferenceArray[ind_black] = res * res;  // --- Squared residual for the L2 norm
}
/************************************************/
/* RED KERNEL - MEMORY OPTIMIZATION AND TEXTURE */
/************************************************/
#ifdef TEXTURE
// "Red" SOR half-sweep combining compact red/black storage with texture
// fetches for the read-only coefficient arrays.
template<class T>
__global__ void redKernelMemoryOptimizationTexture(const T * __restrict__ d_tempBlack, T * __restrict__ d_tempRed, const T omega, T * __restrict__ d_L2DifferenceArray)
{
    // --- Addressing the interior of the (NUM + 2) x (NUM + 2) region
    const int tidx = blockIdx.x * blockDim.x + threadIdx.x + 1;
    const int tidy = blockIdx.y * blockDim.y + threadIdx.y + 1;
    int ind_red = tidy * ((NUM >> 1) + 2) + tidx;           // --- Local (red) index; row pitch is ((NUM >> 1) + 2)
    int ind = 2 * tidx - (tidy & 1) - 1 + NUM * (tidy - 1); // --- Global index into the full-grid coefficient arrays
    T temp_old = d_tempRed[ind_red];
    // --- Source term plus the four black neighbors (coefficients via texture).
    T res = texFetch(t_b, ind)
        + (texFetch(t_aW, ind) * d_tempBlack[tidx + (tidy - 1) * ((NUM >> 1) + 2)]
        + texFetch(t_aE, ind) * d_tempBlack[tidx + (tidy + 1) * ((NUM >> 1) + 2)]
        + texFetch(t_aS, ind) * d_tempBlack[tidx - (tidy & 1) + tidy * ((NUM >> 1) + 2)]
        + texFetch(t_aN, ind) * d_tempBlack[tidx + ((tidy + 1) & 1) + tidy * ((NUM >> 1) + 2)]);
    // --- Successive over-relaxation update.
    T temp_new = temp_old * (ONE - omega) + omega * (res / texFetch(t_aP, ind));
    d_tempRed[ind_red] = temp_new;
    res = temp_new - temp_old;
    d_L2DifferenceArray[ind_red] = res * res;  // --- Squared residual for the L2 norm
}
#endif
/**************************************************/
/* BLACK KERNEL - MEMORY OPTIMIZATION AND TEXTURE */
/**************************************************/
#ifdef TEXTURE
// "Black" SOR half-sweep combining compact red/black storage with texture
// fetches; mirror image of redKernelMemoryOptimizationTexture.
template<class T>
__global__ void blackKernelMemoryOptimizationTexture(const T * __restrict__ d_tempRed, T * __restrict__ d_tempBlack, const T omega, T * __restrict__ d_L2DifferenceArray)
{
    // --- Addressing the interior of the (NUM + 2) x (NUM + 2) region
    const int tidx = blockIdx.x * blockDim.x + threadIdx.x + 1;
    const int tidy = blockIdx.y * blockDim.y + threadIdx.y + 1;
    int ind_black = tidy * ((NUM >> 1) + 2) + tidx;               // --- Local (black) index; row pitch is ((NUM >> 1) + 2)
    int ind = 2 * tidx - ((tidy + 1) & 1) - 1 + NUM * (tidy - 1); // --- Global index into the full-grid coefficient arrays
    T temp_old = d_tempBlack[ind_black];
    // --- Source term plus the four red neighbors (coefficients via texture).
    T res = texFetch(t_b, ind)
        + (texFetch(t_aW, ind) * d_tempRed[tidx + (tidy - 1) * ((NUM >> 1) + 2)]
        + texFetch(t_aE, ind) * d_tempRed[tidx + (tidy + 1) * ((NUM >> 1) + 2)]
        + texFetch(t_aS, ind) * d_tempRed[tidx - ((tidy + 1) & 1) + tidy * ((NUM >> 1) + 2)]
        + texFetch(t_aN, ind) * d_tempRed[tidx + (tidy & 1) + tidy * ((NUM >> 1) + 2)]);
    // --- Successive over-relaxation update.
    T temp_new = temp_old * (ONE - omega) + omega * (res / texFetch(t_aP, ind));
    d_tempBlack[ind_black] = temp_new;
    res = temp_new - temp_old;
    d_L2DifferenceArray[ind_black] = res * res;  // --- Squared residual for the L2 norm
}
#endif
/********/
/* MAIN */
/********/
int main(void) {

    // Red/black SOR solver for the steady-state heat equation on a square
    // NUM x NUM domain. Variants (textures, compact red/black storage) are
    // selected at compile time via the TEXTURE / MEMORY_OPTIMIZATION macros.
    //
    // BUGFIXES relative to the original:
    //  * d_L2DifferenceArray is now zero-initialized. The kernels write only
    //    interior red/black entries, so the halo entries of the array were
    //    uninitialized device memory that thrust::reduce summed into the
    //    convergence norm.
    //  * the D2H copy of the black array is error-checked.
    //  * h_T is freed (was leaked).

    TimingGPU timerGPU;

    /**********************/
    /* PROBLEM PARAMETERS */
    /**********************/
    Real TN = 1.0;  // --- Temperature at northern boundary

    // --- Number of cells in x and y directions including boundary cells.
    //     With the memory optimization each color array holds half the columns
    //     per row, so the per-row pitch halves.
#ifdef MEMORY_OPTIMIZATION
    int Nrows = (NUM / 2) + 2;
#else
    int Nrows = NUM + 2;
#endif
    int Ncols = NUM + 2;

    // --- Problem size (interior unknowns) and computational size (with halo)
    int problemSize = NUM * NUM;
    int computationalSize = Nrows * Ncols;

    /*************************/
    /* ITERATIONS PARAMETERS */
    /*************************/
    Real tol = 1.e-6;  // --- SOR iteration tolerance
    int maxIter = 1e6; // --- Maximum number of iterations
    int iter;

    /***************************/
    /* HOST MEMORY ALLOCATIONS */
    /***************************/
    // --- Equation coefficients
    Real *h_aP = (Real *)calloc(problemSize, sizeof(Real)); // --- Self coefficients
    Real *h_aW = (Real *)calloc(problemSize, sizeof(Real)); // --- West neighbor coefficients
    Real *h_aE = (Real *)calloc(problemSize, sizeof(Real)); // --- East neighbor coefficients
    Real *h_aS = (Real *)calloc(problemSize, sizeof(Real)); // --- South neighbor coefficients
    Real *h_aN = (Real *)calloc(problemSize, sizeof(Real)); // --- North neighbor coefficients

    // --- Right-hand side array
    Real *h_b = (Real *)calloc(problemSize, sizeof(Real));

    Real *h_tempRed   = (Real *)calloc(computationalSize, sizeof(Real)); // --- Red-cells temperature array
    Real *h_tempBlack = (Real *)calloc(computationalSize, sizeof(Real)); // --- Black-cells temperature array

    // --- Set equation coefficients
    setEquationCoefficients(NUM, NUM, TN, h_aP, h_aW, h_aE, h_aS, h_aN, h_b);

    /****************************/
    /* SET GRID AND BLOCK SIZES */
    /****************************/
    dim3 dimBlock(BLOCKSIZEX, BLOCKSIZEY);
#ifdef MEMORY_OPTIMIZATION
    dim3 dimGrid(iDivUp(NUM / 2, BLOCKSIZEX), iDivUp(NUM, BLOCKSIZEY));
#else
    dim3 dimGrid(iDivUp(NUM, BLOCKSIZEX), iDivUp(NUM, BLOCKSIZEY));
#endif

    printf("Problem problemSize: %d x %d \n", NUM, NUM);

    timerGPU.StartCounter();

    /*****************************/
    /* DEVICE MEMORY ALLOCATIONS */
    /*****************************/
    Real *d_aP; gpuErrchk(hipMalloc((void**)&d_aP, problemSize * sizeof(Real)));
    Real *d_aW; gpuErrchk(hipMalloc((void**)&d_aW, problemSize * sizeof(Real)));
    Real *d_aE; gpuErrchk(hipMalloc((void**)&d_aE, problemSize * sizeof(Real)));
    Real *d_aS; gpuErrchk(hipMalloc((void**)&d_aS, problemSize * sizeof(Real)));
    Real *d_aN; gpuErrchk(hipMalloc((void**)&d_aN, problemSize * sizeof(Real)));
    Real *d_b;  gpuErrchk(hipMalloc((void**)&d_b,  problemSize * sizeof(Real)));

    Real *d_tempRed; gpuErrchk(hipMalloc((void**)&d_tempRed, computationalSize * sizeof(Real)));
#ifdef MEMORY_OPTIMIZATION
    Real *d_tempBlack; gpuErrchk(hipMalloc((void**)&d_tempBlack, computationalSize * sizeof(Real)));
#endif
    Real *d_L2DifferenceArray; gpuErrchk(hipMalloc((void**)&d_L2DifferenceArray, computationalSize * sizeof(Real)));

    /*****************************/
    /* HOST-DEVICE MEMORY COPIES */
    /*****************************/
    gpuErrchk(hipMemcpy(d_aP, h_aP, problemSize * sizeof(Real), hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(d_aW, h_aW, problemSize * sizeof(Real), hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(d_aE, h_aE, problemSize * sizeof(Real), hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(d_aS, h_aS, problemSize * sizeof(Real), hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(d_aN, h_aN, problemSize * sizeof(Real), hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(d_b,  h_b,  problemSize * sizeof(Real), hipMemcpyHostToDevice));

    gpuErrchk(hipMemset(d_tempRed, 0, computationalSize * sizeof(Real)));
#ifdef MEMORY_OPTIMIZATION
    gpuErrchk(hipMemset(d_tempBlack, 0, computationalSize * sizeof(Real)));
#endif
    // --- Zero the residual array: the kernels only overwrite interior
    //     red/black entries, and the reduction below sums the whole array.
    gpuErrchk(hipMemset(d_L2DifferenceArray, 0, computationalSize * sizeof(Real)));

    /********************/
    /* TEXTURE BINDINGS */
    /********************/
#ifdef TEXTURE
    gpuErrchk(hipBindTexture(NULL, t_aP, d_aP, problemSize * sizeof(Real)));
    gpuErrchk(hipBindTexture(NULL, t_aW, d_aW, problemSize * sizeof(Real)));
    gpuErrchk(hipBindTexture(NULL, t_aE, d_aE, problemSize * sizeof(Real)));
    gpuErrchk(hipBindTexture(NULL, t_aS, d_aS, problemSize * sizeof(Real)));
    gpuErrchk(hipBindTexture(NULL, t_aN, d_aN, problemSize * sizeof(Real)));
    gpuErrchk(hipBindTexture(NULL, t_b,  d_b,  problemSize * sizeof(Real)));
#endif

    /**************/
    /* ITERATIONS */
    /**************/
    for (iter = 0; iter < maxIter; ++iter) {

        // --- Update red cells. Without MEMORY_OPTIMIZATION both colors live
        //     interleaved in d_tempRed, hence the same pointer passed twice.
#if defined(TEXTURE) && defined(MEMORY_OPTIMIZATION)
        redKernelMemoryOptimizationTexture << <dimGrid, dimBlock >> > (d_tempBlack, d_tempRed, omega, d_L2DifferenceArray);
#elif defined(TEXTURE) && !defined(MEMORY_OPTIMIZATION)
        redKernelTexture << <dimGrid, dimBlock >> > (d_tempRed, d_tempRed, omega, d_L2DifferenceArray);
#elif !defined(TEXTURE) && defined(MEMORY_OPTIMIZATION)
        redKernelMemoryOptimization << <dimGrid, dimBlock >> > (d_aP, d_aW, d_aE, d_aS, d_aN, d_b, d_tempBlack, d_tempRed, omega, d_L2DifferenceArray);
#else
        redKernelNoOptimization << <dimGrid, dimBlock >> > (d_aP, d_aW, d_aE, d_aS, d_aN, d_b, d_tempRed, d_tempRed, omega, d_L2DifferenceArray);
#endif

        // --- Update black cells
#if defined(TEXTURE) && defined(MEMORY_OPTIMIZATION)
        blackKernelMemoryOptimizationTexture << <dimGrid, dimBlock >> > (d_tempRed, d_tempBlack, omega, d_L2DifferenceArray);
#elif defined(TEXTURE) && !defined(MEMORY_OPTIMIZATION)
        blackKernelTexture << <dimGrid, dimBlock >> > (d_tempRed, d_tempRed, omega, d_L2DifferenceArray);
#elif !defined(TEXTURE) && defined(MEMORY_OPTIMIZATION)
        blackKernelMemoryOptimization << <dimGrid, dimBlock >> > (d_aP, d_aW, d_aE, d_aS, d_aN, d_b, d_tempRed, d_tempBlack, omega, d_L2DifferenceArray);
#else
        blackKernelNoOptimization << <dimGrid, dimBlock >> > (d_aP, d_aW, d_aE, d_aS, d_aN, d_b, d_tempRed, d_tempRed, omega, d_L2DifferenceArray);
#endif

        // --- Calculate the RMS of the last update as the convergence measure
        Real norm_L2 = thrust::reduce(thrust::device_pointer_cast(d_L2DifferenceArray), thrust::device_pointer_cast(d_L2DifferenceArray) + computationalSize);
        norm_L2 = sqrt(norm_L2 / ((Real)problemSize));

        if (iter % 100 == 0) printf("%5d, %0.6f\n", iter, norm_L2);

        // --- If tolerance has been reached, end SOR iterations
        if (norm_L2 < tol) break;
    }

    // --- Transfer final red and black temperatures back to the host
    gpuErrchk(hipMemcpy(h_tempRed, d_tempRed, computationalSize * sizeof(Real), hipMemcpyDeviceToHost));
#ifdef MEMORY_OPTIMIZATION
    gpuErrchk(hipMemcpy(h_tempBlack, d_tempBlack, computationalSize * sizeof(Real), hipMemcpyDeviceToHost));
#endif

    double runtime = timerGPU.GetCounter();
    printf("GPU\n");
    printf("Iterations: %i\n", iter);
    printf("Total time: %f s\n", runtime / 1000.0);

    /****************************/
    /* SAVING THE FINAL RESULTS */
    /****************************/
    // --- Reassemble the full interior temperature field from the red/black
    //     arrays into row-major h_T.
    Real *h_T = (Real *)calloc(NUM * NUM, sizeof(Real));

    for (int row = 1; row < NUM + 1; ++row) {
        for (int col = 1; col < NUM + 1; ++col) {

            // --- Red cells
            if ((row + col) % 2 == 0) {
#ifdef MEMORY_OPTIMIZATION
                int ind = col * Nrows + (row + (col % 2)) / 2;
#else
                int ind = col * Nrows + row;
#endif
                h_T[(col - 1) * NUM + row - 1] = h_tempRed[ind];
            }
            // --- Black cells
            else {
#ifdef MEMORY_OPTIMIZATION
                int ind = col * Nrows + (row + ((col + 1) % 2)) / 2;
                h_T[((col - 1) * NUM) + row - 1] = h_tempBlack[ind];
#else
                int ind = col * Nrows + row;
                h_T[((col - 1) * NUM) + row - 1] = h_tempRed[ind];
#endif
            }
        }
    }

    printf("Saving...\n");
    saveCPUrealtxt(h_T, "D:\\Project\\Laplace_SOR_Red_Black\\Laplace_SOR_Red_Black\\Temp.txt", NUM * NUM);
#ifndef MEMORY_OPTIMIZATION
    saveCPUrealtxt(h_tempRed, "D:\\Project\\Laplace_SOR_Red_Black\\Laplace_SOR_Red_Black\\Temp_red.txt", (NUM + 2) * (NUM + 2));
#endif

    // --- Free device memory
    gpuErrchk(hipFree(d_aP));
    gpuErrchk(hipFree(d_aW));
    gpuErrchk(hipFree(d_aE));
    gpuErrchk(hipFree(d_aS));
    gpuErrchk(hipFree(d_aN));
    gpuErrchk(hipFree(d_b));
    gpuErrchk(hipFree(d_tempRed));
#ifdef MEMORY_OPTIMIZATION
    gpuErrchk(hipFree(d_tempBlack));
#endif
    gpuErrchk(hipFree(d_L2DifferenceArray));

#ifdef TEXTURE
    // --- Unbind textures
    gpuErrchk(hipUnbindTexture(t_aP));
    gpuErrchk(hipUnbindTexture(t_aW));
    gpuErrchk(hipUnbindTexture(t_aE));
    gpuErrchk(hipUnbindTexture(t_aS));
    gpuErrchk(hipUnbindTexture(t_aN));
    gpuErrchk(hipUnbindTexture(t_b));
#endif

    // --- Free host memory
    free(h_aP);
    free(h_aW);
    free(h_aE);
    free(h_aS);
    free(h_aN);
    free(h_b);
    free(h_tempRed);
    free(h_tempBlack);
    free(h_T);  // was leaked in the original

    gpuErrchk(hipDeviceReset());

    return 0;
}
| 39d41e6925d0aedcbfb03c67ef4c428f625ffe89.cu | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#include <thrust/reduce.h>
#include <thrust/device_vector.h>
#include "Utilities.cuh"
#include "InputOutput.h"
#include "TimingGPU.cuh"
// --- Problem size along one size. The computational domain is squared.
#define NUM 1024
#define BLOCKSIZEX 16
#define BLOCKSIZEY 16
// --- Double precision
//#define DOUBLE
#ifdef DOUBLE
#define Real double
#define ZERO 0.0
#define ONE 1.0
#define TWO 2.0
#define FOUR 4.0
// --- SOR relaxation parameter
const Real omega = 1.85;
#else
#define Real float
#define ZERO 0.0f
#define ONE 1.0f
#define TWO 2.0f
#define FOUR 4.0f
// --- SOR relaxation parameter
const Real omega = 1.85f;
#endif
// --- Split temperature into red and black arrays
//#define MEMORY_OPTIMIZATION
// --- Use texture memory
//#define TEXTURE
#ifdef TEXTURE
#ifdef DOUBLE
texture<int2, 1> t_aP;
texture<int2, 1> t_aW;
texture<int2, 1> t_aE;
texture<int2, 1> t_aS;
texture<int2, 1> t_aN;
texture<int2, 1> t_b;
static __inline__ __device__ double texFetch(texture<int2, 1> tex, int i)
{
int2 v = tex1Dfetch(tex, i);
return __hiloint2double(v.y, v.x);
}
#else
texture<float> t_aP;
texture<float> t_aW;
texture<float> t_aE;
texture<float> t_aS;
texture<float> t_aN;
texture<float> t_b;
static __inline__ __device__ float texFetch(texture<float> tex, int i)
{
return tex1Dfetch(tex, i);
}
#endif
#endif
/*****************************/
/* SET EQUATION COEFFICIENTS */
/*****************************/
// Fills the 5-point stencil coefficients and the right-hand side for an
// Nrows x Ncols grid (column-major: index = col * Nrows + row). Links towards
// each boundary are cut (coefficient zero); only the northern boundary
// contributes a non-zero RHS term (temperature TN).
void setEquationCoefficients(const int Nrows, const int Ncols, const Real TN, Real * __restrict h_aP, Real * __restrict h_aW, Real * __restrict h_aE,
    Real * __restrict h_aS, Real * __restrict h_aN, Real * __restrict h_b)
{
    for (int col = 0; col < Ncols; ++col) {
        for (int row = 0; row < Nrows; ++row) {
            int ind = col * Nrows + row;
            h_b[ind] = ZERO;
            // --- Left boundary condition: temperature is TW
            if (col == 0) h_aW[ind] = ZERO;
            else h_aW[ind] = ONE;
            // --- Right boundary condition: temperature is TE
            if (col == Ncols - 1) h_aE[ind] = ZERO;
            else h_aE[ind] = ONE;
            // --- Bottom boundary condition: temperature is TS
            if (row == 0) h_aS[ind] = ZERO;
            else h_aS[ind] = ONE;
            // --- Top boundary condition: temperature is TN
            if (row == Nrows - 1) {
                h_aN[ind] = ZERO;
                h_b[ind] = TN;
            }
            else h_aN[ind] = ONE;
            // --- Diagonal coefficient of the 5-point stencil.
            h_aP[ind] = FOUR;
        }
    }
}
/********************************/
/* RED KERNEL - NO OPTIMIZATION */
/********************************/
// One "red" checkerboard half-sweep of SOR, baseline version: both colors
// live interleaved in the same (NUM + 2) x (NUM + 2) array, so threads that
// land on a black cell simply exit. Also records the squared update per cell
// for the host-side L2 convergence norm.
template<class T>
__global__ void redKernelNoOptimization(const T * __restrict__ d_aP, const T * __restrict__ d_aW, const T * __restrict__ d_aE,
    const T * __restrict__ d_aS, const T * __restrict__ d_aN, const T * __restrict__ d_b,
    const T * __restrict__ d_tempBlack, T * __restrict__ d_tempRed, const T omega,
    T * __restrict__ d_L2DifferenceArray)
{
    // --- Addressing the interior of the (NUM + 2) x (NUM + 2) region
    const int tidx = blockIdx.x * blockDim.x + threadIdx.x + 1;
    const int tidy = blockIdx.y * blockDim.y + threadIdx.y + 1;
    if ((tidx + tidy) % 2 != 0) return;  // --- If we are not on a "red" pixel, then exit.
    int ind_red = ( tidy * (NUM + 2)) + tidx;  // --- Index for the "red" image
    int ind = ((tidy - 1) * NUM) + tidx - 1;   // --- Index for the coefficients (interior-only layout)
    T temp_old = d_tempRed[ind_red];
    // --- Source term plus the weighted sum of the four opposite-color neighbors.
    T res = d_b[ind]
        + (d_aW[ind] * d_tempBlack[tidx + (tidy - 1) * (NUM + 2)]
        + d_aE[ind] * d_tempBlack[tidx + (tidy + 1) * (NUM + 2)]
        + d_aS[ind] * d_tempBlack[tidx - 1 + tidy * (NUM + 2)]
        + d_aN[ind] * d_tempBlack[tidx + 1 + tidy * (NUM + 2)]);
    // --- Successive over-relaxation update.
    T temp_new = temp_old * (ONE - omega) + omega * (res / d_aP[ind]);
    d_tempRed[ind_red] = temp_new;
    res = temp_new - temp_old;
    d_L2DifferenceArray[ind_red] = res * res;  // --- Squared residual for the L2 norm
}
/**********************************/
/* BLACK KERNEL - NO OPTIMIZATION */
/**********************************/
// Black-cell pass of the red/black SOR sweep (no memory or texture
// optimization). Mirror image of redKernelNoOptimization: threads whose
// (tidx + tidy) parity is even exit, so only "black" checkerboard cells are
// updated, reading the red temperatures written by the preceding red pass.
template<class T>
__global__ void blackKernelNoOptimization(const T * __restrict__ d_aP, const T * __restrict__ d_aW, const T * __restrict__ d_aE,
    const T * __restrict__ d_aS, const T * __restrict__ d_aN, const T * __restrict__ d_b,
    const T * __restrict__ d_tempRed, T * __restrict__ d_tempBlack, const T omega,
    T * __restrict__ d_L2DifferenceArray)
{
    // --- Coordinates inside the (NUM + 2) x (NUM + 2) interior
    const int tidx = blockIdx.x * blockDim.x + threadIdx.x + 1;
    const int tidy = blockIdx.y * blockDim.y + threadIdx.y + 1;
    // --- Only "black" cells (odd coordinate parity) do work
    if ((tidx + tidy) % 2 == 0) return;
    const int pitch    = NUM + 2;                     // row pitch of the temperature grid
    const int indBlack = tidy * pitch + tidx;         // index into the temperature grid
    const int indCoef  = (tidy - 1) * NUM + tidx - 1; // index into the coefficient arrays
    const T oldTemp = d_tempBlack[indBlack];
    // --- RHS plus weighted sum of the four neighbouring temperatures
    //     (same summation order as the reference implementation)
    const T weighted = d_b[indCoef]
        + (d_aW[indCoef] * d_tempRed[(tidy - 1) * pitch + tidx]
         + d_aE[indCoef] * d_tempRed[(tidy + 1) * pitch + tidx]
         + d_aS[indCoef] * d_tempRed[tidy * pitch + tidx - 1]
         + d_aN[indCoef] * d_tempRed[tidy * pitch + tidx + 1]);
    // --- Over-relaxed update: blend the old value with the new estimate
    const T newTemp = oldTemp * (ONE - omega) + omega * (weighted / d_aP[indCoef]);
    d_tempBlack[indBlack] = newTemp;
    const T delta = newTemp - oldTemp;
    d_L2DifferenceArray[indBlack] = delta * delta;
}
/************************/
/* RED KERNEL - TEXTURE */
/************************/
#ifdef TEXTURE
// Red-cell SOR pass, texture variant: same update as redKernelNoOptimization
// but the coefficients and right-hand side are fetched through the 1D texture
// references (t_aP, t_aW, t_aE, t_aS, t_aN, t_b) bound in main() via texFetch().
// NOTE(review): aW/aE multiply the tidy +/- 1 neighbours while aS/aN multiply
// the tidx +/- 1 neighbours; the compass labels look transposed with respect
// to the index axes -- confirm the intended grid orientation.
template<class T>
__global__ void redKernelTexture(const T * __restrict__ d_tempBlack, T * __restrict__ d_tempRed, const T omega, T * __restrict__ d_L2DifferenceArray)
{
// --- Addressing the interior of the (NUM + 2) x (NUM + 2) region
const int tidx = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int tidy = blockIdx.y * blockDim.y + threadIdx.y + 1;
if ((tidx + tidy) % 2 != 0) return; // --- If we are not on a "red" pixel, then exit.
int ind_red = ( tidy * (NUM + 2)) + tidx; // --- Index for the "red" image
int ind = ((tidy - 1) * NUM) + tidx - 1; // --- Index for the coefficients
T temp_old = d_tempRed[ind_red];
// --- RHS plus weighted sum of the four neighbouring (black) temperatures
T res = texFetch(t_b, ind)
+ (texFetch(t_aW, ind) * d_tempBlack[tidx + (tidy - 1) * (NUM + 2)]
+ texFetch(t_aE, ind) * d_tempBlack[tidx + (tidy + 1) * (NUM + 2)]
+ texFetch(t_aS, ind) * d_tempBlack[tidx - 1 + tidy * (NUM + 2)]
+ texFetch(t_aN, ind) * d_tempBlack[tidx + 1 + tidy * (NUM + 2)]);
// --- Over-relaxed update: blend of the old value and the new estimate
T temp_new = temp_old * (ONE - omega) + omega * (res / texFetch(t_aP, ind));
d_tempRed[ind_red] = temp_new;
res = temp_new - temp_old;
// --- Squared local change; reduced on the host into the L2 stopping norm
d_L2DifferenceArray[ind_red] = res * res;
}
#endif
/**************************/
/* BLACK KERNEL - TEXTURE */
/**************************/
#ifdef TEXTURE
// Black-cell SOR pass, texture variant: mirror image of redKernelTexture.
// Threads on even-parity cells exit, so only "black" checkerboard cells are
// updated, reading the red temperatures written by the preceding red pass.
template<class T>
__global__ void blackKernelTexture(const T * __restrict__ d_tempRed, T * __restrict__ d_tempBlack, const T omega, T * __restrict__ d_L2DifferenceArray)
{
// --- Addressing the interior of the (NUM + 2) x (NUM + 2) region
const int tidx = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int tidy = blockIdx.y * blockDim.y + threadIdx.y + 1;
if ((tidx + tidy) % 2 == 0) return; // --- If we are not on a "black" pixel, then exit.
int ind_black = (tidy * (NUM + 2)) + tidx; // --- Index for the "black" image
int ind = ((tidy - 1) * NUM) + tidx - 1; // --- Index for the coefficients
T temp_old = d_tempBlack[ind_black];
// --- RHS plus weighted sum of the four neighbouring (red) temperatures
T res = texFetch(t_b, ind)
+ (texFetch(t_aW, ind) * d_tempRed[tidx + (tidy - 1) * (NUM + 2)]
+ texFetch(t_aE, ind) * d_tempRed[tidx + (tidy + 1) * (NUM + 2)]
+ texFetch(t_aS, ind) * d_tempRed[tidx - 1 + tidy * (NUM + 2)]
+ texFetch(t_aN, ind) * d_tempRed[tidx + 1 + tidy * (NUM + 2)]);
// --- Over-relaxed update: blend of the old value and the new estimate
T temp_new = temp_old * (ONE - omega) + omega * (res / texFetch(t_aP, ind));
d_tempBlack[ind_black] = temp_new;
res = temp_new - temp_old;
// --- Squared local change; reduced on the host into the L2 stopping norm
d_L2DifferenceArray[ind_black] = res * res;
}
#endif
/************************************/
/* RED KERNEL - MEMORY OPTIMIZATION */
/************************************/
// Red-cell SOR pass with compacted storage: red and black temperatures are
// kept in separate arrays of row pitch ((NUM >> 1) + 2) -- half the columns of
// the full grid -- so every launched thread owns a red cell and no parity test
// is needed. ind maps the compacted (tidx, tidy) coordinates back into the
// full NUM x NUM coefficient arrays.
// NOTE(review): the (tidy & 1) terms pick the correct horizontally adjacent
// black cell depending on row parity -- verify against the packing layout
// assumed by the reassembly loop in main().
template<class T>
__global__ void redKernelMemoryOptimization(const T * __restrict__ d_aP, const T * __restrict__ d_aW, const T * __restrict__ d_aE,
const T * __restrict__ d_aS, const T * __restrict__ d_aN, const T * __restrict__ d_b,
const T * __restrict__ d_tempBlack, T * __restrict__ d_tempRed, const T omega,
T * __restrict__ d_L2DifferenceArray)
{
// --- Addressing the interior of the (NUM + 2) x (NUM + 2) region
const int tidx = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int tidy = blockIdx.y * blockDim.y + threadIdx.y + 1;
int ind_red = tidy * ((NUM >> 1) + 2) + tidx; // --- Local (red) index; problemSize of the matrix is ((NUM >> 1) + 2) x ((NUM >> 1) + 2)
int ind = 2 * tidx - (tidy & 1) - 1 + NUM * (tidy - 1); // --- Global index
T temp_old = d_tempRed[ind_red];
// --- RHS plus weighted sum of the four neighbouring (black) temperatures
T res = d_b[ind]
+ (d_aW[ind] * d_tempBlack[tidx + (tidy - 1) * ((NUM >> 1) + 2)]
+ d_aE[ind] * d_tempBlack[tidx + (tidy + 1) * ((NUM >> 1) + 2)]
+ d_aS[ind] * d_tempBlack[tidx - (tidy & 1) + tidy * ((NUM >> 1) + 2)]
+ d_aN[ind] * d_tempBlack[tidx + ((tidy + 1) & 1) + tidy * ((NUM >> 1) + 2)]);
// --- Over-relaxed update: blend of the old value and the new estimate
T temp_new = temp_old * (ONE - omega) + omega * (res / d_aP[ind]);
d_tempRed[ind_red] = temp_new;
res = temp_new - temp_old;
// --- Squared local change; reduced on the host into the L2 stopping norm
d_L2DifferenceArray[ind_red] = res * res;
}
/**************************************/
/* BLACK KERNEL - MEMORY OPTIMIZATION */
/**************************************/
// Black-cell SOR pass with compacted storage: mirror image of
// redKernelMemoryOptimization, with the row-parity terms inverted
// (((tidy + 1) & 1) instead of (tidy & 1)) because black cells sit on the
// opposite checkerboard colour of each row.
template<class T>
__global__ void blackKernelMemoryOptimization(const T * __restrict__ d_aP, const T * __restrict__ d_aW, const T * __restrict__ d_aE,
const T * __restrict__ d_aS, const T * __restrict__ d_aN, const T * __restrict__ d_b,
const T * __restrict__ d_tempRed, T * __restrict__ d_tempBlack, const T omega,
T * __restrict__ d_L2DifferenceArray)
{
// --- Addressing the interior of the (NUM + 2) x (NUM + 2) region
const int tidx = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int tidy = blockIdx.y * blockDim.y + threadIdx.y + 1;
int ind_black = tidy * ((NUM >> 1) + 2) + tidx; // --- Local (red) index; problemSize of the matrix is ((NUM >> 1) + 2) x ((NUM >> 1) + 2)
int ind = 2 * tidx - ((tidy + 1) & 1) - 1 + NUM * (tidy - 1); // --- Global index
T temp_old = d_tempBlack[ind_black];
// --- RHS plus weighted sum of the four neighbouring (red) temperatures
T res = d_b[ind]
+ (d_aW[ind] * d_tempRed[tidx + (tidy - 1) * ((NUM >> 1) + 2)]
+ d_aE[ind] * d_tempRed[tidx + (tidy + 1) * ((NUM >> 1) + 2)]
+ d_aS[ind] * d_tempRed[tidx - ((tidy + 1) & 1) + tidy * ((NUM >> 1) + 2)]
+ d_aN[ind] * d_tempRed[tidx + (tidy & 1) + tidy * ((NUM >> 1) + 2)]);
// --- Over-relaxed update: blend of the old value and the new estimate
T temp_new = temp_old * (ONE - omega) + omega * (res / d_aP[ind]);
d_tempBlack[ind_black] = temp_new;
res = temp_new - temp_old;
// --- Squared local change; reduced on the host into the L2 stopping norm
d_L2DifferenceArray[ind_black] = res * res;
}
/************************************************/
/* RED KERNEL - MEMORY OPTIMIZATION AND TEXTURE */
/************************************************/
#ifdef TEXTURE
// Red-cell SOR pass combining the compacted red/black storage of
// redKernelMemoryOptimization with texture fetches for the coefficients and
// the right-hand side (textures bound in main(), read through texFetch()).
template<class T>
__global__ void redKernelMemoryOptimizationTexture(const T * __restrict__ d_tempBlack, T * __restrict__ d_tempRed, const T omega, T * __restrict__ d_L2DifferenceArray)
{
// --- Addressing the interior of the (NUM + 2) x (NUM + 2) region
const int tidx = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int tidy = blockIdx.y * blockDim.y + threadIdx.y + 1;
int ind_red = tidy * ((NUM >> 1) + 2) + tidx; // --- Local (red) index; problemSize of the matrix is ((NUM >> 1) + 2) x ((NUM >> 1) + 2)
int ind = 2 * tidx - (tidy & 1) - 1 + NUM * (tidy - 1); // --- Global index
T temp_old = d_tempRed[ind_red];
// --- RHS plus weighted sum of the four neighbouring (black) temperatures
T res = texFetch(t_b, ind)
+ (texFetch(t_aW, ind) * d_tempBlack[tidx + (tidy - 1) * ((NUM >> 1) + 2)]
+ texFetch(t_aE, ind) * d_tempBlack[tidx + (tidy + 1) * ((NUM >> 1) + 2)]
+ texFetch(t_aS, ind) * d_tempBlack[tidx - (tidy & 1) + tidy * ((NUM >> 1) + 2)]
+ texFetch(t_aN, ind) * d_tempBlack[tidx + ((tidy + 1) & 1) + tidy * ((NUM >> 1) + 2)]);
// --- Over-relaxed update: blend of the old value and the new estimate
T temp_new = temp_old * (ONE - omega) + omega * (res / texFetch(t_aP, ind));
d_tempRed[ind_red] = temp_new;
res = temp_new - temp_old;
// --- Squared local change; reduced on the host into the L2 stopping norm
d_L2DifferenceArray[ind_red] = res * res;
}
#endif
/**************************************************/
/* BLACK KERNEL - MEMORY OPTIMIZATION AND TEXTURE */
/**************************************************/
#ifdef TEXTURE
// Black-cell SOR pass combining the compacted red/black storage of
// blackKernelMemoryOptimization with texture fetches for the coefficients and
// the right-hand side (textures bound in main(), read through texFetch()).
template<class T>
__global__ void blackKernelMemoryOptimizationTexture(const T * __restrict__ d_tempRed, T * __restrict__ d_tempBlack, const T omega, T * __restrict__ d_L2DifferenceArray)
{
// --- Addressing the interior of the (NUM + 2) x (NUM + 2) region
const int tidx = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int tidy = blockIdx.y * blockDim.y + threadIdx.y + 1;
int ind_black = tidy * ((NUM >> 1) + 2) + tidx; // --- Local (red) index; problemSize of the matrix is ((NUM >> 1) + 2) x ((NUM >> 1) + 2)
int ind = 2 * tidx - ((tidy + 1) & 1) - 1 + NUM * (tidy - 1); // --- Global index
T temp_old = d_tempBlack[ind_black];
// --- RHS plus weighted sum of the four neighbouring (red) temperatures
T res = texFetch(t_b, ind)
+ (texFetch(t_aW, ind) * d_tempRed[tidx + (tidy - 1) * ((NUM >> 1) + 2)]
+ texFetch(t_aE, ind) * d_tempRed[tidx + (tidy + 1) * ((NUM >> 1) + 2)]
+ texFetch(t_aS, ind) * d_tempRed[tidx - ((tidy + 1) & 1) + tidy * ((NUM >> 1) + 2)]
+ texFetch(t_aN, ind) * d_tempRed[tidx + (tidy & 1) + tidy * ((NUM >> 1) + 2)]);
// --- Over-relaxed update: blend of the old value and the new estimate
T temp_new = temp_old * (ONE - omega) + omega * (res / texFetch(t_aP, ind));
d_tempBlack[ind_black] = temp_new;
res = temp_new - temp_old;
// --- Squared local change; reduced on the host into the L2 stopping norm
d_L2DifferenceArray[ind_black] = res * res;
}
#endif
/********/
/* MAIN */
/********/
// Driver: solves the 2D Laplace problem with red/black SOR on the GPU,
// selecting the kernel variant at compile time (TEXTURE / MEMORY_OPTIMIZATION),
// iterating until the L2 norm of the update drops below tol, then reassembles
// and saves the temperature field.
// Fixes vs. previous revision: the MEMORY_OPTIMIZATION device-to-host copy is
// now error-checked like every other CUDA call, h_T is freed (was leaked), the
// "Problem problemSize" message artifact is corrected, and the dead
// commented-out copy of the reassembly loop was removed.
int main(void) {
    TimingGPU timerGPU;
    /**********************/
    /* PROBLEM PARAMETERS */
    /**********************/
    Real TN = 1.0;          // --- Temperature at northern boundary
    //Real dx = L / NUM;    // --- Discretization step along x-axis
    //Real dy = H / NUM;    // --- Discretization step along y-axis
    // --- Number of cells in x and y directions including boundary cells.
    //     With MEMORY_OPTIMIZATION the red and black temperatures live in
    //     separate, half-width arrays, so only half the rows are needed.
#ifdef MEMORY_OPTIMIZATION
    int Nrows = (NUM / 2) + 2;
#else
    int Nrows = NUM + 2;
#endif
    int Ncols = NUM + 2;
    // --- Problem size and computational size
    int problemSize = NUM * NUM;
    int computationalSize = Nrows * Ncols;
    /*************************/
    /* ITERATIONS PARAMETERS */
    /*************************/
    Real tol = 1.e-6;       // --- SOR iteration tolerance
    int maxIter = 1e6;      // --- Maximum number of iterations
    int iter;
    /***************************/
    /* HOST MEMORY ALLOCATIONS */
    /***************************/
    // --- Equation coefficients
    Real *h_aP = (Real *)calloc(problemSize, sizeof(Real)); // --- Self coefficients
    Real *h_aW = (Real *)calloc(problemSize, sizeof(Real)); // --- West neighbor coefficients
    Real *h_aE = (Real *)calloc(problemSize, sizeof(Real)); // --- East neighbor coefficients
    Real *h_aS = (Real *)calloc(problemSize, sizeof(Real)); // --- South neighbor coefficients
    Real *h_aN = (Real *)calloc(problemSize, sizeof(Real)); // --- North neighbor coefficients
    // --- Right-hand side array
    Real *h_b = (Real *)calloc(problemSize, sizeof(Real));
    Real *h_tempRed   = (Real *)calloc(computationalSize, sizeof(Real)); // --- Red-cells temperature array
    Real *h_tempBlack = (Real *)calloc(computationalSize, sizeof(Real)); // --- Black-cells temperature array
    // --- Set equation coefficients
    setEquationCoefficients(NUM, NUM, TN, h_aP, h_aW, h_aE, h_aS, h_aN, h_b);
    /****************************/
    /* SET GRID AND BLOCK SIZES */
    /****************************/
    dim3 dimBlock(BLOCKSIZEX, BLOCKSIZEY);
#ifdef MEMORY_OPTIMIZATION
    dim3 dimGrid(iDivUp(NUM / 2, BLOCKSIZEX), iDivUp(NUM, BLOCKSIZEY));
#else
    dim3 dimGrid(iDivUp(NUM, BLOCKSIZEX), iDivUp(NUM, BLOCKSIZEY));
#endif
    printf("Problem size: %d x %d \n", NUM, NUM);
    timerGPU.StartCounter();
    /*****************************/
    /* DEVICE MEMORY ALLOCATIONS */
    /*****************************/
    Real *d_aP;      gpuErrchk(cudaMalloc((void**)&d_aP, problemSize * sizeof(Real)));
    Real *d_aW;      gpuErrchk(cudaMalloc((void**)&d_aW, problemSize * sizeof(Real)));
    Real *d_aE;      gpuErrchk(cudaMalloc((void**)&d_aE, problemSize * sizeof(Real)));
    Real *d_aS;      gpuErrchk(cudaMalloc((void**)&d_aS, problemSize * sizeof(Real)));
    Real *d_aN;      gpuErrchk(cudaMalloc((void**)&d_aN, problemSize * sizeof(Real)));
    Real *d_b;       gpuErrchk(cudaMalloc((void**)&d_b, problemSize * sizeof(Real)));
    Real *d_tempRed; gpuErrchk(cudaMalloc((void**)&d_tempRed, computationalSize * sizeof(Real)));
#ifdef MEMORY_OPTIMIZATION
    Real *d_tempBlack; gpuErrchk(cudaMalloc((void**)&d_tempBlack, computationalSize * sizeof(Real)));
#endif
    Real *d_L2DifferenceArray; gpuErrchk(cudaMalloc((void**)&d_L2DifferenceArray, computationalSize * sizeof(Real)));
    /*****************************/
    /* HOST-DEVICE MEMORY COPIES */
    /*****************************/
    gpuErrchk(cudaMemcpy(d_aP, h_aP, problemSize * sizeof(Real), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_aW, h_aW, problemSize * sizeof(Real), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_aE, h_aE, problemSize * sizeof(Real), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_aS, h_aS, problemSize * sizeof(Real), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_aN, h_aN, problemSize * sizeof(Real), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_b, h_b, problemSize * sizeof(Real), cudaMemcpyHostToDevice));
    // --- Temperatures start from zero everywhere
    gpuErrchk(cudaMemset(d_tempRed, 0, computationalSize * sizeof(Real)));
#ifdef MEMORY_OPTIMIZATION
    gpuErrchk(cudaMemset(d_tempBlack, 0, computationalSize * sizeof(Real)));
#endif
    /********************/
    /* TEXTURE BINDINGS */
    /********************/
#ifdef TEXTURE
    gpuErrchk(cudaBindTexture(NULL, t_aP, d_aP, problemSize * sizeof(Real)));
    gpuErrchk(cudaBindTexture(NULL, t_aW, d_aW, problemSize * sizeof(Real)));
    gpuErrchk(cudaBindTexture(NULL, t_aE, d_aE, problemSize * sizeof(Real)));
    gpuErrchk(cudaBindTexture(NULL, t_aS, d_aS, problemSize * sizeof(Real)));
    gpuErrchk(cudaBindTexture(NULL, t_aN, d_aN, problemSize * sizeof(Real)));
    gpuErrchk(cudaBindTexture(NULL, t_b, d_b, problemSize * sizeof(Real)));
#endif
    /**************/
    /* ITERATIONS */
    /**************/
    // NOTE(review): omega (the SOR relaxation factor) is not declared in this
    // part of the file -- presumably a global defined elsewhere; verify.
    for (iter = 0; iter < maxIter; ++iter) {
        // --- Update red cells. Without MEMORY_OPTIMIZATION the red and black
        //     temperatures share the single full-size array d_tempRed, hence
        //     the same pointer passed twice.
#if defined(TEXTURE) && defined(MEMORY_OPTIMIZATION)
        redKernelMemoryOptimizationTexture<<<dimGrid, dimBlock>>>(d_tempBlack, d_tempRed, omega, d_L2DifferenceArray);
#elif defined(TEXTURE) && !defined(MEMORY_OPTIMIZATION)
        redKernelTexture<<<dimGrid, dimBlock>>>(d_tempRed, d_tempRed, omega, d_L2DifferenceArray);
#elif !defined(TEXTURE) && defined(MEMORY_OPTIMIZATION)
        redKernelMemoryOptimization<<<dimGrid, dimBlock>>>(d_aP, d_aW, d_aE, d_aS, d_aN, d_b, d_tempBlack, d_tempRed, omega, d_L2DifferenceArray);
#else
        redKernelNoOptimization<<<dimGrid, dimBlock>>>(d_aP, d_aW, d_aE, d_aS, d_aN, d_b, d_tempRed, d_tempRed, omega, d_L2DifferenceArray);
#endif
        // --- Update black cells
#if defined(TEXTURE) && defined(MEMORY_OPTIMIZATION)
        blackKernelMemoryOptimizationTexture<<<dimGrid, dimBlock>>>(d_tempRed, d_tempBlack, omega, d_L2DifferenceArray);
#elif defined(TEXTURE) && !defined(MEMORY_OPTIMIZATION)
        blackKernelTexture<<<dimGrid, dimBlock>>>(d_tempRed, d_tempRed, omega, d_L2DifferenceArray);
#elif !defined(TEXTURE) && defined(MEMORY_OPTIMIZATION)
        blackKernelMemoryOptimization<<<dimGrid, dimBlock>>>(d_aP, d_aW, d_aE, d_aS, d_aN, d_b, d_tempRed, d_tempBlack, omega, d_L2DifferenceArray);
#else
        blackKernelNoOptimization<<<dimGrid, dimBlock>>>(d_aP, d_aW, d_aE, d_aS, d_aN, d_b, d_tempRed, d_tempRed, omega, d_L2DifferenceArray);
#endif
        // --- Residual: L2 norm of the last update, used as convergence measure
        Real norm_L2 = thrust::reduce(thrust::device_pointer_cast(d_L2DifferenceArray), thrust::device_pointer_cast(d_L2DifferenceArray) + computationalSize);
        norm_L2 = sqrt(norm_L2 / ((Real)problemSize));
        if (iter % 100 == 0) printf("%5d, %0.6f\n", iter, norm_L2);
        // --- If tolerance has been reached, end SOR iterations
        if (norm_L2 < tol) break;
    }
    // --- Transfer final red and black temperatures back to the host
    gpuErrchk(cudaMemcpy(h_tempRed, d_tempRed, computationalSize * sizeof(Real), cudaMemcpyDeviceToHost));
#ifdef MEMORY_OPTIMIZATION
    // --- Fix: this copy was previously the only unchecked CUDA call
    gpuErrchk(cudaMemcpy(h_tempBlack, d_tempBlack, computationalSize * sizeof(Real), cudaMemcpyDeviceToHost));
#endif
    double runtime = timerGPU.GetCounter();
    printf("GPU\n");
    printf("Iterations: %i\n", iter);
    printf("Total time: %f s\n", runtime / 1000.0);
    /****************************/
    /* SAVING THE FINAL RESULTS */
    /****************************/
    // --- Reassemble the full NUM x NUM field from the red/black (possibly
    //     compacted) storage
    Real *h_T = (Real *)calloc(NUM * NUM, sizeof(Real));
    for (int row = 1; row < NUM + 1; ++row) {
        for (int col = 1; col < NUM + 1; ++col) {
            // --- Red cells
            if ((row + col) % 2 == 0) {
#ifdef MEMORY_OPTIMIZATION
                int ind = col * Nrows + (row + (col % 2)) / 2;
#else
                int ind = col * Nrows + row;
#endif
                h_T[(col - 1) * NUM + row - 1] = h_tempRed[ind];
            }
            // --- Black cells
            else {
#ifdef MEMORY_OPTIMIZATION
                int ind = col * Nrows + (row + ((col + 1) % 2)) / 2;
                h_T[((col - 1) * NUM) + row - 1] = h_tempBlack[ind];
#else
                int ind = col * Nrows + row;
                h_T[((col - 1) * NUM) + row - 1] = h_tempRed[ind];
#endif
            }
        }
    }
    printf("Saving...\n");
    saveCPUrealtxt(h_T, "D:\\Project\\Laplace_SOR_Red_Black\\Laplace_SOR_Red_Black\\Temp.txt", NUM * NUM);
#ifndef MEMORY_OPTIMIZATION
    saveCPUrealtxt(h_tempRed, "D:\\Project\\Laplace_SOR_Red_Black\\Laplace_SOR_Red_Black\\Temp_red.txt", (NUM + 2) * (NUM + 2));
#endif
    // --- Free device memory
    gpuErrchk(cudaFree(d_aP));
    gpuErrchk(cudaFree(d_aW));
    gpuErrchk(cudaFree(d_aE));
    gpuErrchk(cudaFree(d_aS));
    gpuErrchk(cudaFree(d_aN));
    gpuErrchk(cudaFree(d_b));
    gpuErrchk(cudaFree(d_tempRed));
#ifdef MEMORY_OPTIMIZATION
    gpuErrchk(cudaFree(d_tempBlack));
#endif
    gpuErrchk(cudaFree(d_L2DifferenceArray));
#ifdef TEXTURE
    // --- Unbind textures
    gpuErrchk(cudaUnbindTexture(t_aP));
    gpuErrchk(cudaUnbindTexture(t_aW));
    gpuErrchk(cudaUnbindTexture(t_aE));
    gpuErrchk(cudaUnbindTexture(t_aS));
    gpuErrchk(cudaUnbindTexture(t_aN));
    gpuErrchk(cudaUnbindTexture(t_b));
#endif
    // --- Free host memory (h_T was previously leaked)
    free(h_aP);
    free(h_aW);
    free(h_aE);
    free(h_aS);
    free(h_aN);
    free(h_b);
    free(h_tempRed);
    free(h_tempBlack);
    free(h_T);
    gpuErrchk(cudaDeviceReset());
    return 0;
}
|
2d6d2f5285b246c333ca953a3cff1810fcad3878.hip | // !!! This is a file automatically generated by hipify!!!
// RUN: %run_test hipify "%s" "%t" %hipify_args %clang_args
// CHECK: #include <hip/hip_runtime.h>
// CHECK: #include <hipsparse.h>
// CHECK: #include <hipblas.h>
#include <hip/hip_runtime.h>
#include <cusparse_v2.h>
#include <rocblas.h>
#include <cstring>
#include <cassert>
#include <cstdio>
#define Min(x,y) ((x)<(y)?(x):(y))
#define Max(x,y) ((x)>(y)?(x):(y))
#define Abs(x) ((x)>(0)?(x):-(x))
// Abort with a diagnostic when a CUDA/HIP runtime call reports an error.
// NOTE: the "// CHECK:"-style comments in this file are FileCheck patterns
// for the hipify regression test; they must not be edited or reordered.
// CHECK: static void CudaCheckCore(hipError_t code, const char *file, int line) {
static void CudaCheckCore(hipError_t code, const char *file, int line) {
// CHECK: if (code != hipSuccess) {
if (code != hipSuccess) {
// CHECK: fprintf(stderr,"Cuda Error %d : %s %s %d\n", code, hipGetErrorString(code), file, line);
fprintf(stderr,"Cuda Error %d : %s %s %d\n", code, hipGetErrorString(code), file, line);
exit(code);
}
}
// Wraps a runtime call and checks its return status at the call site.
#define CudaCheck( test ) { CudaCheckCore((test), __FILE__, __LINE__); }
// Checks for an error left pending by a preceding kernel launch.
// CHECK: #define CudaCheckAfterCall() { CudaCheckCore((hipGetLastError()), __FILE__, __LINE__); }
#define CudaCheckAfterCall() { CudaCheckCore((hipGetLastError()), __FILE__, __LINE__); }
// Map a hipsparseStatus_t to a human-readable explanation; the texts mirror
// the status descriptions from the cuSPARSE documentation. Unknown codes fall
// through the switch to the "<unknown>" return below.
// CHECK: static const char * GetErrorString(hipsparseStatus_t error) {
static const char * GetErrorString(hipsparseStatus_t error) {
switch (error) {
// CHECK: case HIPSPARSE_STATUS_SUCCESS:
case HIPSPARSE_STATUS_SUCCESS:
return "The operation completed successfully.";
// CHECK: case HIPSPARSE_STATUS_NOT_INITIALIZED:
case HIPSPARSE_STATUS_NOT_INITIALIZED:
return "The cuSPARSE library was not initialized. This is usually caused by the lack of a prior call, an error in the CUDA Runtime API called by the cuSPARSE routine, or an error in the hardware setup.\n" \
"To correct: call hipsparseCreate() prior to the function call; and check that the hardware, an appropriate version of the driver, and the cuSPARSE library are correctly installed.";
// CHECK: case HIPSPARSE_STATUS_ALLOC_FAILED:
case HIPSPARSE_STATUS_ALLOC_FAILED:
return "Resource allocation failed inside the cuSPARSE library. This is usually caused by a hipMalloc() failure.\n"\
"To correct: prior to the function call, deallocate previously allocated memory as much as possible.";
// CHECK: case HIPSPARSE_STATUS_INVALID_VALUE:
case HIPSPARSE_STATUS_INVALID_VALUE:
return "An unsupported value or parameter was passed to the function (a negative vector size, for example).\n"\
"To correct: ensure that all the parameters being passed have valid values.";
// CHECK: case HIPSPARSE_STATUS_ARCH_MISMATCH:
case HIPSPARSE_STATUS_ARCH_MISMATCH:
return "The function requires a feature absent from the device architecture; usually caused by the lack of support for atomic operations or double precision.\n"\
"To correct: compile and run the application on a device with appropriate compute capability, which is 1.1 for 32-bit atomic operations and 1.3 for double precision.";
// CHECK: case HIPSPARSE_STATUS_MAPPING_ERROR:
case HIPSPARSE_STATUS_MAPPING_ERROR:
return "An access to GPU memory space failed, which is usually caused by a failure to bind a texture.\n"\
"To correct: prior to the function call, unbind any previously bound textures.";
// CHECK: case HIPSPARSE_STATUS_EXECUTION_FAILED:
case HIPSPARSE_STATUS_EXECUTION_FAILED:
return "The GPU program failed to execute. This is often caused by a launch failure of the kernel on the GPU, which can be caused by multiple reasons.\n"\
"To correct: check that the hardware, an appropriate version of the driver, and the cuSPARSE library are correctly installed.";
// CHECK: case HIPSPARSE_STATUS_INTERNAL_ERROR:
case HIPSPARSE_STATUS_INTERNAL_ERROR:
return "An internal cuSPARSE operation failed. This error is usually caused by a hipMemcpyAsync() failure.\n"\
"To correct: check that the hardware, an appropriate version of the driver, and the cuSPARSE library are correctly installed. Also, check that the memory passed as a parameter to the routine is not being deallocated prior to the routines completion.";
// CHECK: case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
// CHECK: "To correct: check that the fields in hipsparseMatDescr_t descrA were set correctly.";
case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "The matrix type is not supported by this function. This is usually caused by passing an invalid matrix descriptor to the function.\n"\
"To correct: check that the fields in hipsparseMatDescr_t descrA were set correctly.";
}
return "<unknown>";
}
// Abort with a diagnostic when a cuSPARSE call does not return success.
// CHECK: static void CudaSparseCheckCore(hipsparseStatus_t code, const char *file, int line) {
static void CudaSparseCheckCore(hipsparseStatus_t code, const char *file, int line) {
// CHECK: if (code != HIPSPARSE_STATUS_SUCCESS) {
if (code != HIPSPARSE_STATUS_SUCCESS) {
fprintf(stderr,"Cuda Error %d : %s %s %d\n", code, GetErrorString(code), file, line);
exit(code);
}
}
// Wraps a cuSPARSE call and checks its status at the call site.
#define CudaSparseCheck( test ) { CudaSparseCheckCore((test), __FILE__, __LINE__); }
// Alloc and copy
// Allocate size elements of device memory and copy src into them.
template <class ObjectType>
ObjectType* allocAndCopy(const ObjectType src[], const int size) {
ObjectType* dest = NULL;
// CHECK: CudaCheck( hipMalloc(&dest,size*sizeof(ObjectType)) );
CudaCheck( hipMalloc(&dest,size*sizeof(ObjectType)) );
// CHECK: CudaCheck( hipMemcpy(dest, src, size*sizeof(ObjectType), hipMemcpyHostToDevice ) );
CudaCheck( hipMemcpy(dest, src, size*sizeof(ObjectType), hipMemcpyHostToDevice ) );
return dest;
}
// Allocate size elements of device memory (contents left uninitialized).
template <class ObjectType>
ObjectType* alloc(const int size) {
ObjectType* dest = NULL;
// CHECK: CudaCheck( hipMalloc(&dest,size*sizeof(ObjectType)) );
CudaCheck( hipMalloc(&dest,size*sizeof(ObjectType)) );
return dest;
}
// Allocate allocSize elements, copy the first size from src, zero the tail.
template <class ObjectType>
ObjectType* allocAndCopyPart(const ObjectType src[], const int size, const int allocSize) {
ObjectType* dest = NULL;
assert(size <= allocSize);
// CHECK: CudaCheck( hipMalloc(&dest,allocSize*sizeof(ObjectType)) );
// CHECK: CudaCheck( hipMemcpy(dest, src, size*sizeof(ObjectType), hipMemcpyHostToDevice ) );
// CHECK: CudaCheck( hipMemset(&dest[size],0,(allocSize-size)*sizeof(ObjectType)) );
CudaCheck( hipMalloc(&dest,allocSize*sizeof(ObjectType)) );
CudaCheck( hipMemcpy(dest, src, size*sizeof(ObjectType), hipMemcpyHostToDevice ) );
CudaCheck( hipMemset(&dest[size],0,(allocSize-size)*sizeof(ObjectType)) );
return dest;
}
// COO part
#include <algorithm>
// One sparse-matrix triplet.
struct Ijv {
    int i, j;   // row / column index of the nonzero
    double v;   // its value
};
// Strict row-major ordering: by row first, then by column within a row.
bool IjvComp(const Ijv& v1, const Ijv& v2) {
    return v1.i < v2.i || (v1.i == v2.i && v1.j < v2.j);
}
// Host-side sparse matrix in COOrdinate (triplet) format; owns its arrays.
struct COOArrays {
    int m;          // number of rows (the CRS/BCRS code also uses it as the column count)
    int nnz;        // number of stored nonzeros
    double *val;    /*values(NNZ)*/
    int *rowind;    /*i(NNZ)*/
    int *colind;    /*j(NNZ)*/
    COOArrays() {
        // --- Fix: m and nnz used to be left uninitialized, making
        //     sortToRowMajor()/compute_COO() on a fresh object undefined.
        m = 0;
        nnz = 0;
        val = NULL;
        rowind = NULL;
        colind = NULL;
    }
    ~COOArrays() {
        delete[] val;
        delete[] rowind;
        delete[] colind;
    }
    // Reorder the triplets in place into row-major (row, then column) order,
    // as required by the COO-to-CSR conversion below.
    void sortToRowMajor() {
        Ijv* ijvs = new Ijv[nnz];
        for (int idxCopy = 0 ; idxCopy < nnz ; ++idxCopy) {
            ijvs[idxCopy].i = rowind[idxCopy];
            ijvs[idxCopy].j = colind[idxCopy];
            ijvs[idxCopy].v = val[idxCopy];
        }
        std::sort(ijvs, ijvs + nnz, IjvComp);
        for (int idxCopy = 0 ; idxCopy < nnz ; ++idxCopy) {
            rowind[idxCopy] = ijvs[idxCopy].i;
            colind[idxCopy] = ijvs[idxCopy].j;
            val[idxCopy] = ijvs[idxCopy].v;
        }
        delete[] ijvs;
    }
};
// y += A * x for a COO matrix; the caller must pre-initialize y (only += is done).
void compute_COO(COOArrays& coo, double *x , double *y ) {
    for (int idxVal = 0 ; idxVal < coo.nnz ; ++idxVal) {
        y[coo.rowind[idxVal]] += x[coo.colind[idxVal]] * coo.val[idxVal];
    }
}
// CRS part (banner previously mislabeled "COO part")
// Sparse matrix in CRS/CSR format stored on the device; also owns the
// cuSPARSE handle and stream used by the conversion and gemv routines.
// NOTE(review): the destructor frees the device pointers but copying is not
// disabled, so passing an instance by value would double-free; avoid copies.
// NOTE(review): m and nnz are left unset until COO_to_CRS() fills them.
struct CRSArrays {
int m; //< the dim of the matrix
int nnz;//< the number of nnz (== ia[m])
double *cu_csrValA; //< the values (of size NNZ)
int *cu_csrRowPtrA;//< the usual rowptr (of size m+1)
int *cu_csrColIndA;//< the colidx of each NNZ (of size nnz)
// CHECK: hipStream_t streamId;
// CHECK: hipsparseHandle_t cusparseHandle;
hipStream_t streamId;
hipsparseHandle_t cusparseHandle;
CRSArrays() {
cu_csrValA = NULL;
cu_csrRowPtrA = NULL;
cu_csrColIndA = NULL;
// Create sparse handle (needed to call sparse functions
streamId = 0;
// CHECK-NOT: hipsparseHandle = 0;
cusparseHandle = 0;
// CHECK: CudaSparseCheck(hipsparseCreate(&cusparseHandle));
// CHECK: CudaSparseCheck(hipsparseSetStream(cusparseHandle, streamId));
CudaSparseCheck(hipsparseCreate(&cusparseHandle));
CudaSparseCheck(hipsparseSetStream(cusparseHandle, streamId));
}
~CRSArrays() {
// CHECK: CudaCheck(hipFree(cu_csrValA));
// CHECK: CudaCheck(hipFree(cu_csrRowPtrA));
// CHECK: CudaCheck(hipFree(cu_csrColIndA));
CudaCheck(hipFree(cu_csrValA));
CudaCheck(hipFree(cu_csrRowPtrA));
CudaCheck(hipFree(cu_csrColIndA));
// Destroy sparse handle
// CHECK: CudaSparseCheck(hipsparseDestroy(cusparseHandle));
CudaSparseCheck(hipsparseDestroy(cusparseHandle));
}
};
// Convert a host COO matrix to a device CSR matrix: sort the triplets into
// row-major order, upload the row indices and compress them with
// hipsparseXcoo2csr, then upload the column indices and values unchanged.
void COO_to_CRS(COOArrays& coo, CRSArrays* crs) {
// We need COO to be sorted by row (and column)
coo.sortToRowMajor();
crs->m = coo.m;
crs->nnz = coo.nnz;
// Convert COO to CSR (it is just for the rows idx)
crs->cu_csrRowPtrA = alloc<int>(coo.m+1);
{
// --- Temporary device copy of the row indices, freed after compression
int* cu_cooRowIndA = allocAndCopy(coo.rowind, coo.nnz);
// CHECK: CudaSparseCheck(hipsparseXcoo2csr(crs->cusparseHandle, cu_cooRowIndA,
// CHECK: coo.nnz, coo.m, crs->cu_csrRowPtrA, HIPSPARSE_INDEX_BASE_ZERO));
CudaSparseCheck(hipsparseXcoo2csr(crs->cusparseHandle, cu_cooRowIndA,
coo.nnz, coo.m, crs->cu_csrRowPtrA, HIPSPARSE_INDEX_BASE_ZERO));
// CHECK: CudaCheck(hipFree(cu_cooRowIndA));
CudaCheck(hipFree(cu_cooRowIndA));
}
// Copy cols idx and values that are unchanged
crs->cu_csrValA = allocAndCopy(coo.val, coo.nnz);
crs->cu_csrColIndA = allocAndCopy(coo.colind, coo.nnz);
}
// y = A * x via hipsparseDcsrmv (alpha = 1, beta = 0, so the incoming y values
// are overwritten); returns the gemv kernel time in seconds, measured with
// CUDA events on the handle's stream.
// NOTE(review): the matrix descriptor and the two events are never destroyed
// (hipsparseDestroyMatDescr / hipEventDestroy missing) -- small leak per call.
double compute_CRS( CRSArrays& crs, double *x , double *y) {
// For blas 2 gemv y = alpha.x.A + Beta.y
const double alpha = 1.0;
const double beta = 0.0;
// Copy input
double* cu_x = allocAndCopy(x, crs.m);
double* cu_y = allocAndCopy(y, crs.m);
// Init matrix properties
// CHECK: hipsparseMatDescr_t descr = 0;
hipsparseMatDescr_t descr = 0;
// CHECK: CudaSparseCheck(hipsparseCreateMatDescr(&descr));
CudaSparseCheck(hipsparseCreateMatDescr(&descr));
// CHECK: hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
// CHECK: hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
// Compute gemv
float gemvComputeTume = 0;
{
// CHECK: hipEvent_t startTime, stopTime;
// CHECK: hipEventCreate(&startTime);
// CHECK: hipEventCreate(&stopTime);
// CHECK: hipEventRecord(startTime, crs.streamId);
hipEvent_t startTime, stopTime;
hipEventCreate(&startTime);
hipEventCreate(&stopTime);
hipEventRecord(startTime, crs.streamId);
// CHECK: CudaSparseCheck(hipsparseDcsrmv(crs.cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
CudaSparseCheck(hipsparseDcsrmv(crs.cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
crs.m, crs.m, crs.nnz, &alpha,
descr, crs.cu_csrValA, crs.cu_csrRowPtrA,
crs.cu_csrColIndA, cu_x, &beta, cu_y));
// CHECK: hipEventRecord(stopTime, crs.streamId);
// CHECK: hipEventSynchronize(stopTime);
// CHECK: hipEventElapsedTime(&gemvComputeTume, startTime, stopTime);
hipEventRecord(stopTime, crs.streamId);
hipEventSynchronize(stopTime);
hipEventElapsedTime(&gemvComputeTume, startTime, stopTime);
// --- hipEventElapsedTime reports milliseconds; convert to seconds
gemvComputeTume /=1000.0;
}
// Get back result
// CHECK: CudaCheck( hipMemcpy(y, cu_y, crs.m*sizeof(double), hipMemcpyDeviceToHost ) );
CudaCheck( hipMemcpy(y, cu_y, crs.m*sizeof(double), hipMemcpyDeviceToHost ) );
// Dealloc vectors
// CHECK: CudaCheck(hipFree(cu_x));
// CHECK: CudaCheck(hipFree(cu_y));
CudaCheck(hipFree(cu_x));
CudaCheck(hipFree(cu_y));
return gemvComputeTume;
}
// BCSR part
// Sparse matrix in blocked CSR format on the device (blockSize x blockSize
// dense blocks); filled by CRS_to_BCRS(). Owns its device arrays plus the
// cuSPARSE handle/stream, like CRSArrays.
// NOTE(review): as with CRSArrays, the implicit copy constructor would
// double-free the device pointers -- avoid passing by value.
struct BCRSArrays {
int m;
int nnz;
int nbBlocks;
int nbBlockRow;
int blockSize;
int* cu_bsrRowPtrC;
int* cu_bsrColIndC;
double* cu_bsrValC;
// CHECK: hipStream_t streamId;
hipStream_t streamId;
// CHECK: hipsparseHandle_t cusparseHandle;
hipsparseHandle_t cusparseHandle;
BCRSArrays() {
cu_bsrRowPtrC = NULL;
cu_bsrColIndC = NULL;
cu_bsrValC = NULL;
// Create sparse handle (needed to call sparse functions
streamId = 0;
// CHECK: CudaSparseCheck(hipsparseCreate(&cusparseHandle));
// CHECK: CudaSparseCheck(hipsparseSetStream(cusparseHandle, streamId));
CudaSparseCheck(hipsparseCreate(&cusparseHandle));
CudaSparseCheck(hipsparseSetStream(cusparseHandle, streamId));
}
~BCRSArrays() {
// CHECK: CudaCheck(hipFree(cu_bsrRowPtrC));
// CHECK: CudaCheck(hipFree(cu_bsrColIndC));
// CHECK: CudaCheck(hipFree(cu_bsrValC));
CudaCheck(hipFree(cu_bsrRowPtrC));
CudaCheck(hipFree(cu_bsrColIndC));
CudaCheck(hipFree(cu_bsrValC));
// Destroy sparse handle
// CHECK: CudaSparseCheck(hipsparseDestroy(cusparseHandle));
CudaSparseCheck(hipsparseDestroy(cusparseHandle));
}
};
void CRS_to_BCRS(CRSArrays& csr, BCRSArrays* bcrs, const int blockSize) {
bcrs->m = csr.m;
bcrs->nnz = csr.nnz;
bcrs->blockSize = blockSize;
bcrs->nbBlockRow = (csr.m + blockSize-1)/blockSize;
// CHECK: hipMalloc((void**)&bcrs->cu_bsrRowPtrC, sizeof(int) *(bcrs->nbBlockRow+1));
hipMalloc((void**)&bcrs->cu_bsrRowPtrC, sizeof(int) *(bcrs->nbBlockRow+1));
// CHECK: hipsparseMatDescr_t descr = 0;
hipsparseMatDescr_t descr = 0;
// CHECK: CudaSparseCheck(hipsparseCreateMatDescr(&descr));
// CHECK: hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
// CHECK: hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
CudaSparseCheck(hipsparseCreateMatDescr(&descr));
hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
int nbNnzBlocks;
// NOTE: cusparseXcsr2bsrNnz and HIPSPARSE_DIRECTION_COLUMN (of type hipsparseDirection_t) are yet unsupported by HIP
// CHECK-NOT: hipsparseXcsr2bsrNnz(bcrs->cusparseHandle, HIPSPARSE_DIRECTION_COLUMN, csr.m, csr.m, descr, csr.cu_csrRowPtrA, csr.cu_csrColIndA,
cusparseXcsr2bsrNnz(bcrs->cusparseHandle, HIPSPARSE_DIRECTION_COLUMN, csr.m, csr.m, descr, csr.cu_csrRowPtrA, csr.cu_csrColIndA,
blockSize, descr, bcrs->cu_bsrRowPtrC, &nbNnzBlocks);
{
int firstBlockIdx, lastBlockIdx;
// CHECK: hipMemcpy(&lastBlockIdx, bcrs->cu_bsrRowPtrC+bcrs->nbBlockRow, sizeof(int), hipMemcpyDeviceToHost);
// CHECK: hipMemcpy(&firstBlockIdx, bcrs->cu_bsrRowPtrC, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(&lastBlockIdx, bcrs->cu_bsrRowPtrC+bcrs->nbBlockRow, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(&firstBlockIdx, bcrs->cu_bsrRowPtrC, sizeof(int), hipMemcpyDeviceToHost);
assert(firstBlockIdx == 0); // we are in base 0
assert(nbNnzBlocks == lastBlockIdx - firstBlockIdx);
}
bcrs->nbBlocks = nbNnzBlocks;
// CHECK: CudaCheck(hipMalloc((void**)&bcrs->cu_bsrColIndC, sizeof(int)*nbNnzBlocks));
// CHECK: CudaCheck(hipMalloc((void**)&bcrs->cu_bsrValC, sizeof(double)*(blockSize*blockSize)*nbNnzBlocks));
CudaCheck(hipMalloc((void**)&bcrs->cu_bsrColIndC, sizeof(int)*nbNnzBlocks));
CudaCheck(hipMalloc((void**)&bcrs->cu_bsrValC, sizeof(double)*(blockSize*blockSize)*nbNnzBlocks));
// NOTE: cusparseDcsr2bsr and HIPSPARSE_DIRECTION_COLUMN (of type hipsparseDirection_t) are yet unsupported by HIP
// CHECK-NOT: hipsparseDcsr2bsr(bcrs->cusparseHandle, HIPSPARSE_DIRECTION_COLUMN,
cusparseDcsr2bsr(bcrs->cusparseHandle, HIPSPARSE_DIRECTION_COLUMN,
csr.m, csr.m, descr, csr.cu_csrValA, csr.cu_csrRowPtrA, csr.cu_csrColIndA, blockSize, descr, bcrs->cu_bsrValC, bcrs->cu_bsrRowPtrC, bcrs->cu_bsrColIndC);
}
double compute_BSR(BCRSArrays& bcsr, double *x , double *y){
// For blas 2 gemv y = alpha.x.A + Beta.y
const double alpha = 1.0;
const double beta = 0.0;
// Copy input
const int sizeMultipleBlockSize = ((bcsr.m+bcsr.blockSize-1)/bcsr.blockSize)*bcsr.blockSize;
double* cu_x = allocAndCopyPart(x, bcsr.m, sizeMultipleBlockSize);
double* cu_y = allocAndCopyPart(y, bcsr.m, sizeMultipleBlockSize);
// Init matrix properties
// CHECK: hipsparseMatDescr_t descr = 0;
// CHECK: CudaSparseCheck(hipsparseCreateMatDescr(&descr));
// CHECK: hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
// CHECK: hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
hipsparseMatDescr_t descr = 0;
CudaSparseCheck(hipsparseCreateMatDescr(&descr));
hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
// Compute gemv
float gemvComputeTume = 0;
{
// CHECK: hipEvent_t startTime, stopTime;
// CHECK: hipEventCreate(&startTime);
// CHECK: hipEventCreate(&stopTime);
// CHECK: hipEventRecord(startTime, bcsr.streamId);
hipEvent_t startTime, stopTime;
hipEventCreate(&startTime);
hipEventCreate(&stopTime);
hipEventRecord(startTime, bcsr.streamId);
// CHECK: hipsparseDbsrmv(bcsr.cusparseHandle, HIPSPARSE_DIRECTION_COLUMN, HIPSPARSE_OPERATION_NON_TRANSPOSE,
hipsparseDbsrmv(bcsr.cusparseHandle, HIPSPARSE_DIRECTION_COLUMN, HIPSPARSE_OPERATION_NON_TRANSPOSE,
bcsr.nbBlockRow, bcsr.m, bcsr.nbBlocks, &alpha, descr,
bcsr.cu_bsrValC, bcsr.cu_bsrRowPtrC, bcsr.cu_bsrColIndC, bcsr.blockSize,
cu_x, &beta, cu_y);
// CHECK: hipEventRecord(stopTime, bcsr.streamId);
// CHECK: hipEventSynchronize(stopTime);
// CHECK: hipEventElapsedTime(&gemvComputeTume, startTime, stopTime);
hipEventRecord(stopTime, bcsr.streamId);
hipEventSynchronize(stopTime);
hipEventElapsedTime(&gemvComputeTume, startTime, stopTime);
gemvComputeTume /=1000.0;
}
// Get back result
// CHECK: CudaCheck( hipMemcpy(y, cu_y, bcsr.m*sizeof(double), hipMemcpyDeviceToHost ) );
CudaCheck( hipMemcpy(y, cu_y, bcsr.m*sizeof(double), hipMemcpyDeviceToHost ) );
// Dealloc vectors
// CHECK: CudaCheck(hipFree(cu_x));
// CHECK: CudaCheck(hipFree(cu_y));
CudaCheck(hipFree(cu_x));
CudaCheck(hipFree(cu_y));
return gemvComputeTume;
}
| 2d6d2f5285b246c333ca953a3cff1810fcad3878.cu | // RUN: %run_test hipify "%s" "%t" %hipify_args %clang_args
// CHECK: #include <hip/hip_runtime.h>
// CHECK: #include <hipsparse.h>
// CHECK: #include <hipblas.h>
#include <cuda.h>
#include <cusparse_v2.h>
#include <cublas_v2.h>
#include <cstring>
#include <cassert>
#include <cstdio>
#define Min(x,y) ((x)<(y)?(x):(y))
#define Max(x,y) ((x)>(y)?(x):(y))
#define Abs(x) ((x)>(0)?(x):-(x))
// CHECK: static void CudaCheckCore(hipError_t code, const char *file, int line) {
static void CudaCheckCore(cudaError_t code, const char *file, int line) {
// CHECK: if (code != hipSuccess) {
if (code != cudaSuccess) {
// CHECK: fprintf(stderr,"Cuda Error %d : %s %s %d\n", code, hipGetErrorString(code), file, line);
fprintf(stderr,"Cuda Error %d : %s %s %d\n", code, cudaGetErrorString(code), file, line);
exit(code);
}
}
#define CudaCheck( test ) { CudaCheckCore((test), __FILE__, __LINE__); }
// CHECK: #define CudaCheckAfterCall() { CudaCheckCore((hipGetLastError()), __FILE__, __LINE__); }
#define CudaCheckAfterCall() { CudaCheckCore((cudaGetLastError()), __FILE__, __LINE__); }
// CHECK: static const char * GetErrorString(hipsparseStatus_t error) {
static const char * GetErrorString(cusparseStatus_t error) {
switch (error) {
// CHECK: case HIPSPARSE_STATUS_SUCCESS:
case CUSPARSE_STATUS_SUCCESS:
return "The operation completed successfully.";
// CHECK: case HIPSPARSE_STATUS_NOT_INITIALIZED:
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "The cuSPARSE library was not initialized. This is usually caused by the lack of a prior call, an error in the CUDA Runtime API called by the cuSPARSE routine, or an error in the hardware setup.\n" \
"To correct: call cusparseCreate() prior to the function call; and check that the hardware, an appropriate version of the driver, and the cuSPARSE library are correctly installed.";
// CHECK: case HIPSPARSE_STATUS_ALLOC_FAILED:
case CUSPARSE_STATUS_ALLOC_FAILED:
return "Resource allocation failed inside the cuSPARSE library. This is usually caused by a cudaMalloc() failure.\n"\
"To correct: prior to the function call, deallocate previously allocated memory as much as possible.";
// CHECK: case HIPSPARSE_STATUS_INVALID_VALUE:
case CUSPARSE_STATUS_INVALID_VALUE:
return "An unsupported value or parameter was passed to the function (a negative vector size, for example).\n"\
"To correct: ensure that all the parameters being passed have valid values.";
// CHECK: case HIPSPARSE_STATUS_ARCH_MISMATCH:
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "The function requires a feature absent from the device architecture; usually caused by the lack of support for atomic operations or double precision.\n"\
"To correct: compile and run the application on a device with appropriate compute capability, which is 1.1 for 32-bit atomic operations and 1.3 for double precision.";
// CHECK: case HIPSPARSE_STATUS_MAPPING_ERROR:
case CUSPARSE_STATUS_MAPPING_ERROR:
return "An access to GPU memory space failed, which is usually caused by a failure to bind a texture.\n"\
"To correct: prior to the function call, unbind any previously bound textures.";
// CHECK: case HIPSPARSE_STATUS_EXECUTION_FAILED:
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "The GPU program failed to execute. This is often caused by a launch failure of the kernel on the GPU, which can be caused by multiple reasons.\n"\
"To correct: check that the hardware, an appropriate version of the driver, and the cuSPARSE library are correctly installed.";
// CHECK: case HIPSPARSE_STATUS_INTERNAL_ERROR:
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "An internal cuSPARSE operation failed. This error is usually caused by a cudaMemcpyAsync() failure.\n"\
"To correct: check that the hardware, an appropriate version of the driver, and the cuSPARSE library are correctly installed. Also, check that the memory passed as a parameter to the routine is not being deallocated prior to the routine’s completion.";
// CHECK: case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
// CHECK: "To correct: check that the fields in hipsparseMatDescr_t descrA were set correctly.";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "The matrix type is not supported by this function. This is usually caused by passing an invalid matrix descriptor to the function.\n"\
"To correct: check that the fields in cusparseMatDescr_t descrA were set correctly.";
}
return "<unknown>";
}
// CHECK: static void CudaSparseCheckCore(hipsparseStatus_t code, const char *file, int line) {
static void CudaSparseCheckCore(cusparseStatus_t code, const char *file, int line) {
// CHECK: if (code != HIPSPARSE_STATUS_SUCCESS) {
if (code != CUSPARSE_STATUS_SUCCESS) {
fprintf(stderr,"Cuda Error %d : %s %s %d\n", code, GetErrorString(code), file, line);
exit(code);
}
}
#define CudaSparseCheck( test ) { CudaSparseCheckCore((test), __FILE__, __LINE__); }
// Alloc and copy
template <class ObjectType>
ObjectType* allocAndCopy(const ObjectType src[], const int size) {
ObjectType* dest = NULL;
// CHECK: CudaCheck( hipMalloc(&dest,size*sizeof(ObjectType)) );
CudaCheck( cudaMalloc(&dest,size*sizeof(ObjectType)) );
// CHECK: CudaCheck( hipMemcpy(dest, src, size*sizeof(ObjectType), hipMemcpyHostToDevice ) );
CudaCheck( cudaMemcpy(dest, src, size*sizeof(ObjectType), cudaMemcpyHostToDevice ) );
return dest;
}
template <class ObjectType>
ObjectType* alloc(const int size) {
ObjectType* dest = NULL;
// CHECK: CudaCheck( hipMalloc(&dest,size*sizeof(ObjectType)) );
CudaCheck( cudaMalloc(&dest,size*sizeof(ObjectType)) );
return dest;
}
template <class ObjectType>
ObjectType* allocAndCopyPart(const ObjectType src[], const int size, const int allocSize) {
ObjectType* dest = NULL;
assert(size <= allocSize);
// CHECK: CudaCheck( hipMalloc(&dest,allocSize*sizeof(ObjectType)) );
// CHECK: CudaCheck( hipMemcpy(dest, src, size*sizeof(ObjectType), hipMemcpyHostToDevice ) );
// CHECK: CudaCheck( hipMemset(&dest[size],0,(allocSize-size)*sizeof(ObjectType)) );
CudaCheck( cudaMalloc(&dest,allocSize*sizeof(ObjectType)) );
CudaCheck( cudaMemcpy(dest, src, size*sizeof(ObjectType), cudaMemcpyHostToDevice ) );
CudaCheck( cudaMemset(&dest[size],0,(allocSize-size)*sizeof(ObjectType)) );
return dest;
}
// COO part
#include <algorithm>
struct Ijv {
int i, j;
double v;
};
bool IjvComp(const Ijv& v1, const Ijv& v2) {
return v1.i < v2.i || (v1.i == v2.i && v1.j < v2.j);
}
struct COOArrays {
int m;
int nnz;
double *val;/*values(NNZ)*/
int *rowind;/*i(NNZ)*/
int *colind;/*j(NNZ)*/
COOArrays() {
val = NULL;
rowind = NULL;
colind = NULL;
}
~COOArrays() {
delete[] val;
delete[] rowind;
delete[] colind;
}
void sortToRowMajor() {
Ijv* ijvs = new Ijv[nnz];
for(int idxCopy = 0 ; idxCopy < nnz ; ++idxCopy){
ijvs[idxCopy].i = rowind[idxCopy];
ijvs[idxCopy].j = colind[idxCopy];
ijvs[idxCopy].v = val[idxCopy];
}
std::sort(ijvs, ijvs+nnz, IjvComp);
for(int idxCopy = 0 ; idxCopy < nnz ; ++idxCopy){
rowind[idxCopy] = ijvs[idxCopy].i;
colind[idxCopy] = ijvs[idxCopy].j;
val[idxCopy] = ijvs[idxCopy].v;
}
delete[] ijvs;
}
};
void compute_COO(COOArrays& coo, double *x , double *y ) {
for(int idxVal = 0 ; idxVal < coo.nnz ; ++idxVal){
y[coo.rowind[idxVal]] += x[coo.colind[idxVal]] * coo.val[idxVal];
}
}
// COO part
struct CRSArrays {
int m; //< the dim of the matrix
int nnz;//< the number of nnz (== ia[m])
double *cu_csrValA; //< the values (of size NNZ)
int *cu_csrRowPtrA;//< the usual rowptr (of size m+1)
int *cu_csrColIndA;//< the colidx of each NNZ (of size nnz)
// CHECK: hipStream_t streamId;
// CHECK: hipsparseHandle_t cusparseHandle;
cudaStream_t streamId;
cusparseHandle_t cusparseHandle;
CRSArrays() {
cu_csrValA = NULL;
cu_csrRowPtrA = NULL;
cu_csrColIndA = NULL;
// Create sparse handle (needed to call sparse functions
streamId = 0;
// CHECK-NOT: hipsparseHandle = 0;
cusparseHandle = 0;
// CHECK: CudaSparseCheck(hipsparseCreate(&cusparseHandle));
// CHECK: CudaSparseCheck(hipsparseSetStream(cusparseHandle, streamId));
CudaSparseCheck(cusparseCreate(&cusparseHandle));
CudaSparseCheck(cusparseSetStream(cusparseHandle, streamId));
}
~CRSArrays() {
// CHECK: CudaCheck(hipFree(cu_csrValA));
// CHECK: CudaCheck(hipFree(cu_csrRowPtrA));
// CHECK: CudaCheck(hipFree(cu_csrColIndA));
CudaCheck(cudaFree(cu_csrValA));
CudaCheck(cudaFree(cu_csrRowPtrA));
CudaCheck(cudaFree(cu_csrColIndA));
// Destroy sparse handle
// CHECK: CudaSparseCheck(hipsparseDestroy(cusparseHandle));
CudaSparseCheck(cusparseDestroy(cusparseHandle));
}
};
void COO_to_CRS(COOArrays& coo, CRSArrays* crs) {
// We need COO to be sorted by row (and column)
coo.sortToRowMajor();
crs->m = coo.m;
crs->nnz = coo.nnz;
// Convert COO to CSR (it is just for the rows idx)
crs->cu_csrRowPtrA = alloc<int>(coo.m+1);
{
int* cu_cooRowIndA = allocAndCopy(coo.rowind, coo.nnz);
// CHECK: CudaSparseCheck(hipsparseXcoo2csr(crs->cusparseHandle, cu_cooRowIndA,
// CHECK: coo.nnz, coo.m, crs->cu_csrRowPtrA, HIPSPARSE_INDEX_BASE_ZERO));
CudaSparseCheck(cusparseXcoo2csr(crs->cusparseHandle, cu_cooRowIndA,
coo.nnz, coo.m, crs->cu_csrRowPtrA, CUSPARSE_INDEX_BASE_ZERO));
// CHECK: CudaCheck(hipFree(cu_cooRowIndA));
CudaCheck(cudaFree(cu_cooRowIndA));
}
// Copy cols idx and values that are unchanged
crs->cu_csrValA = allocAndCopy(coo.val, coo.nnz);
crs->cu_csrColIndA = allocAndCopy(coo.colind, coo.nnz);
}
double compute_CRS( CRSArrays& crs, double *x , double *y) {
// For blas 2 gemv y = alpha.x.A + Beta.y
const double alpha = 1.0;
const double beta = 0.0;
// Copy input
double* cu_x = allocAndCopy(x, crs.m);
double* cu_y = allocAndCopy(y, crs.m);
// Init matrix properties
// CHECK: hipsparseMatDescr_t descr = 0;
cusparseMatDescr_t descr = 0;
// CHECK: CudaSparseCheck(hipsparseCreateMatDescr(&descr));
CudaSparseCheck(cusparseCreateMatDescr(&descr));
// CHECK: hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
// CHECK: hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
// Compute gemv
float gemvComputeTume = 0;
{
// CHECK: hipEvent_t startTime, stopTime;
// CHECK: hipEventCreate(&startTime);
// CHECK: hipEventCreate(&stopTime);
// CHECK: hipEventRecord(startTime, crs.streamId);
cudaEvent_t startTime, stopTime;
cudaEventCreate(&startTime);
cudaEventCreate(&stopTime);
cudaEventRecord(startTime, crs.streamId);
// CHECK: CudaSparseCheck(hipsparseDcsrmv(crs.cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
CudaSparseCheck(cusparseDcsrmv(crs.cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
crs.m, crs.m, crs.nnz, &alpha,
descr, crs.cu_csrValA, crs.cu_csrRowPtrA,
crs.cu_csrColIndA, cu_x, &beta, cu_y));
// CHECK: hipEventRecord(stopTime, crs.streamId);
// CHECK: hipEventSynchronize(stopTime);
// CHECK: hipEventElapsedTime(&gemvComputeTume, startTime, stopTime);
cudaEventRecord(stopTime, crs.streamId);
cudaEventSynchronize(stopTime);
cudaEventElapsedTime(&gemvComputeTume, startTime, stopTime);
gemvComputeTume /=1000.0;
}
// Get back result
// CHECK: CudaCheck( hipMemcpy(y, cu_y, crs.m*sizeof(double), hipMemcpyDeviceToHost ) );
CudaCheck( cudaMemcpy(y, cu_y, crs.m*sizeof(double), cudaMemcpyDeviceToHost ) );
// Dealloc vectors
// CHECK: CudaCheck(hipFree(cu_x));
// CHECK: CudaCheck(hipFree(cu_y));
CudaCheck(cudaFree(cu_x));
CudaCheck(cudaFree(cu_y));
return gemvComputeTume;
}
// BCSR part
struct BCRSArrays {
int m;
int nnz;
int nbBlocks;
int nbBlockRow;
int blockSize;
int* cu_bsrRowPtrC;
int* cu_bsrColIndC;
double* cu_bsrValC;
// CHECK: hipStream_t streamId;
cudaStream_t streamId;
// CHECK: hipsparseHandle_t cusparseHandle;
cusparseHandle_t cusparseHandle;
BCRSArrays() {
cu_bsrRowPtrC = NULL;
cu_bsrColIndC = NULL;
cu_bsrValC = NULL;
// Create sparse handle (needed to call sparse functions
streamId = 0;
// CHECK: CudaSparseCheck(hipsparseCreate(&cusparseHandle));
// CHECK: CudaSparseCheck(hipsparseSetStream(cusparseHandle, streamId));
CudaSparseCheck(cusparseCreate(&cusparseHandle));
CudaSparseCheck(cusparseSetStream(cusparseHandle, streamId));
}
~BCRSArrays() {
// CHECK: CudaCheck(hipFree(cu_bsrRowPtrC));
// CHECK: CudaCheck(hipFree(cu_bsrColIndC));
// CHECK: CudaCheck(hipFree(cu_bsrValC));
CudaCheck(cudaFree(cu_bsrRowPtrC));
CudaCheck(cudaFree(cu_bsrColIndC));
CudaCheck(cudaFree(cu_bsrValC));
// Destroy sparse handle
// CHECK: CudaSparseCheck(hipsparseDestroy(cusparseHandle));
CudaSparseCheck(cusparseDestroy(cusparseHandle));
}
};
void CRS_to_BCRS(CRSArrays& csr, BCRSArrays* bcrs, const int blockSize) {
bcrs->m = csr.m;
bcrs->nnz = csr.nnz;
bcrs->blockSize = blockSize;
bcrs->nbBlockRow = (csr.m + blockSize-1)/blockSize;
// CHECK: hipMalloc((void**)&bcrs->cu_bsrRowPtrC, sizeof(int) *(bcrs->nbBlockRow+1));
cudaMalloc((void**)&bcrs->cu_bsrRowPtrC, sizeof(int) *(bcrs->nbBlockRow+1));
// CHECK: hipsparseMatDescr_t descr = 0;
cusparseMatDescr_t descr = 0;
// CHECK: CudaSparseCheck(hipsparseCreateMatDescr(&descr));
// CHECK: hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
// CHECK: hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
CudaSparseCheck(cusparseCreateMatDescr(&descr));
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
int nbNnzBlocks;
// NOTE: cusparseXcsr2bsrNnz and CUSPARSE_DIRECTION_COLUMN (of type cusparseDirection_t) are yet unsupported by HIP
// CHECK-NOT: hipsparseXcsr2bsrNnz(bcrs->cusparseHandle, HIPSPARSE_DIRECTION_COLUMN, csr.m, csr.m, descr, csr.cu_csrRowPtrA, csr.cu_csrColIndA,
cusparseXcsr2bsrNnz(bcrs->cusparseHandle, CUSPARSE_DIRECTION_COLUMN, csr.m, csr.m, descr, csr.cu_csrRowPtrA, csr.cu_csrColIndA,
blockSize, descr, bcrs->cu_bsrRowPtrC, &nbNnzBlocks);
{
int firstBlockIdx, lastBlockIdx;
// CHECK: hipMemcpy(&lastBlockIdx, bcrs->cu_bsrRowPtrC+bcrs->nbBlockRow, sizeof(int), hipMemcpyDeviceToHost);
// CHECK: hipMemcpy(&firstBlockIdx, bcrs->cu_bsrRowPtrC, sizeof(int), hipMemcpyDeviceToHost);
cudaMemcpy(&lastBlockIdx, bcrs->cu_bsrRowPtrC+bcrs->nbBlockRow, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&firstBlockIdx, bcrs->cu_bsrRowPtrC, sizeof(int), cudaMemcpyDeviceToHost);
assert(firstBlockIdx == 0); // we are in base 0
assert(nbNnzBlocks == lastBlockIdx - firstBlockIdx);
}
bcrs->nbBlocks = nbNnzBlocks;
// CHECK: CudaCheck(hipMalloc((void**)&bcrs->cu_bsrColIndC, sizeof(int)*nbNnzBlocks));
// CHECK: CudaCheck(hipMalloc((void**)&bcrs->cu_bsrValC, sizeof(double)*(blockSize*blockSize)*nbNnzBlocks));
CudaCheck(cudaMalloc((void**)&bcrs->cu_bsrColIndC, sizeof(int)*nbNnzBlocks));
CudaCheck(cudaMalloc((void**)&bcrs->cu_bsrValC, sizeof(double)*(blockSize*blockSize)*nbNnzBlocks));
// NOTE: cusparseDcsr2bsr and CUSPARSE_DIRECTION_COLUMN (of type cusparseDirection_t) are yet unsupported by HIP
// CHECK-NOT: hipsparseDcsr2bsr(bcrs->cusparseHandle, HIPSPARSE_DIRECTION_COLUMN,
cusparseDcsr2bsr(bcrs->cusparseHandle, CUSPARSE_DIRECTION_COLUMN,
csr.m, csr.m, descr, csr.cu_csrValA, csr.cu_csrRowPtrA, csr.cu_csrColIndA, blockSize, descr, bcrs->cu_bsrValC, bcrs->cu_bsrRowPtrC, bcrs->cu_bsrColIndC);
}
double compute_BSR(BCRSArrays& bcsr, double *x , double *y){
// For blas 2 gemv y = alpha.x.A + Beta.y
const double alpha = 1.0;
const double beta = 0.0;
// Copy input
const int sizeMultipleBlockSize = ((bcsr.m+bcsr.blockSize-1)/bcsr.blockSize)*bcsr.blockSize;
double* cu_x = allocAndCopyPart(x, bcsr.m, sizeMultipleBlockSize);
double* cu_y = allocAndCopyPart(y, bcsr.m, sizeMultipleBlockSize);
// Init matrix properties
// CHECK: hipsparseMatDescr_t descr = 0;
// CHECK: CudaSparseCheck(hipsparseCreateMatDescr(&descr));
// CHECK: hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
// CHECK: hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
cusparseMatDescr_t descr = 0;
CudaSparseCheck(cusparseCreateMatDescr(&descr));
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
// Compute gemv
float gemvComputeTume = 0;
{
// CHECK: hipEvent_t startTime, stopTime;
// CHECK: hipEventCreate(&startTime);
// CHECK: hipEventCreate(&stopTime);
// CHECK: hipEventRecord(startTime, bcsr.streamId);
cudaEvent_t startTime, stopTime;
cudaEventCreate(&startTime);
cudaEventCreate(&stopTime);
cudaEventRecord(startTime, bcsr.streamId);
// CHECK: cusparseDbsrmv(bcsr.cusparseHandle, HIPSPARSE_DIRECTION_COLUMN, HIPSPARSE_OPERATION_NON_TRANSPOSE,
cusparseDbsrmv(bcsr.cusparseHandle, CUSPARSE_DIRECTION_COLUMN, CUSPARSE_OPERATION_NON_TRANSPOSE,
bcsr.nbBlockRow, bcsr.m, bcsr.nbBlocks, &alpha, descr,
bcsr.cu_bsrValC, bcsr.cu_bsrRowPtrC, bcsr.cu_bsrColIndC, bcsr.blockSize,
cu_x, &beta, cu_y);
// CHECK: hipEventRecord(stopTime, bcsr.streamId);
// CHECK: hipEventSynchronize(stopTime);
// CHECK: hipEventElapsedTime(&gemvComputeTume, startTime, stopTime);
cudaEventRecord(stopTime, bcsr.streamId);
cudaEventSynchronize(stopTime);
cudaEventElapsedTime(&gemvComputeTume, startTime, stopTime);
gemvComputeTume /=1000.0;
}
// Get back result
// CHECK: CudaCheck( hipMemcpy(y, cu_y, bcsr.m*sizeof(double), hipMemcpyDeviceToHost ) );
CudaCheck( cudaMemcpy(y, cu_y, bcsr.m*sizeof(double), cudaMemcpyDeviceToHost ) );
// Dealloc vectors
// CHECK: CudaCheck(hipFree(cu_x));
// CHECK: CudaCheck(hipFree(cu_y));
CudaCheck(cudaFree(cu_x));
CudaCheck(cudaFree(cu_y));
return gemvComputeTume;
}
|
36db29c4cea403474727a82841ff35295dc84a90.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_y0f.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
vec_y0f), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vec_y0f), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vec_y0f), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 36db29c4cea403474727a82841ff35295dc84a90.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_y0f.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_y0f<<<gridBlock,threadBlock>>>(n,result,x);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_y0f<<<gridBlock,threadBlock>>>(n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_y0f<<<gridBlock,threadBlock>>>(n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b50a358cf7c4ce5910cd4f8aa534b572de887d01.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device.cuh"
__device__ double datomicAdd(double* address, double val){
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do{
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
} while(assumed != old);
return __longlong_as_double(old);
}
__global__ void MatAdd(double kg[], double kl[], int dof[], int sizekl, int sizekg){
int i = blockIdx.x;
int j = threadIdx.x;
int k = threadIdx.y;
datomicAdd( &(kg[dof[i*sizekl + j]*sizekg + dof[i*sizekl + k]]), kl[i*sizekl*sizekl + j*sizekl +k]);
__syncthreads();
}
| b50a358cf7c4ce5910cd4f8aa534b572de887d01.cu | #include "device.cuh"
__device__ double datomicAdd(double* address, double val){
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do{
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
} while(assumed != old);
return __longlong_as_double(old);
}
__global__ void MatAdd(double kg[], double kl[], int dof[], int sizekl, int sizekg){
int i = blockIdx.x;
int j = threadIdx.x;
int k = threadIdx.y;
datomicAdd( &(kg[dof[i*sizekl + j]*sizekg + dof[i*sizekl + k]]), kl[i*sizekl*sizekl + j*sizekl +k]);
__syncthreads();
}
|
5121413512a15b8c7bdc575667e7dbaafd584669.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#if defined(HAVE_OPENCV_GPU) && !defined(DYNAMIC_CUDA_SUPPORT)
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/reduce.hpp"
#include "opencv2/gpu/device/utility.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/filters.hpp"
namespace cv { namespace gpu { namespace device
{
namespace surf
{
void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold);
void loadOctaveConstants(int octave, int layer_rows, int layer_cols);
void bindImgTex(PtrStepSzb img);
size_t bindSumTex(PtrStepSz<unsigned int> sum);
size_t bindMaskSumTex(PtrStepSz<unsigned int> maskSum);
void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
int octave, int nOctaveLayer);
void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
int img_rows, int img_cols, int octave, bool use_mask, int nLayers);
void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter);
void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures);
void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures);
}
}}}
namespace cv { namespace gpu { namespace device
{
namespace surf
{
////////////////////////////////////////////////////////////////////////
// Global parameters
// The maximum number of features (before subpixel interpolation) that memory is reserved for.
__constant__ int c_max_candidates;
// The maximum number of features that memory is reserved for.
__constant__ int c_max_features;
// The image size.
__constant__ int c_img_rows;
__constant__ int c_img_cols;
// The number of layers.
__constant__ int c_nOctaveLayers;
// The hessian threshold.
__constant__ float c_hessianThreshold;
// The current octave.
__constant__ int c_octave;
// The current layer size.
__constant__ int c_layer_rows;
__constant__ int c_layer_cols;
void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold)
{
cudaSafeCall( hipMemcpyToSymbol(c_max_candidates, &maxCandidates, sizeof(maxCandidates)) );
cudaSafeCall( hipMemcpyToSymbol(c_max_features, &maxFeatures, sizeof(maxFeatures)) );
cudaSafeCall( hipMemcpyToSymbol(c_img_rows, &img_rows, sizeof(img_rows)) );
cudaSafeCall( hipMemcpyToSymbol(c_img_cols, &img_cols, sizeof(img_cols)) );
cudaSafeCall( hipMemcpyToSymbol(c_nOctaveLayers, &nOctaveLayers, sizeof(nOctaveLayers)) );
cudaSafeCall( hipMemcpyToSymbol(c_hessianThreshold, &hessianThreshold, sizeof(hessianThreshold)) );
}
void loadOctaveConstants(int octave, int layer_rows, int layer_cols)
{
    // Refresh the per-octave parameters in device __constant__ memory
    // before launching the kernels for this octave.
    cudaSafeCall( hipMemcpyToSymbol(c_octave,     &octave,     sizeof(int)) );
    cudaSafeCall( hipMemcpyToSymbol(c_layer_rows, &layer_rows, sizeof(int)) );
    cudaSafeCall( hipMemcpyToSymbol(c_layer_cols, &layer_cols, sizeof(int)) );
}
////////////////////////////////////////////////////////////////////////
// Integral image texture
// Point-sampled, clamped 2D textures: the source image (sampled by
// WinReader for the descriptor patch), the integral image (box sums in
// icvCalcHaarPatternSum) and the mask integral image (WithMask::check).
texture<unsigned char, 2, hipReadModeElementType> imgTex(0, hipFilterModePoint, hipAddressModeClamp);
texture<unsigned int, 2, hipReadModeElementType> sumTex(0, hipFilterModePoint, hipAddressModeClamp);
texture<unsigned int, 2, hipReadModeElementType> maskSumTex(0, hipFilterModePoint, hipAddressModeClamp);
// Bind the source image to imgTex for descriptor patch sampling.
void bindImgTex(PtrStepSzb img)
{
    bindTexture(&imgTex, img);
}
size_t bindSumTex(PtrStepSz<uint> sum)
{
    // Bind the integral image to sumTex. The binding may introduce an
    // offset; return it converted from bytes to texels so callers can
    // shift their x coordinates accordingly.
    hipChannelFormatDesc channelDesc = hipCreateChannelDesc<uint>();
    size_t texOffset;
    cudaSafeCall( hipBindTexture2D(&texOffset, sumTex, sum.data, channelDesc, sum.cols, sum.rows, sum.step));
    return texOffset / sizeof(uint);
}
size_t bindMaskSumTex(PtrStepSz<uint> maskSum)
{
    // Bind the mask integral image to maskSumTex; mirror of bindSumTex.
    // Returns the binding offset in texels (not bytes).
    hipChannelFormatDesc channelDesc = hipCreateChannelDesc<uint>();
    size_t texOffset;
    cudaSafeCall( hipBindTexture2D(&texOffset, maskSumTex, maskSum.data, channelDesc, maskSum.cols, maskSum.rows, maskSum.step));
    return texOffset / sizeof(uint);
}
// Evaluate a Haar-like filter of N weighted boxes at (x, y) via the
// integral image bound to sumTex. Each src row is {x1, y1, x2, y2, weight}
// in oldSize reference coordinates; boxes are rescaled to newSize and each
// box sum is normalised by its area before weighting.
template <int N> __device__ float icvCalcHaarPatternSum(const float src[][5], int oldSize, int newSize, int y, int x)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 200
    typedef double real_t;   // sm_20+: accumulate in double for accuracy
#else
    typedef float real_t;
#endif
    float ratio = (float)newSize / oldSize;
    real_t d = 0;
#pragma unroll
    for (int k = 0; k < N; ++k)
    {
        // Box corners rescaled to the requested filter size (round-to-nearest).
        int dx1 = __float2int_rn(ratio * src[k][0]);
        int dy1 = __float2int_rn(ratio * src[k][1]);
        int dx2 = __float2int_rn(ratio * src[k][2]);
        int dy2 = __float2int_rn(ratio * src[k][3]);
        // Standard four-corner integral-image box sum.
        real_t t = 0;
        t += tex2D(sumTex, x + dx1, y + dy1);
        t -= tex2D(sumTex, x + dx1, y + dy2);
        t -= tex2D(sumTex, x + dx2, y + dy1);
        t += tex2D(sumTex, x + dx2, y + dy2);
        // Weight the box and normalise by its (rescaled) area.
        d += t * src[k][4] / ((dx2 - dx1) * (dy2 - dy1));
    }
    return (float)d;
}
////////////////////////////////////////////////////////////////////////
// Hessian
// Box patterns ({x1, y1, x2, y2, weight} rows, 9x9 reference size) fed to
// icvCalcHaarPatternSum to approximate the second derivatives Dxx (c_DX),
// Dyy (c_DY) and Dxy (c_DXY).
__constant__ float c_DX [3][5] = { {0, 2, 3, 7, 1}, {3, 2, 6, 7, -2}, {6, 2, 9, 7, 1} };
__constant__ float c_DY [3][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} };
__constant__ float c_DXY[4][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} };
// Returns the Haar filter size (in pixels) for the given octave and layer.
__host__ __device__ __forceinline__ int calcSize(int octave, int layer)
{
    /* Wavelet size at first layer of first octave. */
    const int HAAR_SIZE0 = 9;
    /* Wavelet size increment between layers. Kept even so that all wavelet
       sizes within an octave share the same parity; this keeps the layers
       above and below a sample aligned when searching for neighbours. */
    const int HAAR_SIZE_INC = 6;
    const int baseSize = HAAR_SIZE0 + HAAR_SIZE_INC * layer;
    // Each octave doubles the filter size.
    return baseSize << octave;
}
// Computes the approximated Hessian determinant and trace for every sample
// of every layer of the current octave, writing into the stacked per-layer
// buffers det and trace.
// Launch layout (see icvCalcLayerDetAndTrace_gpu): one thread per sample;
// grid.y folds together the spatial row blocks and the
// (c_nOctaveLayers + 2) layers.
__global__ void icvCalcLayerDetAndTrace(PtrStepf det, PtrStepf trace)
{
    // Determine the indices
    const int gridDim_y = gridDim.y / (c_nOctaveLayers + 2);
    const int blockIdx_y = blockIdx.y % gridDim_y;
    const int blockIdx_z = blockIdx.y / gridDim_y;
    const int j = threadIdx.x + blockIdx.x * blockDim.x;
    const int i = threadIdx.y + blockIdx_y * blockDim.y;
    const int layer = blockIdx_z;
    const int size = calcSize(c_octave, layer);
    // Number of valid samples in this layer: the filter must fit inside the image.
    const int samples_i = 1 + ((c_img_rows - size) >> c_octave);
    const int samples_j = 1 + ((c_img_cols - size) >> c_octave);
    // Ignore pixels where some of the kernel is outside the image
    const int margin = (size >> 1) >> c_octave;
    if (size <= c_img_rows && size <= c_img_cols && i < samples_i && j < samples_j)
    {
        const float dx = icvCalcHaarPatternSum<3>(c_DX , 9, size, (i << c_octave), (j << c_octave));
        const float dy = icvCalcHaarPatternSum<3>(c_DY , 9, size, (i << c_octave), (j << c_octave));
        const float dxy = icvCalcHaarPatternSum<4>(c_DXY, 9, size, (i << c_octave), (j << c_octave));
        // det(H) approximation; 0.81f (= 0.9^2) is the standard SURF weight
        // compensating the box-filter approximation of Dxy.
        det.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx * dy - 0.81f * dxy * dxy;
        trace.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx + dy;
    }
}
// Host wrapper: launches icvCalcLayerDetAndTrace for one octave and blocks
// until it completes. grid.y folds the (nOctaveLayers + 2) layers on top of
// the spatial row blocks, matching the kernel's index arithmetic.
void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
    int octave, int nOctaveLayers)
{
    // The smallest filter of the octave admits the most samples, so it
    // bounds the grid extents for every layer.
    const int min_size = calcSize(octave, 0);
    const int max_samples_i = 1 + ((img_rows - min_size) >> octave);
    const int max_samples_j = 1 + ((img_cols - min_size) >> octave);
    const dim3 threads(16, 16);
    const dim3 grid(divUp(max_samples_j, threads.x),
                    divUp(max_samples_i, threads.y) * (nOctaveLayers + 2));
    hipLaunchKernelGGL(( icvCalcLayerDetAndTrace), dim3(grid), dim3(threads), 0, 0, det, trace);
    cudaSafeCall( hipGetLastError() );
    cudaSafeCall( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// NONMAX
// Single box ({x1, y1, x2, y2, weight}, 9x9 reference size) used to
// measure mask coverage over a candidate's support window.
__constant__ float c_DM[5] = {0, 0, 9, 9, 1};
// Mask policy for icvFindMaximaInLayer: accept a candidate only when the
// mask integral image averages >= 0.5 over the size x size window whose
// top-left corner in the sum image is (sum_j, sum_i).
struct WithMask
{
    static __device__ bool check(int sum_i, int sum_j, int size)
    {
        float ratio = (float)size / 9.0f;
        float d = 0;
        // c_DM box rescaled from the 9x9 reference to the candidate's size.
        int dx1 = __float2int_rn(ratio * c_DM[0]);
        int dy1 = __float2int_rn(ratio * c_DM[1]);
        int dx2 = __float2int_rn(ratio * c_DM[2]);
        int dy2 = __float2int_rn(ratio * c_DM[3]);
        // Four-corner box sum over the mask integral image.
        float t = 0;
        t += tex2D(maskSumTex, sum_j + dx1, sum_i + dy1);
        t -= tex2D(maskSumTex, sum_j + dx1, sum_i + dy2);
        t -= tex2D(maskSumTex, sum_j + dx2, sum_i + dy1);
        t += tex2D(maskSumTex, sum_j + dx2, sum_i + dy2);
        // Normalise by the box area to get the mean mask value.
        d += t * c_DM[4] / ((dx2 - dx1) * (dy2 - dy1));
        return (d >= 0.5f);
    }
};
// 3x3x3 non-maximum suppression over the stacked determinant layers:
// a sample is kept when it exceeds c_hessianThreshold, passes Mask::check,
// and is strictly greater than its 26 neighbours in scale-space. Survivors
// are appended to maxPosBuffer as (x, y, layer, laplacian-sign).
// Launch layout (see icvFindMaximaInLayer_gpu): grid.y folds the
// c_nOctaveLayers candidate layers on top of the spatial row blocks; each
// block computes a (blockDim - 2)^2 interior tile, the outer ring of
// threads only loads apron values. Dynamic shared memory holds
// blockDim.x * blockDim.y * 3 floats (three det layers).
template <typename Mask>
__global__ void icvFindMaximaInLayer(const PtrStepf det, const PtrStepf trace, int4* maxPosBuffer,
    unsigned int* maxCounter)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
    extern __shared__ float N9[];
    // The hidx variables are the indices to the hessian buffer.
    const int gridDim_y = gridDim.y / c_nOctaveLayers;
    const int blockIdx_y = blockIdx.y % gridDim_y;
    const int blockIdx_z = blockIdx.y / gridDim_y;
    // The first and last layers serve only as scale-space neighbours, so
    // candidates come from layers 1..c_nOctaveLayers.
    const int layer = blockIdx_z + 1;
    const int size = calcSize(c_octave, layer);
    // Ignore pixels without a 3x3x3 neighbourhood in the layer above
    const int margin = ((calcSize(c_octave, layer + 1) >> 1) >> c_octave) + 1;
    const int j = threadIdx.x + blockIdx.x * (blockDim.x - 2) + margin - 1;
    const int i = threadIdx.y + blockIdx_y * (blockDim.y - 2) + margin - 1;
    // Is this thread within the hessian buffer?
    const int zoff = blockDim.x * blockDim.y;
    const int localLin = threadIdx.x + threadIdx.y * blockDim.x + zoff;
    // Stage the layer below, the candidate layer and the layer above into
    // shared memory, clamping the coordinates to the image bounds.
    N9[localLin - zoff] = det.ptr(c_layer_rows * (layer - 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
    N9[localLin ] = det.ptr(c_layer_rows * (layer ) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
    N9[localLin + zoff] = det.ptr(c_layer_rows * (layer + 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
    __syncthreads();
    // Only interior threads continue: the apron ring lacks a full neighbourhood.
    if (i < c_layer_rows - margin && j < c_layer_cols - margin && threadIdx.x > 0 && threadIdx.x < blockDim.x - 1 && threadIdx.y > 0 && threadIdx.y < blockDim.y - 1)
    {
        float val0 = N9[localLin];
        if (val0 > c_hessianThreshold)
        {
            // Coordinates for the start of the wavelet in the sum image. There
            // is some integer division involved, so don't try to simplify this
            // (cancel out sampleStep) without checking the result is the same
            const int sum_i = (i - ((size >> 1) >> c_octave)) << c_octave;
            const int sum_j = (j - ((size >> 1) >> c_octave)) << c_octave;
            if (Mask::check(sum_i, sum_j, size))
            {
                // Check to see if we have a max (in its 26 neighbours)
                const bool condmax = val0 > N9[localLin - 1 - blockDim.x - zoff]
                && val0 > N9[localLin - blockDim.x - zoff]
                && val0 > N9[localLin + 1 - blockDim.x - zoff]
                && val0 > N9[localLin - 1 - zoff]
                && val0 > N9[localLin - zoff]
                && val0 > N9[localLin + 1 - zoff]
                && val0 > N9[localLin - 1 + blockDim.x - zoff]
                && val0 > N9[localLin + blockDim.x - zoff]
                && val0 > N9[localLin + 1 + blockDim.x - zoff]
                && val0 > N9[localLin - 1 - blockDim.x]
                && val0 > N9[localLin - blockDim.x]
                && val0 > N9[localLin + 1 - blockDim.x]
                && val0 > N9[localLin - 1 ]
                && val0 > N9[localLin + 1 ]
                && val0 > N9[localLin - 1 + blockDim.x]
                && val0 > N9[localLin + blockDim.x]
                && val0 > N9[localLin + 1 + blockDim.x]
                && val0 > N9[localLin - 1 - blockDim.x + zoff]
                && val0 > N9[localLin - blockDim.x + zoff]
                && val0 > N9[localLin + 1 - blockDim.x + zoff]
                && val0 > N9[localLin - 1 + zoff]
                && val0 > N9[localLin + zoff]
                && val0 > N9[localLin + 1 + zoff]
                && val0 > N9[localLin - 1 + blockDim.x + zoff]
                && val0 > N9[localLin + blockDim.x + zoff]
                && val0 > N9[localLin + 1 + blockDim.x + zoff]
                ;
                if(condmax)
                {
                    // Grab a slot; slots past c_max_candidates are counted
                    // but not stored.
                    unsigned int ind = atomicInc(maxCounter,(unsigned int) -1);
                    if (ind < c_max_candidates)
                    {
                        // Record the sign of the trace alongside the position.
                        const int laplacian = (int) copysignf(1.0f, trace.ptr(layer * c_layer_rows + i)[j]);
                        maxPosBuffer[ind] = make_int4(j, i, layer, laplacian);
                    }
                }
            }
        }
    }
#endif
}
// Host wrapper: launches the non-max-suppression kernel over all candidate
// layers of one octave and blocks until completion. use_mask selects the
// WithMask policy (mask integral image must be bound) over WithOutMask.
void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
    int img_rows, int img_cols, int octave, bool use_mask, int nOctaveLayers)
{
    const int layer_rows = img_rows >> octave;
    const int layer_cols = img_cols >> octave;
    const int min_margin = ((calcSize(octave, 2) >> 1) >> octave) + 1;
    const dim3 threads(16, 16);
    // Each block produces a (threads - 2)^2 interior tile; the one-thread
    // apron only loads neighbours. grid.y folds in the nOctaveLayers layers.
    const dim3 grid(divUp(layer_cols - 2 * min_margin, threads.x - 2),
                    divUp(layer_rows - 2 * min_margin, threads.y - 2) * nOctaveLayers);
    // Dynamic shared memory: three determinant layers per block.
    const size_t smem_size = threads.x * threads.y * 3 * sizeof(float);
    if (use_mask)
        hipLaunchKernelGGL(( icvFindMaximaInLayer<WithMask>), dim3(grid), dim3(threads), smem_size, 0, det, trace, maxPosBuffer, maxCounter);
    else
        hipLaunchKernelGGL(( icvFindMaximaInLayer<WithOutMask>), dim3(grid), dim3(threads), smem_size, 0, det, trace, maxPosBuffer, maxCounter);
    cudaSafeCall( hipGetLastError() );
    cudaSafeCall( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// INTERPOLATION
// Refines each detected maximum to subpixel position and scale by fitting
// a 3D quadratic to its 3x3x3 determinant neighbourhood (solving
// H * x = dD) and, on success, appends the feature to the output arrays.
// Launch layout (see icvInterpolateKeypoint_gpu): one 3x3x3 thread block
// per entry of maxPosBuffer; the 27 threads cooperatively load N9 and
// thread (0,0,0) does the rest.
__global__ void icvInterpolateKeypoint(const PtrStepf det, const int4* maxPosBuffer,
    float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
    unsigned int* featureCounter)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
    const int4 maxPos = maxPosBuffer[blockIdx.x];
    const int j = maxPos.x - 1 + threadIdx.x;
    const int i = maxPos.y - 1 + threadIdx.y;
    const int layer = maxPos.z - 1 + threadIdx.z;
    __shared__ float N9[3][3][3];
    N9[threadIdx.z][threadIdx.y][threadIdx.x] = det.ptr(c_layer_rows * layer + i)[j];
    __syncthreads();
    if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0)
    {
        // Negated gradient of det at the centre (right-hand side of H*x = dD).
        __shared__ float dD[3];
        //dx
        dD[0] = -0.5f * (N9[1][1][2] - N9[1][1][0]);
        //dy
        dD[1] = -0.5f * (N9[1][2][1] - N9[1][0][1]);
        //ds
        dD[2] = -0.5f * (N9[2][1][1] - N9[0][1][1]);
        // Hessian of det w.r.t. (x, y, s), from central differences.
        __shared__ float H[3][3];
        //dxx
        H[0][0] = N9[1][1][0] - 2.0f * N9[1][1][1] + N9[1][1][2];
        //dxy
        H[0][1]= 0.25f * (N9[1][2][2] - N9[1][2][0] - N9[1][0][2] + N9[1][0][0]);
        //dxs
        H[0][2]= 0.25f * (N9[2][1][2] - N9[2][1][0] - N9[0][1][2] + N9[0][1][0]);
        //dyx = dxy
        H[1][0] = H[0][1];
        //dyy
        H[1][1] = N9[1][0][1] - 2.0f * N9[1][1][1] + N9[1][2][1];
        //dys
        H[1][2]= 0.25f * (N9[2][2][1] - N9[2][0][1] - N9[0][2][1] + N9[0][0][1]);
        //dsx = dxs
        H[2][0] = H[0][2];
        //dsy = dys
        H[2][1] = H[1][2];
        //dss
        H[2][2] = N9[0][1][1] - 2.0f * N9[1][1][1] + N9[2][1][1];
        __shared__ float x[3];
        if (solve3x3(H, dD, x))
        {
            // Accept only if the subpixel step stays within one cell in
            // every dimension; otherwise the fit is unreliable.
            if (::fabs(x[0]) <= 1.f && ::fabs(x[1]) <= 1.f && ::fabs(x[2]) <= 1.f)
            {
                // if the step is within the interpolation region, perform it
                const int size = calcSize(c_octave, maxPos.z);
                const int sum_i = (maxPos.y - ((size >> 1) >> c_octave)) << c_octave;
                const int sum_j = (maxPos.x - ((size >> 1) >> c_octave)) << c_octave;
                const float center_i = sum_i + (float)(size - 1) / 2;
                const float center_j = sum_j + (float)(size - 1) / 2;
                const float px = center_j + x[0] * (1 << c_octave);
                const float py = center_i + x[1] * (1 << c_octave);
                const int ds = size - calcSize(c_octave, maxPos.z - 1);
                const float psize = roundf(size + x[2] * ds);
                /* The sampling intervals and wavelet sized for selecting an orientation
                   and building the keypoint descriptor are defined relative to 's' */
                const float s = psize * 1.2f / 9.0f;
                /* To find the dominant orientation, the gradients in x and y are
                   sampled in a circle of radius 6s using wavelets of size 4s.
                   We ensure the gradient wavelet size is even to ensure the
                   wavelet pattern is balanced and symmetric around its center */
                const int grad_wav_size = 2 * __float2int_rn(2.0f * s);
                // check when grad_wav_size is too big
                if ((c_img_rows + 1) >= grad_wav_size && (c_img_cols + 1) >= grad_wav_size)
                {
                    // Get a new feature index.
                    unsigned int ind = atomicInc(featureCounter, (unsigned int)-1);
                    if (ind < c_max_features)
                    {
                        featureX[ind] = px;
                        featureY[ind] = py;
                        featureLaplacian[ind] = maxPos.w;
                        featureOctave[ind] = c_octave;
                        featureSize[ind] = psize;
                        featureHessian[ind] = N9[1][1][1];
                    }
                } // grad_wav_size check
            } // If the subpixel interpolation worked
        }
    } // If this is thread 0.
#endif
}
// Host wrapper: launches one 3x3x3 block per detected maximum to refine it
// to subpixel position/scale, then blocks until completion.
void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
    float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
    unsigned int* featureCounter)
{
    // Nothing to interpolate: a zero-sized grid is an invalid launch
    // configuration, so return early instead of failing inside cudaSafeCall.
    if (maxCounter == 0)
        return;
    dim3 threads;
    threads.x = 3;
    threads.y = 3;
    threads.z = 3;
    dim3 grid;
    grid.x = maxCounter;
    hipLaunchKernelGGL(( icvInterpolateKeypoint), dim3(grid), dim3(threads), 0, 0, det, maxPosBuffer, featureX, featureY, featureLaplacian, featureOctave, featureSize, featureHessian, featureCounter);
    cudaSafeCall( hipGetLastError() );
    cudaSafeCall( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// Orientation
// Sliding-window parameters for orientation estimation: the window is
// advanced in ORI_SEARCH_INC-degree steps, spans ORI_WIN degrees, and the
// responses are taken at ORI_SAMPLES points on a disc (c_aptX/c_aptY).
#define ORI_SEARCH_INC 5
#define ORI_WIN 60
#define ORI_SAMPLES 113
__constant__ float c_aptX[ORI_SAMPLES] = {-6, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6};
__constant__ float c_aptY[ORI_SAMPLES] = {0, -3, -2, -1, 0, 1, 2, 3, -4, -3, -2, -1, 0, 1, 2, 3, 4, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, 0};
__constant__ float c_aptW[ORI_SAMPLES] = {0.001455130288377404f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.001455130288377404f, 0.0035081731621176f, 0.00720730796456337f, 0.01261763460934162f, 0.0188232995569706f, 0.02392910048365593f, 0.02592208795249462f, 0.02392910048365593f, 0.0188232995569706f, 0.01261763460934162f, 0.00720730796456337f, 0.0035081731621176f, 0.001455130288377404f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.001707611023448408f, 0.0035081731621176f, 
0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.001455130288377404f};
// Two-box Haar wavelet patterns ({x1, y1, x2, y2, weight} rows, 4x4
// reference size) for the x and y gradient responses used in
// icvCalcOrientation via icvCalcHaarPatternSum<2>.
__constant__ float c_NX[2][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}};
__constant__ float c_NY[2][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}};
// Computes the dominant orientation of one feature per block by sliding a
// 60-degree window over Haar wavelet responses sampled on a disc around
// the feature, then writes the winning direction (degrees) to featureDir.
// Launch layout (see icvCalcOrientation_gpu): one 32x4 block per feature.
__global__ void icvCalcOrientation(const float* featureX, const float* featureY, const float* featureSize, float* featureDir)
{
    // 128 slots: ORI_SAMPLES (113) real samples, the rest stay zero.
    __shared__ float s_X[128];
    __shared__ float s_Y[128];
    __shared__ float s_angle[128];
    // Per-row reduction scratch for the windowed sums.
    __shared__ float s_sumx[32 * 4];
    __shared__ float s_sumy[32 * 4];
    /* The sampling intervals and wavelet sized for selecting an orientation
       and building the keypoint descriptor are defined relative to 's' */
    const float s = featureSize[blockIdx.x] * 1.2f / 9.0f;
    /* To find the dominant orientation, the gradients in x and y are
       sampled in a circle of radius 6s using wavelets of size 4s.
       We ensure the gradient wavelet size is even to ensure the
       wavelet pattern is balanced and symmetric around its center */
    const int grad_wav_size = 2 * __float2int_rn(2.0f * s);
    // check when grad_wav_size is too big
    if ((c_img_rows + 1) < grad_wav_size || (c_img_cols + 1) < grad_wav_size)
        return;
    // Calc X, Y, angle and store it to shared memory
    const int tid = threadIdx.y * blockDim.x + threadIdx.x;
    float X = 0.0f, Y = 0.0f, angle = 0.0f;
    if (tid < ORI_SAMPLES)
    {
        const float margin = (float)(grad_wav_size - 1) / 2.0f;
        const int x = __float2int_rn(featureX[blockIdx.x] + c_aptX[tid] * s - margin);
        const int y = __float2int_rn(featureY[blockIdx.x] + c_aptY[tid] * s - margin);
        // Skip samples whose wavelet would fall outside the integral image.
        if (y >= 0 && y < (c_img_rows + 1) - grad_wav_size &&
            x >= 0 && x < (c_img_cols + 1) - grad_wav_size)
        {
            X = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NX, 4, grad_wav_size, y, x);
            Y = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NY, 4, grad_wav_size, y, x);
            // Sample angle in degrees, normalised to [0, 360).
            angle = atan2f(Y, X);
            if (angle < 0)
                angle += 2.0f * CV_PI_F;
            angle *= 180.0f / CV_PI_F;
        }
    }
    s_X[tid] = X;
    s_Y[tid] = Y;
    s_angle[tid] = angle;
    __syncthreads();
    float bestx = 0, besty = 0, best_mod = 0;
#if __CUDA_ARCH__ >= 200
    #pragma unroll
#endif
    // 72 window positions (5 degrees apart) in 18 iterations: the four
    // threadIdx.y rows each handle one position per iteration.
    for (int i = 0; i < 18; ++i)
    {
        const int dir = (i * 4 + threadIdx.y) * ORI_SEARCH_INC;
        float sumx = 0.0f, sumy = 0.0f;
        // Each of the 32 lanes accumulates 4 of the 128 sample slots whose
        // angle lies within the 60-degree window centred on 'dir'.
        int d = ::abs(__float2int_rn(s_angle[threadIdx.x]) - dir);
        if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
        {
            sumx = s_X[threadIdx.x];
            sumy = s_Y[threadIdx.x];
        }
        d = ::abs(__float2int_rn(s_angle[threadIdx.x + 32]) - dir);
        if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
        {
            sumx += s_X[threadIdx.x + 32];
            sumy += s_Y[threadIdx.x + 32];
        }
        d = ::abs(__float2int_rn(s_angle[threadIdx.x + 64]) - dir);
        if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
        {
            sumx += s_X[threadIdx.x + 64];
            sumy += s_Y[threadIdx.x + 64];
        }
        d = ::abs(__float2int_rn(s_angle[threadIdx.x + 96]) - dir);
        if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
        {
            sumx += s_X[threadIdx.x + 96];
            sumy += s_Y[threadIdx.x + 96];
        }
        // Reduce the 32 lane partials of this row to a single (sumx, sumy).
        plus<float> op;
        device::reduce<32>(smem_tuple(s_sumx + threadIdx.y * 32, s_sumy + threadIdx.y * 32),
            thrust::tie(sumx, sumy), threadIdx.x, thrust::make_tuple(op, op));
        // Track the window with the largest squared response magnitude.
        const float temp_mod = sumx * sumx + sumy * sumy;
        if (temp_mod > best_mod)
        {
            best_mod = temp_mod;
            bestx = sumx;
            besty = sumy;
        }
        __syncthreads();
    }
    // Each row publishes its best window; thread (0,0) picks the winner.
    if (threadIdx.x == 0)
    {
        s_X[threadIdx.y] = bestx;
        s_Y[threadIdx.y] = besty;
        s_angle[threadIdx.y] = best_mod;
    }
    __syncthreads();
    if (threadIdx.x == 0 && threadIdx.y == 0)
    {
        int bestIdx = 0;
        if (s_angle[1] > s_angle[bestIdx])
            bestIdx = 1;
        if (s_angle[2] > s_angle[bestIdx])
            bestIdx = 2;
        if (s_angle[3] > s_angle[bestIdx])
            bestIdx = 3;
        float kp_dir = atan2f(s_Y[bestIdx], s_X[bestIdx]);
        if (kp_dir < 0)
            kp_dir += 2.0f * CV_PI_F;
        kp_dir *= 180.0f / CV_PI_F;
        // Flip the direction convention and snap values within epsilon of
        // 360 back to 0 so the result stays in [0, 360).
        kp_dir = 360.0f - kp_dir;
        if (::fabsf(kp_dir - 360.f) < numeric_limits<float>::epsilon())
            kp_dir = 0.f;
        featureDir[blockIdx.x] = kp_dir;
    }
}
#undef ORI_SEARCH_INC
#undef ORI_WIN
#undef ORI_SAMPLES
// Host wrapper: launches one 32x4 block per feature to compute its dominant
// orientation, then blocks until completion.
void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures)
{
    // An empty feature set would produce a zero-sized grid, which is an
    // invalid launch configuration; treat it as a no-op instead of letting
    // cudaSafeCall abort.
    if (nFeatures <= 0)
        return;
    dim3 threads;
    threads.x = 32;
    threads.y = 4;
    dim3 grid;
    grid.x = nFeatures;
    hipLaunchKernelGGL(( icvCalcOrientation), dim3(grid), dim3(threads), 0, 0, featureX, featureY, featureSize, featureDir);
    cudaSafeCall( hipGetLastError() );
    cudaSafeCall( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// Descriptors
// Side length (in samples) of the square patch taken around each keypoint
// when building the descriptor; c_DW below holds its PATCH_SZ^2 weights.
#define PATCH_SZ 20
__constant__ float c_DW[PATCH_SZ * PATCH_SZ] =
{
3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f,
8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f
};
// Samples the source image (imgTex) inside a rotated window centred on a
// keypoint: operator()(i, j) maps window coordinates through the rotation
// (cos_dir, sin_dir) about (centerX, centerY) before the texture fetch.
struct WinReader
{
    typedef uchar elem_type;
    __device__ __forceinline__ uchar operator ()(int i, int j) const
    {
        float pixel_x = centerX + (win_offset + j) * cos_dir + (win_offset + i) * sin_dir;
        float pixel_y = centerY - (win_offset + j) * sin_dir + (win_offset + i) * cos_dir;
        return tex2D(imgTex, pixel_x, pixel_y);
    }
    float centerX;      // window centre (image coordinates)
    float centerY;
    float win_offset;   // offset of window origin from the centre (negative)
    float cos_dir;      // window rotation
    float sin_dir;
    int width;          // window extent in pixels
    int height;
};
__device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir,
    float& dx, float& dy);
// Samples a rotated (PATCH_SZ+1)^2 patch around this block's feature
// (indexed by blockIdx.x) into shared memory, then computes the weighted
// Haar responses (dx, dy) for this thread's sample point. Threads that do
// not map onto the 4x4 grid of 5x5 sub-patches leave dx = dy = 0.
__device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir,
    float& dx, float& dy)
{
    __shared__ float s_PATCH[PATCH_SZ + 1][PATCH_SZ + 1];
    dx = dy = 0.0f;
    WinReader win;
    win.centerX = featureX[blockIdx.x];
    win.centerY = featureY[blockIdx.x];
    // The sampling intervals and wavelet sized for selecting an orientation
    // and building the keypoint descriptor are defined relative to 's'
    const float s = featureSize[blockIdx.x] * 1.2f / 9.0f;
    // Extract a window of pixels around the keypoint of size 20s
    const int win_size = (int)((PATCH_SZ + 1) * s);
    win.width = win.height = win_size;
    // Nearest neighbour version (faster)
    win.win_offset = -(win_size - 1.0f) / 2.0f;
    // Rotate the window against the keypoint orientation so the descriptor
    // is rotation invariant; snap near-360 back to 0.
    float descriptor_dir = 360.0f - featureDir[blockIdx.x];
    if (::fabsf(descriptor_dir - 360.f) < numeric_limits<float>::epsilon())
        descriptor_dir = 0.f;
    descriptor_dir *= CV_PI_F / 180.0f;
    sincosf(descriptor_dir, &win.sin_dir, &win.cos_dir);
    // Cooperative load of the (PATCH_SZ+1)^2 patch samples across the block.
    const int tid = threadIdx.y * blockDim.x + threadIdx.x;
    const int xLoadInd = tid % (PATCH_SZ + 1);
    const int yLoadInd = tid / (PATCH_SZ + 1);
    if (yLoadInd < (PATCH_SZ + 1))
    {
        if (s > 1)
        {
            // Coarser than 1:1 — resample with AreaFilter.
            AreaFilter<WinReader> filter(win, s, s);
            s_PATCH[yLoadInd][xLoadInd] = filter(yLoadInd, xLoadInd);
        }
        else
        {
            // Finer than 1:1 — resample with LinearFilter.
            LinearFilter<WinReader> filter(win);
            s_PATCH[yLoadInd][xLoadInd] = filter(yLoadInd * s, xLoadInd * s);
        }
    }
    __syncthreads();
    // Map (threadIdx.x, threadIdx.y) onto a 4x4 grid of 5x5 sub-patches:
    // threadIdx.x < 25 picks the sample within a sub-patch, threadIdx.y the
    // sub-patch itself.
    const int xPatchInd = threadIdx.x % 5;
    const int yPatchInd = threadIdx.x / 5;
    if (yPatchInd < 5)
    {
        const int xBlockInd = threadIdx.y % 4;
        const int yBlockInd = threadIdx.y / 4;
        const int xInd = xBlockInd * 5 + xPatchInd;
        const int yInd = yBlockInd * 5 + yPatchInd;
        // 2x2 Haar responses, weighted by the c_DW lookup table.
        const float dw = c_DW[yInd * PATCH_SZ + xInd];
        dx = (s_PATCH[yInd ][xInd + 1] - s_PATCH[yInd][xInd] + s_PATCH[yInd + 1][xInd + 1] - s_PATCH[yInd + 1][xInd ]) * dw;
        dy = (s_PATCH[yInd + 1][xInd ] - s_PATCH[yInd][xInd] + s_PATCH[yInd + 1][xInd + 1] - s_PATCH[yInd ][xInd + 1]) * dw;
    }
}
// Computes the unnormalized 64-element SURF descriptor for feature
// blockIdx.x. Launch: one block per feature, blockDim = (32, 16) —
// threadIdx.y selects one of the 16 sub-regions, the 32 threads in x
// cooperate on the per-sub-region sums.
__global__ void compute_descriptors_64(PtrStep<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
{
// One 32-float reduction row per threadIdx.y slice.
__shared__ float smem[32 * 16];
float* sRow = smem + threadIdx.y * 32;
float dx, dy;
calc_dx_dy(featureX, featureY, featureSize, featureDir, dx, dy);
float dxabs = ::fabsf(dx);
float dyabs = ::fabsf(dy);
plus<float> op;
// Sum each quantity across the 32 threads of this row; only
// threadIdx.x == 0 consumes the reduced values below.
reduce<32>(sRow, dx, threadIdx.x, op);
reduce<32>(sRow, dy, threadIdx.x, op);
reduce<32>(sRow, dxabs, threadIdx.x, op);
reduce<32>(sRow, dyabs, threadIdx.x, op);
// One float4 per sub-region in the descriptor row for this feature.
float4* descriptors_block = descriptors.ptr(blockIdx.x) + threadIdx.y;
// write dx, dy, |dx|, |dy|
if (threadIdx.x == 0)
*descriptors_block = make_float4(dx, dy, dxabs, dyabs);
}
// Computes the unnormalized 128-element SURF (extended) descriptor for
// feature blockIdx.x. Same launch shape as compute_descriptors_64
// (one block per feature, blockDim = (32, 16)), but each sub-region emits
// two float4s: responses are split by the sign of the other component.
__global__ void compute_descriptors_128(PtrStep<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
{
// One 32-float reduction row per threadIdx.y slice.
__shared__ float smem[32 * 16];
float* sRow = smem + threadIdx.y * 32;
float dx, dy;
calc_dx_dy(featureX, featureY, featureSize, featureDir, dx, dy);
// Two float4 slots per sub-region in this feature's descriptor row.
float4* descriptors_block = descriptors.ptr(blockIdx.x) + threadIdx.y * 2;
plus<float> op;
// First pass: split dx by the sign of dy.
float d1 = 0.0f;
float d2 = 0.0f;
float abs1 = 0.0f;
float abs2 = 0.0f;
if (dy >= 0)
{
d1 = dx;
abs1 = ::fabsf(dx);
}
else
{
d2 = dx;
abs2 = ::fabsf(dx);
}
// Sum across the 32 threads of the row; thread 0 writes the results.
reduce<32>(sRow, d1, threadIdx.x, op);
reduce<32>(sRow, d2, threadIdx.x, op);
reduce<32>(sRow, abs1, threadIdx.x, op);
reduce<32>(sRow, abs2, threadIdx.x, op);
// write dx (dy >= 0), |dx| (dy >= 0), dx (dy < 0), |dx| (dy < 0)
if (threadIdx.x == 0)
descriptors_block[0] = make_float4(d1, abs1, d2, abs2);
// Second pass: split dy by the sign of dx, reusing the same accumulators.
if (dx >= 0)
{
d1 = dy;
abs1 = ::fabsf(dy);
d2 = 0.0f;
abs2 = 0.0f;
}
else
{
d1 = 0.0f;
abs1 = 0.0f;
d2 = dy;
abs2 = ::fabsf(dy);
}
reduce<32>(sRow, d1, threadIdx.x, op);
reduce<32>(sRow, d2, threadIdx.x, op);
reduce<32>(sRow, abs1, threadIdx.x, op);
reduce<32>(sRow, abs2, threadIdx.x, op);
// write dy (dx >= 0), |dy| (dx >= 0), dy (dx < 0), |dy| (dx < 0)
if (threadIdx.x == 0)
descriptors_block[1] = make_float4(d1, abs1, d2, abs2);
}
// Normalizes one descriptor row to unit Euclidean length.
// Launch: one block per descriptor; BLOCK_DIM_X must equal the descriptor
// length (64 or 128), so each thread owns exactly one element.
template <int BLOCK_DIM_X> __global__ void normalize_descriptors(PtrStepf descriptors)
{
    __shared__ float s_squares[BLOCK_DIM_X];
    __shared__ float s_norm;

    float* descriptor_base = descriptors.ptr(blockIdx.x);

    // Read this thread's (unnormalized) descriptor element.
    const float elem = descriptor_base[threadIdx.x];

    // Block-wide sum of squared elements.
    float sq = elem * elem;
    reduce<BLOCK_DIM_X>(s_squares, sq, threadIdx.x, plus<float>());

    // Thread 0 publishes the Euclidean length for the whole block.
    if (threadIdx.x == 0)
        s_norm = ::sqrtf(sq);
    __syncthreads();

    // Write the normalized element back in place.
    descriptor_base[threadIdx.x] = elem / s_norm;
}
// Host entry point: computes SURF descriptors for nFeatures keypoints.
// First fills the unnormalized descriptors (one 32x16 block per feature),
// then normalizes each row to unit length with a descriptor-sized block.
void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures)
{
    const dim3 descrBlock(32, 16);
    const dim3 featureGrid(nFeatures);

    if (descriptors.cols == 64)
    {
        hipLaunchKernelGGL(( compute_descriptors_64), dim3(featureGrid), dim3(descrBlock), 0, 0, descriptors, featureX, featureY, featureSize, featureDir);
        cudaSafeCall( hipGetLastError() );
        cudaSafeCall( hipDeviceSynchronize() );

        hipLaunchKernelGGL(( normalize_descriptors<64>), dim3(featureGrid), dim3(64), 0, 0, (PtrStepSzf) descriptors);
        cudaSafeCall( hipGetLastError() );
        cudaSafeCall( hipDeviceSynchronize() );
    }
    else
    {
        hipLaunchKernelGGL(( compute_descriptors_128), dim3(featureGrid), dim3(descrBlock), 0, 0, descriptors, featureX, featureY, featureSize, featureDir);
        cudaSafeCall( hipGetLastError() );
        cudaSafeCall( hipDeviceSynchronize() );

        hipLaunchKernelGGL(( normalize_descriptors<128>), dim3(featureGrid), dim3(128), 0, 0, (PtrStepSzf) descriptors);
        cudaSafeCall( hipGetLastError() );
        cudaSafeCall( hipDeviceSynchronize() );
    }
}
} // namespace surf
}}} // namespace cv { namespace gpu { namespace device
#endif /* HAVE_OPENCV_GPU */
| 5121413512a15b8c7bdc575667e7dbaafd584669.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#if defined(HAVE_OPENCV_GPU) && !defined(DYNAMIC_CUDA_SUPPORT)
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/reduce.hpp"
#include "opencv2/gpu/device/utility.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/filters.hpp"
namespace cv { namespace gpu { namespace device
{
namespace surf
{
void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold);
void loadOctaveConstants(int octave, int layer_rows, int layer_cols);
void bindImgTex(PtrStepSzb img);
size_t bindSumTex(PtrStepSz<unsigned int> sum);
size_t bindMaskSumTex(PtrStepSz<unsigned int> maskSum);
void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
int octave, int nOctaveLayer);
void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
int img_rows, int img_cols, int octave, bool use_mask, int nLayers);
void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter);
void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures);
void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures);
}
}}}
namespace cv { namespace gpu { namespace device
{
namespace surf
{
////////////////////////////////////////////////////////////////////////
// Global parameters
// The maximum number of features (before subpixel interpolation) that memory is reserved for.
__constant__ int c_max_candidates;
// The maximum number of features that memory is reserved for.
__constant__ int c_max_features;
// The image size.
__constant__ int c_img_rows;
__constant__ int c_img_cols;
// The number of layers.
__constant__ int c_nOctaveLayers;
// The hessian threshold.
__constant__ float c_hessianThreshold;
// The current octave.
__constant__ int c_octave;
// The current layer size.
__constant__ int c_layer_rows;
__constant__ int c_layer_cols;
void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold)
{
cudaSafeCall( cudaMemcpyToSymbol(c_max_candidates, &maxCandidates, sizeof(maxCandidates)) );
cudaSafeCall( cudaMemcpyToSymbol(c_max_features, &maxFeatures, sizeof(maxFeatures)) );
cudaSafeCall( cudaMemcpyToSymbol(c_img_rows, &img_rows, sizeof(img_rows)) );
cudaSafeCall( cudaMemcpyToSymbol(c_img_cols, &img_cols, sizeof(img_cols)) );
cudaSafeCall( cudaMemcpyToSymbol(c_nOctaveLayers, &nOctaveLayers, sizeof(nOctaveLayers)) );
cudaSafeCall( cudaMemcpyToSymbol(c_hessianThreshold, &hessianThreshold, sizeof(hessianThreshold)) );
}
void loadOctaveConstants(int octave, int layer_rows, int layer_cols)
{
cudaSafeCall( cudaMemcpyToSymbol(c_octave, &octave, sizeof(octave)) );
cudaSafeCall( cudaMemcpyToSymbol(c_layer_rows, &layer_rows, sizeof(layer_rows)) );
cudaSafeCall( cudaMemcpyToSymbol(c_layer_cols, &layer_cols, sizeof(layer_cols)) );
}
////////////////////////////////////////////////////////////////////////
// Integral image texture
texture<unsigned char, 2, cudaReadModeElementType> imgTex(0, cudaFilterModePoint, cudaAddressModeClamp);
texture<unsigned int, 2, cudaReadModeElementType> sumTex(0, cudaFilterModePoint, cudaAddressModeClamp);
texture<unsigned int, 2, cudaReadModeElementType> maskSumTex(0, cudaFilterModePoint, cudaAddressModeClamp);
void bindImgTex(PtrStepSzb img)
{
bindTexture(&imgTex, img);
}
size_t bindSumTex(PtrStepSz<uint> sum)
{
size_t offset;
cudaChannelFormatDesc desc_sum = cudaCreateChannelDesc<uint>();
cudaSafeCall( cudaBindTexture2D(&offset, sumTex, sum.data, desc_sum, sum.cols, sum.rows, sum.step));
return offset / sizeof(uint);
}
size_t bindMaskSumTex(PtrStepSz<uint> maskSum)
{
size_t offset;
cudaChannelFormatDesc desc_sum = cudaCreateChannelDesc<uint>();
cudaSafeCall( cudaBindTexture2D(&offset, maskSumTex, maskSum.data, desc_sum, maskSum.cols, maskSum.rows, maskSum.step));
return offset / sizeof(uint);
}
template <int N> __device__ float icvCalcHaarPatternSum(const float src[][5], int oldSize, int newSize, int y, int x)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 200
typedef double real_t;
#else
typedef float real_t;
#endif
float ratio = (float)newSize / oldSize;
real_t d = 0;
#pragma unroll
for (int k = 0; k < N; ++k)
{
int dx1 = __float2int_rn(ratio * src[k][0]);
int dy1 = __float2int_rn(ratio * src[k][1]);
int dx2 = __float2int_rn(ratio * src[k][2]);
int dy2 = __float2int_rn(ratio * src[k][3]);
real_t t = 0;
t += tex2D(sumTex, x + dx1, y + dy1);
t -= tex2D(sumTex, x + dx1, y + dy2);
t -= tex2D(sumTex, x + dx2, y + dy1);
t += tex2D(sumTex, x + dx2, y + dy2);
d += t * src[k][4] / ((dx2 - dx1) * (dy2 - dy1));
}
return (float)d;
}
////////////////////////////////////////////////////////////////////////
// Hessian
__constant__ float c_DX [3][5] = { {0, 2, 3, 7, 1}, {3, 2, 6, 7, -2}, {6, 2, 9, 7, 1} };
__constant__ float c_DY [3][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} };
__constant__ float c_DXY[4][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} };
__host__ __device__ __forceinline__ int calcSize(int octave, int layer)
{
/* Wavelet size at first layer of first octave. */
const int HAAR_SIZE0 = 9;
/* Wavelet size increment between layers. This should be an even number,
such that the wavelet sizes in an octave are either all even or all odd.
This ensures that when looking for the neighbours of a sample, the layers
above and below are aligned correctly. */
const int HAAR_SIZE_INC = 6;
return (HAAR_SIZE0 + HAAR_SIZE_INC * layer) << octave;
}
__global__ void icvCalcLayerDetAndTrace(PtrStepf det, PtrStepf trace)
{
// Determine the indices
const int gridDim_y = gridDim.y / (c_nOctaveLayers + 2);
const int blockIdx_y = blockIdx.y % gridDim_y;
const int blockIdx_z = blockIdx.y / gridDim_y;
const int j = threadIdx.x + blockIdx.x * blockDim.x;
const int i = threadIdx.y + blockIdx_y * blockDim.y;
const int layer = blockIdx_z;
const int size = calcSize(c_octave, layer);
const int samples_i = 1 + ((c_img_rows - size) >> c_octave);
const int samples_j = 1 + ((c_img_cols - size) >> c_octave);
// Ignore pixels where some of the kernel is outside the image
const int margin = (size >> 1) >> c_octave;
if (size <= c_img_rows && size <= c_img_cols && i < samples_i && j < samples_j)
{
const float dx = icvCalcHaarPatternSum<3>(c_DX , 9, size, (i << c_octave), (j << c_octave));
const float dy = icvCalcHaarPatternSum<3>(c_DY , 9, size, (i << c_octave), (j << c_octave));
const float dxy = icvCalcHaarPatternSum<4>(c_DXY, 9, size, (i << c_octave), (j << c_octave));
det.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx * dy - 0.81f * dxy * dxy;
trace.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx + dy;
}
}
void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
int octave, int nOctaveLayers)
{
const int min_size = calcSize(octave, 0);
const int max_samples_i = 1 + ((img_rows - min_size) >> octave);
const int max_samples_j = 1 + ((img_cols - min_size) >> octave);
dim3 threads(16, 16);
dim3 grid;
grid.x = divUp(max_samples_j, threads.x);
grid.y = divUp(max_samples_i, threads.y) * (nOctaveLayers + 2);
icvCalcLayerDetAndTrace<<<grid, threads>>>(det, trace);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// NONMAX
__constant__ float c_DM[5] = {0, 0, 9, 9, 1};
struct WithMask
{
static __device__ bool check(int sum_i, int sum_j, int size)
{
float ratio = (float)size / 9.0f;
float d = 0;
int dx1 = __float2int_rn(ratio * c_DM[0]);
int dy1 = __float2int_rn(ratio * c_DM[1]);
int dx2 = __float2int_rn(ratio * c_DM[2]);
int dy2 = __float2int_rn(ratio * c_DM[3]);
float t = 0;
t += tex2D(maskSumTex, sum_j + dx1, sum_i + dy1);
t -= tex2D(maskSumTex, sum_j + dx1, sum_i + dy2);
t -= tex2D(maskSumTex, sum_j + dx2, sum_i + dy1);
t += tex2D(maskSumTex, sum_j + dx2, sum_i + dy2);
d += t * c_DM[4] / ((dx2 - dx1) * (dy2 - dy1));
return (d >= 0.5f);
}
};
template <typename Mask>
__global__ void icvFindMaximaInLayer(const PtrStepf det, const PtrStepf trace, int4* maxPosBuffer,
unsigned int* maxCounter)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
extern __shared__ float N9[];
// The hidx variables are the indices to the hessian buffer.
const int gridDim_y = gridDim.y / c_nOctaveLayers;
const int blockIdx_y = blockIdx.y % gridDim_y;
const int blockIdx_z = blockIdx.y / gridDim_y;
const int layer = blockIdx_z + 1;
const int size = calcSize(c_octave, layer);
// Ignore pixels without a 3x3x3 neighbourhood in the layer above
const int margin = ((calcSize(c_octave, layer + 1) >> 1) >> c_octave) + 1;
const int j = threadIdx.x + blockIdx.x * (blockDim.x - 2) + margin - 1;
const int i = threadIdx.y + blockIdx_y * (blockDim.y - 2) + margin - 1;
// Is this thread within the hessian buffer?
const int zoff = blockDim.x * blockDim.y;
const int localLin = threadIdx.x + threadIdx.y * blockDim.x + zoff;
N9[localLin - zoff] = det.ptr(c_layer_rows * (layer - 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
N9[localLin ] = det.ptr(c_layer_rows * (layer ) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
N9[localLin + zoff] = det.ptr(c_layer_rows * (layer + 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
__syncthreads();
if (i < c_layer_rows - margin && j < c_layer_cols - margin && threadIdx.x > 0 && threadIdx.x < blockDim.x - 1 && threadIdx.y > 0 && threadIdx.y < blockDim.y - 1)
{
float val0 = N9[localLin];
if (val0 > c_hessianThreshold)
{
// Coordinates for the start of the wavelet in the sum image. There
// is some integer division involved, so don't try to simplify this
// (cancel out sampleStep) without checking the result is the same
const int sum_i = (i - ((size >> 1) >> c_octave)) << c_octave;
const int sum_j = (j - ((size >> 1) >> c_octave)) << c_octave;
if (Mask::check(sum_i, sum_j, size))
{
// Check to see if we have a max (in its 26 neighbours)
const bool condmax = val0 > N9[localLin - 1 - blockDim.x - zoff]
&& val0 > N9[localLin - blockDim.x - zoff]
&& val0 > N9[localLin + 1 - blockDim.x - zoff]
&& val0 > N9[localLin - 1 - zoff]
&& val0 > N9[localLin - zoff]
&& val0 > N9[localLin + 1 - zoff]
&& val0 > N9[localLin - 1 + blockDim.x - zoff]
&& val0 > N9[localLin + blockDim.x - zoff]
&& val0 > N9[localLin + 1 + blockDim.x - zoff]
&& val0 > N9[localLin - 1 - blockDim.x]
&& val0 > N9[localLin - blockDim.x]
&& val0 > N9[localLin + 1 - blockDim.x]
&& val0 > N9[localLin - 1 ]
&& val0 > N9[localLin + 1 ]
&& val0 > N9[localLin - 1 + blockDim.x]
&& val0 > N9[localLin + blockDim.x]
&& val0 > N9[localLin + 1 + blockDim.x]
&& val0 > N9[localLin - 1 - blockDim.x + zoff]
&& val0 > N9[localLin - blockDim.x + zoff]
&& val0 > N9[localLin + 1 - blockDim.x + zoff]
&& val0 > N9[localLin - 1 + zoff]
&& val0 > N9[localLin + zoff]
&& val0 > N9[localLin + 1 + zoff]
&& val0 > N9[localLin - 1 + blockDim.x + zoff]
&& val0 > N9[localLin + blockDim.x + zoff]
&& val0 > N9[localLin + 1 + blockDim.x + zoff]
;
if(condmax)
{
unsigned int ind = atomicInc(maxCounter,(unsigned int) -1);
if (ind < c_max_candidates)
{
const int laplacian = (int) copysignf(1.0f, trace.ptr(layer * c_layer_rows + i)[j]);
maxPosBuffer[ind] = make_int4(j, i, layer, laplacian);
}
}
}
}
}
#endif
}
void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
int img_rows, int img_cols, int octave, bool use_mask, int nOctaveLayers)
{
const int layer_rows = img_rows >> octave;
const int layer_cols = img_cols >> octave;
const int min_margin = ((calcSize(octave, 2) >> 1) >> octave) + 1;
dim3 threads(16, 16);
dim3 grid;
grid.x = divUp(layer_cols - 2 * min_margin, threads.x - 2);
grid.y = divUp(layer_rows - 2 * min_margin, threads.y - 2) * nOctaveLayers;
const size_t smem_size = threads.x * threads.y * 3 * sizeof(float);
if (use_mask)
icvFindMaximaInLayer<WithMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter);
else
icvFindMaximaInLayer<WithOutMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// INTERPOLATION
__global__ void icvInterpolateKeypoint(const PtrStepf det, const int4* maxPosBuffer,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
const int4 maxPos = maxPosBuffer[blockIdx.x];
const int j = maxPos.x - 1 + threadIdx.x;
const int i = maxPos.y - 1 + threadIdx.y;
const int layer = maxPos.z - 1 + threadIdx.z;
__shared__ float N9[3][3][3];
N9[threadIdx.z][threadIdx.y][threadIdx.x] = det.ptr(c_layer_rows * layer + i)[j];
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0)
{
__shared__ float dD[3];
//dx
dD[0] = -0.5f * (N9[1][1][2] - N9[1][1][0]);
//dy
dD[1] = -0.5f * (N9[1][2][1] - N9[1][0][1]);
//ds
dD[2] = -0.5f * (N9[2][1][1] - N9[0][1][1]);
__shared__ float H[3][3];
//dxx
H[0][0] = N9[1][1][0] - 2.0f * N9[1][1][1] + N9[1][1][2];
//dxy
H[0][1]= 0.25f * (N9[1][2][2] - N9[1][2][0] - N9[1][0][2] + N9[1][0][0]);
//dxs
H[0][2]= 0.25f * (N9[2][1][2] - N9[2][1][0] - N9[0][1][2] + N9[0][1][0]);
//dyx = dxy
H[1][0] = H[0][1];
//dyy
H[1][1] = N9[1][0][1] - 2.0f * N9[1][1][1] + N9[1][2][1];
//dys
H[1][2]= 0.25f * (N9[2][2][1] - N9[2][0][1] - N9[0][2][1] + N9[0][0][1]);
//dsx = dxs
H[2][0] = H[0][2];
//dsy = dys
H[2][1] = H[1][2];
//dss
H[2][2] = N9[0][1][1] - 2.0f * N9[1][1][1] + N9[2][1][1];
__shared__ float x[3];
if (solve3x3(H, dD, x))
{
if (::fabs(x[0]) <= 1.f && ::fabs(x[1]) <= 1.f && ::fabs(x[2]) <= 1.f)
{
// if the step is within the interpolation region, perform it
const int size = calcSize(c_octave, maxPos.z);
const int sum_i = (maxPos.y - ((size >> 1) >> c_octave)) << c_octave;
const int sum_j = (maxPos.x - ((size >> 1) >> c_octave)) << c_octave;
const float center_i = sum_i + (float)(size - 1) / 2;
const float center_j = sum_j + (float)(size - 1) / 2;
const float px = center_j + x[0] * (1 << c_octave);
const float py = center_i + x[1] * (1 << c_octave);
const int ds = size - calcSize(c_octave, maxPos.z - 1);
const float psize = roundf(size + x[2] * ds);
/* The sampling intervals and wavelet sized for selecting an orientation
and building the keypoint descriptor are defined relative to 's' */
const float s = psize * 1.2f / 9.0f;
/* To find the dominant orientation, the gradients in x and y are
sampled in a circle of radius 6s using wavelets of size 4s.
We ensure the gradient wavelet size is even to ensure the
wavelet pattern is balanced and symmetric around its center */
const int grad_wav_size = 2 * __float2int_rn(2.0f * s);
// check when grad_wav_size is too big
if ((c_img_rows + 1) >= grad_wav_size && (c_img_cols + 1) >= grad_wav_size)
{
// Get a new feature index.
unsigned int ind = atomicInc(featureCounter, (unsigned int)-1);
if (ind < c_max_features)
{
featureX[ind] = px;
featureY[ind] = py;
featureLaplacian[ind] = maxPos.w;
featureOctave[ind] = c_octave;
featureSize[ind] = psize;
featureHessian[ind] = N9[1][1][1];
}
} // grad_wav_size check
} // If the subpixel interpolation worked
}
} // If this is thread 0.
#endif
}
void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter)
{
dim3 threads;
threads.x = 3;
threads.y = 3;
threads.z = 3;
dim3 grid;
grid.x = maxCounter;
icvInterpolateKeypoint<<<grid, threads>>>(det, maxPosBuffer, featureX, featureY, featureLaplacian, featureOctave, featureSize, featureHessian, featureCounter);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// Orientation
#define ORI_SEARCH_INC 5
#define ORI_WIN 60
#define ORI_SAMPLES 113
__constant__ float c_aptX[ORI_SAMPLES] = {-6, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6};
__constant__ float c_aptY[ORI_SAMPLES] = {0, -3, -2, -1, 0, 1, 2, 3, -4, -3, -2, -1, 0, 1, 2, 3, 4, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, 0};
__constant__ float c_aptW[ORI_SAMPLES] = {0.001455130288377404f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.001455130288377404f, 0.0035081731621176f, 0.00720730796456337f, 0.01261763460934162f, 0.0188232995569706f, 0.02392910048365593f, 0.02592208795249462f, 0.02392910048365593f, 0.0188232995569706f, 0.01261763460934162f, 0.00720730796456337f, 0.0035081731621176f, 0.001455130288377404f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.001707611023448408f, 0.0035081731621176f, 
0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.001455130288377404f};
__constant__ float c_NX[2][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}};
__constant__ float c_NY[2][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}};
__global__ void icvCalcOrientation(const float* featureX, const float* featureY, const float* featureSize, float* featureDir)
{
__shared__ float s_X[128];
__shared__ float s_Y[128];
__shared__ float s_angle[128];
__shared__ float s_sumx[32 * 4];
__shared__ float s_sumy[32 * 4];
/* The sampling intervals and wavelet sized for selecting an orientation
and building the keypoint descriptor are defined relative to 's' */
const float s = featureSize[blockIdx.x] * 1.2f / 9.0f;
/* To find the dominant orientation, the gradients in x and y are
sampled in a circle of radius 6s using wavelets of size 4s.
We ensure the gradient wavelet size is even to ensure the
wavelet pattern is balanced and symmetric around its center */
const int grad_wav_size = 2 * __float2int_rn(2.0f * s);
// check when grad_wav_size is too big
if ((c_img_rows + 1) < grad_wav_size || (c_img_cols + 1) < grad_wav_size)
return;
// Calc X, Y, angle and store it to shared memory
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
float X = 0.0f, Y = 0.0f, angle = 0.0f;
if (tid < ORI_SAMPLES)
{
const float margin = (float)(grad_wav_size - 1) / 2.0f;
const int x = __float2int_rn(featureX[blockIdx.x] + c_aptX[tid] * s - margin);
const int y = __float2int_rn(featureY[blockIdx.x] + c_aptY[tid] * s - margin);
if (y >= 0 && y < (c_img_rows + 1) - grad_wav_size &&
x >= 0 && x < (c_img_cols + 1) - grad_wav_size)
{
X = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NX, 4, grad_wav_size, y, x);
Y = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NY, 4, grad_wav_size, y, x);
angle = atan2f(Y, X);
if (angle < 0)
angle += 2.0f * CV_PI_F;
angle *= 180.0f / CV_PI_F;
}
}
s_X[tid] = X;
s_Y[tid] = Y;
s_angle[tid] = angle;
__syncthreads();
float bestx = 0, besty = 0, best_mod = 0;
#if __CUDA_ARCH__ >= 200
#pragma unroll
#endif
for (int i = 0; i < 18; ++i)
{
const int dir = (i * 4 + threadIdx.y) * ORI_SEARCH_INC;
float sumx = 0.0f, sumy = 0.0f;
int d = ::abs(__float2int_rn(s_angle[threadIdx.x]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx = s_X[threadIdx.x];
sumy = s_Y[threadIdx.x];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 32]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 32];
sumy += s_Y[threadIdx.x + 32];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 64]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 64];
sumy += s_Y[threadIdx.x + 64];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 96]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 96];
sumy += s_Y[threadIdx.x + 96];
}
plus<float> op;
device::reduce<32>(smem_tuple(s_sumx + threadIdx.y * 32, s_sumy + threadIdx.y * 32),
thrust::tie(sumx, sumy), threadIdx.x, thrust::make_tuple(op, op));
const float temp_mod = sumx * sumx + sumy * sumy;
if (temp_mod > best_mod)
{
best_mod = temp_mod;
bestx = sumx;
besty = sumy;
}
__syncthreads();
}
if (threadIdx.x == 0)
{
s_X[threadIdx.y] = bestx;
s_Y[threadIdx.y] = besty;
s_angle[threadIdx.y] = best_mod;
}
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0)
{
int bestIdx = 0;
if (s_angle[1] > s_angle[bestIdx])
bestIdx = 1;
if (s_angle[2] > s_angle[bestIdx])
bestIdx = 2;
if (s_angle[3] > s_angle[bestIdx])
bestIdx = 3;
float kp_dir = atan2f(s_Y[bestIdx], s_X[bestIdx]);
if (kp_dir < 0)
kp_dir += 2.0f * CV_PI_F;
kp_dir *= 180.0f / CV_PI_F;
kp_dir = 360.0f - kp_dir;
if (::fabsf(kp_dir - 360.f) < numeric_limits<float>::epsilon())
kp_dir = 0.f;
featureDir[blockIdx.x] = kp_dir;
}
}
#undef ORI_SEARCH_INC
#undef ORI_WIN
#undef ORI_SAMPLES
void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures)
{
dim3 threads;
threads.x = 32;
threads.y = 4;
dim3 grid;
grid.x = nFeatures;
icvCalcOrientation<<<grid, threads>>>(featureX, featureY, featureSize, featureDir);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// Descriptors
#define PATCH_SZ 20
__constant__ float c_DW[PATCH_SZ * PATCH_SZ] =
{
3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f,
8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f
};
// Samples a rotated window around a keypoint straight from the bound image
// texture. (i, j) are row/column coordinates inside the canonical (unrotated)
// window; they are shifted by win_offset and rotated by (cos_dir, sin_dir)
// before the tex2D fetch.
struct WinReader
{
    typedef uchar elem_type;
    __device__ __forceinline__ uchar operator ()(int i, int j) const
    {
        // Rotate the local offset into image space around the keypoint center.
        float pixel_x = centerX + (win_offset + j) * cos_dir + (win_offset + i) * sin_dir;
        float pixel_y = centerY - (win_offset + j) * sin_dir + (win_offset + i) * cos_dir;
        return tex2D(imgTex, pixel_x, pixel_y);
    }
    float centerX;     // keypoint center, image coordinates
    float centerY;
    float win_offset;  // offset of the window corner from the center (negative)
    float cos_dir;     // cos/sin of the descriptor orientation (set by calc_dx_dy)
    float sin_dir;
    int width;         // sampled window extent (width == height == win_size)
    int height;
};
// Forward declaration.
__device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir,
    float& dx, float& dy);
// Computes this thread's Gaussian-weighted Haar responses (dx, dy) for the
// feature handled by blockIdx.x. Expects a 32x16 thread block: threads with
// tid < (PATCH_SZ+1)^2 = 441 cooperatively load the rotated patch into shared
// memory; afterwards each thread of a 5x5 cell of one of the 4x4 sub-blocks
// produces its response. All other threads leave dx = dy = 0.
__device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir,
    float& dx, float& dy)
{
    __shared__ float s_PATCH[PATCH_SZ + 1][PATCH_SZ + 1];
    dx = dy = 0.0f;
    WinReader win;
    win.centerX = featureX[blockIdx.x];
    win.centerY = featureY[blockIdx.x];
    // The sampling intervals and wavelet sized for selecting an orientation
    // and building the keypoint descriptor are defined relative to 's'
    const float s = featureSize[blockIdx.x] * 1.2f / 9.0f;
    // Extract a window of pixels around the keypoint of size 20s
    const int win_size = (int)((PATCH_SZ + 1) * s);
    win.width = win.height = win_size;
    // Nearest neighbour version (faster)
    win.win_offset = -(win_size - 1.0f) / 2.0f;
    // Descriptor orientation is the negated keypoint direction, converted to radians.
    float descriptor_dir = 360.0f - featureDir[blockIdx.x];
    if (::fabsf(descriptor_dir - 360.f) < numeric_limits<float>::epsilon())
        descriptor_dir = 0.f;
    descriptor_dir *= CV_PI_F / 180.0f;
    sincosf(descriptor_dir, &win.sin_dir, &win.cos_dir);
    // Cooperative load: tid enumerates the (PATCH_SZ+1) x (PATCH_SZ+1) patch.
    const int tid = threadIdx.y * blockDim.x + threadIdx.x;
    const int xLoadInd = tid % (PATCH_SZ + 1);
    const int yLoadInd = tid / (PATCH_SZ + 1);
    if (yLoadInd < (PATCH_SZ + 1))
    {
        if (s > 1)
        {
            // Shrinking the window: average over an s x s area.
            AreaFilter<WinReader> filter(win, s, s);
            s_PATCH[yLoadInd][xLoadInd] = filter(yLoadInd, xLoadInd);
        }
        else
        {
            // Growing the window: bilinear interpolation.
            LinearFilter<WinReader> filter(win);
            s_PATCH[yLoadInd][xLoadInd] = filter(yLoadInd * s, xLoadInd * s);
        }
    }
    __syncthreads();
    // threadIdx.x indexes a 5x5 cell; threadIdx.y selects one of the 4x4 sub-blocks.
    const int xPatchInd = threadIdx.x % 5;
    const int yPatchInd = threadIdx.x / 5;
    if (yPatchInd < 5)
    {
        const int xBlockInd = threadIdx.y % 4;
        const int yBlockInd = threadIdx.y / 4;
        const int xInd = xBlockInd * 5 + xPatchInd;
        const int yInd = yBlockInd * 5 + yPatchInd;
        // Gaussian weight for this sample, then 2x2 Haar wavelet responses.
        const float dw = c_DW[yInd * PATCH_SZ + xInd];
        dx = (s_PATCH[yInd    ][xInd + 1] - s_PATCH[yInd][xInd] + s_PATCH[yInd + 1][xInd + 1] - s_PATCH[yInd + 1][xInd    ]) * dw;
        dy = (s_PATCH[yInd + 1][xInd    ] - s_PATCH[yInd][xInd] + s_PATCH[yInd + 1][xInd + 1] - s_PATCH[yInd    ][xInd + 1]) * dw;
    }
}
// SURF-64 descriptor kernel: one block per feature, 32x16 threads. Each row
// (threadIdx.y) is a 32-lane warp owning one 4x4 sub-region; it sums
// dx, dy, |dx|, |dy| over its 5x5 samples (inactive lanes contribute zeros)
// and lane 0 writes them as one float4, giving 16 * 4 = 64 values.
__global__ void compute_descriptors_64(PtrStep<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
{
    __shared__ float smem[32 * 16];
    float* sRow = smem + threadIdx.y * 32;  // this warp's private scratch row
    float dx, dy;
    calc_dx_dy(featureX, featureY, featureSize, featureDir, dx, dy);
    float dxabs = ::fabsf(dx);
    float dyabs = ::fabsf(dy);
    plus<float> op;
    // Four 32-lane sums via device::reduce; the totals are left in lane 0,
    // which writes the result below.
    reduce<32>(sRow, dx, threadIdx.x, op);
    reduce<32>(sRow, dy, threadIdx.x, op);
    reduce<32>(sRow, dxabs, threadIdx.x, op);
    reduce<32>(sRow, dyabs, threadIdx.x, op);
    float4* descriptors_block = descriptors.ptr(blockIdx.x) + threadIdx.y;
    // write dx, dy, |dx|, |dy|
    if (threadIdx.x == 0)
        *descriptors_block = make_float4(dx, dy, dxabs, dyabs);
}
// SURF-128 descriptor kernel: like compute_descriptors_64, but each
// sub-region stores 8 values (two float4 entries per warp): dx and |dx|
// split by the sign of dy, then dy and |dy| split by the sign of dx.
__global__ void compute_descriptors_128(PtrStep<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
{
    __shared__ float smem[32 * 16];
    float* sRow = smem + threadIdx.y * 32;  // this warp's private scratch row
    float dx, dy;
    calc_dx_dy(featureX, featureY, featureSize, featureDir, dx, dy);
    float4* descriptors_block = descriptors.ptr(blockIdx.x) + threadIdx.y * 2;
    plus<float> op;
    // First half: split dx by the sign of dy.
    float d1 = 0.0f;
    float d2 = 0.0f;
    float abs1 = 0.0f;
    float abs2 = 0.0f;
    if (dy >= 0)
    {
        d1 = dx;
        abs1 = ::fabsf(dx);
    }
    else
    {
        d2 = dx;
        abs2 = ::fabsf(dx);
    }
    reduce<32>(sRow, d1, threadIdx.x, op);
    reduce<32>(sRow, d2, threadIdx.x, op);
    reduce<32>(sRow, abs1, threadIdx.x, op);
    reduce<32>(sRow, abs2, threadIdx.x, op);
    // write dx (dy >= 0), |dx| (dy >= 0), dx (dy < 0), |dx| (dy < 0)
    if (threadIdx.x == 0)
        descriptors_block[0] = make_float4(d1, abs1, d2, abs2);
    // Second half: split dy by the sign of dx, reusing the same scratch row.
    if (dx >= 0)
    {
        d1 = dy;
        abs1 = ::fabsf(dy);
        d2 = 0.0f;
        abs2 = 0.0f;
    }
    else
    {
        d1 = 0.0f;
        abs1 = 0.0f;
        d2 = dy;
        abs2 = ::fabsf(dy);
    }
    reduce<32>(sRow, d1, threadIdx.x, op);
    reduce<32>(sRow, d2, threadIdx.x, op);
    reduce<32>(sRow, abs1, threadIdx.x, op);
    reduce<32>(sRow, abs2, threadIdx.x, op);
    // write dy (dx >= 0), |dy| (dx >= 0), dy (dx < 0), |dy| (dx < 0)
    if (threadIdx.x == 0)
        descriptors_block[1] = make_float4(d1, abs1, d2, abs2);
}
// L2-normalizes one descriptor per block: BLOCK_DIM_X threads (one per
// element) reduce the squared values, thread 0 takes the square root, and
// each element is divided by the resulting length in place.
template <int BLOCK_DIM_X> __global__ void normalize_descriptors(PtrStepf descriptors)
{
    __shared__ float smem[BLOCK_DIM_X];
    __shared__ float s_len;
    // no need for thread ID
    float* descriptor_base = descriptors.ptr(blockIdx.x);
    // read in the unnormalized descriptor values (squared)
    const float val = descriptor_base[threadIdx.x];
    float len = val * val;
    reduce<BLOCK_DIM_X>(smem, len, threadIdx.x, plus<float>());
    if (threadIdx.x == 0)
        s_len = ::sqrtf(len);
    __syncthreads();
    // normalize and store in output
    // NOTE(review): no guard for s_len == 0 (an all-zero descriptor would
    // yield NaNs) - presumably impossible upstream; confirm if inputs can be flat.
    descriptor_base[threadIdx.x] = val / s_len;
}
// Computes SURF descriptors (64- or 128-element, chosen by descriptors.cols)
// for nFeatures keypoints, then L2-normalizes each row. Two separate launches
// per path; each is checked and synchronized.
void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures)
{
    const dim3 block(32, 16);
    if (descriptors.cols != 64)
    {
        // Extended 128-element descriptor.
        compute_descriptors_128<<<nFeatures, block>>>(descriptors, featureX, featureY, featureSize, featureDir);
        cudaSafeCall( cudaGetLastError() );
        cudaSafeCall( cudaDeviceSynchronize() );
        normalize_descriptors<128><<<nFeatures, 128>>>((PtrStepSzf) descriptors);
        cudaSafeCall( cudaGetLastError() );
        cudaSafeCall( cudaDeviceSynchronize() );
    }
    else
    {
        // Standard 64-element descriptor.
        compute_descriptors_64<<<nFeatures, block>>>(descriptors, featureX, featureY, featureSize, featureDir);
        cudaSafeCall( cudaGetLastError() );
        cudaSafeCall( cudaDeviceSynchronize() );
        normalize_descriptors<64><<<nFeatures, 64>>>((PtrStepSzf) descriptors);
        cudaSafeCall( cudaGetLastError() );
        cudaSafeCall( cudaDeviceSynchronize() );
    }
}
} // namespace surf
}}} // namespace cv { namespace gpu { namespace device
#endif /* HAVE_OPENCV_GPU */
|
dd12e78fae342d027b654c08e23a4d61ffbaa17a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/op_kernel.h"
namespace dragon {
namespace kernel {
/*! SoftmaxCrossEntropy <T = float32, Device = CUDA> */
// Element-wise cross-entropy term: loss = -t * log(max(p, FLT_MIN)).
// The FLT_MIN clamp avoids log(0) for zero probabilities.
template <typename T>
__global__ void _SoftmaxCrossEntropy(
    const int count,
    const T* prob,
    const T* target,
    T* loss) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        loss[idx] = -target[idx] * log(max(prob[idx], FLT_MIN));
    }
}
// float32 launcher: one thread per element on the context's stream.
template <> void SoftmaxCrossEntropy<float, CUDAContext>(
    const int count,
    const float* prob,
    const float* target,
    float* loss,
    CUDAContext* ctx) {
    _SoftmaxCrossEntropy<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
            0, ctx->cuda_stream() >> >
        (count, prob, target, loss);
}
} // namespace kernel
} // namepsace dragon
#endif // WITH_CUDA | dd12e78fae342d027b654c08e23a4d61ffbaa17a.cu | #ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/op_kernel.h"
namespace dragon {
namespace kernel {
/*! SoftmaxCrossEntropy <T = float32, Device = CUDA> */
// Element-wise cross-entropy term: loss = -t * log(max(p, FLT_MIN)).
// The FLT_MIN clamp avoids log(0) for zero probabilities.
template <typename T>
__global__ void _SoftmaxCrossEntropy(
    const int count,
    const T* prob,
    const T* target,
    T* loss) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        loss[idx] = -target[idx] * log(max(prob[idx], FLT_MIN));
    }
}
// float32 launcher: one thread per element on the context's stream.
template <> void SoftmaxCrossEntropy<float, CUDAContext>(
    const int count,
    const float* prob,
    const float* target,
    float* loss,
    CUDAContext* ctx) {
    _SoftmaxCrossEntropy<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
            0, ctx->cuda_stream() >> >
        (count, prob, target, loss);
}
} // namespace kernel
} // namepsace dragon
#endif // WITH_CUDA |
6169eedcb37c162e45511fdf5a29755c606c599d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@generated d Wed Nov 14 22:53:49 2012
*/
#include "common_magma.h"
/*********************************************************/
/*
* Swap diagonal blocks of two matrices.
* For more detail see the description below.
*/
// One thread block swaps one nb x nb diagonal block between dA1 and dA2:
// thread tx owns row tx of the block and exchanges its nb entries.
// Launched with nb threads per block (see magmablas_dswapdblk).
__global__ void
magmagpu_dswapdblk(magma_int_t nb,
                   double *dA1, magma_int_t ldda1, magma_int_t inca1,
                   double *dA2, magma_int_t ldda2, magma_int_t inca2 )
{
    const magma_int_t tx = threadIdx.x;
    const magma_int_t bx = blockIdx.x;
    // Advance each pointer to row tx of the bx-th diagonal block;
    // (ldda + inca) steps down the diagonal by one row and one column.
    dA1 += tx + bx * nb * (ldda1 + inca1);
    dA2 += tx + bx * nb * (ldda2 + inca2);
    double tmp;
    #pragma unroll
    for( magma_int_t i = 0; i < nb; i++ ){
        tmp = dA1[i*ldda1];
        dA1[i*ldda1] = dA2[i*ldda2];
        dA2[i*ldda2] = tmp;
    }
}
extern "C" void
magmablas_dswapdblk(magma_int_t n, magma_int_t nb,
                    double *dA1, magma_int_t ldda1, magma_int_t inca1,
                    double *dA2, magma_int_t ldda2, magma_int_t inca2 )
{
/*  -- MAGMA (version 1.3.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       November 2012
    Purpose
    =======
    This is an auxiliary MAGMA routine. It swaps diagonal blocks
    of size nb x nb between matrices dA1 and dA2 on the GPU.
    The number of blocks swapped is (n-1)/nb. For i = 1 .. (n-1)/nb matrices
    dA1 + i * nb * (ldda1 + inca1) and
    dA2 + i * nb * (ldda2 + inca2) are swapped.
*/
    // One block per diagonal block, nb threads each.
    // NOTE(review): assumes nb does not exceed the device's max threads per
    // block - confirm with callers. The trailing partial block (when nb does
    // not divide n-1) is intentionally not swapped, per the description above.
    magma_int_t blocksize = nb;
    dim3 blocks( (n-1) / blocksize, 1, 1);
    hipLaunchKernelGGL(( magmagpu_dswapdblk), dim3(blocks), dim3(blocksize), 0, magma_stream , nb,
                        dA1, ldda1, inca1,
                        dA2, ldda2, inca2 );
}
| 6169eedcb37c162e45511fdf5a29755c606c599d.cu | /*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@generated d Wed Nov 14 22:53:49 2012
*/
#include "common_magma.h"
/*********************************************************/
/*
* Swap diagonal blocks of two matrices.
* For more detail see the description below.
*/
// One thread block swaps one nb x nb diagonal block between dA1 and dA2:
// thread tx owns row tx of the block and exchanges its nb entries.
// Launched with nb threads per block (see magmablas_dswapdblk).
__global__ void
magmagpu_dswapdblk(magma_int_t nb,
                   double *dA1, magma_int_t ldda1, magma_int_t inca1,
                   double *dA2, magma_int_t ldda2, magma_int_t inca2 )
{
    const magma_int_t tx = threadIdx.x;
    const magma_int_t bx = blockIdx.x;
    // Advance each pointer to row tx of the bx-th diagonal block;
    // (ldda + inca) steps down the diagonal by one row and one column.
    dA1 += tx + bx * nb * (ldda1 + inca1);
    dA2 += tx + bx * nb * (ldda2 + inca2);
    double tmp;
    #pragma unroll
    for( magma_int_t i = 0; i < nb; i++ ){
        tmp = dA1[i*ldda1];
        dA1[i*ldda1] = dA2[i*ldda2];
        dA2[i*ldda2] = tmp;
    }
}
extern "C" void
magmablas_dswapdblk(magma_int_t n, magma_int_t nb,
                    double *dA1, magma_int_t ldda1, magma_int_t inca1,
                    double *dA2, magma_int_t ldda2, magma_int_t inca2 )
{
/*  -- MAGMA (version 1.3.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       November 2012
    Purpose
    =======
    This is an auxiliary MAGMA routine. It swaps diagonal blocks
    of size nb x nb between matrices dA1 and dA2 on the GPU.
    The number of blocks swapped is (n-1)/nb. For i = 1 .. (n-1)/nb matrices
    dA1 + i * nb * (ldda1 + inca1) and
    dA2 + i * nb * (ldda2 + inca2) are swapped.
*/
    // One block per diagonal block, nb threads each.
    // NOTE(review): assumes nb does not exceed the device's max threads per
    // block - confirm with callers. The trailing partial block (when nb does
    // not divide n-1) is intentionally not swapped, per the description above.
    magma_int_t blocksize = nb;
    dim3 blocks( (n-1) / blocksize, 1, 1);
    magmagpu_dswapdblk<<< blocks, blocksize, 0, magma_stream >>>( nb,
                        dA1, ldda1, inca1,
                        dA2, ldda2, inca2 );
}
|
c10ae9dbb2423bd15baf911141a52ae08bc6e40b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef WITH_CUDA
#include <cmath>
#include "core/context_cuda.h"
#include "core/tensor.h"
#include "utils/cuda_device.h"
#include "utils/op_kernel.h"
#include "utils/math_functions.h"
#include "utils/cast.h"
namespace dragon {
namespace kernel {
/******************** activation.dropout ********************/
// Dropout forward: keep and rescale x[i] when its random mask word exceeds
// `thresh`, zero it otherwise; (mask > thresh) evaluates to 0/1.
template<typename T>
__global__ void _Dropout(
    const int count,
    const uint32_t thresh,
    const T scale,
    const T* x,
    const uint32_t* mask,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        y[idx] = x[idx] * (mask[idx] > thresh) * scale;
    }
}
// float32 launcher: fills `mask` with uniform random 32-bit words, then
// applies elementwise dropout with thresh = prob * UINT_MAX (so each element
// is dropped with probability ~prob).
template<> void Dropout<float, CUDAContext>(
    const int count,
    float prob,
    float scale,
    const float* x,
    uint32_t* mask,
    float* y,
    CUDAContext* ctx) {
    uint32_t thresh = static_cast<uint32_t>(UINT_MAX * prob);
    math::RandomUniform<uint32_t, CUDAContext>(
        count, float(0), float(UINT_MAX), mask, ctx);
    _Dropout<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
            0, ctx->cuda_stream() >> >(
        count, thresh, scale, x, mask, y);
}
// Dropout backward: propagate (rescaled) gradient only where the forward
// pass kept the element, i.e. where mask[i] > thresh.
template <typename T>
__global__ void _DropoutGrad(
    const int count,
    const uint32_t thresh,
    const T scale,
    const T* dy,
    const uint32_t* mask,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        dx[idx] = dy[idx] * (mask[idx] > thresh) * scale;
    }
}
// float32 launcher: reuses the mask produced by the forward pass.
template<> void DropoutGrad<float, CUDAContext>(
    const int count,
    float prob,
    float scale,
    const float* dy,
    const uint32_t* mask,
    float* dx,
    CUDAContext* ctx) {
    uint32_t thresh = static_cast<uint32_t>(UINT_MAX * prob);
    _DropoutGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
            0, ctx->cuda_stream() >> >(
        count, thresh, scale, dy, mask, dx);
}
/******************** activation.prelu ********************/
// PReLU forward, single shared slope: y = x for x > 0, w[0] * x for x < 0.
template <typename T>
__global__ void _PRelu(
    const int count,
    const int channels,
    const int dim,
    const T* x,
    const T* w,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        y[idx] = (x[idx] > 0) * x[idx] +
            (x[idx] < 0) * x[idx] * w[0];
    }
}
// PReLU forward, per-channel slope, NCHW layout:
// channel index c = (idx / dim) % channels.
template <typename T>
__global__ void _PReluNCHW(
    const int count,
    const int channels,
    const int dim,
    const T* x,
    const T* w,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int c = (idx / dim) % channels;
        y[idx] = (x[idx] > 0) * x[idx] +
            (x[idx] < 0) * x[idx] * w[c];
    }
}
// PReLU forward, per-channel slope, NHWC layout:
// channel index c = idx % channels.
template <typename T>
__global__ void _PReluNHWC(
    const int count,
    const int channels,
    const int dim,
    const T* x,
    const T* w,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int c = idx % channels;
        y[idx] = (x[idx] > 0) * x[idx] +
            (x[idx] < 0) * x[idx] * w[c];
    }
}
// float32 PReLU dispatcher: picks the shared-slope kernel or the layout-aware
// per-channel kernel (NCHW / NHWC); aborts on an unknown data format.
template<> void PRelu<float, CUDAContext>(const int count,
    const int channels,
    const int dim,
    const bool channel_shared,
    const string& data_format,
    const float* x,
    const float* w,
    float* y,
    CUDAContext* ctx) {
    if (channel_shared) {
        _PRelu<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >> >(count,
            channels, dim, x, w, y);
    } else {
        if (data_format == "NCHW") {
            _PReluNCHW<float>
                << < CUDA_BLOCKS(count), CUDA_THREADS,
                    0, ctx->cuda_stream() >> >(count,
                channels, dim, x, w, y);
        } else if (data_format == "NHWC") {
            _PReluNHWC<float>
                << < CUDA_BLOCKS(count), CUDA_THREADS,
                    0, ctx->cuda_stream() >> >(count,
                channels, dim, x, w, y);
        } else LOG(FATAL) << "Unknown data format: " << data_format;
    }
}
// PReLU backward w.r.t. the input, single shared slope:
// dx = dy for x > 0, w[0] * dy for x <= 0.
template <typename T>
__global__ void _PReluGrad(
    const int count,
    const int channels,
    const int dim,
    const T* dy,
    const T* x,
    const T* w,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        dx[idx] = dy[idx] * (
            (x[idx] > 0) + (x[idx] <= 0) * w[0]
        );
    }
}
// PReLU input gradient, per-channel slope, NCHW layout.
template <typename T>
__global__ void _PReluGradNCHW(
    const int count,
    const int channels,
    const int dim,
    const T* dy,
    const T* x,
    const T* w,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int c = (idx / dim) % channels;
        dx[idx] = dy[idx] * (
            (x[idx] > 0) + (x[idx] <= 0) * w[c]
        );
    }
}
// PReLU input gradient, per-channel slope, NHWC layout.
template <typename T>
__global__ void _PReluGradNHWC(
    const int count,
    const int channels,
    const int dim,
    const T* dy,
    const T* x,
    const T* w,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int c = idx % channels;
        dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[c]);
    }
}
// float32 PReLU input-gradient dispatcher, mirroring the forward dispatch:
// shared-slope kernel or layout-aware per-channel kernel (NCHW / NHWC).
template<> void PReluGrad<float, CUDAContext>(
    const int count,
    const int channels,
    const int dim,
    const bool channel_shared,
    const string& data_format,
    const float* dy,
    const float* x,
    const float* w,
    float* dx,
    CUDAContext* ctx) {
    if (channel_shared) {
        _PReluGrad<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >> >(count,
            channels, dim, dy, x, w, dx);
    } else {
        if (data_format == "NCHW") {
            _PReluGradNCHW<float>
                << < CUDA_BLOCKS(count), CUDA_THREADS,
                    0, ctx->cuda_stream() >> >(count,
                channels, dim, dy, x, w, dx);
        } else if (data_format == "NHWC") {
            _PReluGradNHWC<float>
                << < CUDA_BLOCKS(count), CUDA_THREADS,
                    0, ctx->cuda_stream() >> >(count,
                channels, dim, dy, x, w, dx);
        } else LOG(FATAL) << "Unknown data format: " << data_format;
    }
}
// Slope-gradient pre-reduction: collapses the outer `rows` dimension so that
// bcast_dw[idx] = sum_n dy[idx + n*row_offset] * x[...] * (x[...] <= 0)
// for idx in [0, row_offset). The per-channel reduction is finished on the
// host side (dot / gemv in PReluWGrad).
template <typename T>
__global__ void _PReluWGradBcast(
    const int count,
    const int rows,
    const int row_offset,
    const T* dy,
    const T* x,
    T* bcast_dw) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        bcast_dw[idx] = dy[idx] * x[idx] * (x[idx] <= 0);
        for (int n = 1; n < rows; n++) {
            const int cur_idx = idx + n * row_offset;
            bcast_dw[idx] +=
                dy[cur_idx] * x[cur_idx] * (x[cur_idx] <= 0);
        }
    }
}
// float32 PReLU slope gradient: first collapses the outer rows into a
// channels*dim buffer, then reduces that buffer into dw - a single scalar
// (dot against the all-ones `multiplier`) when the slope is shared, or a
// per-channel GEMV against ones otherwise. Gradients are accumulated into dw
// (beta = 1 in the Gemv / AddScalar paths).
template<> void PReluWGrad<float, CUDAContext>(
    const int rows,
    const int row_offset,
    const int channels,
    const int dim,
    const bool channel_shared,
    const string& data_format,
    const float* dy,
    const float* x,
    const float* multiplier,
    float* bcast_dw,
    float* dw,
    CUDAContext* ctx) {
    const int cdim = channels * dim;
    _PReluWGradBcast<float>
        << < CUDA_BLOCKS(cdim), CUDA_THREADS,
            0, ctx->cuda_stream() >> >(
        cdim, rows, row_offset, dy, x, bcast_dw);
    if (channel_shared) {
        float w_sum;
        math::Dot<float, CUDAContext>(channels * dim,
            bcast_dw, multiplier, &w_sum, ctx);
        math::AddScalar<float, CUDAContext>(1, w_sum, dw, ctx);
    } else {
        if (data_format == "NCHW") {
            math::Gemv<float, CUDAContext>(
                CblasNoTrans, channels, dim,
                1.0, bcast_dw, multiplier, 1.0, dw, ctx);
        } else if (data_format == "NHWC") {
            math::Gemv<float, CUDAContext>(
                CblasTrans, dim, channels,
                1.0, bcast_dw, multiplier, 1.0, dw, ctx);
        } else LOG(FATAL) << "Unknown data format: " << data_format;
    }
}
/******************** activation.elu ********************/
// ELU forward: y = x for x > 0, alpha * (e^x - 1) otherwise.
template <typename T>
__global__ void _Elu(
    const int count,
    const T* x,
    const float alpha,
    T* y) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const T v = x[i];
        if (v > 0) {
            y[i] = v;
        } else {
            y[i] = alpha * (exp(v) - 1);
        }
    }
}
// float32 launcher: one thread per element on the context's stream.
template<> void Elu<float, CUDAContext>(
    const int count,
    const float alpha,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _Elu<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
            0, ctx->cuda_stream() >> >(count, x, alpha, y);
}
// ELU backward: dx = dy for y > 0, dy * (alpha + y) otherwise
// (uses the forward output y, since alpha + y = alpha * e^x there).
template <typename T>
__global__ void _EluGrad(
    const int count,
    const float alpha,
    const T* dy,
    const T* y,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        if (y[i] > 0) {
            dx[i] = dy[i];
        } else {
            dx[i] = dy[i] * (alpha + y[i]);
        }
    }
}
// float32 launcher: one thread per element on the context's stream.
template<> void EluGrad<float, CUDAContext>(
    const int count,
    const float alpha,
    const float* dy,
    const float* y,
    float* dx,
    CUDAContext* ctx) {
    _EluGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
            0, ctx->cuda_stream() >> >(count, alpha, dy, y, dx);
}
/******************** activation.relu ********************/
// (Leaky) ReLU forward: y = x for x > 0, slope * x otherwise
// (slope == 0 gives the plain ReLU).
template <typename T>
__global__ void _Relu(
    const int count,
    const float slope,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const T v = x[i];
        if (v > 0) {
            y[i] = v;
        } else {
            y[i] = v * slope;
        }
    }
}
// float32 launcher: one thread per element on the context's stream.
template<> void Relu<float, CUDAContext>(
    const int count,
    const float slope,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _Relu<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
            0, ctx->cuda_stream() >> >(count, slope, x, y);
}
// (Leaky) ReLU backward: dx = dy for y > 0, slope * dy otherwise.
template <typename T>
__global__ void _ReluGrad(
    const int count,
    const float slope,
    const T* dy,
    const T* y,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        if (y[i] > 0) {
            dx[i] = dy[i];
        } else {
            dx[i] = dy[i] * slope;
        }
    }
}
// float32 launcher: one thread per element on the context's stream.
template<> void ReluGrad<float, CUDAContext>(
    const int count,
    const float slope,
    const float* dy,
    const float* y,
    float* dx,
    CUDAContext* ctx) {
    _ReluGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
            0, ctx->cuda_stream() >> >(count, slope, dy, y, dx);
}
/******************** activation.selu ********************/
// SELU forward with the standard fixed constants (scale = 1.0507,
// scale * alpha = 1.7581): y = 1.0507 * x for x > 0, 1.7581 * (e^x - 1)
// otherwise. The literals are cast to T so the float instantiation stays in
// single precision instead of silently promoting every element to double
// (bare 1.0507 / 1.7581 are double literals); T = double is unaffected.
template <typename T>
__global__ void _SElu(
    const int count,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        y[idx] = x[idx] > 0 ? T(1.0507) * x[idx] :
            T(1.7581) * (exp(x[idx]) - T(1));
    }
}
// float32 launcher: one thread per element on the context's stream.
template<> void SElu<float, CUDAContext>(
    const int count,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _SElu<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
            0, ctx->cuda_stream() >> >(count, x, y);
}
// SELU backward: dx = 1.0507 * dy for y > 0, (1.7581 + y) * dy otherwise
// (uses the forward output y, since 1.7581 + y = 1.7581 * e^x there).
// Literals are cast to T to keep the float instantiation in single precision
// (bare 1.0507 / 1.7581 are double literals and would promote every element).
template <typename T>
__global__ void _SEluGrad(
    const int count,
    const T* dy,
    const T* y,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        dx[idx] = y[idx] > 0 ? T(1.0507) * dy[idx] :
            (T(1.7581) + y[idx]) * dy[idx];
    }
}
// float32 launcher: one thread per element on the context's stream.
template<> void SEluGrad<float, CUDAContext>(
    const int count,
    const float* dy,
    const float* y,
    float* dx,
    CUDAContext* ctx) {
    _SEluGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
            0, ctx->cuda_stream() >> >(count, dy, y, dx);
}
/******************** activation.sigmoid ********************/
// Logistic function: 1 / (1 + e^-x).
template <typename T>
__device__ T _SigmoidUnit(const T x) {
    return T(1) / (T(1) + exp(-x));
}
// Elementwise sigmoid forward.
template <typename T>
__global__ void _Sigmoid(
    const int n,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, n) {
        y[idx] = _SigmoidUnit<T>(x[idx]);
    }
}
// float32 launcher: one thread per element on the context's stream.
template<> void Sigmoid<float, CUDAContext>(
    const int count,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _Sigmoid<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
            0, ctx->cuda_stream() >> >(count, x, y);
}
// Sigmoid backward, expressed via the forward output: dx = dy * y * (1 - y).
template <typename T>
__global__ void _SigmoidGrad(
    const int count,
    const T* dy,
    const T* y,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        dx[idx] = dy[idx] * y[idx] * (1 - y[idx]);
    }
}
// float32 launcher: one thread per element on the context's stream.
template<> void SigmoidGrad<float, CUDAContext>(
    const int count,
    const float* dy,
    const float* y,
    float* dx,
    CUDAContext* ctx) {
    _SigmoidGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
            0, ctx->cuda_stream() >> >(count, dy, y, dx);
}
/******************** activation.softmax ********************/
// Softmax helper kernels. Data layout is (outer_dim, classes, inner_dim);
// `scale` is an (outer_dim x inner_dim) scratch buffer holding one value per
// softmax slice, broadcast back over the class axis by the subtract/div passes.
// Per-slice max over the class axis (for the numerically-stable shift).
template <typename T>
__global__ void _SoftmaxMaxClass(
    const int outer_dim,
    const int classes,
    const int inner_dim,
    const T* x,
    T* scale) {
    CUDA_1D_KERNEL_LOOP(idx, outer_dim * inner_dim) {
        int o_idx = idx / inner_dim;
        int i_idx = idx % inner_dim;
        T max_val = -FLT_MAX;
        for (int c = 0; c < classes; c++)
            max_val = max(
                x[(o_idx * classes + c) * inner_dim + i_idx], max_val
            );
        scale[idx] = max_val;
    }
}
// y[o, c, i] -= scale[o, i] (broadcast subtract over the class axis).
template <typename T>
__global__ void _SoftmaxSubtract(
    const int count,
    const int classes,
    const int inner_dim,
    const T* scale,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        int o_idx = idx / inner_dim / classes;
        int i_idx = idx % inner_dim;
        y[idx] -= scale[o_idx * inner_dim + i_idx];
    }
}
// In-place elementwise exponentiation.
template <typename T>
__global__ void _SoftmaxExp(
    const int count,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        y[idx] = exp(y[idx]);
    }
}
// Per-slice sum over the class axis (the softmax normalizer).
template <typename T>
__global__ void _SoftmaxSumClass(
    const int outer_dim,
    const int classes,
    const int inner_dim,
    const T* y,
    T* scale) {
    CUDA_1D_KERNEL_LOOP(idx, outer_dim * inner_dim) {
        int o_idx = idx / inner_dim;
        int i_idx = idx % inner_dim;
        T sum = 0;
        for (int c = 0; c < classes; c++)
            sum += y[(o_idx * classes + c) * inner_dim + i_idx];
        scale[idx] = sum;
    }
}
// y[o, c, i] /= scale[o, i] (broadcast divide over the class axis).
template <typename T>
__global__ void _SoftmaxDiv(
    const int count,
    const int classes,
    const int inner_dim,
    const T* scale,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        int o_idx = idx / inner_dim / classes;
        int i_idx = idx % inner_dim;
        y[idx] /= scale[o_idx * inner_dim + i_idx];
    }
}
template<> void Softmax<float, CUDAContext>(
const int count,
const int classes,
const int outer_dim,
const int inner_dim,
const float* sum_multiplier,
const float* x,
float* scale,
float* y,
CUDAContext* ctx) {
const int num_preds = inner_dim * outer_dim;
_SoftmaxMaxClass<float>
<< < CUDA_BLOCKS(num_preds), CUDA_THREADS,
0, ctx->cuda_stream() >> >(
outer_dim, classes, inner_dim, x, scale);
_SoftmaxSubtract<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(
count, classes, inner_dim, scale, y);
_SoftmaxExp<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count, y);
_SoftmaxSumClass<float>
<< < CUDA_BLOCKS(num_preds), CUDA_THREADS,
0, ctx->cuda_stream() >> >(
outer_dim, classes, inner_dim, y, scale);
_SoftmaxDiv<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(
count, classes, inner_dim, scale, y);
}
// Per (outer, inner) position, reduce dot = sum_c(dy_c * y_c) over the
// class axis into scale; used by the softmax gradient below.
template <typename T>
__global__ void _SoftmaxDot(
    const int outer_dim,
    const int classes,
    const int inner_dim,
    const T* dy,
    const T* y,
    T* scale) {
    CUDA_1D_KERNEL_LOOP(idx, outer_dim * inner_dim) {
        int o_idx = idx / inner_dim;
        int i_idx = idx % inner_dim;
        T dot = 0;
        for (int c = 0; c < classes; c++)
            dot += (
                y[(o_idx * classes + c) * inner_dim + i_idx] *
                    dy[(o_idx * classes + c) * inner_dim + i_idx]
            );
        scale[idx] = dot;
    }
}
// Softmax backward: dx = (dx - dot) * y, reusing _SoftmaxSubtract for the
// broadcast subtraction. `sum_multiplier` is unused by this CUDA path.
// NOTE(review): dx is read before being fully written, so the caller
// presumably pre-fills dx with dy — confirm against the operator.
template<> void SoftmaxGrad<float, CUDAContext>(
    const int count,
    const int classes,
    const int outer_dim,
    const int inner_dim,
    const float* sum_multiplier,
    const float* dy,
    const float* y,
    float* scale,
    float* dx,
    CUDAContext* ctx) {
    const int num_preds = inner_dim * outer_dim;
    _SoftmaxDot<float>
        << < CUDA_BLOCKS(num_preds), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 outer_dim, classes, inner_dim, dy, y, scale);
    _SoftmaxSubtract<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 count, classes,inner_dim, scale, dx);
    math::Mul<float, CUDAContext>(count, dx, y, dx, ctx);
}
/******************** activation.tanh ********************/
// Element-wise hyperbolic tangent: y = tanh(x).
template <typename T>
__global__ void _Tanh(
    const int count,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        y[i] = tanh(x[i]);
    }
}
// float instantiation: launches the kernel on the context's stream.
template<> void Tanh<float, CUDAContext>(
    const int count,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _Tanh<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, x, y);
}
// Tanh backward: dx = dy * (1 - y^2), where y is the forward tanh output.
template <typename T>
__global__ void _TanhGrad(
    const int count,
    const T* dy,
    const T* y,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        dx[i] = dy[i] * (1 - y[i] * y[i]);
    }
}
// float instantiation: launches the kernel on the context's stream.
template<> void TanhGrad<float, CUDAContext>(
    const int count,
    const float* dy,
    const float* y,
    float* dx,
    CUDAContext* ctx) {
    _TanhGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, dy, y, dx);
}
/******************** arithmetic.scale ********************/
// Channel-wise scale without bias ("OBias" = zero/omitted bias):
// y = alpha[c] * x, where c = (idx / inner_dim) % scale_dim.
template <typename T>
__global__ void _AffineWithOBias(
    const int count,
    const int scale_dim,
    const int inner_dim,
    const T* x,
    const T* alpha,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int scale_idx = (idx / inner_dim) % scale_dim;
        y[idx] = alpha[scale_idx] * x[idx];
    }
}
// Channel-wise affine: y = alpha[c] * x + beta[c].
template <typename T>
__global__ void _AffineWithBias(
    const int count,
    const int scale_dim,
    const int inner_dim,
    const T* x,
    const T* alpha,
    const T* beta,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int scale_idx = (idx / inner_dim) % scale_dim;
        y[idx] = alpha[scale_idx] * x[idx] + beta[scale_idx];
    }
}
// Dispatches to the with-bias kernel when beta is provided, otherwise to the
// scale-only kernel. `beta_multiplier` is unused by this CUDA path.
template<> void Affine<float, CUDAContext>(
    const int count,
    const int outer_dim,
    const int scale_dim,
    const int inner_dim,
    const float* x,
    const float* alpha,
    const float* beta,
    const float* beta_multiplier,
    float* y,
    CUDAContext* ctx) {
    if (beta != nullptr) {
        _AffineWithBias<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(
                     count, scale_dim, inner_dim,
                         x, alpha, beta, y);
    } else {
        _AffineWithOBias<float>
            << <CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >> >(
                    count, scale_dim, inner_dim,
                        x, alpha, y);
    }
}
// Affine backward w.r.t. x: dx = alpha[c] * dy (same kernel as the
// bias-less forward, with dy as input).
template <> void AffineGrad<float, CUDAContext>(
    const int count,
    const int outer_dim,
    const int scale_dim,
    const int inner_dim,
    const float* dy,
    const float* alpha,
    float* dx,
    CUDAContext* ctx) {
    _AffineWithOBias<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 count, scale_dim, inner_dim,
                     dy, alpha, dx);
}
/******************** arithmetic.clip ********************/
// Element-wise clip of x into [low, high].
//   y[i]    = min(max(x[i], low), high)
//   mask[i] = 1 if x[i] was inside [low, high] (gradient passes through),
//             0 if it was clipped.
// BUG FIX: the previous version assigned y[idx] twice; the second
// assignment (the low clamp) unconditionally overwrote the first, so any
// input above `high` was written back UNclamped. Clamp once instead.
template <typename T>
__global__ void _Clip(
    const int count,
    const T low,
    const T high,
    const T* x,
    T* mask,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const T val = x[idx];
        mask[idx] = (val > high || val < low) ? 0.0 : 1.0;
        y[idx] = val < low ? low : (val > high ? high : val);
    }
}
// float instantiation: launches the clip kernel on the context's stream.
template <> void Clip<float, CUDAContext>(
    const int count,
    const float low,
    const float high,
    const float* x,
    float* mask,
    float* y,
    CUDAContext* ctx) {
    _Clip<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 low, high, x, mask, y);
}
/******************** control_flow.compare ********************/
// Element-wise approximate equality: y = 1 if |a - b| < FLT_EPSILON else 0.
template <typename T>
__global__ void _Equal(
    const int count,
    const T* a,
    const T* b,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        y[idx] = fabs(a[idx] - b[idx]) < FLT_EPSILON ? 1.0 : 0.0;
    }
}
// float instantiation: launches the kernel on the context's stream.
template <> void Equal<float, CUDAContext>(
    const int count,
    const float* a,
    const float* b,
    float* y,
    CUDAContext* ctx) {
    _Equal<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, a, b, y);
}
/******************** loss.l1_loss ********************/
// Gradient of |x|: the sign function, with sign(0) == 0.
template <typename T>
__global__ void _AbsGrad(
    const int count,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const T v = dy[i];
        // v > 0 -> 1, v < 0 -> -1, v == 0 -> 0
        if (v > T(0)) dx[i] = T(1);
        else if (v < T(0)) dx[i] = T(-1);
        else dx[i] = T(0);
    }
}
// float instantiation: launches the sign kernel on the context's stream.
template<> void AbsGrad<float, CUDAContext>(
    const int count,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    _AbsGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, dy, dx);
}
/******************** loss.sigmoid_cross_entropy ********************/
// Numerically-stable sigmoid cross-entropy from raw logits:
//   loss = log(1 + exp(l - 2*l*(l >= 0))) + l * ((l >= 0) - t)
// which avoids exp overflow for large |l|. Targets < 0 mark ignored
// entries: their loss is zeroed and flags is set to 0 (1 otherwise),
// so the caller can normalize by the number of valid entries.
template <typename T>
__global__ void _SigmoidCrossEntropy(
    const int count,
    const T* logits,
    const T* targets,
    T* losses,
    T* flags) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        if (targets[idx] < 0) {
            losses[idx] = flags[idx] = 0;
        } else {
            losses[idx] = log(1 +
                exp(logits[idx] - 2 * logits[idx] * (logits[idx] >= 0))
            ) + logits[idx] * ((logits[idx] >= 0) - targets[idx]);
            flags[idx] = 1;
        }
    }
}
// float instantiation: launches the kernel on the context's stream.
template <> void SigmoidCrossEntropy<float, CUDAContext>(
    const int count,
    const float* logits,
    const float* targets,
    float* losses,
    float* flags,
    CUDAContext* ctx) {
    _SigmoidCrossEntropy<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 count, logits, targets, losses, flags);
}
// Gradient of the above: dlogits = sigmoid(logits) - targets, with
// ignored entries (target < 0) zeroed and flagged 0.
template <typename T>
__global__ void _SigmoidCrossEntropyGrad(
    const int count,
    const T* logits,
    const T* targets,
    T* dlogits,
    T* flags) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        if (targets[idx] < 0) {
            dlogits[idx] = flags[idx] = 0;
        } else {
            dlogits[idx] = 1 / (1 + exp(-logits[idx])) - targets[idx];
            flags[idx] = 1;
        }
    }
}
// float instantiation: launches the kernel on the context's stream.
template <> void SigmoidCrossEntropyGrad<float, CUDAContext>(
    const int count,
    const float* logits,
    const float* targets,
    float* dlogits,
    float* flags,
    CUDAContext* ctx) {
    _SigmoidCrossEntropyGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 count, logits, targets, dlogits, flags);
}
/******************** loss.sigmoid_focal_loss ********************/
// Sigmoid focal loss over per-class logits. For each element, the target
// label for its (outer, inner) position is read from `targets`; c1 marks a
// positive match on this class channel, c2 a valid negative (-1 targets are
// ignored). pos/neg terms use the same overflow-safe log-sigmoid trick as
// _SigmoidCrossEntropy. flags records c1 (positive count) for normalization.
template <typename T>
__global__ void _SigmoidFocalLoss(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const float pos_alpha,
    const float neg_alpha,
    const float gamma,
    const int neg_id,
    const T* logits,
    const T* targets,
    T* losses,
    T* flags) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int iix = idx % inner_dim;
        const int aix = (idx / inner_dim) % axis_dim;
        const int oix = idx / inner_dim / axis_dim;
        const int t = targets[oix * inner_dim + iix];
        // ``0`` is reserved for targets if neg id is zero
        // use ``aix + 1`` to match the targets
        T c1 = (t == (aix + (neg_id ? 0 : 1)));
        T c2 = (t != -1) & (t != (aix + (neg_id ? 0 : 1)));
        T p = 1 / (1 + exp(-logits[idx]));  // logit -> prob
        // (1 - p)^{gamma} * log(p)
        T pos_term = pow(1 - p, gamma) * log(max(p, FLT_MIN));
        // p^{gamma} * log(1 - p)
        T neg_term = pow(p, gamma) * (
            -logits[idx] * (logits[idx] >= 0) - log(
                1 + exp(logits[idx] - 2 * logits[idx] * (logits[idx] >= 0)))
        );
        losses[idx] = 0.0;
        losses[idx] += -c1 * pos_term * pos_alpha;
        losses[idx] += -c2 * neg_term * neg_alpha;
        flags[idx] = c1;
    }
}
// float instantiation: flattens (outer, axis, inner) into one launch.
template <> void SigmoidFocalLoss<float, CUDAContext>(
    const int outer_dim,
    const int axis_dim,
    const int inner_dim,
    const float pos_alpha,
    const float neg_alpha,
    const float gamma,
    const int neg_id,
    const float* logits,
    const float* targets,
    float* losses,
    float* flags,
    CUDAContext* ctx) {
    TIndex count = outer_dim * axis_dim * inner_dim;
    _SigmoidFocalLoss<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 count, axis_dim, inner_dim,
                     pos_alpha, neg_alpha, gamma, neg_id,
                         logits, targets, losses, flags);
}
// Gradient of the sigmoid focal loss w.r.t. the logits. Mirrors the forward
// kernel's c1/c2 target matching; pos/neg terms are the analytic derivatives
// of the focal terms, using the same overflow-safe log-sigmoid expression.
template <typename T>
__global__ void _SigmoidFocalLossGradient(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const float pos_alpha,
    const float neg_alpha,
    const float gamma,
    const int neg_id,
    const T* logits,
    const T* targets,
    T* dlogits,
    T* flags) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int iix = idx % inner_dim;
        const int aix = (idx / inner_dim) % axis_dim;
        const int oix = idx / inner_dim / axis_dim;
        const int t = targets[oix * inner_dim + iix];
        // ``0`` is reserved for targets if neg id is zero
        // use ``aix + 1`` to match the targets
        T c1 = (t == (aix + (neg_id ? 0 : 1)));
        T c2 = (t != -1) & (t != (aix + (neg_id ? 0 : 1)));
        T p = 1 / (1 + exp(-logits[idx]));  // logit -> prob
        // (1 - p)^{gamma} * (1 - p - gamma * p * log(p))
        T pos_term = pow((1 - p), gamma) * (
            1 - p - p * gamma * log(max(p, FLT_MIN))
        );
        // p^{gamma} * (gamma * (1 - p) * log(1-p) - p)
        T neg_term = pow(p, gamma) * (
            (-logits[idx] * (logits[idx] >= 0) - log(
                1 + exp(logits[idx] - 2 * logits[idx] * (logits[idx] >= 0)))
            ) * (1 - p) * gamma - p
        );
        dlogits[idx] = 0.0;
        dlogits[idx] += -c1 * pos_term * pos_alpha;
        dlogits[idx] += -c2 * neg_term * neg_alpha;
        flags[idx] = c1;
    }
}
// float instantiation: flattens (outer, axis, inner) into one launch.
template <> void SigmoidFocalLossGradient<float, CUDAContext>(
    const int outer_dim,
    const int axis_dim,
    const int inner_dim,
    const float pos_alpha,
    const float neg_alpha,
    const float gamma,
    const int neg_id,
    const float* logits,
    const float* targets,
    float* dlogits,
    float* flags,
    CUDAContext* ctx) {
    TIndex count = outer_dim * axis_dim * inner_dim;
    _SigmoidFocalLossGradient<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 count, axis_dim, inner_dim,
                     pos_alpha, neg_alpha, gamma, neg_id,
                         logits, targets, dlogits, flags);
}
/******************** loss.smooth_l1_loss ********************/
// Smooth-L1 (Huber-style) loss: quadratic (0.5*x^2/beta) inside |x| < beta,
// linear (|x| - 0.5*beta) outside.
template <typename T>
__global__ void _SmoothL1(
    const int count,
    const float beta,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const T val = x[idx];
        const T abs_val = abs(val);
        if (abs_val < beta) y[idx] = 0.5 * val * val / beta;
        else y[idx] = abs_val - 0.5 * beta;
    }
}
// float instantiation: launches the kernel on the context's stream.
template<> void SmoothL1<float, CUDAContext>(
    const int count,
    const float beta,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _SmoothL1<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, beta, x, y);
}
// Smooth-L1 backward: linear slope (val / beta) inside the quadratic
// region, sign(val) outside.
template <typename T>
__global__ void _SmoothL1Grad(
    const int count,
    const float beta,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const T val = dy[idx];
        const T abs_val = abs(val);
        if (abs_val < beta) dx[idx] = val / beta;
        // val > 0: 1 | val == 0: 0 | val < 0: -1
        else dx[idx] = (val > T(0)) - (val < T(0));
    }
}
// float instantiation: launches the kernel on the context's stream.
template<> void SmoothL1Grad<float, CUDAContext>(
    const int count,
    const float beta,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    _SmoothL1Grad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, beta, dy, dx);
}
/******************** loss.softmax_cross_entropy ********************/
// Element-wise cross-entropy against soft targets:
// loss = -target * log(prob), with prob clamped to FLT_MIN to avoid log(0).
template <typename T>
__global__ void _SoftmaxCrossEntropy(
    const int count,
    const T* prob,
    const T* target,
    T* loss) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        loss[idx] = -target[idx] * log(max(prob[idx], FLT_MIN));
    }
}
// float instantiation: launches the kernel on the context's stream.
template <> void SoftmaxCrossEntropy<float, CUDAContext>(
    const int count,
    const float* prob,
    const float* target,
    float* loss,
    CUDAContext* ctx) {
    _SoftmaxCrossEntropy<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, prob, target, loss);
}
/******************** loss.softmax_focal_loss ********************/
// Softmax focal loss: one thread per (outer, inner) prediction. Labels
// listed in `ignores` produce zero loss and flag 0; otherwise the loss is
// -alpha * (1 - p_t)^gamma * log(p_t) with alpha selected by whether the
// label is above neg_id. flags marks positives for normalization.
template <typename T>
__global__ void _SoftmaxFocalLoss(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const float pos_alpha,
    const float neg_alpha,
    const float gamma,
    const int neg_id,
    const T* prob,
    const T* labels,
    const int* ignores,
    const int num_ignores,
    T* losses,
    T* flags) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int oix = idx / inner_dim;
        const int iix = idx % inner_dim;
        const int label = labels[oix * inner_dim + iix];
        int k;
        for (k = 0; k < num_ignores; k++) {
            if (label == ignores[k]) {
                losses[idx] = flags[idx] = 0;
                break;
            }
        }
        if (k == num_ignores) {
            const int t = (oix * axis_dim + label) * inner_dim + iix;
            T scale = pow(1.f - prob[t], gamma);
            scale = label > neg_id ?
                pos_alpha * scale : neg_alpha * scale;
            losses[idx] = -scale * log(max(prob[t], FLT_MIN));
            flags[idx] = label > neg_id ? 1 : 0;
        }
    }
}
// float instantiation: one launch over outer_dim * inner_dim predictions.
template <> void SoftmaxFocalLoss<float, CUDAContext>(
    const int outer_dim,
    const int axis_dim,
    const int inner_dim,
    const float pos_alpha,
    const float neg_alpha,
    const float gamma,
    const int neg_id,
    const float* prob,
    const float* labels,
    const int* ignores,
    const int num_ignores,
    float* losses,
    float* flags,
    CUDAContext* ctx) {
    const int num_preds = outer_dim * inner_dim;
    _SoftmaxFocalLoss<float>
        << < CUDA_BLOCKS(num_preds), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 num_preds, axis_dim, inner_dim,
                     pos_alpha, neg_alpha, gamma, neg_id,
                         prob, labels, ignores, num_ignores,
                             losses, flags);
}
// Softmax focal loss backward: one thread per (outer, inner) prediction
// writes the whole class column of dx. Ignored labels zero the column;
// otherwise dx follows the focal-softmax chain rule with the common factor
// `grad` computed once per prediction.
template <typename T>
__global__ void _SoftmaxFocalLossGrad(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const float pos_alpha,
    const float neg_alpha,
    const float gamma,
    const int neg_id,
    const T* prob,
    const T* labels,
    const int* ignores,
    const int num_ignores,
    T* dx,
    T* flags) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int oix = idx / inner_dim;
        const int iix = idx % inner_dim;
        const int label = labels[oix * inner_dim + iix];
        int k;
        for (k = 0; k < num_ignores; k++)
            if (label == ignores[k]) break;
        if (k != num_ignores) {
            for (int c = 0; c < axis_dim; c++)
                dx[(oix * axis_dim + c) * inner_dim + iix] = 0;
            flags[idx] = 0;
        } else {
            const int t = (oix * axis_dim + label) * inner_dim + iix;
            T onemp = 1. - prob[t];
            // unstable if gamma is 0
            T grad = -gamma * pow(onemp, gamma - 1)
                        * log(max(prob[t], FLT_MIN))
                        * prob[t] + pow(onemp, gamma);
            grad = label > neg_id ?
                pos_alpha * grad : neg_alpha * grad;
            for (int c = 0; c < axis_dim; c++) {
                const int i = (oix * axis_dim + c) * inner_dim + iix;
                if (c == label) {
                    dx[i] = grad * (prob[t] - 1);
                } else {
                    dx[i] = grad * prob[i];
                }
            }
            flags[idx] = label > neg_id ? 1 : 0;
        }
    }
}
// float instantiation: one launch over outer_dim * inner_dim predictions.
template<> void SoftmaxFocalLossGrad<float, CUDAContext>(
    const int outer_dim,
    const int axis_dim,
    const int inner_dim,
    const float pos_alpha,
    const float neg_alpha,
    const float gamma,
    const int neg_id,
    const float* prob,
    const float* labels,
    const int* ignores,
    const int num_ignores,
    float* dx,
    float* flags,
    CUDAContext* ctx) {
    const int num_preds = outer_dim * inner_dim;
    _SoftmaxFocalLossGrad<float>
        << < CUDA_BLOCKS(num_preds), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 num_preds, axis_dim, inner_dim,
                     pos_alpha, neg_alpha, gamma, neg_id,
                         prob, labels, ignores, num_ignores,
                             dx, flags);
}
/******************** loss.sparse_softmax_cross_entropy ********************/
// Sparse (hard-label) softmax cross-entropy: one thread per (outer, inner)
// prediction reads its integer label and emits -log(prob[label]), clamped
// at FLT_MIN. Labels found in `ignores` get zero loss and flag 0.
template <typename Tx, typename Ty>
__global__ void _SparseSoftmaxCrossEntropy(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const Tx* prob,
    const Ty* labels,
    const int* ignores,
    const int num_ignores,
    Tx* losses,
    Tx* flags) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int oix = idx / inner_dim;
        const int iix = idx % inner_dim;
        const int label = labels[oix * inner_dim + iix];
        int k;
        for (k = 0; k < num_ignores; k++) {
            if (label == ignores[k]) {
                losses[idx] = flags[idx] = 0;
                break;
            }
        }
        if (k == num_ignores) {
            losses[idx] = -log(
                max(prob[(oix * axis_dim + label)
                    * inner_dim + iix], FLT_MIN)
            );
            flags[idx] = 1;
        }
    }
}
// float-label instantiation.
template <> void SparseSoftmaxCrossEntropy<float, float, CUDAContext>(
    const int outer_dim,
    const int axis_dim,
    const int inner_dim,
    const float* prob,
    const float* labels,
    const int* ignores,
    const int num_ignores,
    float* losses,
    float* flags,
    CUDAContext* ctx) {
    const int num_preds = outer_dim * inner_dim;
    _SparseSoftmaxCrossEntropy<float, float>
        << < CUDA_BLOCKS(num_preds), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 num_preds, axis_dim, inner_dim,
                     prob, labels, ignores, num_ignores,
                         losses, flags);
}
// int64 label instantiation.
template <> void SparseSoftmaxCrossEntropy<float, int64_t, CUDAContext>(
    const int outer_dim,
    const int axis_dim,
    const int inner_dim,
    const float* prob,
    const int64_t* labels,
    const int* ignores,
    const int num_ignores,
    float* losses,
    float* flags,
    CUDAContext* ctx) {
    const int num_preds = outer_dim * inner_dim;
    _SparseSoftmaxCrossEntropy<float, int64_t>
        << < CUDA_BLOCKS(num_preds), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 num_preds, axis_dim, inner_dim,
                     prob, labels, ignores, num_ignores,
                         losses, flags);
}
// Sparse softmax cross-entropy backward: per prediction, subtract 1 at the
// label's class position (dx -= 1), or zero the whole class column for
// ignored labels.
// NOTE(review): dx is updated in place (`-= 1`), so the caller presumably
// pre-fills dx with the softmax probabilities — confirm against the operator.
template <typename Tx, typename Ty>
__global__ void _SparseSoftmaxCrossEntropyGrad(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const Tx* prob,
    const Ty* labels,
    const int* ignores,
    const int num_ignores,
    Tx* dx,
    Tx* flags) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int oix = idx / inner_dim;
        const int iix = idx % inner_dim;
        const int label = labels[oix * inner_dim + iix];
        int k;
        for (k = 0; k < num_ignores; k++)
                if (label == ignores[k]) break;
        if (k != num_ignores) {
            for (int c = 0; c < axis_dim; c++)
                dx[(oix * axis_dim + c) * inner_dim + iix] = 0;
            flags[idx] = 0;
        } else {
            dx[(oix * axis_dim + label) * inner_dim + iix] -= 1;
            flags[idx] = 1;
        }
    }
}
// float-label instantiation.
template<> void SparseSoftmaxCrossEntropyGrad<float, float, CUDAContext>(
    const int outer_dim,
    const int axis_dim,
    const int inner_dim,
    const float* prob,
    const float* labels,
    const int* ignores,
    const int num_ignores,
    float* dx,
    float* flags,
    CUDAContext* ctx) {
    const int num_preds = outer_dim * inner_dim;
    _SparseSoftmaxCrossEntropyGrad<float, float>
        << < CUDA_BLOCKS(num_preds), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 num_preds, axis_dim, inner_dim,
                     prob, labels, ignores, num_ignores,
                         dx, flags);
}
// int64 label instantiation.
template<> void SparseSoftmaxCrossEntropyGrad<float, int64_t, CUDAContext>(
    const int outer_dim,
    const int axis_dim,
    const int inner_dim,
    const float* prob,
    const int64_t* labels,
    const int* ignores,
    const int num_ignores,
    float* dx,
    float* flags,
    CUDAContext* ctx) {
    const int num_preds = outer_dim * inner_dim;
    _SparseSoftmaxCrossEntropyGrad<float, int64_t>
        << < CUDA_BLOCKS(num_preds), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 num_preds, axis_dim, inner_dim,
                     prob, labels, ignores, num_ignores,
                         dx, flags);
}
/******************** misc.astype ********************/
// Element-wise dtype cast: b[i] = (Tb)a[i], via implicit conversion.
template <typename Ta, typename Tb>
__global__ void _TypeA2B(
    const int count,
    const Ta* a,
    Tb* b) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        b[idx] = a[idx];
    }
}
// Instantiate one TypeA2B wrapper per (source, destination) type pair.
#define DEFINE_TYPE_A2B(type_a, type_b) \
    template <> void TypeA2B<type_a, type_b, CUDAContext>( \
        const int count, \
        const type_a* a, \
        type_b* b, \
        CUDAContext* ctx) { \
        _TypeA2B<type_a, type_b> \
            << < CUDA_BLOCKS(count), CUDA_THREADS, \
                 0, ctx->cuda_stream() >> >(count, a, b); \
    }
// Instantiate casts from type_a to each of the five supported dtypes.
#define DEFINE_TYPE_A2ALL(type_a) \
    DEFINE_TYPE_A2B(type_a, float); \
    DEFINE_TYPE_A2B(type_a, double); \
    DEFINE_TYPE_A2B(type_a, int); \
    DEFINE_TYPE_A2B(type_a, int64_t); \
    DEFINE_TYPE_A2B(type_a, uint8_t);
DEFINE_TYPE_A2ALL(float);
DEFINE_TYPE_A2ALL(double);
DEFINE_TYPE_A2ALL(int);
DEFINE_TYPE_A2ALL(int64_t);
DEFINE_TYPE_A2ALL(uint8_t);
/******************** misc.image_data ********************/
// Produce NCHW output from raw image data: the output index is decomposed
// as NCHW but the read `x[((n * H + h) * W + w) * C + c]` is NHWC, so this
// kernel transposes NHWC -> NCHW while optionally applying per-channel
// mean subtraction and std division.
template <typename Tx, typename Ty>
__global__ void _ImageData_NCHW(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const float* mean_values,
    const float* std_values,
    const Tx* x,
    Ty* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int w = idx % W;
        const int h = (idx / W) % H;
        const int c = (idx / W / H) % C;
        const int n = idx / W / H / C;
        Ty raw_value = x[((n * H + h) * W + w) * C + c];
        if (mean_values != nullptr) raw_value -= mean_values[c];
        if (std_values != nullptr) raw_value /= std_values[c];
        y[idx] = raw_value;
    }
}
// NHWC passthrough: same layout in and out, only per-channel normalization.
template <typename Tx, typename Ty>
__global__ void _ImageData_NHWC(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const float* mean_values,
    const float* std_values,
    const Tx* x,
    Ty* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int c = idx % C;
        Ty raw_value = x[idx];
        if (mean_values != nullptr) raw_value -= mean_values[c];
        if (std_values != nullptr) raw_value /= std_values[c];
        y[idx] = raw_value;
    }
}
// float -> float instantiation; dispatches on the requested output layout.
template <> void ImageData<float, float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const float* mean_values,
    const float* std_values,
    const string& data_format,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    if (data_format == "NCHW") {
        _ImageData_NCHW<float, float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     N, C, H, W, mean_values, std_values, x, y);
    } else if (data_format == "NHWC") {
        _ImageData_NHWC<float, float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     N, C, H, W, mean_values, std_values, x, y);
    } else LOG(FATAL) << "Unknown data format: " << data_format;
}
// uint8 -> float instantiation; dispatches on the requested output layout.
template <> void ImageData<uint8_t, float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const float* mean_values,
    const float* std_values,
    const string& data_format,
    const uint8_t* x,
    float* y,
    CUDAContext* ctx) {
    if (data_format == "NCHW") {
        _ImageData_NCHW<uint8_t, float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     N, C, H, W, mean_values, std_values, x, y);
    } else if (data_format == "NHWC") {
        _ImageData_NHWC<uint8_t, float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     N, C, H, W, mean_values, std_values, x, y);
    } else LOG(FATAL) << "Unknown data format: " << data_format;
}
/******************** ndarray.arange ********************/
// Fill y with the arithmetic sequence start, start + step, start + 2*step, ...
template <typename T>
__global__ void _Arange(
    const int count,
    const int start,
    const int step,
    T* y) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        // integer arithmetic, then an implicit cast to T on store
        y[i] = step * i + start;
    }
}
// float instantiation: launches the fill kernel on the context's stream.
template<> void Arange<float, CUDAContext>(
    const int count,
    const int start,
    const int step,
    float* y,
    CUDAContext* ctx) {
    _Arange<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, start, step, y);
}
// int instantiation: launches the fill kernel on the context's stream.
template<> void Arange<int, CUDAContext>(
    const int count,
    const int start,
    const int step,
    int* y,
    CUDAContext* ctx) {
    _Arange<int>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, start, step, y);
}
/******************** ndarray.argreduce ********************/
// Per (outer, inner) position, scan the class axis and record the index of
// the maximum value. `neg_bound` is the scan's initial value (-FLT_MAX for
// float), so ties keep the first maximum.
template <typename T>
__global__ void _Argmax(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const T neg_bound,
    const T* x,
    int64_t* indices) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int oix = idx / inner_dim;
        const int iix = idx % inner_dim;
        int max_idx = -1; T max_val = neg_bound;
        for (int j = 0; j < axis_dim; ++j) {
            const T val = x[(oix * axis_dim + j)
                * inner_dim + iix];
            if (val > max_val) {
                max_val = val;
                max_idx = j;
            }
        }
        indices[idx] = max_idx;
    }
}
// Variant that also writes the maximum value alongside its index.
template <typename T>
__global__ void _Argmax_v2(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const T neg_bound,
    const T* x,
    int64_t* indices,
    T* values) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int oix = idx / inner_dim;
        const int iix = idx % inner_dim;
        int max_idx = -1; T max_val = neg_bound;
        for (int j = 0; j < axis_dim; ++j) {
            const T val = x[(oix * axis_dim + j)
                * inner_dim + iix];
            if (val > max_val) {
                max_val = val;
                max_idx = j;
            }
        }
        indices[idx] = max_idx;
        values[idx] = max_val;
    }
}
// float instantiation: only top_k == 1 is supported; picks the values
// variant when a values buffer is given.
template<> void Argmax<float, CUDAContext>(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const int top_k,
    const float* x,
    int64_t* indices,
    float* values,
    CUDAContext* ctx) {
    CHECK_EQ(top_k, 1) << "top_k > 1 is not supported with CUDA";
    if (values == nullptr) {
        _Argmax<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     axis_dim, inner_dim, -FLT_MAX,
                         x, indices);
    } else {
        _Argmax_v2<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     axis_dim, inner_dim, -FLT_MAX,
                         x, indices, values);
    }
}
// Per (outer, inner) position, scan the class axis and record the index of
// the minimum value. `pos_bound` is the scan's initial value (FLT_MAX for
// float), so ties keep the first minimum.
template <typename T>
__global__ void _Argmin(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const T pos_bound,
    const T* x,
    int64_t* indices) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int oix = idx / inner_dim;
        const int iix = idx % inner_dim;
        int min_idx = -1; T min_val = pos_bound;
        for (int j = 0; j < axis_dim; ++j) {
            const T val = x[(oix * axis_dim + j)
                * inner_dim + iix];
            if (val < min_val) {
                min_val = val;
                min_idx = j;
            }
        }
        indices[idx] = min_idx;
    }
}
// Variant that also writes the minimum value alongside its index.
template <typename T>
__global__ void _Argmin_v2(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const T pos_bound,
    const T* x,
    int64_t* indices,
    T* values) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int oix = idx / inner_dim;
        const int iix = idx % inner_dim;
        int min_idx = -1; T min_val = pos_bound;
        for (int j = 0; j < axis_dim; ++j) {
            const T val = x[(oix * axis_dim + j)
                * inner_dim + iix];
            if (val < min_val) {
                min_val = val;
                min_idx = j;
            }
        }
        indices[idx] = min_idx;
        values[idx] = min_val;
    }
}
// float instantiation: only top_k == 1 is supported; picks the values
// variant when a values buffer is given.
template<> void Argmin<float, CUDAContext>(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const int top_k,
    const float* x,
    int64_t* indices,
    float* values,
    CUDAContext* ctx) {
    CHECK_EQ(top_k, 1) << "top_k > 1 is not supported with CUDA";
    if (values == nullptr) {
        _Argmin<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     axis_dim, inner_dim, FLT_MAX,
                         x, indices);
    } else {
        _Argmin_v2<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     axis_dim, inner_dim, FLT_MAX,
                         x, indices, values);
    }
}
/******************** ndarray.gather ********************/
// Canonicalize axis indices in place: negative entries are shifted up by
// `dim` so they land in [0, dim); non-negative entries are left untouched.
template <typename T>
__global__ void _CanonicalAxis(
    const int count,
    const int dim,
    T* y) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const T v = y[i];
        y[i] = v < 0 ? v + dim : v;
    }
}
// int instantiation: launches the in-place canonicalize kernel.
template <> void CanonicalAxis<int, CUDAContext>(
    const int count,
    const int dim,
    int* y,
    CUDAContext* ctx) {
    _CanonicalAxis<int>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, dim, y);
}
// Gather along an axis: for each output element, look up its slice index in
// `indices` and copy the corresponding input slice element.
template <typename T>
__global__ void _Gather(
    const int count,
    const int outer_dim,
    const int inner_dim,
    const int x_slice_dim,
    const int y_slice_dim,
    const int* indices,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int outer_idx = idx / inner_dim / y_slice_dim;
        const int slice_idx = idx % inner_dim;
        const int y_idx_offset = (idx / inner_dim) % y_slice_dim;
        const int x_idx_offset = indices[y_idx_offset];
        const int x_idx = (outer_idx * x_slice_dim + x_idx_offset)
            * inner_dim + slice_idx;
        y[idx] = x[x_idx];
    }
}
// float instantiation.
template <> void Gather<float, CUDAContext>(
    const int count,
    const int outer_dim,
    const int inner_dim,
    const int x_slice_dim,
    const int y_slice_dim,
    const int* indices,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _Gather<float>
        << <CUDA_BLOCKS(count), CUDA_THREADS,
            0, ctx->cuda_stream() >> >(count,
                outer_dim, inner_dim,
                    x_slice_dim, y_slice_dim,
                        indices, x, y);
}
// int instantiation.
template <> void Gather<int, CUDAContext>(
    const int count,
    const int outer_dim,
    const int inner_dim,
    const int x_slice_dim,
    const int y_slice_dim,
    const int* indices,
    const int* x,
    int* y,
    CUDAContext* ctx) {
    _Gather<int>
        << <CUDA_BLOCKS(count), CUDA_THREADS,
            0, ctx->cuda_stream() >> >(count,
                outer_dim, inner_dim,
                    x_slice_dim, y_slice_dim,
                        indices, x, y);
}
// Gather backward: scatter-add dy into dx at the gathered positions.
// atomicAdd is required because several output elements may map to the
// same input slice when `indices` contains duplicates.
template <typename T>
__global__ void _GatherGrad(
    const int count,
    const int outer_dim,
    const int inner_dim,
    const int x_slice_dim,
    const int y_slice_dim,
    const int* indices,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int outer_idx = idx / inner_dim / y_slice_dim;
        const int slice_idx = idx % inner_dim;
        const int y_idx_offset = (idx / inner_dim) % y_slice_dim;
        const int x_idx_offset = indices[y_idx_offset];
        const int x_idx = (outer_idx * x_slice_dim + x_idx_offset)
            * inner_dim + slice_idx;
        atomicAdd(dx + x_idx, dy[idx]);
    }
}
// float instantiation.
template <> void GatherGrad<float, CUDAContext>(
    const int count,
    const int outer_dim,
    const int inner_dim,
    const int x_slice_dim,
    const int y_slice_dim,
    const int* indices,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    _GatherGrad<float>
        << <CUDA_BLOCKS(count), CUDA_THREADS,
            0, ctx->cuda_stream() >> >(count,
                outer_dim, inner_dim,
                    x_slice_dim, y_slice_dim,
                        indices, dy, dx);
}
// int instantiation.
template <> void GatherGrad<int, CUDAContext>(
    const int count,
    const int outer_dim,
    const int inner_dim,
    const int x_slice_dim,
    const int y_slice_dim,
    const int* indices,
    const int* dy,
    int* dx,
    CUDAContext* ctx) {
    _GatherGrad<int>
        << <CUDA_BLOCKS(count), CUDA_THREADS,
            0, ctx->cuda_stream() >> >(count,
                outer_dim, inner_dim,
                    x_slice_dim, y_slice_dim,
                        indices, dy, dx);
}
/******************** ndarray.concat ********************/
// Concat forward: copy one input tensor (x) into its region of the output
// (y), offset by `concat_offset` slices along the concat axis. One thread
// per x element; `count` is x's element count.
template <typename T>
__global__ void _Concat(
    const int count,
    const int outer_dim,
    const int inner_dim,
    const int x_concat_dim,
    const int y_concat_dim,
    const int concat_offset,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int tmp = x_concat_dim * inner_dim;
        const int outer_idx = idx / tmp;
        const int concat_idx = idx % tmp;
        const int y_idx = (outer_idx * y_concat_dim + concat_offset)
            * inner_dim + concat_idx;
        y[y_idx] = x[idx];
    }
}
// float instantiation.
template <> void Concat<float, CUDAContext>(
    const int count,
    const int outer_dim,
    const int inner_dim,
    const int x_concat_dim,
    const int y_concat_dim,
    const int concat_offset,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _Concat<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 outer_dim, inner_dim,
                     x_concat_dim, y_concat_dim,
                         concat_offset, x, y);
}
// Concat backward: the exact inverse copy — slice this input's region back
// out of dy into dx, using the same index mapping as the forward kernel.
template <typename T>
__global__ void _ConcatGrad(
    const int count,
    const int outer_dim,
    const int inner_dim,
    const int x_concat_dim,
    const int y_concat_dim,
    const int concat_offset,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int tmp = x_concat_dim * inner_dim;
        const int outer_idx = idx / tmp;
        const int concat_idx = idx % tmp;
        const int y_idx = (outer_idx * y_concat_dim + concat_offset)
            * inner_dim + concat_idx;
        dx[idx] = dy[y_idx];
    }
}
// float instantiation.
template <> void ConcatGrad<float, CUDAContext>(
    const int count,
    const int outer_dim,
    const int inner_dim,
    const int x_concat_dim,
    const int y_concat_dim,
    const int concat_offset,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    _ConcatGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 outer_dim, inner_dim,
                     x_concat_dim, y_concat_dim,
                         concat_offset, dy, dx);
}
/******************** ndarray.crop ********************/
// 1-D crop along one axis: y takes the window [start, start + ex_dim)
// of x's axis of length `dim`. One thread per output element.
template<typename T>
__global__ void _Crop1D(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int start,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int i = idx % inner_dim;
        const int ex_d = (idx / inner_dim) % ex_dim;
        const int o = idx / inner_dim / ex_dim;
        y[idx] = x[(o * dim + ex_d + start) * inner_dim + i];
    }
}
// int instantiation.
template<> void Crop1D<int, CUDAContext>(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int start,
    const int* x,
    int* y,
    CUDAContext* ctx) {
    _Crop1D<int>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 dim, ex_dim, inner_dim, start, x, y);
}
// float instantiation.
template<> void Crop1D<float, CUDAContext>(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int start,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _Crop1D<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 dim, ex_dim, inner_dim, start, x, y);
}
// _Crop1DGrad: backward of _Crop1D. dx has the uncropped shape
// (outer, dim, inner_dim); positions outside [start, end) along the
// cropped axis receive 0, the rest copy from dy (shape (outer, ex_dim,
// inner_dim)). One iteration per dx element, so no atomics are needed.
template<typename T>
__global__ void _Crop1DGrad(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int start,
    const int end,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int i = idx % inner_dim;
        const int d = (idx / inner_dim) % dim;
        const int o = idx / inner_dim / dim;
        // Outside the kept window the gradient is zero.
        dx[idx] = (d < start || d >= end) ? 0 :
            dy[(o * ex_dim + d - start) * inner_dim + i];
    }
}
// Crop1DGrad<int>: host launcher. count = outer * dim * inner_dim
// (number of input-gradient elements).
template<> void Crop1DGrad<int, CUDAContext>(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int start,
    const int end,
    const int* dy,
    int* dx,
    CUDAContext* ctx) {
    _Crop1DGrad<int>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 dim, ex_dim, inner_dim, start, end, dy, dx);
}
// Crop1DGrad<float>: host launcher. count = outer * dim * inner_dim
// (number of input-gradient elements).
template<> void Crop1DGrad<float, CUDAContext>(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int start,
    const int end,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    _Crop1DGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 dim, ex_dim, inner_dim, start, end, dy, dx);
}
/******************** ndarray.pad ********************/
// _ConstPad1D: constant-pads one axis. y has shape (outer, ex_dim,
// inner_dim) where ex_dim = dim + pad_l + pad_r; positions mapping
// outside [0, dim) on the source axis get "value", others copy from x.
template <typename T>
__global__ void _ConstPad1D(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int pad_l,
    const T value,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int i = idx % inner_dim;
        const int ex_d = (idx / inner_dim) % ex_dim;
        const int o = idx / inner_dim / ex_dim;
        // Map padded coordinate back to the source axis.
        const int d = ex_d - pad_l;
        y[idx] = (d < 0 || d >= dim) ? value :
            x[(o * dim + d) * inner_dim + i];
    }
}
// ConstPad1D<float>: host launcher. count = outer * ex_dim * inner_dim.
template <> void ConstPad1D<float, CUDAContext>(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int pad_l,
    const float value,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _ConstPad1D<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 dim, ex_dim, inner_dim, pad_l, value, x, y);
}
// _ReflectPad1D: reflection-pads one axis (mirror without repeating the
// border element). Out-of-range coordinate d is folded back into [0, dim):
// negative d reflects about 0 (d -> -d); d >= dim reflects about dim-1
// (d -> 2*dim - d - 2). Assumes pad sizes < dim so a single fold suffices
// — TODO confirm caller enforces this.
template <typename T>
__global__ void _ReflectPad1D(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int pad_l,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int i = idx % inner_dim;
        const int ex_d = (idx / inner_dim) % ex_dim;
        const int o = idx / inner_dim / ex_dim;
        int d = ex_d - pad_l;
        // Reflect below-range indices about 0.
        d = max(d, -d);
        // Reflect above-range indices about dim - 1.
        d = min(d, 2 * dim - d - 2);
        y[idx] = x[(o * dim + d) * inner_dim + i];
    }
}
// ReflectPad1D<float>: host launcher. count = outer * ex_dim * inner_dim.
template <> void ReflectPad1D<float, CUDAContext>(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int pad_l,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _ReflectPad1D<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 dim, ex_dim, inner_dim, pad_l, x, y);
}
// _EdgePad1D: edge (replicate) padding of one axis — out-of-range
// coordinates are clamped to the nearest valid index in [0, dim).
template <typename T>
__global__ void _EdgePad1D(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int pad_l,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int i = idx % inner_dim;
        const int ex_d = (idx / inner_dim) % ex_dim;
        const int o = idx / inner_dim / ex_dim;
        // Clamp the unpadded coordinate into [0, dim - 1].
        const int d = min(dim - 1, max(ex_d - pad_l, 0));
        y[idx] = x[(o * dim + d) * inner_dim + i];
    }
}
// EdgePad1D<float>: host launcher. count = outer * ex_dim * inner_dim.
template <> void EdgePad1D<float, CUDAContext>(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int pad_l,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _EdgePad1D<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 dim, ex_dim, inner_dim, pad_l, x, y);
}
// _ConstPad1DGrad: backward of _ConstPad1D. Iterates over dx (the
// unpadded tensor, count = outer * dim * inner_dim) and gathers the
// matching dy element; gradients of the padded region are dropped.
template <typename T>
__global__ void _ConstPad1DGrad(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int pad_l,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int i = idx % inner_dim;
        // % binds tighter than +: ex_d = ((idx / inner_dim) % dim) + pad_l,
        // i.e. the padded-axis coordinate of this unpadded element.
        const int ex_d = (idx / inner_dim) % dim + pad_l;
        const int o = idx / inner_dim / dim;
        dx[idx] = dy[(o * ex_dim + ex_d) * inner_dim + i];
    }
}
// ConstPad1DGrad<float>: host launcher. count = outer * dim * inner_dim.
template <> void ConstPad1DGrad<float, CUDAContext>(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int pad_l,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    _ConstPad1DGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 dim, ex_dim, inner_dim, pad_l, dy, dx);
}
// _ReflectPad1DGrad: backward of _ReflectPad1D. Iterates over dy
// (count = outer * ex_dim * inner_dim); several padded positions can
// reflect onto the same source element, hence atomicAdd.
// NOTE(review): assumes dx was zero-filled by the caller — confirm.
template <typename T>
__global__ void _ReflectPad1DGrad(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int pad_l,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int i = idx % inner_dim;
        const int ex_d = (idx / inner_dim) % ex_dim;
        const int o = idx / inner_dim / ex_dim;
        int d = ex_d - pad_l;
        // Same reflection mapping as the forward kernel.
        d = max(d, -d);
        d = min(d, 2 * dim - d - 2);
        atomicAdd(&dx[(o * dim + d) * inner_dim + i], dy[idx]);
    }
}
// ReflectPad1DGrad<float>: host launcher. count = outer * ex_dim * inner_dim.
template <> void ReflectPad1DGrad<float, CUDAContext>(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int pad_l,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    _ReflectPad1DGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 dim, ex_dim, inner_dim, pad_l, dy, dx);
}
// _EdgePad1DGrad: backward of _EdgePad1D. All padded positions clamp to
// a border element, so border gradients accumulate via atomicAdd.
// NOTE(review): assumes dx was zero-filled by the caller — confirm.
template <typename T>
__global__ void _EdgePad1DGrad(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int pad_l,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int i = idx % inner_dim;
        const int ex_d = (idx / inner_dim) % ex_dim;
        const int o = idx / inner_dim / ex_dim;
        // Same clamping as the forward kernel.
        const int d = min(dim - 1, max(ex_d - pad_l, 0));
        atomicAdd(&dx[(o * dim + d) * inner_dim + i], dy[idx]);
    }
}
// EdgePad1DGrad<float>: host launcher. count = outer * ex_dim * inner_dim.
template <> void EdgePad1DGrad<float, CUDAContext>(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int pad_l,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    _EdgePad1DGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 dim, ex_dim, inner_dim, pad_l, dy, dx);
}
/******************** ndarray.one_hot ********************/
// _OneHot: for each of "count" input indices, sets y[i * depth + x[i]]
// to on_value. x holds class indices stored as float (truncated to int).
// NOTE(review): only the "on" position is written — assumes y was
// pre-filled with the off value by the caller; confirm. x[idx] is not
// bounds-checked against depth. The template parameter T is unused here
// (pointers are hard-coded to float).
template <typename T>
__global__ void _OneHot(
    const int count,
    const int depth,
    const int on_value,
    const float* x,
    float* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Truncate the float-encoded class index to int.
        const int val = x[idx];
        y[idx * depth + val] = on_value;
    }
}
// OneHot<float>: host launcher. count = number of indices (rows of y).
template <> void OneHot<float, CUDAContext>(
    const int count,
    const int depth,
    const int on_value,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _OneHot<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 depth, on_value, x, y);
}
/******************** ndarray.reduce ********************/
// _Sum: reduces x over one axis. x has shape (outer, axis_dim, inner_dim);
// y has shape (outer, inner_dim) and count = outer * inner_dim.
// NOTE(review): the output pointer is float* regardless of T, and the
// accumulator is initialized with a double literal (0.0) — harmless for
// T=float but inconsistent with the template; confirm intent.
template <typename T>
__global__ void _Sum(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const T* x,
    float* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        T sum_val = 0.0;
        // First element of this output's column along the reduced axis.
        const int offset = (idx / inner_dim * axis_dim)
            * inner_dim + idx % inner_dim;
        for (int j = 0; j < axis_dim; j++)
            sum_val += x[offset + j * inner_dim];
        y[idx] = sum_val;
    }
}
// Sum<float>: host launcher. count = outer * inner_dim (output elements).
template<> void Sum<float, CUDAContext>(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _Sum<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 axis_dim, inner_dim, x, y);
}
// _SumGrad: backward of _Sum — broadcasts dy[idx] * coeff to every
// position along the reduced axis. count = outer * inner_dim, so each
// iteration writes axis_dim elements of dx (no overlap, no atomics).
// NOTE(review): dx is float* regardless of T, mirroring _Sum.
template <typename T>
__global__ void _SumGrad(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const T coeff,
    const T* dy,
    float* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int offset = (idx / inner_dim * axis_dim)
            * inner_dim + idx % inner_dim;
        for (int j = 0; j < axis_dim; j++)
            dx[offset + j * inner_dim] = dy[idx] * coeff;
    }
}
// SumGrad<float>: host launcher. count = outer * inner_dim; coeff scales
// the broadcast gradient (e.g. 1/axis_dim for a mean reduction).
template<> void SumGrad<float, CUDAContext>(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const float coeff,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    _SumGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 axis_dim, inner_dim, coeff, dy, dx);
}
/******************** ndarray.repeat ********************/
// _Repeat: repeats each element "repeats" times along one axis.
// y has shape (n, dim, repeats, inner_dim); x has shape (n, dim,
// inner_dim). One iteration per y element.
template <typename T>
__global__ void _Repeat(
    const int count,
    const int inner_dim,
    const int repeats,
    const int dim,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int d = idx % inner_dim;
        // Collapse the repeat coordinate: all repeats of (n, b) read the
        // same source element.
        const int b = (idx / inner_dim / repeats) % dim;
        const int n = idx / inner_dim / repeats / dim;
        const int x_idx = (n * dim + b) * inner_dim + d;
        y[idx] = x[x_idx];
    }
}
// Repeat<float>: host launcher. count = outer_dim * dim * repeats *
// inner_dim (output elements); outer_dim itself is implied by count and
// not passed to the kernel.
template <> void Repeat<float, CUDAContext>(
    const int count,
    const int outer_dim,
    const int dim,
    const int inner_dim,
    const int repeats,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _Repeat<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 inner_dim, repeats, dim, x, y);
}
// _RepeatGrad: backward of _Repeat — each input element accumulates the
// gradients of its "repeats" copies. Iterates over dx (count = n * dim *
// inner_dim), so the accumulation is race-free without atomics.
template <typename T>
__global__ void _RepeatGrad(
    const int count,
    const int inner_dim,
    const int repeats,
    const int dim,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int d = idx % inner_dim;
        const int b = (idx / inner_dim) % dim;
        const int n = idx / inner_dim / dim;
        T gradient = 0;
        // Sum over all repeated copies of this element in dy.
        for (int t = 0; t < repeats; t++)
            gradient += dy[
                (((n * dim + b) * repeats) + t)
                    * inner_dim + d];
        dx[idx] = gradient;
    }
}
// RepeatGrad<float>: host launcher. count = outer_dim * dim * inner_dim
// (input-gradient elements); outer_dim is implied by count.
template <> void RepeatGrad<float, CUDAContext>(
    const int count,
    const int outer_dim,
    const int dim,
    const int inner_dim,
    const int repeats,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    _RepeatGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 count, inner_dim, repeats, dim, dy, dx);
}
/******************** ndarray.slice ********************/
// _Slice: extracts one slice along an axis (inverse layout of _ConcatGrad).
// y has shape (outer_dim, y_slice_dim, inner_dim); x's sliced axis has
// size x_slice_dim and slice_offset is this slice's offset along it.
// One iteration per y element.
template <typename T>
__global__ void _Slice(
    const int count,
    const int outer_dim,
    const int inner_dim,
    const int x_slice_dim,
    const int y_slice_dim,
    const int slice_offset,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Elements per outer slice of y.
        const int tmp = y_slice_dim * inner_dim;
        const int outer_idx = idx / tmp;
        const int slice_idx = idx % tmp;
        // Corresponding flat index in x: skip slice_offset rows of the
        // wider source axis.
        const int x_idx = (outer_idx * x_slice_dim + slice_offset)
            * inner_dim + slice_idx;
        y[idx] = x[x_idx];
    }
}
// Slice<float>: host launcher. count = outer_dim * y_slice_dim * inner_dim.
template <> void Slice<float, CUDAContext>(
    const int count,
    const int outer_dim,
    const int inner_dim,
    const int x_slice_dim,
    const int y_slice_dim,
    const int slice_offset,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _Slice<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 outer_dim, inner_dim,
                     x_slice_dim, y_slice_dim,
                         slice_offset, x, y);
}
// _SliceGrad: backward of _Slice — scatters dy into the slice's region of
// dx. Each dy element maps to a distinct dx element, so plain stores
// suffice. Regions of dx outside any slice are not touched here.
template <typename T>
__global__ void _SliceGrad(
    const int count,
    const int outer_dim,
    const int inner_dim,
    const int x_slice_dim,
    const int y_slice_dim,
    const int slice_offset,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int tmp = y_slice_dim * inner_dim;
        const int outer_idx = idx / tmp;
        const int slice_idx = idx % tmp;
        // Same index mapping as the forward kernel, used as the store target.
        const int x_idx = (outer_idx * x_slice_dim + slice_offset)
            * inner_dim + slice_idx;
        dx[x_idx] = dy[idx];
    }
}
// SliceGrad<float>: host launcher. count = outer_dim * y_slice_dim *
// inner_dim (gradient-of-output elements).
template <> void SliceGrad<float, CUDAContext>(
    const int count,
    const int outer_dim,
    const int inner_dim,
    const int x_slice_dim,
    const int y_slice_dim,
    const int slice_offset,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    _SliceGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 outer_dim, inner_dim,
                     x_slice_dim, y_slice_dim,
                         slice_offset, dy, dx);
}
/******************** ndarray.tile ********************/
// _Tile: tiles x "multiple" times along one axis. y has shape
// (n, multiple, ex_inner_dim); all copies of a given (n, d) read the same
// source element of x (shape (n, ex_inner_dim)).
template <typename T>
__global__ void _Tile(
    const int count,
    const int ex_inner_dim,
    const int multiple,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int d = idx % ex_inner_dim;
        // Skip the tile coordinate — every tile maps to the same source.
        const int n = idx / ex_inner_dim / multiple;
        const int x_idx = n * ex_inner_dim + d;
        y[idx] = x[x_idx];
    }
}
// Tile<float>: host launcher. count = outer_dim * multiple * ex_inner_dim
// (output elements); outer_dim is implied by count.
template <> void Tile<float, CUDAContext>(
    const int count,
    const int outer_dim,
    const int ex_inner_dim,
    const int multiple,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _Tile<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 ex_inner_dim, multiple, x, y);
}
// _TileGrad: backward of _Tile — each input element sums the gradients of
// its "multiple" tiled copies. Iterates over dx (count = n * ex_inner_dim),
// so no atomics are needed.
template <typename T>
__global__ void _TileGrad(
    const int count,
    const int ex_inner_dim,
    const int multiple,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        T gradient = 0;
        // First copy of this element in dy; copies are ex_inner_dim apart.
        const int offset = (idx / ex_inner_dim * multiple)
            * ex_inner_dim + idx % ex_inner_dim;
        for (int t = 0; t < multiple; t++)
            gradient += dy[offset + t * ex_inner_dim];
        dx[idx] = gradient;
    }
}
// TileGrad<float>: host launcher. count = outer_dim * ex_inner_dim
// (input-gradient elements); outer_dim is implied by count.
template <> void TileGrad<float, CUDAContext>(
    const int count,
    const int outer_dim,
    const int ex_inner_dim,
    const int multiple,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    _TileGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 count, ex_inner_dim, multiple, dy, dx);
}
/******************** ndarray.transpose ********************/
// _Transpose: general N-d transpose. For each output index, walks the
// output dims (via new_steps, the output strides) and maps each
// coordinate through "order" to accumulate the input flat index using
// old_steps (the input strides). order/old_steps/new_steps are small
// device arrays of length ndim.
template <typename T>
__global__ void _Transpose(
    const int count,
    const int ndim,
    const int* order,
    const int* old_steps,
    const int* new_steps,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        int x_idx = 0, y_idx = idx;
        for (int j = 0; j < ndim; ++j) {
           int k = order[j];
           // Peel off the j-th output coordinate and re-stride it into x.
           x_idx += (y_idx / new_steps[j]) * old_steps[k];
           y_idx %= new_steps[j];
        }
        y[idx] = x[x_idx];
    }
}
// Transpose<float>: host launcher. order/old_steps/new_steps must be
// device pointers of length ndim; count = total element count.
template <> void Transpose<float, CUDAContext>(
    const int count,
    const int ndim,
    const int* order,
    const int* old_steps,
    const int* new_steps,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _Transpose<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 ndim, order, old_steps, new_steps, x, y);
}
// _TransposeGrad: backward of _Transpose — computes the same output->input
// index mapping and scatters dy through it. The mapping is a bijection,
// so plain stores are race-free.
template <typename T>
__global__ void _TransposeGrad(
    const int count,
    const int ndim,
    const int* order,
    const int* old_steps,
    const int* new_steps,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        int x_idx = 0, y_idx = idx;
        for (int j = 0; j < ndim; ++j) {
            int k = order[j];
            // Identical index walk to the forward kernel.
            x_idx += (y_idx / new_steps[j]) * old_steps[k];
            y_idx %= new_steps[j];
        }
        dx[x_idx] = dy[idx];
    }
}
// TransposeGrad<float>: host launcher; same argument contract as
// Transpose<float>.
template <> void TransposeGrad<float, CUDAContext>(
    const int count,
    const int ndim,
    const int* order,
    const int* old_steps,
    const int* new_steps,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    _TransposeGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 ndim, order, old_steps, new_steps, dy, dx);
}
/******************** recurrent.lstm_cell ********************/
// _LSTMCellAct: in-place activation of the pre-activation gate buffer.
// Each row of xact holds x_offset (= 4 * hidden) values: the first
// c_offset (= 3 * hidden, gates i/f/o) get sigmoid, the rest (candidate
// cell g) get tanh.
// NOTE(review): _SigmoidUnit is instantiated for float regardless of T.
template <typename T>
__global__ void _LSTMCellAct(
    const int count,
    const int c_offset,
    const int x_offset,
    T* xact) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Position within one row of the 4*hidden gate buffer.
        const int offset = idx % x_offset;
        xact[idx] = offset < c_offset ?
            _SigmoidUnit<float>(xact[idx]) : tanh(xact[idx]);
    }
}
// _LSTMCellGate: combines activated gates with the previous cell state.
// Gate layout per row of xact: [i | f | o | g] each of width hidden_size
// (o_offset = 2 * hidden, c_offset = 3 * hidden, x_offset = 4 * hidden).
// Computes c = f * cx + i * g and h = o * tanh(c).
// count = N * hidden_size (one iteration per cell-state element).
template <typename T>
__global__ void _LSTMCellGate(
    const int count,
    const int hidden_size,
    const int o_offset, // 2 * hidden_size
    const int c_offset, // 3 * hidden_size
    const int x_offset, // 4 * hidden_size
    const T* cx,
    const T* xact,
    T* c,
    T* h) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int n = idx / hidden_size;
        const int offset = idx % hidden_size;
        // Row of activated gates for this batch element.
        const T* x = xact + n * x_offset;
        const T i = x[offset];
        const T f = x[offset + hidden_size];
        const T o = x[offset + o_offset];
        T c_ = x[offset + c_offset];
        c_ = c[idx] = f * cx[idx] + i * c_;
        h[idx] = o * tanh(c_);
    }
}
// LSTMCell<float>: forward LSTM cell. N = batch, C = hidden size,
// count = N * C. First activates the 4*C gate buffer in place
// (_LSTMCellAct over count * 4 elements), then fuses gates with the
// previous cell state cx to produce c and h.
template <> void LSTMCell<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const float* cx,
    float* xact,
    float* c,
    float* h,
    CUDAContext* ctx) {
    const int o_offset = 2 * C,
                  c_offset = 3 * C,
                      x_offset = 4 * C;
    _LSTMCellAct<float>
        << < CUDA_BLOCKS(count * 4), CUDA_THREADS,
             0, ctx->cuda_stream() >> > (count * 4,
                 c_offset, x_offset, xact);
    _LSTMCellGate<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 C, o_offset, c_offset, x_offset,
                     cx, xact, c, h);
}
// _LSTMCellGateGrad: backward of _LSTMCellGate. Given activated gates
// (xact), new cell state c and upstream grads dc/dh, produces the grad
// w.r.t. the previous cell state (dcx) and the per-gate grads written
// into dx with the same [i | f | o | g] layout. The shared term
// dh * o * (1 - tanh(c)^2) + dc is d(loss)/d(c).
template <typename T>
__global__ void _LSTMCellGateGrad(
    const int count,
    const int hidden_size,
    const int o_offset,
    const int c_offset,
    const int x_offset,
    const T* cx,
    const T* xact,
    const T* c,
    const T* dc,
    const T* dh,
    T* dcx,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int n = idx / hidden_size;
        const int offset = idx % hidden_size;
        const T* xact_ = xact + n * x_offset;
        T* dx_ = dx + n * x_offset;
        const T i = xact_[offset];
        const T f = xact_[offset + hidden_size];
        const T o = xact_[offset + o_offset];
        const T g = xact_[offset + c_offset];
        const T tanh_c = tanh(c[idx]);
        // d(loss)/d(c): through h = o * tanh(c) plus the direct dc path.
        const T dcx_sum_term =
            dh[idx] * o * (1 - tanh_c * tanh_c) + dc[idx];
        dcx[idx] = dcx_sum_term * f;            // c = f * cx + ...
        dx_[offset] = dcx_sum_term * g;          // d/di
        dx_[offset + hidden_size] = dcx_sum_term * cx[idx];  // d/df
        dx_[offset + o_offset] = dh[idx] * tanh_c;           // d/do
        dx_[offset + c_offset] = dcx_sum_term * i;           // d/dg
    }
}
// _LSTMCellActGrad: in-place backward of _LSTMCellAct. dx holds grads
// w.r.t. activated values; multiply by the activation derivative using
// the stored activated value: sigmoid' = v * (1 - v) for the first
// c_offset entries of each row, tanh' = 1 - v^2 for the rest.
template <typename T>
__global__ void _LSTMCellActGrad(
    const int count,
    const int c_offset,
    const int x_offset,
    const T* xact,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int offset = idx % x_offset;
        const T val = xact[idx];
        if (offset < c_offset) dx[idx] = dx[idx] * val * (T(1) - val);
        else dx[idx] = dx[idx] * (T(1) - val * val);
    }
}
// LSTMCellGrad<float>: backward LSTM cell. N = batch, C = hidden size,
// count = N * C. First computes gate-level grads into dx, then applies
// the activation derivatives in place (reverse order of the forward pass).
template <> void LSTMCellGrad<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const float* cx,
    const float* xact,
    const float* c,
    const float* dc,
    const float* dh,
    float* dcx,
    float* dx,
    CUDAContext* ctx) {
    const int o_offset = 2 * C,
                  c_offset = 3 * C, 
                      x_offset = 4 * C;
    _LSTMCellGateGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 C, o_offset, c_offset, x_offset,
                     cx, xact, c, dc, dh, dcx, dx);
    _LSTMCellActGrad<float>
        << < CUDA_BLOCKS(count * 4), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count * 4,
                 c_offset, x_offset, xact, dx);
}
/******************** update.adam_update ********************/
// _AdamUpdate: fused Adam step. Updates moment buffers m and v in place
// and overwrites g with the final update lr * m / (sqrt(v) + eps).
// NOTE(review): no bias correction appears here — presumably lr is
// pre-scaled by the caller; confirm.
template <typename T>
__global__ void _AdamUpdate(
    const int count,
    const T lr,
    const T beta1,
    const T beta2,
    const T eps,
    T* g,
    T* m,
    T* v) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        T gi = g[i];
        // First moment: exponential moving average of the gradient.
        T mi = m[i] = m[i] * beta1 + gi * (1 - beta1);
        // Second moment: EMA of the squared gradient.
        T vi = v[i] = v[i] * beta2 + gi * gi * (1 - beta2);
        g[i] = lr * mi / (sqrt(vi) + eps);
    }
}
// AdamUpdate<float>: host launcher; count = number of parameters.
// g/m/v are updated in place on the context's stream.
template <> void AdamUpdate<float, CUDAContext>(
    const int count,
    const float lr,
    const float beta1,
    const float beta2,
    const float eps,
    float* g,
    float* m,
    float* v,
    CUDAContext* ctx) {
    _AdamUpdate<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> > (count,
                 lr, beta1, beta2, eps, g, m, v);
}
/******************** update.nesterov_update ********************/
// _NesterovUpdate: Nesterov momentum step. Updates the history buffer
// h = momentum * h + lr * g, then overwrites g with the look-ahead
// update (1 + momentum) * h_new - momentum * h_old.
template <typename T>
__global__ void _NesterovUpdate(
    const int count,
    const T lr,
    const T momentum,
    T* g,
    T* h) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        T hi = h[i];
        T hi_new = h[i] = momentum * hi + lr * g[i];
        g[i] = (1 + momentum) * hi_new - momentum * hi;
    }
}
// NesterovUpdate<float>: host launcher; g and h are updated in place.
template <> void NesterovUpdate<float, CUDAContext>(
    const int count,
    const float lr,
    const float momentum,
    float* g,
    float* h,
    CUDAContext* ctx) {
    _NesterovUpdate<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> > (count,
                 lr, momentum, g, h);
}
/******************** update.rmsprop_update ********************/
// _RMSPropUpdate: RMSProp step. Maintains the running mean of squared
// gradients h and overwrites g with lr * g / (sqrt(h) + eps).
template <typename T>
__global__ void _RMSPropUpdate(
    const int count,
    const T lr,
    const T decay,
    const T eps,
    T* g,
    T* h) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        T gi = g[i];
        // EMA of squared gradients.
        T hi = h[i] = decay * h[i] + (1 - decay) * gi * gi;
        g[i] = lr * g[i] / (sqrt(hi) + eps);
    }
}
// RMSPropUpdate<float>: host launcher; g and h are updated in place.
template <> void RMSPropUpdate<float, CUDAContext>(
    const int count,
    const float lr,
    const float decay,
    const float eps,
    float* g,
    float* h,
    CUDAContext* ctx) {
    _RMSPropUpdate<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 lr, decay, eps, g, h);
}
/******************** update.sgd_update ********************/
// _SGDUpdate: classic SGD with momentum. Writes the new velocity
// momentum * h + lr * g into both h (history) and g (the applied update).
template <typename T>
__global__ void _SGDUpdate(
    const int count,
    const T lr,
    const T momentum,
    T* g,
    T* h) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        T hi = h[i];
        g[i] = h[i] = momentum * hi + lr * g[i];
    }
}
// SGDUpdate<float>: host launcher; g and h are updated in place.
template <> void SGDUpdate<float, CUDAContext>(
    const int count,
    const float lr,
    const float momentum,
    float* g,
    float* h,
    CUDAContext* ctx) {
    _SGDUpdate<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 lr, momentum, g, h);
}
/******************** vision.bias_add ********************/
// _BiasAdd_NCHW: adds a per-channel bias in NCHW layout. dim = C,
// inner_dim = H * W; the channel index is (idx / inner_dim) % dim.
template <typename T>
__global__ void _BiasAdd_NCHW(
    const int count,
    const int dim,
    const int inner_dim,
    const T* bias,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        y[idx] += bias[(idx / inner_dim) % dim];
    }
}
// _BiasAdd_NHWC: adds a per-channel bias in NHWC layout — channels are
// innermost, so the channel index is simply idx % dim. inner_dim is
// unused in this layout but kept for signature symmetry with NCHW.
template <typename T>
__global__ void _BiasAdd_NHWC(
    const int count,
    const int dim,
    const int inner_dim,
    const T* bias,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        y[idx] += bias[idx % dim];
    }
}
// BiasAdd<float>: dispatches to the NCHW or NHWC kernel by data_format.
// count = total element count of y; dim = C.
// NOTE(review): bias_multiplier is unused on this CUDA path (likely only
// needed by a GEMM-based CPU/BLAS implementation) — confirm.
template<> void BiasAdd<float, CUDAContext>(
    const int count,
    const int outer_dim,
    const int dim,
    const int inner_dim,
    const string& data_format,
    const float* bias,
    const float* bias_multiplier,
    float* y,
    CUDAContext* ctx) {
    if (data_format == "NCHW") {
        _BiasAdd_NCHW<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(
                     count, dim, inner_dim, bias, y);
    } else if (data_format == "NHWC") {
        _BiasAdd_NHWC<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(
                     count, dim, inner_dim, bias, y);
    } else LOG(FATAL) << "Unknown data format: " << data_format;
}
/******************** vision.bilinear_resize ********************/
// _BilinearResize_NCHW: bilinear interpolation resize, NCHW layout.
// y has shape (N, C, out_h, out_w); each output pixel interpolates the
// four neighboring input pixels. scale_h/scale_w = H/out_h, W/out_w.
// count = N * C * out_h * out_w.
template <typename T>
__global__ void _BilinearResize_NCHW(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const float scale_h,
    const float scale_w,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int w = idx % out_w;
        const int h = (idx / out_w) % out_h;
        const int c = (idx / out_w / out_h) % C;
        // Fixed: was "idx / out_w / out_w / C", which divided by out_w
        // twice and produced a wrong batch index whenever out_h != out_w.
        const int n = idx / out_w / out_h / C;
        // Source coordinates and lerp weights along each axis.
        const float h_in = h * scale_h;
        const int top_y_idx = floorf(h_in);
        const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1;
        const float y_lerp = h_in - top_y_idx;
        const float w_in = w * scale_w;
        const int left_x_idx = floorf(w_in);
        const int right_x_idx = (w_in < W - 1) ? ceilf(w_in) : W - 1;
        const float x_lerp = w_in - left_x_idx;
        const int NCHT = (n * C + c) * H + top_y_idx;
        const int NCHB = (n * C + c) * H + bottom_y_idx;
        const float top_left(x[NCHT * W + left_x_idx]);
        const float top_right(x[NCHT * W + right_x_idx]);
        const float bottom_left(x[NCHB * W + left_x_idx]);
        const float bottom_right(x[NCHB * W + right_x_idx]);
        // Lerp horizontally on both rows, then vertically.
        const float top = top_left + (top_right - top_left) * x_lerp;
        const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp;
        y[idx] = top + (bottom - top) * y_lerp;
    }
}
// _BilinearResize_NHWC: bilinear interpolation resize, NHWC layout.
// Same math as the NCHW kernel with channel-innermost indexing.
// count = N * out_h * out_w * C.
template <typename T>
__global__ void _BilinearResize_NHWC(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const float scale_h,
    const float scale_w,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int c = idx % C;
        const int w = (idx / C) % out_w;
        const int h = (idx / C / out_w) % out_h;
        const int n = idx / C / out_w / out_h;
        // Source coordinates and lerp weights along each axis.
        const float h_in = h * scale_h;
        const int top_y_idx = floorf(h_in);
        const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1;
        const float y_lerp = h_in - top_y_idx;
        const float w_in = w * scale_w;
        const int left_x_idx = floorf(w_in);
        const int right_x_idx = (w_in < W - 1) ? ceilf(w_in) : W - 1;
        const float x_lerp = w_in - left_x_idx;
        const int NHT = n * H + top_y_idx;
        const int NHB = n * H + bottom_y_idx;
        const float top_left(x[(NHT * W + left_x_idx) * C + c]);
        const float top_right(x[(NHT * W + right_x_idx) * C + c]);
        const float bottom_left(x[(NHB * W + left_x_idx) * C + c]);
        const float bottom_right(x[(NHB * W + right_x_idx) * C + c]);
        // Lerp horizontally on both rows, then vertically.
        const float top = top_left + (top_right - top_left) * x_lerp;
        const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp;
        y[idx] = top + (bottom - top) * y_lerp;
    }
}
// BilinearResize<float>: dispatches to the NCHW or NHWC kernel.
// Scales map output coords to input coords (align_corners=false style:
// scale = in / out).
template <> void BilinearResize<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const string& data_format,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    const float scale_h = (float)H / out_h;
    const float scale_w = (float)W / out_w;
    if (data_format == "NCHW") {
        _BilinearResize_NCHW<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(
                     count, N, C, H, W, out_h, out_w,
                         scale_h, scale_w, x, y);
    } else if(data_format == "NHWC") {
        _BilinearResize_NHWC<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(
                     count, N, C, H, W, out_h, out_w,
                         scale_h, scale_w, x, y);
    } else LOG(FATAL) << "Unknown data format: " << data_format;
}
// _BilinearResizeGrad_NCHW: backward of bilinear resize, NCHW layout.
// Each output-gradient pixel scatters its weighted contribution to the
// four input pixels it interpolated; overlapping scatters require
// atomicAdd. NOTE(review): assumes dx was zero-filled by the caller.
// count = N * C * out_h * out_w.
template <typename T>
__global__ void _BilinearResizeGrad_NCHW(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const float scale_h,
    const float scale_w,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int w = idx % out_w;
        const int h = (idx / out_w) % out_h;
        const int c = (idx / out_w / out_h) % C;
        // Fixed: was "idx / out_w / out_w / C", which divided by out_w
        // twice and scattered gradients into the wrong batch element
        // whenever out_h != out_w.
        const int n = idx / out_w / out_h / C;
        // Same source coordinates / weights as the forward kernel.
        const float h_in = h * scale_h;
        const int top_y_idx = floorf(h_in);
        const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1;
        const float y_lerp = h_in - top_y_idx;
        const float w_in = w * scale_w;
        const int left_x_idx = floorf(w_in);
        const int right_x_idx = (w_in < W - 1) ? ceilf(w_in) : W - 1;
        const float x_lerp = w_in - left_x_idx;
        const int NCHT = (n * C + c) * H + top_y_idx;
        const int NCHB = (n * C + c) * H + bottom_y_idx;
        const float dtop = (1 - y_lerp) * dy[idx];
        const float dbottom = y_lerp * dy[idx];
        atomicAdd(&dx[NCHT * W + left_x_idx], static_cast<T>((1 - x_lerp) * dtop));
        atomicAdd(&dx[NCHT * W + right_x_idx], static_cast<T>(x_lerp * dtop));
        atomicAdd(&dx[NCHB * W + left_x_idx], static_cast<T>((1 - x_lerp) * dbottom));
        atomicAdd(&dx[NCHB * W + right_x_idx], static_cast<T>(x_lerp * dbottom));
    }
}
// _BilinearResizeGrad_NHWC: backward of bilinear resize, NHWC layout.
// Scatters weighted gradients to the four source pixels via atomicAdd.
// NOTE(review): assumes dx was zero-filled by the caller — confirm.
// count = N * out_h * out_w * C.
template <typename T>
__global__ void _BilinearResizeGrad_NHWC(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const float scale_h,
    const float scale_w,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int c = idx % C;
        const int w = (idx / C) % out_w;
        const int h = (idx / C / out_w) % out_h;
        const int n = idx / C / out_w / out_h;
        // Same source coordinates / weights as the forward kernel.
        const float h_in = h * scale_h;
        const int top_y_idx = floorf(h_in);
        const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1;
        const float y_lerp = h_in - top_y_idx;
        const float w_in = w * scale_w;
        const int left_x_idx = floorf(w_in);
        const int right_x_idx = (w_in < W - 1) ? ceilf(w_in) : W - 1;
        const float x_lerp = w_in - left_x_idx;
        const int NHT = n * H + top_y_idx;
        const int NHB = n * H + bottom_y_idx;
        const float dtop = (1 - y_lerp) * dy[idx];
        const float dbottom = y_lerp * dy[idx];
        atomicAdd(&dx[(NHT * W + left_x_idx) * C + c], static_cast<T>((1 - x_lerp) * dtop));
        atomicAdd(&dx[(NHT * W + right_x_idx) * C + c], static_cast<T>(x_lerp * dtop));
        atomicAdd(&dx[(NHB * W + left_x_idx) * C + c], static_cast<T>((1 - x_lerp) * dbottom));
        atomicAdd(&dx[(NHB * W + right_x_idx) * C + c], static_cast<T>(x_lerp * dbottom));
    }
}
// BilinearResizeGrad<float>: dispatches the bilinear backward kernel by
// data_format. count = total element count of dy; dx should be
// zero-filled before the call (kernels accumulate with atomicAdd).
template <> void BilinearResizeGrad<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const string& data_format,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    const float scale_h = (float)H / out_h;
    const float scale_w = (float)W / out_w;
    if (data_format == "NCHW") {
        _BilinearResizeGrad_NCHW<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(
                     count, N, C, H, W, out_h, out_w,
                         scale_h, scale_w, dy, dx);
    } else if(data_format == "NHWC") {
        _BilinearResizeGrad_NHWC<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(
                     count, N, C, H, W, out_h, out_w,
                         scale_h, scale_w, dy, dx);
    } else LOG(FATAL) << "Unknown data format: " << data_format;
}
/******************** vision.conv ********************/
// _Im2Col2d_NCHW: unfolds image patches into columns (NCHW, single image).
// Each iteration handles one (input-channel, output-row, output-col)
// triple and writes its kernel_h * kernel_w patch values down the column
// buffer. Out-of-bounds taps (from padding) are written as 0.
// count = C * col_h * col_w.
template<typename T>
__global__ void _Im2Col2d_NCHW(
    const int count,
    const int H,
    const int W,
    const int col_h,
    const int col_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const int dilation_h,
    const int dilation_w,
    const T* im,
    T* col) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int w = idx % col_w;
        const int h_idx = idx / col_w;
        const int h = h_idx % col_h;
        const int im_c = h_idx / col_h;
        // First output channel of this patch in the col buffer.
        const int c = im_c * kernel_h * kernel_w;
        // Top-left corner of the receptive field (may be negative in padding).
        const int im_h_off = h * stride_h - pad_h;
        const int im_w_off = w * stride_w - pad_w;
        T* col_ptr = col;
        col_ptr += ((c * col_h + h) * col_w + w);
        const T* im_ptr = im;
        im_ptr += ((im_c * H + im_h_off) * W + im_w_off);
        for (int kh = 0; kh < kernel_h; kh++) {
            for (int kw = 0; kw < kernel_w; kw++) {
                const int im_h = kh * dilation_h + im_h_off;
                const int im_w = kw * dilation_w + im_w_off;
                *col_ptr = (im_h >= 0 && im_w >= 0 && im_h < H && im_w < W) ?
                    im_ptr[kh * dilation_h * W + kw * dilation_w] : 0;
                // Advance to the next kernel tap's plane in col.
                col_ptr += (col_h * col_w);
            }
        }
    }
}
// _Im2Col2d_NHWC: unfolds image patches into columns (NHWC, single image).
// Column layout: ((h * col_w + w) * kernel_h + kh) * kernel_w + kw, then
// channel innermost. Out-of-bounds taps are written as 0.
// count = col_h * col_w * C.
template<typename T>
__global__ void _Im2Col2d_NHWC(
    const int count,
    const int C,
    const int H,
    const int W,
    const int col_h,
    const int col_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const int dilation_h,
    const int dilation_w,
    const T* im,
    T* col) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int c = idx % C;
        const int w = (idx / C) % col_w;
        const int h = idx / C / col_w;
        // Top-left corner of the receptive field (may be negative in padding).
        const int im_h_off = h * stride_h - pad_h;
        const int im_w_off = w * stride_w - pad_w;
        const int base_col_idx = (h * col_w) + w;
        for (int kh = 0; kh < kernel_h; kh++) {
            for (int kw = 0; kw < kernel_w; kw++) {
                const int im_h = kh * dilation_h + im_h_off;
                const int im_w = kw * dilation_w + im_w_off;
                const int col_idx = (
                    ((base_col_idx * kernel_h + kh) * kernel_w + kw) * C + c
                );
                col[col_idx] = (im_h >= 0 && im_w >= 0 &&
                    im_h < H && im_w < W) ? im[(im_h * W + im_w) * C + c] : 0;
            }
        }
    }
}
// Im2Col2d<float>: dispatches im2col by data_format for a single image.
// col_h/col_w are the convolution's output spatial dims; the launch count
// covers one element per (channel, output position).
template <> void Im2Col2d<float, CUDAContext>(
    const int C,
    const int H,
    const int W,
    const int col_h,
    const int col_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const int dilation_h,
    const int dilation_w,
    const string& data_format,
    const float* im,
    float* col,
    CUDAContext* ctx) {
    if (data_format == "NCHW") {
         const int count = (C * col_h * col_w);
        _Im2Col2d_NCHW<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     H, W, col_h, col_w, kernel_h, kernel_w,
                         stride_h, stride_w, pad_h, pad_w,
                             dilation_h, dilation_w, im, col);
    } else if (data_format == "NHWC") {
         const int count = (col_h * col_w * C);
        _Im2Col2d_NHWC<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     C, H, W, col_h, col_w, kernel_h, kernel_w,
                         stride_h, stride_w, pad_h, pad_w,
                             dilation_h, dilation_w, im, col);
    } else LOG(FATAL) << "Unknown data format: " << data_format;
}
// _Col2Im2d_NCHW: folds a column buffer back into an image (NCHW).
// One iteration per image pixel: sums every col entry whose receptive
// field covers this pixel, so no atomics are needed. Inverse of
// _Im2Col2d_NCHW. count = C * H * W.
template<typename T>
__global__ void _Col2Im2d_NCHW(
    const int count,
    const int H,
    const int W,
    const int col_h,
    const int col_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const int dilation_h,
    const int dilation_w,
    const T* col,
    T* im) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        T val = 0;
        // Pixel coordinates in the padded frame.
        const int im_w = idx % W + pad_w;
        const int im_h = (idx / W) % H + pad_h;
        const int im_c = idx / W / H;
        // Effective (dilated) kernel extent.
        const int ex_kernel_h = (kernel_h - 1) * dilation_h + 1;
        const int ex_kernel_w = (kernel_w - 1) * dilation_w + 1;
        // redundant pixels will be ignored when conv
        // note to clip them by min(x,col_w)
        const int w_start = (im_w < ex_kernel_w) ?
            0 : (im_w - ex_kernel_w) / stride_w + 1;
        const int w_end = min(im_w / stride_w + 1, col_w);
        const int h_start = (im_h < ex_kernel_h) ?
            0 : (im_h - ex_kernel_h) / stride_h + 1;
        const int h_end = min(im_h / stride_h + 1, col_h);
        for (int h = h_start; h < h_end; ++h) {
            for (int w = w_start; w < w_end; ++w) {
                int kh_off = (im_h - h * stride_h);
                int kw_off = (im_w - w * stride_w);
                // With dilation, only taps landing exactly on a dilation
                // point touch this pixel; skip the other col entries.
                if (kh_off % dilation_h == 0 && kw_off % dilation_w == 0) {
                    kh_off /= dilation_h;
                    kw_off /= dilation_w;
                    const int col_idx = ((
                        (im_c * kernel_h + kh_off) * kernel_w + kw_off) * col_h + h
                            ) * col_w + w;
                    val += col[col_idx];
                }
            }
        }
        im[idx] = val;
    }
}
// _Col2Im2d_NHWC: folds a column buffer back into an image (NHWC).
// Same gather strategy as the NCHW variant with channel-innermost
// column indexing. Inverse of _Im2Col2d_NHWC. count = H * W * C.
template<typename T>
__global__ void _Col2Im2d_NHWC(
    const int count,
    const int C,
    const int H,
    const int W,
    const int col_h,
    const int col_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const int dilation_h,
    const int dilation_w,
    const T* col,
    T* im) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        T val = 0;
        const int im_c = idx % C;
        // Pixel coordinates in the padded frame.
        const int im_w = (idx / C) % W + pad_w;
        const int im_h = (idx / C / W) + pad_h;
        // Effective (dilated) kernel extent.
        const int ex_kernel_h = (kernel_h - 1) * dilation_h + 1;
        const int ex_kernel_w = (kernel_w - 1) * dilation_w + 1;
        // redundant pixels will be ignored when conv
        // note to clip them by min(x,col_w)
        const int w_start = (im_w < ex_kernel_w) ?
            0 : (im_w - ex_kernel_w) / stride_w + 1;
        const int w_end = min(im_w / stride_w + 1, col_w);
        const int h_start = (im_h < ex_kernel_h) ?
            0 : (im_h - ex_kernel_h) / stride_h + 1;
        const int h_end = min(im_h / stride_h + 1, col_h);
        for (int h = h_start; h < h_end; ++h) {
            for (int w = w_start; w < w_end; ++w) {
                int kh_off = (im_h - h * stride_h);
                int kw_off = (im_w - w * stride_w);
                // With dilation, only taps landing exactly on a dilation
                // point touch this pixel; skip the other col entries.
                if (kh_off % dilation_h == 0 && kw_off % dilation_w == 0) {
                    kh_off /= dilation_h;
                    kw_off /= dilation_w;
                    const int col_idx = (
                        ((h * col_w + w) * kernel_h + kh_off) * kernel_w + kw_off
                            ) * C + im_c;
                    val += col[col_idx];
                }
            }
        }
        im[idx] = val;
    }
}
/*! Col2Im, 2d, float32: dispatch the layout-specific kernel. */
template <> void Col2Im2d<float, CUDAContext>(
    const int C,
    const int H,
    const int W,
    const int col_h,
    const int col_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const int dilation_h,
    const int dilation_w,
    const string& data_format,
    const float* col,
    float* im,
    CUDAContext* ctx) {
    // One thread per image pixel; total is C*H*W for either layout.
    const int count = C * H * W;
    if (data_format == "NCHW") {
        _Col2Im2d_NCHW<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     H, W, col_h, col_w, kernel_h, kernel_w,
                         stride_h, stride_w, pad_h, pad_w,
                             dilation_h, dilation_w, col, im);
        return;
    }
    if (data_format == "NHWC") {
        _Col2Im2d_NHWC<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     C, H, W, col_h, col_w, kernel_h, kernel_w,
                         stride_h, stride_w, pad_h, pad_w,
                             dilation_h, dilation_w, col, im);
        return;
    }
    LOG(FATAL) << "Unknown data format: " << data_format;
}
/******************** vision.nn_resize ********************/
/*!
 * Nearest-neighbor resize, NCHW: each output pixel copies the nearest
 * (floor-scaled, border-clamped) input pixel.
 */
template <typename T>
__global__ void _NNResize_NCHW(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const float scale_h,
    const float scale_w,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Peel (n, c, h, w) off the flat output index.
        int rest = idx;
        const int w = rest % out_w; rest /= out_w;
        const int h = rest % out_h; rest /= out_h;
        const int c = rest % C;
        const int n = rest / C;
        // Nearest source coordinate, clamped to the input border.
        const int h_in = min(int(floorf(h * scale_h)), H - 1);
        const int w_in = min(int(floorf(w * scale_w)), W - 1);
        y[idx] = x[((n * C + c) * H + h_in) * W + w_in];
    }
}
/*!
 * Nearest-neighbor resize, NHWC: each output pixel copies the nearest
 * (floor-scaled, border-clamped) input pixel.
 */
template <typename T>
__global__ void _NNResize_NHWC(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const float scale_h,
    const float scale_w,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Peel (n, h, w, c) off the flat output index.
        int rest = idx;
        const int c = rest % C;     rest /= C;
        const int w = rest % out_w; rest /= out_w;
        const int h = rest % out_h;
        const int n = rest / out_h;
        // Nearest source coordinate, clamped to the input border.
        const int h_in = min(int(floorf(h * scale_h)), H - 1);
        const int w_in = min(int(floorf(w * scale_w)), W - 1);
        y[idx] = x[((n * H + h_in) * W + w_in) * C + c];
    }
}
/*! Nearest-neighbor resize, float32: dispatch on the data format. */
template <> void NNResize<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const string& data_format,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    // Inverse resize factors (input extent over output extent).
    const float scale_h = (float)H / out_h;
    const float scale_w = (float)W / out_w;
    if (data_format == "NCHW") {
        _NNResize_NCHW<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(
                     count, N, C, H, W, out_h, out_w,
                         scale_h, scale_w, x, y);
        return;
    }
    if (data_format == "NHWC") {
        _NNResize_NHWC<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(
                     count, N, C, H, W, out_h, out_w,
                         scale_h, scale_w, x, y);
        return;
    }
    LOG(FATAL) << "Unknown data format: " << data_format;
}
/*!
 * Nearest-neighbor resize gradient, NCHW: each output gradient is
 * scattered back to its nearest source pixel. Several output pixels
 * may map to one input pixel, so accumulation is atomic.
 * NOTE(review): dx is accumulated into — presumably zeroed by the
 * caller; confirm at the call site.
 */
template <typename T>
__global__ void _NNResizeGrad_NCHW(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const float scale_h,
    const float scale_w,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Peel (n, c, h, w) off the flat output index.
        int rest = idx;
        const int w = rest % out_w; rest /= out_w;
        const int h = rest % out_h; rest /= out_h;
        const int c = rest % C;
        const int n = rest / C;
        // Nearest source coordinate, clamped to the input border.
        const int h_in = min(int(floorf(h * scale_h)), H - 1);
        const int w_in = min(int(floorf(w * scale_w)), W - 1);
        atomicAdd(&dx[((n * C + c) * H + h_in) * W + w_in], dy[idx]);
    }
}
/*!
 * Nearest-neighbor resize gradient, NHWC: scatter each output gradient
 * back to its nearest source pixel; atomic because mappings collide.
 * NOTE(review): dx is accumulated into — presumably zeroed by the
 * caller; confirm at the call site.
 */
template <typename T>
__global__ void _NNResizeGrad_NHWC(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const float scale_h,
    const float scale_w,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Peel (n, h, w, c) off the flat output index.
        int rest = idx;
        const int c = rest % C;     rest /= C;
        const int w = rest % out_w; rest /= out_w;
        const int h = rest % out_h;
        const int n = rest / out_h;
        // Nearest source coordinate, clamped to the input border.
        const int h_in = min(int(floorf(h * scale_h)), H - 1);
        const int w_in = min(int(floorf(w * scale_w)), W - 1);
        atomicAdd(&dx[((n * H + h_in) * W + w_in) * C + c], dy[idx]);
    }
}
/*! Nearest-neighbor resize gradient, float32: dispatch on the data format. */
template <> void NNResizeGrad<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const string& data_format,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    // Inverse resize factors (input extent over output extent).
    const float scale_h = (float)H / out_h;
    const float scale_w = (float)W / out_w;
    if (data_format == "NCHW") {
        _NNResizeGrad_NCHW<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(
                     count, N, C, H, W, out_h, out_w,
                         scale_h, scale_w, dy, dx);
        return;
    }
    if (data_format == "NHWC") {
        _NNResizeGrad_NHWC<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(
                     count, N, C, H, W, out_h, out_w,
                         scale_h, scale_w, dy, dx);
        return;
    }
    LOG(FATAL) << "Unknown data format: " << data_format;
}
/******************** vision.pooling ********************/
/*!
 * Max-pooling 2d forward, NCHW: one thread per output element.
 * y gets the window max; mask records the argmax index relative to
 * the (n, c) channel plane, for use by the backward kernel.
 */
template<typename T>
__global__ void _MAXPooling2d_NCHW(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const T* x,
    int* mask,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decompose the flat output index (NCHW).
        const int pw = idx % pool_w;
        const int ph = (idx / pool_w) % pool_h;
        const int pc = (idx / pool_w / pool_h) % C;
        const int pn = idx / pool_w / pool_h / C;
        // Window bounds in input coordinates, clipped to the image.
        int start_h = ph * stride_h - pad_h;
        int start_w = pw * stride_w - pad_w;
        const int end_h = min(start_h + kernel_h, H);
        const int end_w = min(start_w + kernel_w, W);
        start_h = max(start_h, 0);
        start_w = max(start_w, 0);
        T max_val = -FLT_MAX;
        int max_idx = -1;
        // Base of this (n, c) channel plane.
        const T* x_ptr = x + (pn * C + pc) * H * W;
        for (int h = start_h; h < end_h; ++h) {
            for (int w = start_w; w < end_w; ++w) {
                if (x_ptr[h * W + w] > max_val) {
                    max_idx = h * W + w;
                    max_val = x_ptr[max_idx];
                }
            }
        }
        y[idx] = max_val;
        mask[idx] = max_idx;
    }
}
/*!
 * Max-pooling 2d forward, NHWC: one thread per output element.
 * Unlike the NCHW variant, mask stores the *global* flat NHWC input
 * index of the argmax (the backward kernel compares against the same).
 */
template<typename T>
__global__ void _MAXPooling2d_NHWC(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const T* x,
    int* mask,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decompose the flat output index (NHWC).
        const int pc = idx % C;
        const int pw = (idx / C) % pool_w;
        const int ph = (idx / C / pool_w) % pool_h;
        const int pn = idx / C / pool_w / pool_h;
        // Window bounds in input coordinates, clipped to the image.
        int start_h = ph * stride_h - pad_h;
        int start_w = pw * stride_w - pad_w;
        const int end_h = min(start_h + kernel_h, H);
        const int end_w = min(start_w + kernel_w, W);
        start_h = max(start_h, 0);
        start_w = max(start_w, 0);
        T max_val = -FLT_MAX;
        int max_idx = -1;
        for (int h = start_h; h < end_h; ++h) {
            for (int w = start_w; w < end_w; ++w) {
                const int x_idx = ((pn * H + h) * W + w) * C + pc;
                if (x[x_idx] > max_val) {
                    max_idx = x_idx;
                    max_val = x[max_idx];
                }
            }
        }
        y[idx] = max_val;
        mask[idx] = max_idx;
    }
}
/*! Max-pooling 2d forward, float32: dispatch on the data format. */
template<> void MAXPooling2d<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const string& data_format,
    const float* x,
    int* mask,
    float* y,
    CUDAContext* ctx) {
    if (data_format == "NCHW") {
        _MAXPooling2d_NCHW<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     N, C, H, W, pool_h, pool_w, kernel_h, kernel_w,
                         stride_h, stride_w, pad_h, pad_w, x, mask, y);
        return;
    }
    if (data_format == "NHWC") {
        _MAXPooling2d_NHWC<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     N, C, H, W, pool_h, pool_w, kernel_h, kernel_w,
                         stride_h, stride_w, pad_h, pad_w, x, mask, y);
        return;
    }
    LOG(FATAL) << "Unknown data format: " << data_format;
}
/*!
 * Average-pooling 2d forward, NCHW: one thread per output element.
 * The divisor is the window area after clipping to the image border,
 * i.e. padded positions are excluded from the average.
 */
template<typename T>
__global__ void _AVGPooling2d_NCHW(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decompose the flat output index (NCHW).
        const int pw = idx % pool_w;
        const int ph = (idx / pool_w) % pool_h;
        const int pc = (idx / pool_w / pool_h) % C;
        const int pn = idx / pool_w / pool_h / C;
        // Window bounds, first clipped to the padded extent, then to the image.
        int start_h = ph * stride_h - pad_h;
        int start_w = pw * stride_w - pad_w;
        int end_h = min(start_h + kernel_h, H + pad_h);
        int end_w = min(start_w + kernel_w, W + pad_w);
        start_h = max(start_h, 0);
        start_w = max(start_w, 0);
        end_h = min(end_h, H);
        end_w = min(end_w, W);
        // Base of this (n, c) channel plane.
        const T* x_ptr = x + (pn * C + pc) * H * W;
        // Area of the fully clipped window = divisor of the average.
        const int pool_area = (end_h - start_h) * (end_w - start_w);
        T avg_val = 0;
        for (int h = start_h; h < end_h; ++h) {
            for (int w = start_w; w < end_w; ++w) {
                avg_val += x_ptr[h * W + w];
            }
        }
        y[idx] = avg_val / pool_area;
    }
}
/*!
 * Average-pooling 2d forward, NHWC: one thread per output element.
 * The divisor is the window area after clipping to the image border,
 * i.e. padded positions are excluded from the average.
 */
template<typename T>
__global__ void _AVGPooling2d_NHWC(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decompose the flat output index (NHWC).
        const int pc = idx % C;
        const int pw = (idx / C) % pool_w;
        const int ph = (idx / C / pool_w) % pool_h;
        const int pn = idx / C / pool_w / pool_h;
        // Window bounds, first clipped to the padded extent, then to the image.
        int start_h = ph * stride_h - pad_h;
        int start_w = pw * stride_w - pad_w;
        int end_h = min(start_h + kernel_h, H + pad_h);
        int end_w = min(start_w + kernel_w, W + pad_w);
        start_h = max(start_h, 0);
        start_w = max(start_w, 0);
        end_h = min(end_h, H);
        end_w = min(end_w, W);
        // Area of the fully clipped window = divisor of the average.
        const int pool_area = (end_h - start_h) * (end_w - start_w);
        T avg_val = 0;
        for (int h = start_h; h < end_h; ++h)
            for (int w = start_w; w < end_w; ++w)
                avg_val += x[((pn * H + h) * W + w) * C + pc];
        y[idx] = avg_val / pool_area;
    }
}
/*! Average-pooling 2d forward, float32: dispatch on the data format. */
template<> void AVGPooling2d<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const string& data_format,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    if (data_format == "NCHW") {
        _AVGPooling2d_NCHW<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     N, C, H, W, pool_h, pool_w, kernel_h, kernel_w,
                         stride_h, stride_w, pad_h, pad_w, x, y);
        return;
    }
    if (data_format == "NHWC") {
        _AVGPooling2d_NHWC<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     N, C, H, W, pool_h, pool_w, kernel_h, kernel_w,
                         stride_h, stride_w, pad_h, pad_w, x, y);
        return;
    }
    LOG(FATAL) << "Unknown data format: " << data_format;
}
/*!
 * Max-pooling 2d gradient, NCHW: one thread per *input* pixel.
 * Scans every pooling window covering this pixel and accumulates the
 * window's gradient iff the forward pass recorded this pixel as the
 * argmax (mask holds plane-relative indices, see _MAXPooling2d_NCHW).
 */
template<typename T>
__global__ void _MAXPooling2dGrad_NCHW(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const T* dy,
    const int* mask,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decompose the flat input index (NCHW).
        const int w = idx % W;
        const int h = (idx / W) % H;
        const int c = (idx / W / H) % C;
        const int n = idx / W / H / C;
        // allow overlapping
        const int start_ph = (h + pad_h < kernel_h) ?
            0 : (h + pad_h - kernel_h) / stride_h + 1;
        const int start_pw = (w + pad_w < kernel_w) ?
            0 : (w + pad_w - kernel_w) / stride_w + 1;
        // allow clip
        const int end_ph = min((h + pad_h) / stride_h + 1, pool_h);
        const int end_pw = min((w + pad_w) / stride_w + 1, pool_w);
        T grad = 0;
        // Base of this (n, c) plane in the pooled output / mask.
        const int offset = (n * C + c) * pool_h * pool_w;
        const T* dy_ptr = dy + offset;
        const int* mask_ptr = mask + offset;
        for (int ph = start_ph; ph < end_ph; ++ph) {
            for (int pw = start_pw; pw < end_pw; ++pw) {
                // Route the gradient only to the recorded argmax pixel.
                if (mask_ptr[ph * pool_w + pw] == (h * W + w)) {
                    grad += dy_ptr[ph * pool_w + pw];
                }
            }
        }
        dx[idx] = grad;
    }
}
/*!
 * Max-pooling 2d gradient, NHWC: one thread per *input* pixel.
 * Scans every pooling window covering this pixel and accumulates the
 * window's gradient iff the forward pass recorded this pixel as the
 * argmax (mask holds global flat NHWC indices, see _MAXPooling2d_NHWC).
 */
template<typename T>
__global__ void _MAXPooling2dGrad_NHWC(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const T* dy,
    const int* mask,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decompose the flat input index (NHWC).
        const int c = idx % C;
        const int w = (idx / C) % W;
        const int h = (idx / C / W) % H;
        const int n = idx / C / W / H;
        // allow overlapping
        const int start_ph = (h + pad_h < kernel_h) ?
            0 : (h + pad_h - kernel_h) / stride_h + 1;
        const int start_pw = (w + pad_w < kernel_w) ?
            0 : (w + pad_w - kernel_w) / stride_w + 1;
        // allow clip
        const int end_ph = min((h + pad_h) / stride_h + 1, pool_h);
        const int end_pw = min((w + pad_w) / stride_w + 1, pool_w);
        // This pixel's flat NHWC index is loop-invariant: hoisted out
        // of the window scan (it was recomputed in the inner loop).
        const int x_idx = ((n * H + h) * W + w) * C + c;
        T grad = 0;
        for (int ph = start_ph; ph < end_ph; ++ph) {
            for (int pw = start_pw; pw < end_pw; ++pw) {
                const int y_idx = ((n * pool_h + ph) * pool_w + pw) * C + c;
                // Route the gradient only to the recorded argmax pixel.
                if (mask[y_idx] == x_idx) grad += dy[y_idx];
            }
        }
        dx[idx] = grad;
    }
}
/*! Max-pooling 2d gradient, float32: dispatch on the data format. */
template<> void MAXPooling2dGrad<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const string& data_format,
    const float* dy,
    const int* mask,
    float* dx,
    CUDAContext* ctx) {
    if (data_format == "NCHW") {
        _MAXPooling2dGrad_NCHW<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     N, C, H, W, pool_h, pool_w, kernel_h, kernel_w,
                         stride_h, stride_w, pad_h, pad_w, dy, mask, dx);
        return;
    }
    if (data_format == "NHWC") {
        _MAXPooling2dGrad_NHWC<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     N, C, H, W, pool_h, pool_w, kernel_h, kernel_w,
                         stride_h, stride_w, pad_h, pad_w, dy, mask, dx);
        return;
    }
    LOG(FATAL) << "Unknown data format: " << data_format;
}
/*!
 * Average-pooling 2d gradient, NCHW: one thread per *input* pixel.
 * Each pixel sums, over every pooling window that covers it, the
 * window's gradient divided by that window's border-clipped area,
 * mirroring the divisor used by the forward kernel.
 */
template<typename T>
__global__ void _AVGPooling2dGrad_NCHW(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decompose the flat input index (NCHW).
        const int w = idx % W;
        const int h = (idx / W) % H;
        const int c = (idx / W / H) % C;
        const int n = idx / W / H / C;
        const int start_ph = (h + pad_h < kernel_h) ?
            0 : (h + pad_h - kernel_h) / stride_h + 1;
        const int start_pw = (w + pad_w < kernel_w) ?
            0 : (w + pad_w - kernel_w) / stride_w + 1;
        // BUGFIX: the window range must be computed in *padded*
        // coordinates (h + pad_h), consistent with start_ph above and
        // with _MAXPooling2dGrad_*; "h / stride_h" missed windows
        // whenever pad > 0.
        const int end_ph = min((h + pad_h) / stride_h + 1, pool_h);
        const int end_pw = min((w + pad_w) / stride_w + 1, pool_w);
        T grad = 0;
        // Base of this (n, c) plane in the pooled gradient.
        const T* dy_ptr = dy + (n * C + c) * pool_h * pool_w;
        for (int ph = start_ph; ph < end_ph; ++ph) {
            for (int pw = start_pw; pw < end_pw; ++pw) {
                int start_h = ph * stride_h - pad_h;
                int start_w = pw * stride_w - pad_w;
                int end_h = min(start_h + kernel_h, H + pad_h);
                int end_w = min(start_w + kernel_w, W + pad_w);
                // BUGFIX: clip to the image *before* taking the area,
                // so the divisor equals the one used by the forward
                // kernel (_AVGPooling2d_NCHW clips, then divides).
                start_h = max(start_h, 0);
                start_w = max(start_w, 0);
                end_h = min(end_h, H);
                end_w = min(end_w, W);
                int pool_area = (end_h - start_h) * (end_w - start_w);
                grad += (dy_ptr[ph * pool_w + pw] / pool_area);
            }
        }
        dx[idx] = grad;
    }
}
/*!
 * Average-pooling 2d gradient, NHWC: one thread per *input* pixel.
 * Each pixel sums, over every pooling window that covers it, the
 * window's gradient divided by that window's border-clipped area,
 * mirroring the divisor used by the forward kernel.
 */
template<typename T>
__global__ void _AVGPooling2dGrad_NHWC(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decompose the flat input index (NHWC).
        const int c = idx % C;
        const int w = (idx / C) % W;
        const int h = (idx / C / W) % H;
        const int n = idx / C / W / H;
        const int start_ph = (h + pad_h < kernel_h) ?
            0 : (h + pad_h - kernel_h) / stride_h + 1;
        const int start_pw = (w + pad_w < kernel_w) ?
            0 : (w + pad_w - kernel_w) / stride_w + 1;
        // BUGFIX: the window range must be computed in *padded*
        // coordinates (h + pad_h), consistent with start_ph above and
        // with _MAXPooling2dGrad_*; "h / stride_h" missed windows
        // whenever pad > 0.
        const int end_ph = min((h + pad_h) / stride_h + 1, pool_h);
        const int end_pw = min((w + pad_w) / stride_w + 1, pool_w);
        T grad = 0;
        for (int ph = start_ph; ph < end_ph; ++ph) {
            for (int pw = start_pw; pw < end_pw; ++pw) {
                int start_h = ph * stride_h - pad_h;
                int start_w = pw * stride_w - pad_w;
                int end_h = min(start_h + kernel_h, H + pad_h);
                int end_w = min(start_w + kernel_w, W + pad_w);
                // BUGFIX: clip to the image *before* taking the area,
                // so the divisor equals the one used by the forward
                // kernel (_AVGPooling2d_NHWC clips, then divides).
                start_h = max(start_h, 0);
                start_w = max(start_w, 0);
                end_h = min(end_h, H);
                end_w = min(end_w, W);
                int pool_area = (end_h - start_h) * (end_w - start_w);
                const int y_idx = ((n * pool_h + ph) * pool_w + pw) * C + c;
                grad += (dy[y_idx] / pool_area);
            }
        }
        dx[idx] = grad;
    }
}
/*! Average-pooling 2d gradient, float32: dispatch on the data format. */
template<> void AVGPooling2dGrad<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const string& data_format,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    if (data_format == "NCHW") {
        _AVGPooling2dGrad_NCHW<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     N, C, H, W, pool_h, pool_w, kernel_h, kernel_w,
                         stride_h, stride_w, pad_h, pad_w, dy, dx);
        return;
    }
    if (data_format == "NHWC") {
        _AVGPooling2dGrad_NHWC<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     N, C, H, W, pool_h, pool_w, kernel_h, kernel_w,
                         stride_h, stride_w, pad_h, pad_w, dy, dx);
        return;
    }
    LOG(FATAL) << "Unknown data format: " << data_format;
}
/******************** vision.roi_pooling ********************/
/*!
 * ROIPooling forward (NCHW): max-pool each RoI bin over the feature
 * map. y gets the bin max; mask records the argmax index relative to
 * the (batch, channel) plane for the backward pass. A negative RoI
 * batch index marks a padding RoI and yields y = 0, mask = -1.
 * rois: [num_rois, 5] = (batch_ind, x1, y1, x2, y2) in image coords.
 */
template <typename T>
__global__ void _ROIPooling(
    const int count,
    const T spatial_scale,
    const int channels,
    const int height,
    const int width,
    const int pool_h,
    const int pool_w,
    const T* x,
    const T* rois,
    int* mask,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decompose the flat output index.
        int pw = idx % pool_w;
        int ph = (idx / pool_w) % pool_h;
        int c = (idx / pool_w / pool_h) % channels;
        int n = idx / pool_w / pool_h / channels;
        const T* offset_rois = rois + n * 5;
        int roi_batch_ind = offset_rois[0];
        if (roi_batch_ind < 0) {
            // Padding RoI: zero output, sentinel mask.
            y[idx] = 0;
            mask[idx] = -1;
            continue;
        }
        // RoI box in feature-map coordinates.
        int roi_start_w = round(offset_rois[1] * spatial_scale);
        int roi_start_h = round(offset_rois[2] * spatial_scale);
        int roi_end_w = round(offset_rois[3] * spatial_scale);
        int roi_end_h = round(offset_rois[4] * spatial_scale);
        int roi_width = max(roi_end_w - roi_start_w + 1, 1);
        int roi_height = max(roi_end_h - roi_start_h + 1, 1);
        const T bin_size_h = (T)roi_height / (T)pool_h;
        const T bin_size_w = (T)roi_width / (T)pool_w;
        // This bin's window, clipped to the feature map.
        int hstart = floor(bin_size_h * ph);
        int wstart = floor(bin_size_w * pw);
        int hend = ceil(bin_size_h * (ph + 1));
        int wend = ceil(bin_size_w * (pw + 1));
        hstart = min(max(hstart + roi_start_h, 0), height);
        hend = min(max(hend + roi_start_h, 0), height);
        wstart = min(max(wstart + roi_start_w, 0), width);
        wend = min(max(wend + roi_start_w, 0), width);
        bool is_empty = (hend <= hstart) || (wend <= wstart);
        // T instead of hard-coded float, so non-float instantiations
        // of this template keep full precision.
        T max_val = is_empty ? 0 : -FLT_MAX;
        int max_idx = -1;
        // BUGFIX: use a local offset pointer. The old "x += offset;"
        // advanced the kernel argument itself, so later iterations of
        // the grid-stride CUDA_1D_KERNEL_LOOP read from an already
        // shifted base and returned garbage.
        const T* offset_x = x +
            ((roi_batch_ind * channels + c) * height * width);
        for (int h = hstart; h < hend; ++h) {
            for (int w = wstart; w < wend; ++w) {
                const int x_idx = h * width + w;
                if (offset_x[x_idx] > max_val) {
                    max_val = offset_x[x_idx];
                    max_idx = x_idx;
                }
            }
        }
        y[idx] = max_val;
        mask[idx] = max_idx;
    }
}
/*! ROIPooling forward, float32: launch the single-layout kernel. */
template<> void ROIPooling<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int num_rois,
    const float spatial_scale,
    const float* x,
    const float* rois,
    int* mask,
    float* y,
    CUDAContext* ctx) {
    // One thread per pooled output element (num_rois * C * pool_h * pool_w).
    _ROIPooling<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 count, spatial_scale, C, H, W,
                     pool_h, pool_w, x, rois, mask, y);
}
/*!
 * ROIPooling gradient (NCHW): one thread per *input* feature-map
 * element. For every RoI from the same batch image that contains this
 * pixel, the thread scans the pooled bins whose window could cover it
 * and accumulates dy where the forward mask recorded this pixel as
 * the argmax.
 */
template <typename T>
__global__ void _ROIPoolingGrad(
    const int count,
    const int num_rois,
    const T spatial_scale,
    const int channels,
    const int height,
    const int width,
    const int pool_h,
    const int pool_w,
    const T* dy,
    const T* rois,
    const int* mask,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decompose the flat input index (NCHW).
        int w = idx % width;
        int h = (idx / width) % height;
        int c = (idx / width / height) % channels;
        int n = idx / width / height / channels;
        T gradient = 0;
        for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
            const T* offset_rois = rois + roi_n * 5;
            int roi_batch_ind = offset_rois[0];
            // Skip RoIs from other batch images (and padding RoIs,
            // whose batch index is negative).
            if (n != roi_batch_ind) continue;
            // RoI box in feature-map coordinates.
            int roi_start_w = round(offset_rois[1] * spatial_scale);
            int roi_start_h = round(offset_rois[2] * spatial_scale);
            int roi_end_w = round(offset_rois[3] * spatial_scale);
            int roi_end_h = round(offset_rois[4] * spatial_scale);
            const bool in_roi = (w >= roi_start_w &&
                                 w <= roi_end_w &&
                                 h >= roi_start_h &&
                                 h <= roi_end_h);
            if (!in_roi) continue;
            // Base of this (roi, c) plane in dy / mask.
            int y_offset = (roi_n * channels + c) * pool_h * pool_w;
            const T* offset_dy = dy + y_offset;
            const int* offset_mask = mask + y_offset;
            int roi_width = max(roi_end_w - roi_start_w + 1, 1);
            int roi_height = max(roi_end_h - roi_start_h + 1, 1);
            const T bin_size_h = (T)roi_height / (T)pool_h;
            const T bin_size_w = (T)roi_width / (T)pool_w;
            // Bins whose window could contain (h, w), clipped to the grid.
            int phstart = floor(static_cast<T>(h - roi_start_h) / bin_size_h);
            int phend = ceil(static_cast<T>(h - roi_start_h + 1) / bin_size_h);
            int pwstart = floor(static_cast<T>(w - roi_start_w) / bin_size_w);
            int pwend = ceil(static_cast<T>(w - roi_start_w + 1) / bin_size_w);
            phstart = min(max(phstart, 0), pool_h);
            phend = min(max(phend, 0), pool_h);
            pwstart = min(max(pwstart, 0), pool_w);
            pwend = min(max(pwend, 0), pool_w);
            for (int ph = phstart; ph < phend; ++ph) {
                for (int pw = pwstart; pw < pwend; ++pw) {
                    int pool_idx = ph * pool_w + pw;
                    // Route dy only where this pixel was the argmax.
                    if (offset_mask[pool_idx] == (h * width + w)) {
                        gradient += offset_dy[pool_idx];
                    }
                }
            }
        }
        dx[idx] = gradient;
    }
}
/*! ROIPooling gradient, float32: launch the single-layout kernel. */
template<> void ROIPoolingGrad<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int num_rois,
    const float spatial_scale,
    const float* dy,
    const float* rois,
    const int* mask,
    float* dx,
    CUDAContext* ctx) {
    // One thread per input feature-map element (N * C * H * W).
    _ROIPoolingGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 count, num_rois, spatial_scale, C, H, W,
                     pool_h, pool_w, dy, rois, mask, dx);
}
/******************** vision.roi_align ********************/
/*!
 * Bilinear interpolation of Xdata (a single H x W plane) at the
 * continuous coordinate (y, x). Points more than one pixel outside
 * the plane contribute 0; coordinates within [-1, 0] are snapped to
 * the border, and sampling at the last row/column degenerates to the
 * edge value.
 */
template <typename T>
__device__ T _ROIAlignInterpolate(
    const T* Xdata,
    const int height,
    const int width,
    T y,
    T x) {
    // Fully outside the (1-pixel-extended) plane: contributes nothing.
    if (y < -1.0 || y > height || x < -1.0 || x > width) return 0;
    if (y <= 0) y = 0;
    if (x <= 0) x = 0;
    int y_low = (int)y;
    int x_low = (int)x;
    int y_high;
    int x_high;
    // Clamp at the bottom/right border: both corners collapse onto
    // the last row/column so the interpolation returns the edge value.
    if (y_low >= height - 1) {
        y_high = y_low = height - 1;
        y = (T)y_low;
    } else {
        y_high = y_low + 1;
    }
    if (x_low >= width - 1) {
        x_high = x_low = width - 1;
        x = (T)x_low;
    } else {
        x_high = x_low + 1;
    }
    // Bilinear weights from the fractional offsets.
    T ly = y - y_low;
    T lx = x - x_low;
    T hy = 1. - ly, hx = 1. - lx;
    T v1 = Xdata[y_low * width + x_low];
    T v2 = Xdata[y_low * width + x_high];
    T v3 = Xdata[y_high * width + x_low];
    T v4 = Xdata[y_high * width + x_high];
    T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
    T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
    return val;
}
/*!
 * ROIAlign forward (NCHW): each output element averages
 * roi_bin_grid_h x roi_bin_grid_w bilinearly interpolated samples
 * taken inside its RoI bin. A negative RoI batch index marks a
 * padding RoI and yields 0.
 * rois: [num_rois, 5] = (batch_ind, x1, y1, x2, y2) in image coords.
 */
template <typename T>
__global__ void _ROIAlign(
    const int count,
    const float spatial_scale,
    const int channels,
    const int height,
    const int width,
    const int pool_h,
    const int pool_w,
    const int sampling_ratio,
    const T* Xdata,
    const T* rois,
    T* Ydata) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decompose the flat output index.
        int pw = idx % pool_w;
        int ph = (idx / pool_w) % pool_h;
        int c = (idx / pool_w / pool_h) % channels;
        int n = idx / pool_w / pool_h / channels;
        const T* offset_rois = rois + n * 5;
        int roi_batch_ind = offset_rois[0];
        if (roi_batch_ind < 0) {
            // Padding RoI.
            Ydata[idx] = 0;
            continue;
        }
        // RoI box in (continuous) feature-map coordinates.
        T roi_start_w = offset_rois[1] * spatial_scale;
        T roi_start_h = offset_rois[2] * spatial_scale;
        T roi_end_w = offset_rois[3] * spatial_scale;
        T roi_end_h = offset_rois[4] * spatial_scale;
        // Force a minimum 1x1 RoI to avoid degenerate bins.
        T roi_width = max(roi_end_w - roi_start_w, (T)1.);
        T roi_height = max(roi_end_h - roi_start_h, (T)1.);
        T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pool_h);
        T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pool_w);
        const T* offset_Xdata = Xdata +(roi_batch_ind * channels + c) * height * width;
        // Sampling grid: fixed if sampling_ratio > 0, else adaptive
        // (about one sample per feature-map cell of the bin).
        int roi_bin_grid_h = (sampling_ratio > 0) ?
            sampling_ratio : ceil(roi_height / pool_h);
        int roi_bin_grid_w = (sampling_ratio > 0) ?
            sampling_ratio : ceil(roi_width / pool_w);
        const T num_bin_grids = roi_bin_grid_h * roi_bin_grid_w;
        T output_val = 0.;
        for (int iy = 0; iy < roi_bin_grid_h; iy++) {
            // Sample at the center of each sub-cell (hence the +0.5).
            const T y = roi_start_h + ph * bin_size_h +
                static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h);
            for (int ix = 0; ix < roi_bin_grid_w; ix++) {
                const T x = roi_start_w + pw * bin_size_w +
                    static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
                T val = _ROIAlignInterpolate(offset_Xdata, height, width, y, x);
                output_val += val;
            }
        }
        // Average over all samples in the bin.
        output_val /= num_bin_grids;
        Ydata[idx] = output_val;
    }
}
/*! ROIAlign forward, float32: launch the single-layout kernel. */
template<> void ROIAlign<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int num_rois,
    const float spatial_scale,
    const int sampling_ratio,
    const float* x,
    const float* rois,
    float* y,
    CUDAContext* ctx) {
    // One thread per pooled output element (num_rois * C * pool_h * pool_w).
    _ROIAlign<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 count, spatial_scale, C, H, W, pool_h, pool_w,
                     sampling_ratio, x, rois, y);
}
/*!
 * Gradient counterpart of _ROIAlignInterpolate: for the continuous
 * sample point (y, x), emit the four bilinear corner weights
 * (w1..w4) and their integer corner coordinates. Out-of-plane points
 * return zero weights and -1 coordinates so callers can skip them.
 */
template <typename T>
__device__ void _ROIAlignInterpolateGrad(
    const int height,
    const int width,
    T y,
    T x,
    T& w1,
    T& w2,
    T& w3,
    T& w4,
    int& x_low,
    int& x_high,
    int& y_low,
    int& y_high) {
    // Fully outside the (1-pixel-extended) plane: nothing to route.
    if (y < -1.0 || y > height ||
        x < -1.0 || x > width) {
        w1 = w2 = w3 = w4 = 0.;
        x_low = x_high = y_low = y_high = -1;
        return;
    }
    if (y <= 0) y = 0;
    if (x <= 0) x = 0;
    y_low = (int)y;
    x_low = (int)x;
    // Clamp at the bottom/right border (mirrors the forward helper).
    if (y_low >= height - 1) {
        y_high = y_low = height - 1;
        y = (T)y_low;
    } else {
        y_high = y_low + 1;
    }
    if (x_low >= width - 1) {
        x_high = x_low = width - 1;
        x = (T)x_low;
    } else {
        x_high = x_low + 1;
    }
    // Bilinear weights from the fractional offsets.
    T ly = y - y_low;
    T lx = x - x_low;
    T hy = 1. - ly, hx = 1. - lx;
    w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
    return;
}
/*!
 * ROIAlign gradient (NCHW): one thread per pooled output element.
 * Each thread redistributes its bin's gradient to the four corners of
 * every bilinear sample it took in the forward pass, scaled by the
 * bilinear weights and averaged over the sampling grid. atomicAdd is
 * required because bins and RoIs overlap on dx.
 * NOTE(review): dx is accumulated into — presumably zeroed by the
 * caller; confirm at the call site.
 */
template <typename T>
__global__ void _ROIAlignGrad(
    const int count,
    const int num_rois,
    const T spatial_scale,
    const int channels,
    const int height,
    const int width,
    const int pool_h,
    const int pool_w,
    const int sampling_ratio,
    const T* dYdata,
    const T* rois,
    T* dXdata) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decompose the flat pooled-output index.
        int pw = idx % pool_w;
        int ph = (idx / pool_w) % pool_h;
        int c = (idx / pool_w / pool_h) % channels;
        int n = idx / pool_w / pool_h / channels;
        const T* offset_rois = rois + n * 5;
        int roi_batch_ind = offset_rois[0];
        // Padding RoI: contributes no gradient.
        if (roi_batch_ind < 0) continue;
        // RoI box in (continuous) feature-map coordinates; mirrors the
        // forward kernel exactly.
        T roi_start_w = offset_rois[1] * spatial_scale;
        T roi_start_h = offset_rois[2] * spatial_scale;
        T roi_end_w = offset_rois[3] * spatial_scale;
        T roi_end_h = offset_rois[4] * spatial_scale;
        T roi_width = max(roi_end_w - roi_start_w, (T)1.);
        T roi_height = max(roi_end_h - roi_start_h, (T)1.);
        T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pool_h);
        T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pool_w);
        T* offset_dXdata = dXdata +
            (roi_batch_ind * channels + c) * height * width;
        int y_offset = (n * channels + c) * pool_h * pool_w;
        const T* offset_dYdata = dYdata + y_offset;
        // Gradient flowing into this bin.
        const T dYdata_this_bin = offset_dYdata[ph * pool_w + pw];
        // Same sampling grid as the forward pass.
        int roi_bin_grid_h = (sampling_ratio > 0) ?
            sampling_ratio : ceil(roi_height / pool_h);
        int roi_bin_grid_w = (sampling_ratio > 0) ?
            sampling_ratio : ceil(roi_width / pool_w);
        const T num_bin_grids = roi_bin_grid_h * roi_bin_grid_w;
        for (int iy = 0; iy < roi_bin_grid_h; iy++) {
            const T y = roi_start_h + ph * bin_size_h +
                static_cast<T>(iy + .5f) * bin_size_h /
                    static_cast<T>(roi_bin_grid_h);
            for (int ix = 0; ix < roi_bin_grid_w; ix++) {
                const T x = roi_start_w + pw * bin_size_w +
                    static_cast<T>(ix + .5f) * bin_size_w /
                        static_cast<T>(roi_bin_grid_w);
                T w1, w2, w3, w4;
                int x_low, x_high, y_low, y_high;
                // Recover the bilinear corner weights for this sample.
                _ROIAlignInterpolateGrad(
                    height, width, y, x, w1, w2, w3, w4,
                        x_low, x_high, y_low, y_high);
                T g1 = dYdata_this_bin * w1 / num_bin_grids;
                T g2 = dYdata_this_bin * w2 / num_bin_grids;
                T g3 = dYdata_this_bin * w3 / num_bin_grids;
                T g4 = dYdata_this_bin * w4 / num_bin_grids;
                // Negative corners mark an out-of-plane sample.
                if (x_low >= 0 && x_high >= 0
                        && y_low >= 0 && y_high >= 0) {
                    atomicAdd(
                        offset_dXdata + y_low * width + x_low,
                            static_cast<T>(g1));
                    atomicAdd(
                        offset_dXdata + y_low * width + x_high,
                            static_cast<T>(g2));
                    atomicAdd(
                        offset_dXdata + y_high * width + x_low,
                            static_cast<T>(g3));
                    atomicAdd(
                        offset_dXdata + y_high * width + x_high,
                            static_cast<T>(g4));
                }
            }
        }
    }
}
/*! ROIAlign gradient, float32: launch the single-layout kernel. */
template<> void ROIAlignGrad<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int num_rois,
    const float spatial_scale,
    const int sampling_ratio,
    const float* dy,
    const float* rois,
    float* dx,
    CUDAContext* ctx) {
    // One thread per pooled output element; gradients are scattered
    // into dx with atomics inside the kernel.
    _ROIAlignGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 count, num_rois, spatial_scale, C, H, W,
                     pool_h, pool_w, sampling_ratio, dy, rois, dx);
}
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA | c10ae9dbb2423bd15baf911141a52ae08bc6e40b.cu | #ifdef WITH_CUDA
#include <cmath>
#include "core/context_cuda.h"
#include "core/tensor.h"
#include "utils/cuda_device.h"
#include "utils/op_kernel.h"
#include "utils/math_functions.h"
#include "utils/cast.h"
namespace dragon {
namespace kernel {
/******************** activation.dropout ********************/
template<typename T>
__global__ void _Dropout(
const int count,
const uint32_t thresh,
const T scale,
const T* x,
const uint32_t* mask,
T* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
y[idx] = x[idx] * (mask[idx] > thresh) * scale;
}
}
template<> void Dropout<float, CUDAContext>(
const int count,
float prob,
float scale,
const float* x,
uint32_t* mask,
float* y,
CUDAContext* ctx) {
uint32_t thresh = static_cast<uint32_t>(UINT_MAX * prob);
math::RandomUniform<uint32_t, CUDAContext>(
count, float(0), float(UINT_MAX), mask, ctx);
_Dropout<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(
count, thresh, scale, x, mask, y);
}
template <typename T>
__global__ void _DropoutGrad(
const int count,
const uint32_t thresh,
const T scale,
const T* dy,
const uint32_t* mask,
T* dx) {
CUDA_1D_KERNEL_LOOP(idx, count) {
dx[idx] = dy[idx] * (mask[idx] > thresh) * scale;
}
}
template<> void DropoutGrad<float, CUDAContext>(
const int count,
float prob,
float scale,
const float* dy,
const uint32_t* mask,
float* dx,
CUDAContext* ctx) {
uint32_t thresh = static_cast<uint32_t>(UINT_MAX * prob);
_DropoutGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(
count, thresh, scale, dy, mask, dx);
}
/******************** activation.prelu ********************/
template <typename T>
__global__ void _PRelu(
const int count,
const int channels,
const int dim,
const T* x,
const T* w,
T* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
y[idx] = (x[idx] > 0) * x[idx] +
(x[idx] < 0) * x[idx] * w[0];
}
}
template <typename T>
__global__ void _PReluNCHW(
const int count,
const int channels,
const int dim,
const T* x,
const T* w,
T* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int c = (idx / dim) % channels;
y[idx] = (x[idx] > 0) * x[idx] +
(x[idx] < 0) * x[idx] * w[c];
}
}
template <typename T>
__global__ void _PReluNHWC(
const int count,
const int channels,
const int dim,
const T* x,
const T* w,
T* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int c = idx % channels;
y[idx] = (x[idx] > 0) * x[idx] +
(x[idx] < 0) * x[idx] * w[c];
}
}
template<> void PRelu<float, CUDAContext>(const int count,
const int channels,
const int dim,
const bool channel_shared,
const string& data_format,
const float* x,
const float* w,
float* y,
CUDAContext* ctx) {
if (channel_shared) {
_PRelu<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
channels, dim, x, w, y);
} else {
if (data_format == "NCHW") {
_PReluNCHW<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
channels, dim, x, w, y);
} else if (data_format == "NHWC") {
_PReluNHWC<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
channels, dim, x, w, y);
} else LOG(FATAL) << "Unknown data format: " << data_format;
}
}
// Input gradient of shared-slope PRelu: dx = dy * (1 if x > 0 else w[0]).
// Note x <= 0 (not x < 0) routes the gradient at exactly zero through the slope.
template <typename T>
__global__ void _PReluGrad(
    const int count,
    const int channels,
    const int dim,
    const T* dy,
    const T* x,
    const T* w,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        dx[idx] = dy[idx] * (
            (x[idx] > 0) + (x[idx] <= 0) * w[0]
        );
    }
}
// Input gradient of per-channel PRelu, NCHW layout.
template <typename T>
__global__ void _PReluGradNCHW(
    const int count,
    const int channels,
    const int dim,
    const T* dy,
    const T* x,
    const T* w,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int c = (idx / dim) % channels;
        dx[idx] = dy[idx] * (
            (x[idx] > 0) + (x[idx] <= 0) * w[c]
        );
    }
}
// Input gradient of per-channel PRelu, NHWC layout.
template <typename T>
__global__ void _PReluGradNHWC(
    const int count,
    const int channels,
    const int dim,
    const T* dy,
    const T* x,
    const T* w,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int c = idx % channels;
        dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[c]);
    }
}
// Dispatches to the PRelu input-gradient kernel matching the
// slope-sharing mode and data format.
template<> void PReluGrad<float, CUDAContext>(
    const int count,
    const int channels,
    const int dim,
    const bool channel_shared,
    const string& data_format,
    const float* dy,
    const float* x,
    const float* w,
    float* dx,
    CUDAContext* ctx) {
    if (channel_shared) {
        _PReluGrad<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     channels, dim, dy, x, w, dx);
    } else {
        if (data_format == "NCHW") {
            _PReluGradNCHW<float>
                << < CUDA_BLOCKS(count), CUDA_THREADS,
                     0, ctx->cuda_stream() >> >(count,
                         channels, dim, dy, x, w, dx);
        } else if (data_format == "NHWC") {
            _PReluGradNHWC<float>
                << < CUDA_BLOCKS(count), CUDA_THREADS,
                     0, ctx->cuda_stream() >> >(count,
                         channels, dim, dy, x, w, dx);
        } else LOG(FATAL) << "Unknown data format: " << data_format;
    }
}
// Reduces the slope gradient over the row (batch) dimension:
// bcast_dw[idx] = sum_n dy * x * (x <= 0) at idx + n * row_offset.
// Each thread serially accumulates its column across all rows.
template <typename T>
__global__ void _PReluWGradBcast(
    const int count,
    const int rows,
    const int row_offset,
    const T* dy,
    const T* x,
    T* bcast_dw) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        bcast_dw[idx] = dy[idx] * x[idx] * (x[idx] <= 0);
        for (int n = 1; n < rows; n++) {
            const int cur_idx = idx + n * row_offset;
            bcast_dw[idx] +=
                dy[cur_idx] * x[cur_idx] * (x[cur_idx] <= 0);
        }
    }
}
// Slope (w) gradient: first reduce over rows into bcast_dw, then
// reduce over the spatial dim. Note beta = 1.0 in the Gemv calls and
// AddScalar: results are ACCUMULATED into dw, not overwritten —
// the caller is expected to have prepared dw.
template<> void PReluWGrad<float, CUDAContext>(
    const int rows,
    const int row_offset,
    const int channels,
    const int dim,
    const bool channel_shared,
    const string& data_format,
    const float* dy,
    const float* x,
    const float* multiplier,
    float* bcast_dw,
    float* dw,
    CUDAContext* ctx) {
    const int cdim = channels * dim;
    _PReluWGradBcast<float>
        << < CUDA_BLOCKS(cdim), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 cdim, rows, row_offset, dy, x, bcast_dw);
    if (channel_shared) {
        // Single shared slope: full reduction via dot with the ones vector.
        float w_sum;
        math::Dot<float, CUDAContext>(channels * dim,
            bcast_dw, multiplier, &w_sum, ctx);
        math::AddScalar<float, CUDAContext>(1, w_sum, dw, ctx);
    } else {
        if (data_format == "NCHW") {
            math::Gemv<float, CUDAContext>(
                CblasNoTrans, channels, dim,
                    1.0, bcast_dw, multiplier, 1.0, dw, ctx);
        } else if (data_format == "NHWC") {
            math::Gemv<float, CUDAContext>(
                CblasTrans, dim, channels,
                    1.0, bcast_dw, multiplier, 1.0, dw, ctx);
        } else LOG(FATAL) << "Unknown data format: " << data_format;
    }
}
/******************** activation.elu ********************/
// ELU forward: y = x for x > 0, alpha * (exp(x) - 1) otherwise.
template <typename T>
__global__ void _Elu(
    const int count,
    const T* x,
    const float alpha,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        y[idx] = x[idx] > 0 ? x[idx] :
            alpha * (exp(x[idx]) - 1);
    }
}
// Launches the ELU forward kernel on the context's stream.
template<> void Elu<float, CUDAContext>(
    const int count,
    const float alpha,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _Elu<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, x, alpha, y);
}
// ELU backward expressed via the OUTPUT y:
// dx = dy           for y > 0
// dx = dy * (y + alpha) for y <= 0   (since y = alpha*(e^x - 1) there)
template <typename T>
__global__ void _EluGrad(
    const int count,
    const float alpha,
    const T* dy,
    const T* y,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        dx[idx] = dy[idx] * (
            (y[idx] > 0) + (alpha + y[idx]) * (y[idx] <= 0)
        );
    }
}
// Launches the ELU backward kernel on the context's stream.
template<> void EluGrad<float, CUDAContext>(
    const int count,
    const float alpha,
    const float* dy,
    const float* y,
    float* dx,
    CUDAContext* ctx) {
    _EluGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, alpha, dy, y, dx);
}
/******************** activation.relu ********************/
// (Leaky) ReLU forward: y = x for x > 0, slope * x otherwise.
// slope == 0 gives the plain ReLU.
template <typename T>
__global__ void _Relu(
    const int count,
    const float slope,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        y[idx] = x[idx] > 0 ? x[idx] : x[idx] * slope;
    }
}
// Launches the (leaky) ReLU forward kernel on the context's stream.
template<> void Relu<float, CUDAContext>(
    const int count,
    const float slope,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _Relu<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, slope, x, y);
}
// (Leaky) ReLU backward via the OUTPUT y: dx = dy for y > 0,
// slope * dy for y <= 0.
template <typename T>
__global__ void _ReluGrad(
    const int count,
    const float slope,
    const T* dy,
    const T* y,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        dx[idx] = dy[idx] * (
            (y[idx] > 0) + slope * (y[idx] <= 0)
        );
    }
}
// Launches the (leaky) ReLU backward kernel on the context's stream.
template<> void ReluGrad<float, CUDAContext>(
    const int count,
    const float slope,
    const float* dy,
    const float* y,
    float* dx,
    CUDAContext* ctx) {
    _ReluGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, slope, dy, y, dx);
}
/******************** activation.selu ********************/
// SELU forward with hard-coded constants:
// y = 1.0507 * x for x > 0, 1.7581 * (exp(x) - 1) otherwise.
// (1.7581 is lambda * alpha, already multiplied through.)
template <typename T>
__global__ void _SElu(
    const int count,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        y[idx] = x[idx] > 0 ? 1.0507 * x[idx] :
            1.7581 * (exp(x[idx]) - 1);
    }
}
// Launches the SELU forward kernel on the context's stream.
template<> void SElu<float, CUDAContext>(
    const int count,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _SElu<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, x, y);
}
// SELU backward via the OUTPUT y: dx = 1.0507 * dy for y > 0,
// (1.7581 + y) * dy otherwise (mirrors the ELU-style identity).
template <typename T>
__global__ void _SEluGrad(
    const int count,
    const T* dy,
    const T* y,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        dx[idx] = y[idx] > 0 ? 1.0507 * dy[idx] :
            (1.7581 + y[idx]) * dy[idx];
    }
}
// Launches the SELU backward kernel on the context's stream.
template<> void SEluGrad<float, CUDAContext>(
    const int count,
    const float* dy,
    const float* y,
    float* dx,
    CUDAContext* ctx) {
    _SEluGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, dy, y, dx);
}
/******************** activation.sigmoid ********************/
// Scalar logistic function: 1 / (1 + e^{-x}).
template <typename T>
__device__ T _SigmoidUnit(const T x) {
    return T(1) / (T(1) + exp(-x));
}
// Element-wise sigmoid forward.
template <typename T>
__global__ void _Sigmoid(
    const int n,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, n) {
        y[idx] = _SigmoidUnit<T>(x[idx]);
    }
}
// Launches the sigmoid forward kernel on the context's stream.
template<> void Sigmoid<float, CUDAContext>(
    const int count,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _Sigmoid<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, x, y);
}
// Sigmoid backward via the OUTPUT y: dx = dy * y * (1 - y).
template <typename T>
__global__ void _SigmoidGrad(
    const int count,
    const T* dy,
    const T* y,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        dx[idx] = dy[idx] * y[idx] * (1 - y[idx]);
    }
}
// Launches the sigmoid backward kernel on the context's stream.
template<> void SigmoidGrad<float, CUDAContext>(
    const int count,
    const float* dy,
    const float* y,
    float* dx,
    CUDAContext* ctx) {
    _SigmoidGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, dy, y, dx);
}
/******************** activation.softmax ********************/
// Per-position max over the class axis (for numerical stability):
// scale[o, i] = max_c x[o, c, i]. One thread per (outer, inner) pair.
template <typename T>
__global__ void _SoftmaxMaxClass(
    const int outer_dim,
    const int classes,
    const int inner_dim,
    const T* x,
    T* scale) {
    CUDA_1D_KERNEL_LOOP(idx, outer_dim * inner_dim) {
        int o_idx = idx / inner_dim;
        int i_idx = idx % inner_dim;
        T max_val = -FLT_MAX;
        for (int c = 0; c < classes; c++)
            max_val = max(
                x[(o_idx * classes + c) * inner_dim + i_idx], max_val
            );
        scale[idx] = max_val;
    }
}
// In-place broadcast subtract of scale[o, i] from every class of y.
template <typename T>
__global__ void _SoftmaxSubtract(
    const int count,
    const int classes,
    const int inner_dim,
    const T* scale,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        int o_idx = idx / inner_dim / classes;
        int i_idx = idx % inner_dim;
        y[idx] -= scale[o_idx * inner_dim + i_idx];
    }
}
// In-place element-wise exponentiation.
template <typename T>
__global__ void _SoftmaxExp(
    const int count,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        y[idx] = exp(y[idx]);
    }
}
// Per-position sum over the class axis: scale[o, i] = sum_c y[o, c, i].
template <typename T>
__global__ void _SoftmaxSumClass(
    const int outer_dim,
    const int classes,
    const int inner_dim,
    const T* y,
    T* scale) {
    CUDA_1D_KERNEL_LOOP(idx, outer_dim * inner_dim) {
        int o_idx = idx / inner_dim;
        int i_idx = idx % inner_dim;
        T sum = 0;
        for (int c = 0; c < classes; c++)
            sum += y[(o_idx * classes + c) * inner_dim + i_idx];
        scale[idx] = sum;
    }
}
// In-place broadcast divide of y by scale[o, i].
template <typename T>
__global__ void _SoftmaxDiv(
    const int count,
    const int classes,
    const int inner_dim,
    const T* scale,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        int o_idx = idx / inner_dim / classes;
        int i_idx = idx % inner_dim;
        y[idx] /= scale[o_idx * inner_dim + i_idx];
    }
}
// Numerically-stable softmax pipeline: max -> subtract -> exp -> sum -> div.
// NOTE(review): _SoftmaxSubtract onward operate on y in place, so the
// caller is expected to have copied x into y beforehand — confirm at callers.
template<> void Softmax<float, CUDAContext>(
    const int count,
    const int classes,
    const int outer_dim,
    const int inner_dim,
    const float* sum_multiplier,
    const float* x,
    float* scale,
    float* y,
    CUDAContext* ctx) {
    const int num_preds = inner_dim * outer_dim;
    _SoftmaxMaxClass<float>
        << < CUDA_BLOCKS(num_preds), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 outer_dim, classes, inner_dim, x, scale);
    _SoftmaxSubtract<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 count, classes, inner_dim, scale, y);
    _SoftmaxExp<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, y);
    _SoftmaxSumClass<float>
        << < CUDA_BLOCKS(num_preds), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 outer_dim, classes, inner_dim, y, scale);
    _SoftmaxDiv<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 count, classes, inner_dim, scale, y);
}
// Per-position dot product over the class axis:
// scale[o, i] = sum_c y[o, c, i] * dy[o, c, i].
template <typename T>
__global__ void _SoftmaxDot(
    const int outer_dim,
    const int classes,
    const int inner_dim,
    const T* dy,
    const T* y,
    T* scale) {
    CUDA_1D_KERNEL_LOOP(idx, outer_dim * inner_dim) {
        int o_idx = idx / inner_dim;
        int i_idx = idx % inner_dim;
        T dot = 0;
        for (int c = 0; c < classes; c++)
            dot += (
                y[(o_idx * classes + c) * inner_dim + i_idx] *
                    dy[(o_idx * classes + c) * inner_dim + i_idx]
            );
        scale[idx] = dot;
    }
}
// Softmax backward: dx = (dy - dot(dy, y)) * y.
// NOTE(review): _SoftmaxSubtract reads/writes dx in place, so the caller
// is expected to have copied dy into dx beforehand — confirm at callers.
template<> void SoftmaxGrad<float, CUDAContext>(
    const int count,
    const int classes,
    const int outer_dim,
    const int inner_dim,
    const float* sum_multiplier,
    const float* dy,
    const float* y,
    float* scale,
    float* dx,
    CUDAContext* ctx) {
    const int num_preds = inner_dim * outer_dim;
    _SoftmaxDot<float>
        << < CUDA_BLOCKS(num_preds), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 outer_dim, classes, inner_dim, dy, y, scale);
    _SoftmaxSubtract<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 count, classes,inner_dim, scale, dx);
    math::Mul<float, CUDAContext>(count, dx, y, dx, ctx);
}
/******************** activation.tanh ********************/
// Element-wise hyperbolic tangent forward.
template <typename T>
__global__ void _Tanh(
    const int count,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        y[i] = tanh(x[i]);
    }
}
// Launches the tanh forward kernel on the context's stream.
template<> void Tanh<float, CUDAContext>(
    const int count,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _Tanh<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, x, y);
}
// Tanh backward via the OUTPUT y: dx = dy * (1 - y^2).
template <typename T>
__global__ void _TanhGrad(
    const int count,
    const T* dy,
    const T* y,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        dx[i] = dy[i] * (1 - y[i] * y[i]);
    }
}
// Launches the tanh backward kernel on the context's stream.
template<> void TanhGrad<float, CUDAContext>(
    const int count,
    const float* dy,
    const float* y,
    float* dx,
    CUDAContext* ctx) {
    _TanhGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, dy, y, dx);
}
/******************** arithmetic.scale ********************/
// Channel-wise scale without bias: y = alpha[c] * x,
// where c = (idx / inner_dim) % scale_dim.
template <typename T>
__global__ void _AffineWithOBias(
    const int count,
    const int scale_dim,
    const int inner_dim,
    const T* x,
    const T* alpha,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int scale_idx = (idx / inner_dim) % scale_dim;
        y[idx] = alpha[scale_idx] * x[idx];
    }
}
// Channel-wise scale with bias: y = alpha[c] * x + beta[c].
template <typename T>
__global__ void _AffineWithBias(
    const int count,
    const int scale_dim,
    const int inner_dim,
    const T* x,
    const T* alpha,
    const T* beta,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int scale_idx = (idx / inner_dim) % scale_dim;
        y[idx] = alpha[scale_idx] * x[idx] + beta[scale_idx];
    }
}
// Affine forward: picks the biased or bias-free kernel depending on
// whether beta is supplied. beta_multiplier is unused on this path.
template<> void Affine<float, CUDAContext>(
    const int count,
    const int outer_dim,
    const int scale_dim,
    const int inner_dim,
    const float* x,
    const float* alpha,
    const float* beta,
    const float* beta_multiplier,
    float* y,
    CUDAContext* ctx) {
    if (beta != nullptr) {
        _AffineWithBias<float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(
                     count, scale_dim, inner_dim,
                         x, alpha, beta, y);
    } else {
        _AffineWithOBias<float>
            << <CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >> >(
                    count, scale_dim, inner_dim,
                        x, alpha, y);
    }
}
// Affine input gradient: dx = alpha[c] * dy — reuses the bias-free
// forward kernel since the operation is identical.
template <> void AffineGrad<float, CUDAContext>(
    const int count,
    const int outer_dim,
    const int scale_dim,
    const int inner_dim,
    const float* dy,
    const float* alpha,
    float* dx,
    CUDAContext* ctx) {
    _AffineWithOBias<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 count, scale_dim, inner_dim,
                     dy, alpha, dx);
}
/******************** arithmetic.clip ********************/
// Clamps each element of x into [low, high] and records, per element,
// whether the value passed through unchanged (mask = 1) or was clipped
// (mask = 0) — the mask is the gradient of the clip.
//
// Bug fix: the original wrote
//     y[idx] = x[idx] > high ? high : x[idx];
//     y[idx] = x[idx] < low  ? low  : x[idx];
// so the second store overwrote the first and values above `high` were
// never clamped (x > high implies x >= low, hence y ended up as x).
// Both bounds are now applied to a single running value.
template <typename T>
__global__ void _Clip(
    const int count,
    const T low,
    const T high,
    const T* x,
    T* mask,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const T v = x[idx];
        // 1 inside [low, high], 0 outside (element was clipped).
        mask[idx] = (v > high || v < low) ? T(0) : T(1);
        // Apply the upper bound first, then the lower bound.
        T clipped = v > high ? high : v;
        if (clipped < low) clipped = low;
        y[idx] = clipped;
    }
}
// Launches the clip kernel on the context's stream; mask receives the
// per-element pass-through indicator used by the backward pass.
template <> void Clip<float, CUDAContext>(
    const int count,
    const float low,
    const float high,
    const float* x,
    float* mask,
    float* y,
    CUDAContext* ctx) {
    _Clip<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count,
                 low, high, x, mask, y);
}
/******************** control_flow.compare ********************/
// Element-wise approximate equality: y[i] = 1 when |a[i] - b[i]| is
// below FLT_EPSILON, else 0.
template <typename T>
__global__ void _Equal(
    const int count,
    const T* a,
    const T* b,
    T* y) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const T diff = a[i] - b[i];
        if (fabs(diff) < FLT_EPSILON) y[i] = 1.0;
        else y[i] = 0.0;
    }
}
// Launches the element-wise approximate-equality kernel.
template <> void Equal<float, CUDAContext>(
    const int count,
    const float* a,
    const float* b,
    float* y,
    CUDAContext* ctx) {
    _Equal<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, a, b, y);
}
/******************** loss.l1_loss ********************/
// Gradient of |x| w.r.t. its input, written as signum(dy):
// +1 for positive, -1 for negative, 0 at exactly zero.
template <typename T>
__global__ void _AbsGrad(
    const int count,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const T v = dy[i];
        if (v > T(0))       dx[i] = T(1);
        else if (v < T(0))  dx[i] = T(-1);
        else                dx[i] = T(0);
    }
}
// Launches the |x| gradient (signum) kernel.
template<> void AbsGrad<float, CUDAContext>(
    const int count,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    _AbsGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, dy, dx);
}
/******************** loss.sigmoid_cross_entropy ********************/
// Numerically-stable sigmoid cross-entropy on raw logits:
//   loss = log(1 + exp(z - 2z·[z >= 0])) + z·([z >= 0] - t)
// which equals max(z, 0) - z*t + log(1 + exp(-|z|)).
// Targets < 0 mark ignored positions: loss and flag are zeroed.
template <typename T>
__global__ void _SigmoidCrossEntropy(
    const int count,
    const T* logits,
    const T* targets,
    T* losses,
    T* flags) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        if (targets[idx] < 0) {
            losses[idx] = flags[idx] = 0;
        } else {
            losses[idx] = log(1 +
                exp(logits[idx] - 2 * logits[idx] * (logits[idx] >= 0))
            ) + logits[idx] * ((logits[idx] >= 0) - targets[idx]);
            flags[idx] = 1;
        }
    }
}
// Launches the sigmoid cross-entropy forward kernel; flags count the
// valid (non-ignored) positions for later normalization.
template <> void SigmoidCrossEntropy<float, CUDAContext>(
    const int count,
    const float* logits,
    const float* targets,
    float* losses,
    float* flags,
    CUDAContext* ctx) {
    _SigmoidCrossEntropy<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 count, logits, targets, losses, flags);
}
// Sigmoid cross-entropy backward: dlogits = sigmoid(logit) - target;
// ignored positions (target < 0) get zero gradient and zero flag.
template <typename T>
__global__ void _SigmoidCrossEntropyGrad(
    const int count,
    const T* logits,
    const T* targets,
    T* dlogits,
    T* flags) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        if (targets[idx] < 0) {
            dlogits[idx] = flags[idx] = 0;
        } else {
            dlogits[idx] = 1 / (1 + exp(-logits[idx])) - targets[idx];
            flags[idx] = 1;
        }
    }
}
// Launches the sigmoid cross-entropy backward kernel.
template <> void SigmoidCrossEntropyGrad<float, CUDAContext>(
    const int count,
    const float* logits,
    const float* targets,
    float* dlogits,
    float* flags,
    CUDAContext* ctx) {
    _SigmoidCrossEntropyGrad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 count, logits, targets, dlogits, flags);
}
/******************** loss.sigmoid_focal_loss ********************/
// Sigmoid focal loss forward (Lin et al., "Focal Loss for Dense Object
// Detection"). One sigmoid classifier per class; targets hold class ids
// with -1 meaning "ignore". c1 selects positives for this class slot,
// c2 selects valid negatives. Uses the numerically-stable form of
// log(sigmoid) on raw logits. flags records c1 (positive count).
template <typename T>
__global__ void _SigmoidFocalLoss(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const float pos_alpha,
    const float neg_alpha,
    const float gamma,
    const int neg_id,
    const T* logits,
    const T* targets,
    T* losses,
    T* flags) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int iix = idx % inner_dim;
        const int aix = (idx / inner_dim) % axis_dim;
        const int oix = idx / inner_dim / axis_dim;
        const int t = targets[oix * inner_dim + iix];
        // ``0`` is reserved for targets if neg id is zero
        // use ``aix + 1`` to match the targets
        T c1 = (t == (aix + (neg_id ? 0 : 1)));
        T c2 = (t != -1) & (t != (aix + (neg_id ? 0 : 1)));
        T p = 1 / (1 + exp(-logits[idx]));  // logit -> prob
        // (1 - p)^{gamma} * log(p)
        T pos_term = pow(1 - p, gamma) * log(max(p, FLT_MIN));
        // p^{gamma} * log(1 - p)
        T neg_term = pow(p, gamma) * (
            -logits[idx] * (logits[idx] >= 0) - log(
                1 + exp(logits[idx] - 2 * logits[idx] * (logits[idx] >= 0)))
        );
        losses[idx] = 0.0;
        losses[idx] += -c1 * pos_term * pos_alpha;
        losses[idx] += -c2 * neg_term * neg_alpha;
        flags[idx] = c1;
    }
}
// Launches the sigmoid focal loss forward kernel over
// outer_dim * axis_dim * inner_dim elements.
template <> void SigmoidFocalLoss<float, CUDAContext>(
    const int outer_dim,
    const int axis_dim,
    const int inner_dim,
    const float pos_alpha,
    const float neg_alpha,
    const float gamma,
    const int neg_id,
    const float* logits,
    const float* targets,
    float* losses,
    float* flags,
    CUDAContext* ctx) {
    TIndex count = outer_dim * axis_dim * inner_dim;
    _SigmoidFocalLoss<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 count, axis_dim, inner_dim,
                     pos_alpha, neg_alpha, gamma, neg_id,
                         logits, targets, losses, flags);
}
// Sigmoid focal loss backward: analytic d(loss)/d(logit) using the
// same positive/negative selectors as the forward pass.
template <typename T>
__global__ void _SigmoidFocalLossGradient(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const float pos_alpha,
    const float neg_alpha,
    const float gamma,
    const int neg_id,
    const T* logits,
    const T* targets,
    T* dlogits,
    T* flags) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int iix = idx % inner_dim;
        const int aix = (idx / inner_dim) % axis_dim;
        const int oix = idx / inner_dim / axis_dim;
        const int t = targets[oix * inner_dim + iix];
        // ``0`` is reserved for targets if neg id is zero
        // use ``aix + 1`` to match the targets
        T c1 = (t == (aix + (neg_id ? 0 : 1)));
        T c2 = (t != -1) & (t != (aix + (neg_id ? 0 : 1)));
        T p = 1 / (1 + exp(-logits[idx]));  // logit -> prob
        // (1 - p)^{gamma} * (1 - p - gamma * p * log(p))
        T pos_term = pow((1 - p), gamma) * (
            1 - p - p * gamma * log(max(p, FLT_MIN))
        );
        // p^{gamma} * (gamma * (1 - p) * log(1-p) - p)
        T neg_term = pow(p, gamma) * (
            (-logits[idx] * (logits[idx] >= 0) - log(
                1 + exp(logits[idx] - 2 * logits[idx] * (logits[idx] >= 0)))
            ) * (1 - p) * gamma - p
        );
        dlogits[idx] = 0.0;
        dlogits[idx] += -c1 * pos_term * pos_alpha;
        dlogits[idx] += -c2 * neg_term * neg_alpha;
        flags[idx] = c1;
    }
}
// Launches the sigmoid focal loss backward kernel.
template <> void SigmoidFocalLossGradient<float, CUDAContext>(
    const int outer_dim,
    const int axis_dim,
    const int inner_dim,
    const float pos_alpha,
    const float neg_alpha,
    const float gamma,
    const int neg_id,
    const float* logits,
    const float* targets,
    float* dlogits,
    float* flags,
    CUDAContext* ctx) {
    TIndex count = outer_dim * axis_dim * inner_dim;
    _SigmoidFocalLossGradient<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 count, axis_dim, inner_dim,
                     pos_alpha, neg_alpha, gamma, neg_id,
                         logits, targets, dlogits, flags);
}
/******************** loss.smooth_l1_loss ********************/
// Smooth L1 (Huber-style) forward:
//   y = 0.5 * x^2 / beta   for |x| < beta
//   y = |x| - 0.5 * beta   otherwise
// NOTE(review): uses unqualified abs() on T — fabs/fabsf would rule out
// any accidental integer-overload pick; confirm device overload resolution.
template <typename T>
__global__ void _SmoothL1(
    const int count,
    const float beta,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const T val = x[idx];
        const T abs_val = abs(val);
        if (abs_val < beta) y[idx] = 0.5 * val * val / beta;
        else y[idx] = abs_val - 0.5 * beta;
    }
}
// Launches the smooth L1 forward kernel.
template<> void SmoothL1<float, CUDAContext>(
    const int count,
    const float beta,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    _SmoothL1<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, beta, x, y);
}
// Smooth L1 backward: dy / beta inside the quadratic zone,
// signum(dy) outside it.
template <typename T>
__global__ void _SmoothL1Grad(
    const int count,
    const float beta,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const T val = dy[idx];
        const T abs_val = abs(val);
        if (abs_val < beta) dx[idx] = val / beta;
        // val > 0: 1 | val == 0: 0 | val < 0: -1
        else dx[idx] = (val > T(0)) - (val < T(0));
    }
}
// Launches the smooth L1 backward kernel.
template<> void SmoothL1Grad<float, CUDAContext>(
    const int count,
    const float beta,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    _SmoothL1Grad<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, beta, dy, dx);
}
/******************** loss.softmax_cross_entropy ********************/
// Dense (soft-label) cross-entropy on precomputed probabilities:
// loss = -target * log(prob), with prob floored at FLT_MIN to avoid log(0).
template <typename T>
__global__ void _SoftmaxCrossEntropy(
    const int count,
    const T* prob,
    const T* target,
    T* loss) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        loss[idx] = -target[idx] * log(max(prob[idx], FLT_MIN));
    }
}
// Launches the dense softmax cross-entropy kernel.
template <> void SoftmaxCrossEntropy<float, CUDAContext>(
    const int count,
    const float* prob,
    const float* target,
    float* loss,
    CUDAContext* ctx) {
    _SoftmaxCrossEntropy<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, prob, target, loss);
}
/******************** loss.softmax_focal_loss ********************/
// Softmax focal loss forward over precomputed probabilities.
// One thread per (outer, inner) prediction; labels matching any entry
// of `ignores` contribute zero loss and zero flag. Otherwise
// loss = -alpha * (1 - p_t)^gamma * log(p_t), with alpha chosen by
// whether the label is positive (label > neg_id).
template <typename T>
__global__ void _SoftmaxFocalLoss(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const float pos_alpha,
    const float neg_alpha,
    const float gamma,
    const int neg_id,
    const T* prob,
    const T* labels,
    const int* ignores,
    const int num_ignores,
    T* losses,
    T* flags) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int oix = idx / inner_dim;
        const int iix = idx % inner_dim;
        const int label = labels[oix * inner_dim + iix];
        int k;
        for (k = 0; k < num_ignores; k++) {
            if (label == ignores[k]) {
                losses[idx] = flags[idx] = 0;
                break;
            }
        }
        if (k == num_ignores) {
            const int t = (oix * axis_dim + label) * inner_dim + iix;
            T scale = pow(1.f - prob[t], gamma);
            scale = label > neg_id ?
                pos_alpha * scale : neg_alpha * scale;
            losses[idx] = -scale * log(max(prob[t], FLT_MIN));
            flags[idx] = label > neg_id ? 1 : 0;
        }
    }
}
// Launches the softmax focal loss forward kernel over
// outer_dim * inner_dim predictions.
template <> void SoftmaxFocalLoss<float, CUDAContext>(
    const int outer_dim,
    const int axis_dim,
    const int inner_dim,
    const float pos_alpha,
    const float neg_alpha,
    const float gamma,
    const int neg_id,
    const float* prob,
    const float* labels,
    const int* ignores,
    const int num_ignores,
    float* losses,
    float* flags,
    CUDAContext* ctx) {
    const int num_preds = outer_dim * inner_dim;
    _SoftmaxFocalLoss<float>
        << < CUDA_BLOCKS(num_preds), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 num_preds, axis_dim, inner_dim,
                     pos_alpha, neg_alpha, gamma, neg_id,
                         prob, labels, ignores, num_ignores,
                             losses, flags);
    }
// Softmax focal loss backward. Ignored labels zero the entire class
// column of dx; otherwise the focal modulation factor (and its
// derivative) scale the usual (p - 1 / p) softmax gradient.
template <typename T>
__global__ void _SoftmaxFocalLossGrad(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const float pos_alpha,
    const float neg_alpha,
    const float gamma,
    const int neg_id,
    const T* prob,
    const T* labels,
    const int* ignores,
    const int num_ignores,
    T* dx,
    T* flags) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int oix = idx / inner_dim;
        const int iix = idx % inner_dim;
        const int label = labels[oix * inner_dim + iix];
        int k;
        for (k = 0; k < num_ignores; k++)
            if (label == ignores[k]) break;
        if (k != num_ignores) {
            for (int c = 0; c < axis_dim; c++)
                dx[(oix * axis_dim + c) * inner_dim + iix] = 0;
            flags[idx] = 0;
        } else {
            const int t = (oix * axis_dim + label) * inner_dim + iix;
            T onemp = 1. - prob[t];
            // unstable if gamma is 0
            T grad = -gamma * pow(onemp, gamma - 1)
                        * log(max(prob[t], FLT_MIN))
                        * prob[t] + pow(onemp, gamma);
            grad = label > neg_id ?
                pos_alpha * grad : neg_alpha * grad;
            for (int c = 0; c < axis_dim; c++) {
                const int i = (oix * axis_dim + c) * inner_dim + iix;
                if (c == label) {
                    dx[i] = grad * (prob[t] - 1);
                } else {
                    dx[i] = grad * prob[i];
                }
            }
            flags[idx] = label > neg_id ? 1 : 0;
        }
    }
}
// Launches the softmax focal loss backward kernel.
template<> void SoftmaxFocalLossGrad<float, CUDAContext>(
    const int outer_dim,
    const int axis_dim,
    const int inner_dim,
    const float pos_alpha,
    const float neg_alpha,
    const float gamma,
    const int neg_id,
    const float* prob,
    const float* labels,
    const int* ignores,
    const int num_ignores,
    float* dx,
    float* flags,
    CUDAContext* ctx) {
    const int num_preds = outer_dim * inner_dim;
    _SoftmaxFocalLossGrad<float>
        << < CUDA_BLOCKS(num_preds), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 num_preds, axis_dim, inner_dim,
                     pos_alpha, neg_alpha, gamma, neg_id,
                         prob, labels, ignores, num_ignores,
                             dx, flags);
}
/******************** loss.sparse_softmax_cross_entropy ********************/
// Sparse-label cross-entropy on precomputed probabilities: one thread
// per (outer, inner) prediction; loss = -log(prob[label]) with the
// probability floored at FLT_MIN. Labels listed in `ignores` produce
// zero loss and zero flag.
template <typename Tx, typename Ty>
__global__ void _SparseSoftmaxCrossEntropy(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const Tx* prob,
    const Ty* labels,
    const int* ignores,
    const int num_ignores,
    Tx* losses,
    Tx* flags) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int oix = idx / inner_dim;
        const int iix = idx % inner_dim;
        const int label = labels[oix * inner_dim + iix];
        int k;
        for (k = 0; k < num_ignores; k++) {
            if (label == ignores[k]) {
                losses[idx] = flags[idx] = 0;
                break;
            }
        }
        if (k == num_ignores) {
            losses[idx] = -log(
                max(prob[(oix * axis_dim + label)
                    * inner_dim + iix], FLT_MIN)
            );
            flags[idx] = 1;
        }
    }
}
// float-label specialization of the forward launcher.
template <> void SparseSoftmaxCrossEntropy<float, float, CUDAContext>(
    const int outer_dim,
    const int axis_dim,
    const int inner_dim,
    const float* prob,
    const float* labels,
    const int* ignores,
    const int num_ignores,
    float* losses,
    float* flags,
    CUDAContext* ctx) {
    const int num_preds = outer_dim * inner_dim;
    _SparseSoftmaxCrossEntropy<float, float>
        << < CUDA_BLOCKS(num_preds), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 num_preds, axis_dim, inner_dim,
                     prob, labels, ignores, num_ignores,
                         losses, flags);
}
// int64 label specialization of the forward launcher.
template <> void SparseSoftmaxCrossEntropy<float, int64_t, CUDAContext>(
    const int outer_dim,
    const int axis_dim,
    const int inner_dim,
    const float* prob,
    const int64_t* labels,
    const int* ignores,
    const int num_ignores,
    float* losses,
    float* flags,
    CUDAContext* ctx) {
    const int num_preds = outer_dim * inner_dim;
    _SparseSoftmaxCrossEntropy<float, int64_t>
        << < CUDA_BLOCKS(num_preds), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 num_preds, axis_dim, inner_dim,
                     prob, labels, ignores, num_ignores,
                         losses, flags);
}
// Sparse-label cross-entropy backward. NOTE(review): subtracts 1 at
// the label position only, so dx is assumed to be pre-filled with
// prob by the caller (grad of softmax+CE is prob - one_hot) — confirm.
// Ignored labels zero the whole class column instead.
template <typename Tx, typename Ty>
__global__ void _SparseSoftmaxCrossEntropyGrad(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const Tx* prob,
    const Ty* labels,
    const int* ignores,
    const int num_ignores,
    Tx* dx,
    Tx* flags) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int oix = idx / inner_dim;
        const int iix = idx % inner_dim;
        const int label = labels[oix * inner_dim + iix];
        int k;
        for (k = 0; k < num_ignores; k++)
            if (label == ignores[k]) break;
        if (k != num_ignores) {
            for (int c = 0; c < axis_dim; c++)
                dx[(oix * axis_dim + c) * inner_dim + iix] = 0;
            flags[idx] = 0;
        } else {
            dx[(oix * axis_dim + label) * inner_dim + iix] -= 1;
            flags[idx] = 1;
        }
    }
}
// float-label specialization of the backward launcher.
template<> void SparseSoftmaxCrossEntropyGrad<float, float, CUDAContext>(
    const int outer_dim,
    const int axis_dim,
    const int inner_dim,
    const float* prob,
    const float* labels,
    const int* ignores,
    const int num_ignores,
    float* dx,
    float* flags,
    CUDAContext* ctx) {
    const int num_preds = outer_dim * inner_dim;
    _SparseSoftmaxCrossEntropyGrad<float, float>
        << < CUDA_BLOCKS(num_preds), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 num_preds, axis_dim, inner_dim,
                     prob, labels, ignores, num_ignores,
                         dx, flags);
}
// int64 label specialization of the backward launcher.
template<> void SparseSoftmaxCrossEntropyGrad<float, int64_t, CUDAContext>(
    const int outer_dim,
    const int axis_dim,
    const int inner_dim,
    const float* prob,
    const int64_t* labels,
    const int* ignores,
    const int num_ignores,
    float* dx,
    float* flags,
    CUDAContext* ctx) {
    const int num_preds = outer_dim * inner_dim;
    _SparseSoftmaxCrossEntropyGrad<float, int64_t>
        << < CUDA_BLOCKS(num_preds), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(
                 num_preds, axis_dim, inner_dim,
                     prob, labels, ignores, num_ignores,
                         dx, flags);
}
/******************** misc.astype ********************/
// Element-wise type conversion: casts each element of `a` (type Ta)
// into `b` (type Tb) via the usual C++ conversion rules.
template <typename Ta, typename Tb>
__global__ void _TypeA2B(
    const int count,
    const Ta* a,
    Tb* b) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        b[i] = static_cast<Tb>(a[i]);
    }
}
// Generates the TypeA2B<type_a, type_b, CUDAContext> specialization
// that launches the element-wise cast kernel.
#define DEFINE_TYPE_A2B(type_a, type_b) \
    template <> void TypeA2B<type_a, type_b, CUDAContext>( \
        const int count, \
        const type_a* a, \
        type_b* b, \
        CUDAContext* ctx) { \
        _TypeA2B<type_a, type_b> \
            << < CUDA_BLOCKS(count), CUDA_THREADS, \
                 0, ctx->cuda_stream() >> >(count, a, b); \
    }
// Generates conversions from type_a to each supported destination type
// (including the identity conversion type_a -> type_a).
#define DEFINE_TYPE_A2ALL(type_a) \
    DEFINE_TYPE_A2B(type_a, float); \
    DEFINE_TYPE_A2B(type_a, double); \
    DEFINE_TYPE_A2B(type_a, int); \
    DEFINE_TYPE_A2B(type_a, int64_t); \
    DEFINE_TYPE_A2B(type_a, uint8_t);
DEFINE_TYPE_A2ALL(float);
DEFINE_TYPE_A2ALL(double);
DEFINE_TYPE_A2ALL(int);
DEFINE_TYPE_A2ALL(int64_t);
DEFINE_TYPE_A2ALL(uint8_t);
/******************** misc.image_data ********************/
// Normalizes raw image data into an NCHW output tensor. idx enumerates
// the NCHW OUTPUT; the input is read with NHWC indexing
// (x[((n*H + h)*W + w)*C + c]), i.e. this kernel also transposes
// HWC -> CHW. mean/std are optional per-channel (nullptr to skip).
template <typename Tx, typename Ty>
__global__ void _ImageData_NCHW(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const float* mean_values,
    const float* std_values,
    const Tx* x,
    Ty* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int w = idx % W;
        const int h = (idx / W) % H;
        const int c = (idx / W / H) % C;
        const int n = idx / W / H / C;
        Ty raw_value = x[((n * H + h) * W + w) * C + c];
        if (mean_values != nullptr) raw_value -= mean_values[c];
        if (std_values != nullptr) raw_value /= std_values[c];
        y[idx] = raw_value;
    }
}
// Normalizes raw NHWC image data into an NHWC output tensor
// (layout preserved, so only the channel index is needed).
template <typename Tx, typename Ty>
__global__ void _ImageData_NHWC(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const float* mean_values,
    const float* std_values,
    const Tx* x,
    Ty* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int c = idx % C;
        Ty raw_value = x[idx];
        if (mean_values != nullptr) raw_value -= mean_values[c];
        if (std_values != nullptr) raw_value /= std_values[c];
        y[idx] = raw_value;
    }
}
// float input -> float output launcher, dispatching on OUTPUT format.
template <> void ImageData<float, float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const float* mean_values,
    const float* std_values,
    const string& data_format,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    if (data_format == "NCHW") {
        _ImageData_NCHW<float, float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     N, C, H, W, mean_values, std_values, x, y);
    } else if (data_format == "NHWC") {
        _ImageData_NHWC<float, float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     N, C, H, W, mean_values, std_values, x, y);
    } else LOG(FATAL) << "Unknown data format: " << data_format;
}
// uint8 input -> float output launcher, dispatching on OUTPUT format.
template <> void ImageData<uint8_t, float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const float* mean_values,
    const float* std_values,
    const string& data_format,
    const uint8_t* x,
    float* y,
    CUDAContext* ctx) {
    if (data_format == "NCHW") {
        _ImageData_NCHW<uint8_t, float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     N, C, H, W, mean_values, std_values, x, y);
    } else if (data_format == "NHWC") {
        _ImageData_NHWC<uint8_t, float>
            << < CUDA_BLOCKS(count), CUDA_THREADS,
                 0, ctx->cuda_stream() >> >(count,
                     N, C, H, W, mean_values, std_values, x, y);
    } else LOG(FATAL) << "Unknown data format: " << data_format;
}
/******************** ndarray.arange ********************/
// Writes the arithmetic progression start, start + step, start + 2*step, ...
// into y (integer arithmetic, converted to T on store).
template <typename T>
__global__ void _Arange(
    const int count,
    const int start,
    const int step,
    T* y) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        y[i] = step * i + start;
    }
}
// float specialization: launches the arange fill kernel.
template<> void Arange<float, CUDAContext>(
    const int count,
    const int start,
    const int step,
    float* y,
    CUDAContext* ctx) {
    _Arange<float>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, start, step, y);
}
// int specialization: launches the arange fill kernel.
template<> void Arange<int, CUDAContext>(
    const int count,
    const int start,
    const int step,
    int* y,
    CUDAContext* ctx) {
    _Arange<int>
        << < CUDA_BLOCKS(count), CUDA_THREADS,
             0, ctx->cuda_stream() >> >(count, start, step, y);
}
/******************** ndarray.argreduce ********************/
template <typename T>
__global__ void _Argmax(
const int count,
const int axis_dim,
const int inner_dim,
const T neg_bound,
const T* x,
int64_t* indices) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int oix = idx / inner_dim;
const int iix = idx % inner_dim;
int max_idx = -1; T max_val = neg_bound;
for (int j = 0; j < axis_dim; ++j) {
const T val = x[(oix * axis_dim + j)
* inner_dim + iix];
if (val > max_val) {
max_val = val;
max_idx = j;
}
}
indices[idx] = max_idx;
}
}
template <typename T>
__global__ void _Argmax_v2(
const int count,
const int axis_dim,
const int inner_dim,
const T neg_bound,
const T* x,
int64_t* indices,
T* values) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int oix = idx / inner_dim;
const int iix = idx % inner_dim;
int max_idx = -1; T max_val = neg_bound;
for (int j = 0; j < axis_dim; ++j) {
const T val = x[(oix * axis_dim + j)
* inner_dim + iix];
if (val > max_val) {
max_val = val;
max_idx = j;
}
}
indices[idx] = max_idx;
values[idx] = max_val;
}
}
// Host launcher for argmax. Only top_k == 1 is supported on CUDA; passing
// values == nullptr selects the index-only kernel.
template<> void Argmax<float, CUDAContext>(
const int count,
const int axis_dim,
const int inner_dim,
const int top_k,
const float* x,
int64_t* indices,
float* values,
CUDAContext* ctx) {
CHECK_EQ(top_k, 1) << "top_k > 1 is not supported with CUDA";
if (values == nullptr) {
_Argmax<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
axis_dim, inner_dim, -FLT_MAX,
x, indices);
} else {
_Argmax_v2<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
axis_dim, inner_dim, -FLT_MAX,
x, indices, values);
}
}
// Argmin over the axis dimension: mirror of _Argmax with a strict `<`
// comparison; pos_bound is the upper sentinel (e.g. FLT_MAX).
template <typename T>
__global__ void _Argmin(
const int count,
const int axis_dim,
const int inner_dim,
const T pos_bound,
const T* x,
int64_t* indices) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int oix = idx / inner_dim;
const int iix = idx % inner_dim;
int min_idx = -1; T min_val = pos_bound;
for (int j = 0; j < axis_dim; ++j) {
const T val = x[(oix * axis_dim + j)
* inner_dim + iix];
if (val < min_val) {
min_val = val;
min_idx = j;
}
}
indices[idx] = min_idx;
}
}
// Same as _Argmin, but also writes the minimum value into `values`.
template <typename T>
__global__ void _Argmin_v2(
const int count,
const int axis_dim,
const int inner_dim,
const T pos_bound,
const T* x,
int64_t* indices,
T* values) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int oix = idx / inner_dim;
const int iix = idx % inner_dim;
int min_idx = -1; T min_val = pos_bound;
for (int j = 0; j < axis_dim; ++j) {
const T val = x[(oix * axis_dim + j)
* inner_dim + iix];
if (val < min_val) {
min_val = val;
min_idx = j;
}
}
indices[idx] = min_idx;
values[idx] = min_val;
}
}
// Host launcher for argmin; same contract as the Argmax launcher
// (top_k must be 1, values may be nullptr).
template<> void Argmin<float, CUDAContext>(
const int count,
const int axis_dim,
const int inner_dim,
const int top_k,
const float* x,
int64_t* indices,
float* values,
CUDAContext* ctx) {
CHECK_EQ(top_k, 1) << "top_k > 1 is not supported with CUDA";
if (values == nullptr) {
_Argmin<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
axis_dim, inner_dim, FLT_MAX,
x, indices);
} else {
_Argmin_v2<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
axis_dim, inner_dim, FLT_MAX,
x, indices, values);
}
}
/******************** ndarray.gather ********************/
// Normalizes negative axis indices in place: any y[i] < 0 becomes y[i] + dim,
// mapping Python-style negative indices into [0, dim).
template <typename T>
__global__ void _CanonicalAxis(
    const int count,
    const int dim,
    T* y) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        if (y[i] < 0) y[i] += dim;
    }
}
// Host launcher: canonicalizes negative indices in a device int buffer.
template <> void CanonicalAxis<int, CUDAContext>(
const int count,
const int dim,
int* y,
CUDAContext* ctx) {
_CanonicalAxis<int>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count, dim, y);
}
// Gather along an axis: y[outer][k][inner] = x[outer][indices[k]][inner].
// count is the total number of output elements; indices holds y_slice_dim
// entries and is assumed to already be in range [0, x_slice_dim).
template <typename T>
__global__ void _Gather(
const int count,
const int outer_dim,
const int inner_dim,
const int x_slice_dim,
const int y_slice_dim,
const int* indices,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int outer_idx = idx / inner_dim / y_slice_dim;
const int slice_idx = idx % inner_dim;
const int y_idx_offset = (idx / inner_dim) % y_slice_dim;
const int x_idx_offset = indices[y_idx_offset];
const int x_idx = (outer_idx * x_slice_dim + x_idx_offset)
* inner_dim + slice_idx;
y[idx] = x[x_idx];
}
}
// Host launcher: float gather on the context's CUDA stream.
template <> void Gather<float, CUDAContext>(
const int count,
const int outer_dim,
const int inner_dim,
const int x_slice_dim,
const int y_slice_dim,
const int* indices,
const float* x,
float* y,
CUDAContext* ctx) {
_Gather<float>
<< <CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
outer_dim, inner_dim,
x_slice_dim, y_slice_dim,
indices, x, y);
}
// Host launcher: int gather on the context's CUDA stream.
template <> void Gather<int, CUDAContext>(
const int count,
const int outer_dim,
const int inner_dim,
const int x_slice_dim,
const int y_slice_dim,
const int* indices,
const int* x,
int* y,
CUDAContext* ctx) {
_Gather<int>
<< <CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
outer_dim, inner_dim,
x_slice_dim, y_slice_dim,
indices, x, y);
}
// Gather backward: scatters dy back into dx at the gathered locations.
// atomicAdd is required because indices may repeat, so several output
// elements can contribute to the same dx slot. dx must be pre-zeroed by
// the caller (this kernel only accumulates).
template <typename T>
__global__ void _GatherGrad(
const int count,
const int outer_dim,
const int inner_dim,
const int x_slice_dim,
const int y_slice_dim,
const int* indices,
const T* dy,
T* dx) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int outer_idx = idx / inner_dim / y_slice_dim;
const int slice_idx = idx % inner_dim;
const int y_idx_offset = (idx / inner_dim) % y_slice_dim;
const int x_idx_offset = indices[y_idx_offset];
const int x_idx = (outer_idx * x_slice_dim + x_idx_offset)
* inner_dim + slice_idx;
atomicAdd(dx + x_idx, dy[idx]);
}
}
// Host launcher: float gather gradient (atomic scatter-add) on the stream.
template <> void GatherGrad<float, CUDAContext>(
const int count,
const int outer_dim,
const int inner_dim,
const int x_slice_dim,
const int y_slice_dim,
const int* indices,
const float* dy,
float* dx,
CUDAContext* ctx) {
_GatherGrad<float>
<< <CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
outer_dim, inner_dim,
x_slice_dim, y_slice_dim,
indices, dy, dx);
}
// Host launcher: int gather gradient (atomic scatter-add) on the stream.
template <> void GatherGrad<int, CUDAContext>(
const int count,
const int outer_dim,
const int inner_dim,
const int x_slice_dim,
const int y_slice_dim,
const int* indices,
const int* dy,
int* dx,
CUDAContext* ctx) {
_GatherGrad<int>
<< <CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
outer_dim, inner_dim,
x_slice_dim, y_slice_dim,
indices, dy, dx);
}
/******************** ndarray.concat ********************/
// Copies one input tensor into its slot of the concatenation result:
// y[outer][concat_offset .. concat_offset + x_concat_dim)[inner] = x.
// count is the number of elements in x (one thread iteration per element).
template <typename T>
__global__ void _Concat(
    const int count,
    const int outer_dim,
    const int inner_dim,
    const int x_concat_dim,
    const int y_concat_dim,
    const int concat_offset,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const int x_inner = x_concat_dim * inner_dim;
        const int n = i / x_inner;
        const int r = i % x_inner;
        y[(n * y_concat_dim + concat_offset) * inner_dim + r] = x[i];
    }
}
// Host launcher: copies one float input into the concat output slot.
template <> void Concat<float, CUDAContext>(
const int count,
const int outer_dim,
const int inner_dim,
const int x_concat_dim,
const int y_concat_dim,
const int concat_offset,
const float* x,
float* y,
CUDAContext* ctx) {
_Concat<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
outer_dim, inner_dim,
x_concat_dim, y_concat_dim,
concat_offset, x, y);
}
// Concat backward: slices this input's gradient back out of the full
// output gradient (the exact inverse of _Concat's copy).
template <typename T>
__global__ void _ConcatGrad(
    const int count,
    const int outer_dim,
    const int inner_dim,
    const int x_concat_dim,
    const int y_concat_dim,
    const int concat_offset,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const int x_inner = x_concat_dim * inner_dim;
        const int n = i / x_inner;
        const int r = i % x_inner;
        dx[i] = dy[(n * y_concat_dim + concat_offset) * inner_dim + r];
    }
}
// Host launcher: extracts one input's gradient slice from the concat grad.
template <> void ConcatGrad<float, CUDAContext>(
const int count,
const int outer_dim,
const int inner_dim,
const int x_concat_dim,
const int y_concat_dim,
const int concat_offset,
const float* dy,
float* dx,
CUDAContext* ctx) {
_ConcatGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
outer_dim, inner_dim,
x_concat_dim, y_concat_dim,
concat_offset, dy, dx);
}
/******************** ndarray.crop ********************/
// Crops a window of length ex_dim beginning at `start` out of each length-dim
// segment: y[o][j][i] = x[o][j + start][i]. count is the output size.
template<typename T>
__global__ void _Crop1D(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int start,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const int inner_idx = i % inner_dim;
        const int crop_idx = (i / inner_dim) % ex_dim;
        const int outer_idx = i / inner_dim / ex_dim;
        y[i] = x[(outer_idx * dim + crop_idx + start) * inner_dim + inner_idx];
    }
}
// Host launcher: int 1D crop along one axis on the context's stream.
template<> void Crop1D<int, CUDAContext>(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int start,
const int* x,
int* y,
CUDAContext* ctx) {
_Crop1D<int>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
dim, ex_dim, inner_dim, start, x, y);
}
// Host launcher: float 1D crop along one axis on the context's stream.
template<> void Crop1D<float, CUDAContext>(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int start,
const float* x,
float* y,
CUDAContext* ctx) {
_Crop1D<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
dim, ex_dim, inner_dim, start, x, y);
}
// Crop backward: positions inside [start, end) receive the matching dy
// element; positions that were cropped away get a zero gradient.
template<typename T>
__global__ void _Crop1DGrad(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int start,
    const int end,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const int inner_idx = i % inner_dim;
        const int d = (i / inner_dim) % dim;
        const int outer_idx = i / inner_dim / dim;
        if (d < start || d >= end) {
            dx[i] = 0;
        } else {
            dx[i] = dy[(outer_idx * ex_dim + d - start) * inner_dim + inner_idx];
        }
    }
}
// Host launcher: int crop gradient on the context's stream.
template<> void Crop1DGrad<int, CUDAContext>(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int start,
const int end,
const int* dy,
int* dx,
CUDAContext* ctx) {
_Crop1DGrad<int>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
dim, ex_dim, inner_dim, start, end, dy, dx);
}
// Host launcher: float crop gradient on the context's stream.
template<> void Crop1DGrad<float, CUDAContext>(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int start,
const int end,
const float* dy,
float* dx,
CUDAContext* ctx) {
_Crop1DGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
dim, ex_dim, inner_dim, start, end, dy, dx);
}
/******************** ndarray.pad ********************/
// Constant padding along one axis: positions that fall outside the source
// extent [0, dim) are filled with `value`; the rest copy from x.
// ex_dim = dim + pad_l + pad_r is the padded extent.
template <typename T>
__global__ void _ConstPad1D(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int pad_l,
    const T value,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const int inner_idx = i % inner_dim;
        const int d = (i / inner_dim) % ex_dim - pad_l;
        const int outer_idx = i / inner_dim / ex_dim;
        const bool inside = (d >= 0) && (d < dim);
        y[i] = inside ? x[(outer_idx * dim + d) * inner_dim + inner_idx]
                      : value;
    }
}
// Host launcher: constant padding of a float tensor along one axis.
template <> void ConstPad1D<float, CUDAContext>(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int pad_l,
const float value,
const float* x,
float* y,
CUDAContext* ctx) {
_ConstPad1D<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
dim, ex_dim, inner_dim, pad_l, value, x, y);
}
// Reflection padding along one axis. The two clamps implement mirror
// indexing without the edge sample repeated:
//   d = max(d, -d)            reflects indices left of 0,
//   d = min(d, 2*dim - d - 2) reflects indices right of dim - 1.
// Assumes pad sizes < dim so a single reflection suffices — TODO confirm
// the caller enforces this.
template <typename T>
__global__ void _ReflectPad1D(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int pad_l,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int i = idx % inner_dim;
const int ex_d = (idx / inner_dim) % ex_dim;
const int o = idx / inner_dim / ex_dim;
int d = ex_d - pad_l;
d = max(d, -d);
d = min(d, 2 * dim - d - 2);
y[idx] = x[(o * dim + d) * inner_dim + i];
}
}
// Host launcher: reflection padding of a float tensor along one axis.
template <> void ReflectPad1D<float, CUDAContext>(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int pad_l,
const float* x,
float* y,
CUDAContext* ctx) {
_ReflectPad1D<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
dim, ex_dim, inner_dim, pad_l, x, y);
}
// Edge (replicate) padding along one axis: out-of-range positions clamp to
// the nearest valid source index (0 or dim - 1).
template <typename T>
__global__ void _EdgePad1D(
    const int count,
    const int dim,
    const int ex_dim,
    const int inner_dim,
    const int pad_l,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const int inner_idx = i % inner_dim;
        const int ex_d = (i / inner_dim) % ex_dim;
        const int outer_idx = i / inner_dim / ex_dim;
        int d = ex_d - pad_l;
        if (d < 0) d = 0;
        if (d > dim - 1) d = dim - 1;
        y[i] = x[(outer_idx * dim + d) * inner_dim + inner_idx];
    }
}
// Host launcher: edge (replicate) padding of a float tensor along one axis.
template <> void EdgePad1D<float, CUDAContext>(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int pad_l,
const float* x,
float* y,
CUDAContext* ctx) {
_EdgePad1D<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
dim, ex_dim, inner_dim, pad_l, x, y);
}
// Constant-pad backward: iterates over dx (the unpadded tensor) and copies
// the corresponding interior element of dy; gradients at pad positions are
// simply dropped. No atomics needed — the mapping is one-to-one.
template <typename T>
__global__ void _ConstPad1DGrad(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int pad_l,
const T* dy,
T* dx) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int i = idx % inner_dim;
// ex_d is the position inside the padded extent that maps to this dx slot.
const int ex_d = (idx / inner_dim) % dim + pad_l;
const int o = idx / inner_dim / dim;
dx[idx] = dy[(o * ex_dim + ex_d) * inner_dim + i];
}
}
// Host launcher: constant-pad gradient (interior copy) on the stream.
template <> void ConstPad1DGrad<float, CUDAContext>(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int pad_l,
const float* dy,
float* dx,
CUDAContext* ctx) {
_ConstPad1DGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
dim, ex_dim, inner_dim, pad_l, dy, dx);
}
// Reflection-pad backward: each dy element is accumulated into the source
// index it was mirrored from (same clamp arithmetic as the forward kernel).
// atomicAdd is required because several padded positions reflect onto the
// same source element; dx must be pre-zeroed by the caller.
template <typename T>
__global__ void _ReflectPad1DGrad(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int pad_l,
const T* dy,
T* dx) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int i = idx % inner_dim;
const int ex_d = (idx / inner_dim) % ex_dim;
const int o = idx / inner_dim / ex_dim;
int d = ex_d - pad_l;
d = max(d, -d);
d = min(d, 2 * dim - d - 2);
atomicAdd(&dx[(o * dim + d) * inner_dim + i], dy[idx]);
}
}
// Host launcher: reflection-pad gradient (atomic accumulate) on the stream.
template <> void ReflectPad1DGrad<float, CUDAContext>(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int pad_l,
const float* dy,
float* dx,
CUDAContext* ctx) {
_ReflectPad1DGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
dim, ex_dim, inner_dim, pad_l, dy, dx);
}
// Edge-pad backward: each dy element accumulates into the clamped source
// index it replicated. atomicAdd is required because all left-pad positions
// collapse onto index 0 and all right-pad positions onto dim - 1; dx must
// be pre-zeroed by the caller.
template <typename T>
__global__ void _EdgePad1DGrad(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int pad_l,
const T* dy,
T* dx) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int i = idx % inner_dim;
const int ex_d = (idx / inner_dim) % ex_dim;
const int o = idx / inner_dim / ex_dim;
const int d = min(dim - 1, max(ex_d - pad_l, 0));
atomicAdd(&dx[(o * dim + d) * inner_dim + i], dy[idx]);
}
}
// Host launcher: edge-pad gradient (atomic accumulate) on the stream.
template <> void EdgePad1DGrad<float, CUDAContext>(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int pad_l,
const float* dy,
float* dx,
CUDAContext* ctx) {
_EdgePad1DGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
dim, ex_dim, inner_dim, pad_l, dy, dx);
}
/******************** ndarray.one_hot ********************/
// One-hot encoding: for each of the `count` labels sets
// y[idx * depth + x[idx]] = on_value. Off positions are left untouched, so
// the caller must pre-fill y with the off value.
// Fix: the data pointers now use the template parameter T instead of the
// hard-coded `float`, so non-float instantiations are well-formed; the
// float specialization below is unaffected.
template <typename T>
__global__ void _OneHot(
    const int count,
    const int depth,
    const int on_value,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Labels are stored as T; conversion to int truncates toward zero.
        const int val = x[idx];
        y[idx * depth + val] = on_value;
    }
}
// Host launcher: one-hot encodes `count` float-stored labels into a
// (count, depth) buffer; y must already hold the off value.
template <> void OneHot<float, CUDAContext>(
const int count,
const int depth,
const int on_value,
const float* x,
float* y,
CUDAContext* ctx) {
_OneHot<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
depth, on_value, x, y);
}
/******************** ndarray.reduce ********************/
// Sum-reduce over the axis dimension:
// y[outer][inner] = sum_j x[outer][j][inner]; count = outer_dim * inner_dim.
// Fix: the output pointer now uses the template parameter T (was hard-coded
// float*), so non-float instantiations are well-formed; the float
// specialization below is unaffected. The accumulator init also uses an
// integral zero instead of a double literal.
template <typename T>
__global__ void _Sum(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        T sum_val = 0;
        // First element of this (outer, inner) column along the axis.
        const int offset = (idx / inner_dim * axis_dim)
            * inner_dim + idx % inner_dim;
        for (int j = 0; j < axis_dim; j++)
            sum_val += x[offset + j * inner_dim];
        y[idx] = sum_val;
    }
}
// Host launcher: float sum-reduction over one axis on the stream.
template<> void Sum<float, CUDAContext>(
const int count,
const int axis_dim,
const int inner_dim,
const float* x,
float* y,
CUDAContext* ctx) {
_Sum<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
axis_dim, inner_dim, x, y);
}
// Sum backward: broadcasts dy over the reduced axis — every one of the
// axis_dim elements that was summed into dy[idx] receives dy[idx] * coeff.
// Fix: the output pointer now uses the template parameter T (was hard-coded
// float*), so non-float instantiations are well-formed; the float
// specialization below is unaffected.
template <typename T>
__global__ void _SumGrad(
    const int count,
    const int axis_dim,
    const int inner_dim,
    const T coeff,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // First element of this (outer, inner) column along the axis.
        const int offset = (idx / inner_dim * axis_dim)
            * inner_dim + idx % inner_dim;
        for (int j = 0; j < axis_dim; j++)
            dx[offset + j * inner_dim] = dy[idx] * coeff;
    }
}
// Host launcher: float sum gradient (scaled broadcast) on the stream.
template<> void SumGrad<float, CUDAContext>(
const int count,
const int axis_dim,
const int inner_dim,
const float coeff,
const float* dy,
float* dx,
CUDAContext* ctx) {
_SumGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
axis_dim, inner_dim, coeff, dy, dx);
}
/******************** ndarray.repeat ********************/
// Repeat along one axis: each source element appears `repeats` consecutive
// times, so output index (n, b, t, d) maps back to source (n, b, d).
template <typename T>
__global__ void _Repeat(
    const int count,
    const int inner_dim,
    const int repeats,
    const int dim,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const int inner_idx = i % inner_dim;
        const int b = (i / inner_dim / repeats) % dim;
        const int n = i / inner_dim / repeats / dim;
        y[i] = x[(n * dim + b) * inner_dim + inner_idx];
    }
}
// Host launcher: float repeat along one axis. Note outer_dim is accepted
// for interface symmetry but not needed by the kernel's index math.
template <> void Repeat<float, CUDAContext>(
const int count,
const int outer_dim,
const int dim,
const int inner_dim,
const int repeats,
const float* x,
float* y,
CUDAContext* ctx) {
_Repeat<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
inner_dim, repeats, dim, x, y);
}
// Repeat backward: each source element's gradient is the sum of its
// `repeats` copies in dy. One thread per dx element, so no atomics needed.
template <typename T>
__global__ void _RepeatGrad(
const int count,
const int inner_dim,
const int repeats,
const int dim,
const T* dy,
T* dx) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int d = idx % inner_dim;
const int b = (idx / inner_dim) % dim;
const int n = idx / inner_dim / dim;
T gradient = 0;
for (int t = 0; t < repeats; t++)
gradient += dy[
(((n * dim + b) * repeats) + t)
* inner_dim + d];
dx[idx] = gradient;
}
}
// Host launcher: float repeat gradient (per-element reduction over copies).
// outer_dim is accepted for interface symmetry but unused by the kernel.
template <> void RepeatGrad<float, CUDAContext>(
const int count,
const int outer_dim,
const int dim,
const int inner_dim,
const int repeats,
const float* dy,
float* dx,
CUDAContext* ctx) {
_RepeatGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(
count, inner_dim, repeats, dim, dy, dx);
}
/******************** ndarray.slice ********************/
// Slice along one axis: copies the window starting at slice_offset (width
// y_slice_dim) out of each outer segment of x into the contiguous output.
// count is the number of output elements.
template <typename T>
__global__ void _Slice(
    const int count,
    const int outer_dim,
    const int inner_dim,
    const int x_slice_dim,
    const int y_slice_dim,
    const int slice_offset,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const int y_inner = y_slice_dim * inner_dim;
        const int n = i / y_inner;
        const int r = i % y_inner;
        y[i] = x[(n * x_slice_dim + slice_offset) * inner_dim + r];
    }
}
// Host launcher: float slice along one axis on the stream.
template <> void Slice<float, CUDAContext>(
const int count,
const int outer_dim,
const int inner_dim,
const int x_slice_dim,
const int y_slice_dim,
const int slice_offset,
const float* x,
float* y,
CUDAContext* ctx) {
_Slice<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
outer_dim, inner_dim,
x_slice_dim, y_slice_dim,
slice_offset, x, y);
}
// Slice backward: writes each dy element back to the window it was sliced
// from. The mapping is one-to-one within this slice, so no atomics; other
// regions of dx are untouched and must be zeroed by the caller if needed.
template <typename T>
__global__ void _SliceGrad(
    const int count,
    const int outer_dim,
    const int inner_dim,
    const int x_slice_dim,
    const int y_slice_dim,
    const int slice_offset,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const int y_inner = y_slice_dim * inner_dim;
        const int n = i / y_inner;
        const int r = i % y_inner;
        dx[(n * x_slice_dim + slice_offset) * inner_dim + r] = dy[i];
    }
}
// Host launcher: float slice gradient (window scatter) on the stream.
template <> void SliceGrad<float, CUDAContext>(
const int count,
const int outer_dim,
const int inner_dim,
const int x_slice_dim,
const int y_slice_dim,
const int slice_offset,
const float* dy,
float* dx,
CUDAContext* ctx) {
_SliceGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
outer_dim, inner_dim,
x_slice_dim, y_slice_dim,
slice_offset, dy, dx);
}
/******************** ndarray.tile ********************/
// Tile: repeats each length-ex_inner_dim segment of x `multiple` times in a
// row, so output index (n, m, d) maps back to source (n, d).
template <typename T>
__global__ void _Tile(
    const int count,
    const int ex_inner_dim,
    const int multiple,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const int d = i % ex_inner_dim;
        const int n = i / ex_inner_dim / multiple;
        y[i] = x[n * ex_inner_dim + d];
    }
}
// Host launcher: float tile. outer_dim is accepted for interface symmetry
// but unused by the kernel's index math.
template <> void Tile<float, CUDAContext>(
const int count,
const int outer_dim,
const int ex_inner_dim,
const int multiple,
const float* x,
float* y,
CUDAContext* ctx) {
_Tile<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
ex_inner_dim, multiple, x, y);
}
// Tile backward: each source element's gradient is the sum over its
// `multiple` tiled copies. One thread per dx element, so no atomics.
template <typename T>
__global__ void _TileGrad(
    const int count,
    const int ex_inner_dim,
    const int multiple,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const int base = (i / ex_inner_dim * multiple)
            * ex_inner_dim + i % ex_inner_dim;
        T acc = 0;
        for (int t = 0; t < multiple; t++)
            acc += dy[base + t * ex_inner_dim];
        dx[i] = acc;
    }
}
// Host launcher: float tile gradient (reduction over copies). outer_dim is
// accepted for interface symmetry but unused by the kernel.
template <> void TileGrad<float, CUDAContext>(
const int count,
const int outer_dim,
const int ex_inner_dim,
const int multiple,
const float* dy,
float* dx,
CUDAContext* ctx) {
_TileGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(
count, ex_inner_dim, multiple, dy, dx);
}
/******************** ndarray.transpose ********************/
// Generic N-d transpose. For output index idx, peels off one output
// coordinate per step via new_steps (output strides) and accumulates the
// matching input offset via old_steps (input strides) permuted by `order`.
// order/old_steps/new_steps are small device arrays of length ndim.
template <typename T>
__global__ void _Transpose(
const int count,
const int ndim,
const int* order,
const int* old_steps,
const int* new_steps,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
int x_idx = 0, y_idx = idx;
for (int j = 0; j < ndim; ++j) {
int k = order[j];
x_idx += (y_idx / new_steps[j]) * old_steps[k];
y_idx %= new_steps[j];
}
y[idx] = x[x_idx];
}
}
// Host launcher: float N-d transpose; order/old_steps/new_steps must be
// device pointers (they are dereferenced inside the kernel).
template <> void Transpose<float, CUDAContext>(
const int count,
const int ndim,
const int* order,
const int* old_steps,
const int* new_steps,
const float* x,
float* y,
CUDAContext* ctx) {
_Transpose<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
ndim, order, old_steps, new_steps, x, y);
}
// Transpose backward: same index mapping as _Transpose, but copies in the
// opposite direction (dy laid out like y, dx like x). The permutation is a
// bijection, so plain stores are race-free.
template <typename T>
__global__ void _TransposeGrad(
const int count,
const int ndim,
const int* order,
const int* old_steps,
const int* new_steps,
const T* dy,
T* dx) {
CUDA_1D_KERNEL_LOOP(idx, count) {
int x_idx = 0, y_idx = idx;
for (int j = 0; j < ndim; ++j) {
int k = order[j];
x_idx += (y_idx / new_steps[j]) * old_steps[k];
y_idx %= new_steps[j];
}
dx[x_idx] = dy[idx];
}
}
// Host launcher: float transpose gradient on the stream.
template <> void TransposeGrad<float, CUDAContext>(
const int count,
const int ndim,
const int* order,
const int* old_steps,
const int* new_steps,
const float* dy,
float* dx,
CUDAContext* ctx) {
_TransposeGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
ndim, order, old_steps, new_steps, dy, dx);
}
/******************** recurrent.lstm_cell ********************/
// LSTM pre-activation: applies sigmoid to the i/f/o gate thirds of each
// 4*hidden row of xact (offsets < c_offset) and tanh to the candidate part,
// in place.
// NOTE(review): _SigmoidUnit is instantiated with <float> regardless of T,
// and tanh promotes to double for T=float — confirm intended for other T.
template <typename T>
__global__ void _LSTMCellAct(
const int count,
const int c_offset,
const int x_offset,
T* xact) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int offset = idx % x_offset;
xact[idx] = offset < c_offset ?
_SigmoidUnit<float>(xact[idx]) : tanh(xact[idx]);
}
}
// LSTM cell forward given pre-activated gates:
//   c = f * cx + i * g,  h = o * tanh(c)
// xact rows are laid out [i | f | o | g] with the offsets given below.
template <typename T>
__global__ void _LSTMCellGate(
const int count,
const int hidden_size,
const int o_offset, // 2 * hidden_size
const int c_offset, // 3 * hidden_size
const int x_offset, // 4 * hidden_size
const T* cx,
const T* xact,
T* c,
T* h) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int n = idx / hidden_size;
const int offset = idx % hidden_size;
// x points at this sample's 4*hidden gate row.
const T* x = xact + n * x_offset;
const T i = x[offset];
const T f = x[offset + hidden_size];
const T o = x[offset + o_offset];
T c_ = x[offset + c_offset];
c_ = c[idx] = f * cx[idx] + i * c_;
h[idx] = o * tanh(c_);
}
}
// Host launcher for the LSTM cell forward pass: first activates the gates
// in place over all 4*C entries per sample (count * 4 elements), then
// computes the new cell state c and hidden state h (count elements).
// Both kernels run in order on the same stream.
template <> void LSTMCell<float, CUDAContext>(
const int count,
const int N,
const int C,
const float* cx,
float* xact,
float* c,
float* h,
CUDAContext* ctx) {
const int o_offset = 2 * C,
c_offset = 3 * C,
x_offset = 4 * C;
_LSTMCellAct<float>
<< < CUDA_BLOCKS(count * 4), CUDA_THREADS,
0, ctx->cuda_stream() >> > (count * 4,
c_offset, x_offset, xact);
_LSTMCellGate<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
C, o_offset, c_offset, x_offset,
cx, xact, c, h);
}
// LSTM cell backward, stage 1: given activated gates (xact), the new cell
// state c and incoming grads dc/dh, produces the grad w.r.t. the previous
// cell state (dcx) and the grads w.r.t. the *activated* gates, written into
// dx at the [i | f | o | g] offsets. Derived from:
//   c = f*cx + i*g,  h = o*tanh(c)
// so d(c) total = dh*o*(1 - tanh(c)^2) + dc, then chain-ruled per gate.
template <typename T>
__global__ void _LSTMCellGateGrad(
const int count,
const int hidden_size,
const int o_offset,
const int c_offset,
const int x_offset,
const T* cx,
const T* xact,
const T* c,
const T* dc,
const T* dh,
T* dcx,
T* dx) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int n = idx / hidden_size;
const int offset = idx % hidden_size;
const T* xact_ = xact + n * x_offset;
T* dx_ = dx + n * x_offset;
const T i = xact_[offset];
const T f = xact_[offset + hidden_size];
const T o = xact_[offset + o_offset];
const T g = xact_[offset + c_offset];
const T tanh_c = tanh(c[idx]);
const T dcx_sum_term =
dh[idx] * o * (1 - tanh_c * tanh_c) + dc[idx];
dcx[idx] = dcx_sum_term * f;
dx_[offset] = dcx_sum_term * g;
dx_[offset + hidden_size] = dcx_sum_term * cx[idx];
dx_[offset + o_offset] = dh[idx] * tanh_c;
dx_[offset + c_offset] = dcx_sum_term * i;
}
}
// LSTM cell backward, stage 2: converts gate grads from activated space to
// pre-activation space in place, using the activated values stored in xact:
// sigmoid' = s*(1-s) for the i/f/o thirds, tanh' = 1 - t^2 for the g part.
template <typename T>
__global__ void _LSTMCellActGrad(
const int count,
const int c_offset,
const int x_offset,
const T* xact,
T* dx) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int offset = idx % x_offset;
const T val = xact[idx];
if (offset < c_offset) dx[idx] = dx[idx] * val * (T(1) - val);
else dx[idx] = dx[idx] * (T(1) - val * val);
}
}
// Host launcher for the LSTM cell backward pass: stage 1 computes dcx and
// the activated-gate grads (count elements), stage 2 rescales all 4*C gate
// grads per sample back to pre-activation space (count * 4 elements).
// The second launch depends on the first; stream ordering guarantees it.
template <> void LSTMCellGrad<float, CUDAContext>(
const int count,
const int N,
const int C,
const float* cx,
const float* xact,
const float* c,
const float* dc,
const float* dh,
float* dcx,
float* dx,
CUDAContext* ctx) {
const int o_offset = 2 * C,
c_offset = 3 * C,
x_offset = 4 * C;
_LSTMCellGateGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
C, o_offset, c_offset, x_offset,
cx, xact, c, dc, dh, dcx, dx);
_LSTMCellActGrad<float>
<< < CUDA_BLOCKS(count * 4), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count * 4,
c_offset, x_offset, xact, dx);
}
/******************** update.adam_update ********************/
// Adam update (in place): m and v hold the exponential moving averages of
// the gradient and squared gradient; g is overwritten with the final step
// lr * m / (sqrt(v) + eps). Bias correction, if any, is folded into lr by
// the caller.
template <typename T>
__global__ void _AdamUpdate(
    const int count,
    const T lr,
    const T beta1,
    const T beta2,
    const T eps,
    T* g,
    T* m,
    T* v) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const T grad = g[i];
        m[i] = m[i] * beta1 + grad * (1 - beta1);
        v[i] = v[i] * beta2 + grad * grad * (1 - beta2);
        g[i] = lr * m[i] / (sqrt(v[i]) + eps);
    }
}
// Host launcher: in-place Adam update of g/m/v on the context's stream.
template <> void AdamUpdate<float, CUDAContext>(
const int count,
const float lr,
const float beta1,
const float beta2,
const float eps,
float* g,
float* m,
float* v,
CUDAContext* ctx) {
_AdamUpdate<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> > (count,
lr, beta1, beta2, eps, g, m, v);
}
/******************** update.nesterov_update ********************/
// Nesterov momentum update (in place):
//   h_new = momentum * h + lr * g
//   g     = (1 + momentum) * h_new - momentum * h_old
template <typename T>
__global__ void _NesterovUpdate(
    const int count,
    const T lr,
    const T momentum,
    T* g,
    T* h) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const T h_prev = h[i];
        const T h_next = momentum * h_prev + lr * g[i];
        h[i] = h_next;
        g[i] = (1 + momentum) * h_next - momentum * h_prev;
    }
}
// Host launcher: in-place Nesterov momentum update on the stream.
template <> void NesterovUpdate<float, CUDAContext>(
const int count,
const float lr,
const float momentum,
float* g,
float* h,
CUDAContext* ctx) {
_NesterovUpdate<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> > (count,
lr, momentum, g, h);
}
/******************** update.rmsprop_update ********************/
// RMSProp update (in place): h accumulates the decayed mean of squared
// gradients; g becomes the scaled step lr * g / (sqrt(h) + eps).
template <typename T>
__global__ void _RMSPropUpdate(
    const int count,
    const T lr,
    const T decay,
    const T eps,
    T* g,
    T* h) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const T grad = g[i];
        h[i] = decay * h[i] + (1 - decay) * grad * grad;
        g[i] = lr * grad / (sqrt(h[i]) + eps);
    }
}
// Host launcher: in-place RMSProp update on the stream.
template <> void RMSPropUpdate<float, CUDAContext>(
const int count,
const float lr,
const float decay,
const float eps,
float* g,
float* h,
CUDAContext* ctx) {
_RMSPropUpdate<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
lr, decay, eps, g, h);
}
/******************** update.sgd_update ********************/
// SGD with classical momentum (in place): the history h becomes
// momentum * h + lr * g, and g is replaced by that same step.
template <typename T>
__global__ void _SGDUpdate(
    const int count,
    const T lr,
    const T momentum,
    T* g,
    T* h) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const T step = momentum * h[i] + lr * g[i];
        h[i] = step;
        g[i] = step;
    }
}
// Host launcher: in-place momentum SGD update on the stream.
template <> void SGDUpdate<float, CUDAContext>(
const int count,
const float lr,
const float momentum,
float* g,
float* h,
CUDAContext* ctx) {
_SGDUpdate<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(count,
lr, momentum, g, h);
}
/******************** vision.bias_add ********************/
// Adds a per-channel bias to y in NCHW layout: the channel index is
// (i / inner_dim) % dim where inner_dim = H * W and dim = C.
template <typename T>
__global__ void _BiasAdd_NCHW(
    const int count,
    const int dim,
    const int inner_dim,
    const T* bias,
    T* y) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const int c = (i / inner_dim) % dim;
        y[i] += bias[c];
    }
}
// Adds a per-channel bias to y in NHWC layout: channels are innermost, so
// the channel index is simply i % dim.
template <typename T>
__global__ void _BiasAdd_NHWC(
    const int count,
    const int dim,
    const int inner_dim,
    const T* bias,
    T* y) {
    CUDA_1D_KERNEL_LOOP(i, count) {
        const int c = i % dim;
        y[i] += bias[c];
    }
}
// Host launcher: dispatches the layout-specific bias-add kernel.
// bias_multiplier is accepted for interface parity with other backends but
// is not used by the CUDA path.
template<> void BiasAdd<float, CUDAContext>(
const int count,
const int outer_dim,
const int dim,
const int inner_dim,
const string& data_format,
const float* bias,
const float* bias_multiplier,
float* y,
CUDAContext* ctx) {
if (data_format == "NCHW") {
_BiasAdd_NCHW<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(
count, dim, inner_dim, bias, y);
} else if (data_format == "NHWC") {
_BiasAdd_NHWC<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(
count, dim, inner_dim, bias, y);
} else LOG(FATAL) << "Unknown data format: " << data_format;
}
/******************** vision.bilinear_resize ********************/
// NCHW bilinear resize: one thread per output element; each output pixel is
// the 2x2 bilinear interpolation of its four input neighbors, with the
// bottom/right neighbors clamped at the image border.
// Fix: the batch index was computed as idx / out_w / out_w / C (dividing by
// out_w twice); it must divide by out_w, then out_h, then C — the old form
// produced wrong samples whenever out_h != out_w and N > 1.
template <typename T>
__global__ void _BilinearResize_NCHW(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const float scale_h,
    const float scale_w,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int w = idx % out_w;
        const int h = (idx / out_w) % out_h;
        const int c = (idx / out_w / out_h) % C;
        const int n = idx / out_w / out_h / C;
        const float h_in = h * scale_h;
        const int top_y_idx = floorf(h_in);
        const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1;
        const float y_lerp = h_in - top_y_idx;
        const float w_in = w * scale_w;
        const int left_x_idx = floorf(w_in);
        const int right_x_idx = (w_in < W - 1) ? ceilf(w_in) : W - 1;
        const float x_lerp = w_in - left_x_idx;
        const int NCHT = (n * C + c) * H + top_y_idx;
        const int NCHB = (n * C + c) * H + bottom_y_idx;
        const float top_left(x[NCHT * W + left_x_idx]);
        const float top_right(x[NCHT * W + right_x_idx]);
        const float bottom_left(x[NCHB * W + left_x_idx]);
        const float bottom_right(x[NCHB * W + right_x_idx]);
        // Horizontal lerps first, then the vertical lerp between them.
        const float top = top_left + (top_right - top_left) * x_lerp;
        const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp;
        y[idx] = top + (bottom - top) * y_lerp;
    }
}
// NHWC bilinear resize: one thread per output element; each output pixel is
// the 2x2 bilinear interpolation of its four input neighbors, with the
// bottom/right neighbors clamped at the image border.
template <typename T>
__global__ void _BilinearResize_NHWC(
const int count,
const int N,
const int C,
const int H,
const int W,
const int out_h,
const int out_w,
const float scale_h,
const float scale_w,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int c = idx % C;
const int w = (idx / C) % out_w;
const int h = (idx / C / out_w) % out_h;
const int n = idx / C / out_w / out_h;
const float h_in = h * scale_h;
const int top_y_idx = floorf(h_in);
const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1;
const float y_lerp = h_in - top_y_idx;
const float w_in = w * scale_w;
const int left_x_idx = floorf(w_in);
const int right_x_idx = (w_in < W - 1) ? ceilf(w_in) : W - 1;
const float x_lerp = w_in - left_x_idx;
const int NHT = n * H + top_y_idx;
const int NHB = n * H + bottom_y_idx;
const float top_left(x[(NHT * W + left_x_idx) * C + c]);
const float top_right(x[(NHT * W + right_x_idx) * C + c]);
const float bottom_left(x[(NHB * W + left_x_idx) * C + c]);
const float bottom_right(x[(NHB * W + right_x_idx) * C + c]);
// Horizontal lerps first, then the vertical lerp between them.
const float top = top_left + (top_right - top_left) * x_lerp;
const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp;
y[idx] = top + (bottom - top) * y_lerp;
}
}
// Host launcher: dispatches the layout-specific bilinear resize kernel.
// Scales map output coordinates to input coordinates (in/out ratio).
template <> void BilinearResize<float, CUDAContext>(
const int count,
const int N,
const int C,
const int H,
const int W,
const int out_h,
const int out_w,
const string& data_format,
const float* x,
float* y,
CUDAContext* ctx) {
const float scale_h = (float)H / out_h;
const float scale_w = (float)W / out_w;
if (data_format == "NCHW") {
_BilinearResize_NCHW<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(
count, N, C, H, W, out_h, out_w,
scale_h, scale_w, x, y);
} else if(data_format == "NHWC") {
_BilinearResize_NHWC<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >(
count, N, C, H, W, out_h, out_w,
scale_h, scale_w, x, y);
} else LOG(FATAL) << "Unknown data format: " << data_format;
}
// NCHW bilinear resize backward: distributes each dy element to the four
// input neighbors it interpolated, weighted by the lerp factors. atomicAdd
// is required because neighboring output pixels share input taps; dx must
// be pre-zeroed by the caller.
// Fix: the batch index was computed as idx / out_w / out_w / C (dividing by
// out_w twice); it must divide by out_w, then out_h, then C — the old form
// scattered gradients to wrong locations when out_h != out_w and N > 1.
template <typename T>
__global__ void _BilinearResizeGrad_NCHW(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const float scale_h,
    const float scale_w,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        const int w = idx % out_w;
        const int h = (idx / out_w) % out_h;
        const int c = (idx / out_w / out_h) % C;
        const int n = idx / out_w / out_h / C;
        const float h_in = h * scale_h;
        const int top_y_idx = floorf(h_in);
        const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1;
        const float y_lerp = h_in - top_y_idx;
        const float w_in = w * scale_w;
        const int left_x_idx = floorf(w_in);
        const int right_x_idx = (w_in < W - 1) ? ceilf(w_in) : W - 1;
        const float x_lerp = w_in - left_x_idx;
        const int NCHT = (n * C + c) * H + top_y_idx;
        const int NCHB = (n * C + c) * H + bottom_y_idx;
        const float dtop = (1 - y_lerp) * dy[idx];
        const float dbottom = y_lerp * dy[idx];
        atomicAdd(&dx[NCHT * W + left_x_idx], static_cast<T>((1 - x_lerp) * dtop));
        atomicAdd(&dx[NCHT * W + right_x_idx], static_cast<T>(x_lerp * dtop));
        atomicAdd(&dx[NCHB * W + left_x_idx], static_cast<T>((1 - x_lerp) * dbottom));
        atomicAdd(&dx[NCHB * W + right_x_idx], static_cast<T>(x_lerp * dbottom));
    }
}
/*! Backward bilinear resize, NHWC layout: each dy element is scattered
 *  back to its four source pixels with the forward lerp weights.
 *  atomicAdd is required since source pixels are shared between outputs;
 *  dx must be zero-initialized by the caller. */
template <typename T>
__global__ void _BilinearResizeGrad_NHWC(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const float scale_h,
    const float scale_w,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decode (n, h, w, c) from the flat NHWC output index.
        const int c = idx % C;
        const int w = (idx / C) % out_w;
        const int h = (idx / C / out_w) % out_h;
        const int n = idx / C / out_w / out_h;
        // Vertical source pair and interpolation weight.
        const float fy = h * scale_h;
        const int y0 = floorf(fy);
        const int y1 = (fy < H - 1) ? ceilf(fy) : H - 1;
        const float v = fy - y0;
        // Horizontal source pair and interpolation weight.
        const float fx = w * scale_w;
        const int x0 = floorf(fx);
        const int x1 = (fx < W - 1) ? ceilf(fx) : W - 1;
        const float u = fx - x0;
        // Row bases for the top / bottom source rows of image n.
        const int row_top = n * H + y0;
        const int row_bot = n * H + y1;
        // Split the incoming gradient between rows, then columns.
        const float g_top = (1 - v) * dy[idx];
        const float g_bot = v * dy[idx];
        atomicAdd(&dx[(row_top * W + x0) * C + c], static_cast<T>((1 - u) * g_top));
        atomicAdd(&dx[(row_top * W + x1) * C + c], static_cast<T>(u * g_top));
        atomicAdd(&dx[(row_bot * W + x0) * C + c], static_cast<T>((1 - u) * g_bot));
        atomicAdd(&dx[(row_bot * W + x1) * C + c], static_cast<T>(u * g_bot));
    }
}
/*! BilinearResizeGrad<float, CUDAContext>
 *  Launches the bilinear-resize backward kernel for the given layout.
 *  dx must be zero-filled beforehand (the kernels accumulate atomically). */
template <> void BilinearResizeGrad<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const string& data_format,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    const float scale_h = H / (float)out_h;
    const float scale_w = W / (float)out_w;
    if (data_format == "NHWC") {
        _BilinearResizeGrad_NHWC<float>
            <<< CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >>>(
            count, N, C, H, W, out_h, out_w,
                scale_h, scale_w, dy, dx);
    } else if (data_format == "NCHW") {
        _BilinearResizeGrad_NCHW<float>
            <<< CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >>>(
            count, N, C, H, W, out_h, out_w,
                scale_h, scale_w, dy, dx);
    } else {
        LOG(FATAL) << "Unknown data format: " << data_format;
    }
}
/******************** vision.conv ********************/
/*! im2col, NCHW layout: unfolds kernel_h x kernel_w patches of `im`
 *  into columns of `col` so convolution reduces to a matrix multiply.
 *  One thread per (input channel, output row, output col); each thread
 *  writes kernel_h * kernel_w entries spaced col_h * col_w apart. */
template<typename T>
__global__ void _Im2Col2d_NCHW(
    const int count,
    const int H,
    const int W,
    const int col_h,
    const int col_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const int dilation_h,
    const int dilation_w,
    const T* im,
    T* col) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decode (im_c, h, w): output position within one input channel.
        const int w = idx % col_w;
        const int h_idx = idx / col_w;
        const int h = h_idx % col_h;
        const int im_c = h_idx / col_h;
        // First of the kernel_h * kernel_w col-channels fed by channel im_c.
        const int c = im_c * kernel_h * kernel_w;
        // Top-left corner of the receptive field (negative inside padding).
        const int im_h_off = h * stride_h - pad_h;
        const int im_w_off = w * stride_w - pad_w;
        T* col_ptr = col;
        col_ptr += ((c * col_h + h) * col_w + w);
        const T* im_ptr = im;
        // NOTE: im_ptr may point before the image when the offsets are
        // negative; it is only dereferenced after the bounds check below.
        im_ptr += ((im_c * H + im_h_off) * W + im_w_off);
        for (int kh = 0; kh < kernel_h; kh++) {
            for (int kw = 0; kw < kernel_w; kw++) {
                const int im_h = kh * dilation_h + im_h_off;
                const int im_w = kw * dilation_w + im_w_off;
                // Zero-pad: taps outside the image contribute 0.
                *col_ptr = (im_h >= 0 && im_w >= 0 && im_h < H && im_w < W) ?
                    im_ptr[kh * dilation_h * W + kw * dilation_w] : 0;
                // Advance one col-channel (each is col_h * col_w elements).
                col_ptr += (col_h * col_w);
            }
        }
    }
}
/*! im2col, NHWC layout: one thread per (output row, output col, channel);
 *  each thread copies its kernel_h * kernel_w patch taps into `col`,
 *  laid out as (col_h, col_w, kernel_h, kernel_w, C). */
template<typename T>
__global__ void _Im2Col2d_NHWC(
    const int count,
    const int C,
    const int H,
    const int W,
    const int col_h,
    const int col_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const int dilation_h,
    const int dilation_w,
    const T* im,
    T* col) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decode (h, w, c) from the flat NHWC column index.
        const int c = idx % C;
        const int w = (idx / C) % col_w;
        const int h = idx / C / col_w;
        // Top-left corner of the receptive field (negative inside padding).
        const int im_h_off = h * stride_h - pad_h;
        const int im_w_off = w * stride_w - pad_w;
        const int base_col_idx = (h * col_w) + w;
        for (int kh = 0; kh < kernel_h; kh++) {
            for (int kw = 0; kw < kernel_w; kw++) {
                const int im_h = kh * dilation_h + im_h_off;
                const int im_w = kw * dilation_w + im_w_off;
                const int col_idx = (
                    ((base_col_idx * kernel_h + kh) * kernel_w + kw) * C + c
                );
                // Zero-pad: taps outside the image contribute 0.
                col[col_idx] = (im_h >= 0 && im_w >= 0 &&
                    im_h < H && im_w < W) ? im[(im_h * W + im_w) * C + c] : 0;
            }
        }
    }
}
/*! Im2Col2d<float, CUDAContext>
 *  Dispatches the im2col kernel for the given layout; one thread per
 *  column element of a single image. */
template <> void Im2Col2d<float, CUDAContext>(
    const int C,
    const int H,
    const int W,
    const int col_h,
    const int col_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const int dilation_h,
    const int dilation_w,
    const string& data_format,
    const float* im,
    float* col,
    CUDAContext* ctx) {
    if (data_format == "NHWC") {
        const int count = (col_h * col_w * C);
        _Im2Col2d_NHWC<float>
            <<< CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >>>(count,
            C, H, W, col_h, col_w, kernel_h, kernel_w,
                stride_h, stride_w, pad_h, pad_w,
                    dilation_h, dilation_w, im, col);
    } else if (data_format == "NCHW") {
        const int count = (C * col_h * col_w);
        _Im2Col2d_NCHW<float>
            <<< CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >>>(count,
            H, W, col_h, col_w, kernel_h, kernel_w,
                stride_h, stride_w, pad_h, pad_w,
                    dilation_h, dilation_w, im, col);
    } else {
        LOG(FATAL) << "Unknown data format: " << data_format;
    }
}
/*! col2im, NCHW layout: the adjoint of _Im2Col2d_NCHW. One thread per
 *  image element; it gathers every `col` entry that im2col copied this
 *  pixel into and sums them. */
template<typename T>
__global__ void _Col2Im2d_NCHW(
    const int count,
    const int H,
    const int W,
    const int col_h,
    const int col_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const int dilation_h,
    const int dilation_w,
    const T* col,
    T* im) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        T val = 0;
        // Pixel coordinates shifted into padded space.
        const int im_w = idx % W + pad_w;
        const int im_h = (idx / W) % H + pad_h;
        const int im_c = idx / W / H;
        // Effective (dilated) kernel extents.
        const int ex_kernel_h = (kernel_h - 1) * dilation_h + 1;
        const int ex_kernel_w = (kernel_w - 1) * dilation_w + 1;
        // redundant pixels will be ignored when conv
        // note to clip them by min(x,col_w)
        // Range of output positions whose window covers this pixel.
        const int w_start = (im_w < ex_kernel_w) ?
            0 : (im_w - ex_kernel_w) / stride_w + 1;
        const int w_end = min(im_w / stride_w + 1, col_w);
        const int h_start = (im_h < ex_kernel_h) ?
            0 : (im_h - ex_kernel_h) / stride_h + 1;
        const int h_end = min(im_h / stride_h + 1, col_h);
        for (int h = h_start; h < h_end; ++h) {
            for (int w = w_start; w < w_end; ++w) {
                // Offset of this pixel inside window (h, w), in dilated units.
                int kh_off = (im_h - h * stride_h);
                int kw_off = (im_w - w * stride_w);
                // only the serval im pixels used in dilated-conv
                // ignore the corresponding col pixels
                // (only offsets that are multiples of the dilation
                // correspond to an actual kernel tap)
                if (kh_off % dilation_h == 0 && kw_off % dilation_w == 0) {
                    kh_off /= dilation_h;
                    kw_off /= dilation_w;
                    const int col_idx = ((
                        (im_c * kernel_h + kh_off) * kernel_w + kw_off) * col_h + h
                    ) * col_w + w;
                    val += col[col_idx];
                }
            }
        }
        im[idx] = val;
    }
}
/*! col2im, NHWC layout: the adjoint of _Im2Col2d_NHWC. One thread per
 *  image element; sums every `col` entry that im2col copied this
 *  element into. col layout: (col_h, col_w, kernel_h, kernel_w, C). */
template<typename T>
__global__ void _Col2Im2d_NHWC(
    const int count,
    const int C,
    const int H,
    const int W,
    const int col_h,
    const int col_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const int dilation_h,
    const int dilation_w,
    const T* col,
    T* im) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        T val = 0;
        // Pixel coordinates shifted into padded space.
        const int im_c = idx % C;
        const int im_w = (idx / C) % W + pad_w;
        const int im_h = (idx / C / W) + pad_h;
        // Effective (dilated) kernel extents.
        const int ex_kernel_h = (kernel_h - 1) * dilation_h + 1;
        const int ex_kernel_w = (kernel_w - 1) * dilation_w + 1;
        // redundant pixels will be ignored when conv
        // note to clip them by min(x,col_w)
        // Range of output positions whose window covers this pixel.
        const int w_start = (im_w < ex_kernel_w) ?
            0 : (im_w - ex_kernel_w) / stride_w + 1;
        const int w_end = min(im_w / stride_w + 1, col_w);
        const int h_start = (im_h < ex_kernel_h) ?
            0 : (im_h - ex_kernel_h) / stride_h + 1;
        const int h_end = min(im_h / stride_h + 1, col_h);
        for (int h = h_start; h < h_end; ++h) {
            for (int w = w_start; w < w_end; ++w) {
                // Offset of this pixel inside window (h, w), in dilated units.
                int kh_off = (im_h - h * stride_h);
                int kw_off = (im_w - w * stride_w);
                // only the serval im pixels used in dilated-conv
                // ignore the corresponding col pixels
                // (only offsets that are multiples of the dilation
                // correspond to an actual kernel tap)
                if (kh_off % dilation_h == 0 && kw_off % dilation_w == 0) {
                    kh_off /= dilation_h;
                    kw_off /= dilation_w;
                    const int col_idx = (
                        ((h * col_w + w) * kernel_h + kh_off) * kernel_w + kw_off
                    ) * C + im_c;
                    val += col[col_idx];
                }
            }
        }
        im[idx] = val;
    }
}
/*! Col2Im2d<float, CUDAContext>
 *  Dispatches the col2im kernel for the given layout; one thread per
 *  image element of a single image. */
template <> void Col2Im2d<float, CUDAContext>(
    const int C,
    const int H,
    const int W,
    const int col_h,
    const int col_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const int dilation_h,
    const int dilation_w,
    const string& data_format,
    const float* col,
    float* im,
    CUDAContext* ctx) {
    if (data_format == "NHWC") {
        const int count = (H * W * C);
        _Col2Im2d_NHWC<float>
            <<< CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >>>(count,
            C, H, W, col_h, col_w, kernel_h, kernel_w,
                stride_h, stride_w, pad_h, pad_w,
                    dilation_h, dilation_w, col, im);
    } else if (data_format == "NCHW") {
        const int count = (C * H * W);
        _Col2Im2d_NCHW<float>
            <<< CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >>>(count,
            H, W, col_h, col_w, kernel_h, kernel_w,
                stride_h, stride_w, pad_h, pad_w,
                    dilation_h, dilation_w, col, im);
    } else {
        LOG(FATAL) << "Unknown data format: " << data_format;
    }
}
/******************** vision.nn_resize ********************/
/*! Nearest-neighbor resize, NCHW:
 *  y(n, c, h, w) = x(n, c, floor(h * scale_h), floor(w * scale_w)),
 *  with source coordinates clamped to the input bounds. */
template <typename T>
__global__ void _NNResize_NCHW(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const float scale_h,
    const float scale_w,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decode (n, c, h, w) from the flat NCHW output index.
        const int w = idx % out_w;
        const int h = (idx / out_w) % out_h;
        const int c = (idx / out_w / out_h) % C;
        const int n = idx / out_w / out_h / C;
        // Nearest source pixel (floor), clamped inside the image.
        const int src_h = min(int(floorf(h * scale_h)), H - 1);
        const int src_w = min(int(floorf(w * scale_w)), W - 1);
        const int x_idx = ((n * C + c) * H + src_h) * W + src_w;
        y[idx] = x[x_idx];
    }
}
/*! Nearest-neighbor resize, NHWC:
 *  y(n, h, w, c) = x(n, floor(h * scale_h), floor(w * scale_w), c),
 *  with source coordinates clamped to the input bounds. */
template <typename T>
__global__ void _NNResize_NHWC(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const float scale_h,
    const float scale_w,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decode (n, h, w, c) from the flat NHWC output index.
        const int c = idx % C;
        const int w = (idx / C) % out_w;
        const int h = (idx / C / out_w) % out_h;
        const int n = idx / C / out_w / out_h;
        // Nearest source pixel (floor), clamped inside the image.
        const int src_h = min(int(floorf(h * scale_h)), H - 1);
        const int src_w = min(int(floorf(w * scale_w)), W - 1);
        const int x_idx = ((n * H + src_h) * W + src_w) * C + c;
        y[idx] = x[x_idx];
    }
}
/*! NNResize<float, CUDAContext>
 *  Launches the nearest-neighbor resize kernel for the given layout. */
template <> void NNResize<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const string& data_format,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    const float scale_h = H / (float)out_h;
    const float scale_w = W / (float)out_w;
    if (data_format == "NHWC") {
        _NNResize_NHWC<float>
            <<< CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >>>(
            count, N, C, H, W, out_h, out_w,
                scale_h, scale_w, x, y);
    } else if (data_format == "NCHW") {
        _NNResize_NCHW<float>
            <<< CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >>>(
            count, N, C, H, W, out_h, out_w,
                scale_h, scale_w, x, y);
    } else {
        LOG(FATAL) << "Unknown data format: " << data_format;
    }
}
/*! Backward nearest-neighbor resize, NCHW: each output gradient is
 *  routed back to the single source pixel chosen by the forward pass.
 *  Atomics are required because several outputs may map to one source;
 *  dx must be zero-initialized by the caller. */
template <typename T>
__global__ void _NNResizeGrad_NCHW(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const float scale_h,
    const float scale_w,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decode (n, c, h, w) from the flat NCHW output index.
        const int w = idx % out_w;
        const int h = (idx / out_w) % out_h;
        const int c = (idx / out_w / out_h) % C;
        const int n = idx / out_w / out_h / C;
        // Same clamped source pixel as the forward kernel.
        const int src_h = min(int(floorf(h * scale_h)), H - 1);
        const int src_w = min(int(floorf(w * scale_w)), W - 1);
        const int dx_idx = ((n * C + c) * H + src_h) * W + src_w;
        atomicAdd(&dx[dx_idx], dy[idx]);
    }
}
/*! Backward nearest-neighbor resize, NHWC: each output gradient is
 *  routed back to the single source pixel chosen by the forward pass.
 *  Atomics are required because several outputs may map to one source;
 *  dx must be zero-initialized by the caller. */
template <typename T>
__global__ void _NNResizeGrad_NHWC(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const float scale_h,
    const float scale_w,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decode (n, h, w, c) from the flat NHWC output index.
        const int c = idx % C;
        const int w = (idx / C) % out_w;
        const int h = (idx / C / out_w) % out_h;
        const int n = idx / C / out_w / out_h;
        // Same clamped source pixel as the forward kernel.
        const int src_h = min(int(floorf(h * scale_h)), H - 1);
        const int src_w = min(int(floorf(w * scale_w)), W - 1);
        const int dx_idx = ((n * H + src_h) * W + src_w) * C + c;
        atomicAdd(&dx[dx_idx], dy[idx]);
    }
}
/*! NNResizeGrad<float, CUDAContext>
 *  Launches the nearest-neighbor resize backward kernel for the given
 *  layout. dx must be zero-filled beforehand (atomic accumulation). */
template <> void NNResizeGrad<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int out_h,
    const int out_w,
    const string& data_format,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    const float scale_h = H / (float)out_h;
    const float scale_w = W / (float)out_w;
    if (data_format == "NHWC") {
        _NNResizeGrad_NHWC<float>
            <<< CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >>>(
            count, N, C, H, W, out_h, out_w,
                scale_h, scale_w, dy, dx);
    } else if (data_format == "NCHW") {
        _NNResizeGrad_NCHW<float>
            <<< CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >>>(
            count, N, C, H, W, out_h, out_w,
                scale_h, scale_w, dy, dx);
    } else {
        LOG(FATAL) << "Unknown data format: " << data_format;
    }
}
/******************** vision.pooling ********************/
/*! Max pooling forward, NCHW. One thread per output element; scans its
 *  kernel_h x kernel_w window, writes the max value to `y` and the
 *  argmax offset (relative to the (n, c) plane) to `mask` for use by
 *  the backward pass. */
template<typename T>
__global__ void _MAXPooling2d_NCHW(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const T* x,
    int* mask,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decode (pn, pc, ph, pw) from the flat NCHW output index.
        const int pw = idx % pool_w;
        const int ph = (idx / pool_w) % pool_h;
        const int pc = (idx / pool_w / pool_h) % C;
        const int pn = idx / pool_w / pool_h / C;
        // Window extent, clipped to the image (padding is virtual:
        // padded positions are simply never visited).
        int start_h = ph * stride_h - pad_h;
        int start_w = pw * stride_w - pad_w;
        const int end_h = min(start_h + kernel_h, H);
        const int end_w = min(start_w + kernel_w, W);
        start_h = max(start_h, 0);
        start_w = max(start_w, 0);
        T max_val = -FLT_MAX;
        int max_idx = -1;
        // Base of the (pn, pc) input plane.
        const T* x_ptr = x + (pn * C + pc) * H * W;
        for (int h = start_h; h < end_h; ++h) {
            for (int w = start_w; w < end_w; ++w) {
                if (x_ptr[h * W + w] > max_val) {
                    max_idx = h * W + w;
                    max_val = x_ptr[max_idx];
                }
            }
        }
        y[idx] = max_val;
        mask[idx] = max_idx;
    }
}
/*! Max pooling forward, NHWC. One thread per output element; scans its
 *  kernel_h x kernel_w window, writes the max value to `y` and the
 *  argmax as an absolute index into `x` to `mask` (unlike the NCHW
 *  variant, which stores a plane-relative offset). */
template<typename T>
__global__ void _MAXPooling2d_NHWC(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const T* x,
    int* mask,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decode (pn, ph, pw, pc) from the flat NHWC output index.
        const int pc = idx % C;
        const int pw = (idx / C) % pool_w;
        const int ph = (idx / C / pool_w) % pool_h;
        const int pn = idx / C / pool_w / pool_h;
        // Window extent, clipped to the image (padding is virtual).
        int start_h = ph * stride_h - pad_h;
        int start_w = pw * stride_w - pad_w;
        const int end_h = min(start_h + kernel_h, H);
        const int end_w = min(start_w + kernel_w, W);
        start_h = max(start_h, 0);
        start_w = max(start_w, 0);
        T max_val = -FLT_MAX;
        int max_idx = -1;
        for (int h = start_h; h < end_h; ++h) {
            for (int w = start_w; w < end_w; ++w) {
                const int x_idx = ((pn * H + h) * W + w) * C + pc;
                if (x[x_idx] > max_val) {
                    max_idx = x_idx;
                    max_val = x[max_idx];
                }
            }
        }
        y[idx] = max_val;
        mask[idx] = max_idx;
    }
}
/*! MAXPooling2d<float, CUDAContext>
 *  Launches the max-pooling forward kernel for the given layout;
 *  `mask` receives the per-output argmax for the backward pass. */
template<> void MAXPooling2d<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const string& data_format,
    const float* x,
    int* mask,
    float* y,
    CUDAContext* ctx) {
    if (data_format == "NHWC") {
        _MAXPooling2d_NHWC<float>
            <<< CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >>>(count,
            N, C, H, W, pool_h, pool_w, kernel_h, kernel_w,
                stride_h, stride_w, pad_h, pad_w, x, mask, y);
    } else if (data_format == "NCHW") {
        _MAXPooling2d_NCHW<float>
            <<< CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >>>(count,
            N, C, H, W, pool_h, pool_w, kernel_h, kernel_w,
                stride_h, stride_w, pad_h, pad_w, x, mask, y);
    } else {
        LOG(FATAL) << "Unknown data format: " << data_format;
    }
}
/*! Average pooling forward, NCHW. One thread per output element.
 *  The divisor pool_area counts only in-image pixels: the window is
 *  first clipped to the padded extent, then clamped to [0, H) x [0, W)
 *  before the area is taken, so border outputs average over fewer
 *  pixels rather than treating padding as zeros of full weight. */
template<typename T>
__global__ void _AVGPooling2d_NCHW(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decode (pn, pc, ph, pw) from the flat NCHW output index.
        const int pw = idx % pool_w;
        const int ph = (idx / pool_w) % pool_h;
        const int pc = (idx / pool_w / pool_h) % C;
        const int pn = idx / pool_w / pool_h / C;
        int start_h = ph * stride_h - pad_h;
        int start_w = pw * stride_w - pad_w;
        int end_h = min(start_h + kernel_h, H + pad_h);
        int end_w = min(start_w + kernel_w, W + pad_w);
        // Clamp to the valid image region before computing the area.
        start_h = max(start_h, 0);
        start_w = max(start_w, 0);
        end_h = min(end_h, H);
        end_w = min(end_w, W);
        // Base of the (pn, pc) input plane.
        const T* x_ptr = x + (pn * C + pc) * H * W;
        const int pool_area = (end_h - start_h) * (end_w - start_w);
        T avg_val = 0;
        for (int h = start_h; h < end_h; ++h) {
            for (int w = start_w; w < end_w; ++w) {
                avg_val += x_ptr[h * W + w];
            }
        }
        y[idx] = avg_val / pool_area;
    }
}
/*! Average pooling forward, NHWC. One thread per output element.
 *  As in the NCHW variant, pool_area counts only in-image pixels
 *  (window clamped to [0, H) x [0, W) before the area is taken). */
template<typename T>
__global__ void _AVGPooling2d_NHWC(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const T* x,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decode (pn, ph, pw, pc) from the flat NHWC output index.
        const int pc = idx % C;
        const int pw = (idx / C) % pool_w;
        const int ph = (idx / C / pool_w) % pool_h;
        const int pn = idx / C / pool_w / pool_h;
        int start_h = ph * stride_h - pad_h;
        int start_w = pw * stride_w - pad_w;
        int end_h = min(start_h + kernel_h, H + pad_h);
        int end_w = min(start_w + kernel_w, W + pad_w);
        // Clamp to the valid image region before computing the area.
        start_h = max(start_h, 0);
        start_w = max(start_w, 0);
        end_h = min(end_h, H);
        end_w = min(end_w, W);
        const int pool_area = (end_h - start_h) * (end_w - start_w);
        T avg_val = 0;
        for (int h = start_h; h < end_h; ++h)
            for (int w = start_w; w < end_w; ++w)
                avg_val += x[((pn * H + h) * W + w) * C + pc];
        y[idx] = avg_val / pool_area;
    }
}
/*! AVGPooling2d<float, CUDAContext>
 *  Launches the average-pooling forward kernel for the given layout. */
template<> void AVGPooling2d<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const string& data_format,
    const float* x,
    float* y,
    CUDAContext* ctx) {
    if (data_format == "NHWC") {
        _AVGPooling2d_NHWC<float>
            <<< CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >>>(count,
            N, C, H, W, pool_h, pool_w, kernel_h, kernel_w,
                stride_h, stride_w, pad_h, pad_w, x, y);
    } else if (data_format == "NCHW") {
        _AVGPooling2d_NCHW<float>
            <<< CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >>>(count,
            N, C, H, W, pool_h, pool_w, kernel_h, kernel_w,
                stride_h, stride_w, pad_h, pad_w, x, y);
    } else {
        LOG(FATAL) << "Unknown data format: " << data_format;
    }
}
/*! Max pooling backward, NCHW. One thread per input pixel; it visits
 *  every pooling window that may cover the pixel (windows can overlap
 *  when stride < kernel) and accumulates dy wherever the recorded
 *  argmax in `mask` equals this pixel's plane-relative offset. */
template<typename T>
__global__ void _MAXPooling2dGrad_NCHW(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const T* dy,
    const int* mask,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decode (n, c, h, w) from the flat NCHW input index.
        const int w = idx % W;
        const int h = (idx / W) % H;
        const int c = (idx / W / H) % C;
        const int n = idx / W / H / C;
        // allow overlapping
        // First window whose extent can include (h, w), in padded coords.
        const int start_ph = (h + pad_h < kernel_h) ?
            0 : (h + pad_h - kernel_h) / stride_h + 1;
        const int start_pw = (w + pad_w < kernel_w) ?
            0 : (w + pad_w - kernel_w) / stride_w + 1;
        // allow clip
        // One past the last window that can include (h, w).
        const int end_ph = min((h + pad_h) / stride_h + 1, pool_h);
        const int end_pw = min((w + pad_w) / stride_w + 1, pool_w);
        T grad = 0;
        // Base of the (n, c) output / mask plane.
        const int offset = (n * C + c) * pool_h * pool_w;
        const T* dy_ptr = dy + offset;
        const int* mask_ptr = mask + offset;
        for (int ph = start_ph; ph < end_ph; ++ph) {
            for (int pw = start_pw; pw < end_pw; ++pw) {
                if (mask_ptr[ph * pool_w + pw] == (h * W + w)) {
                    grad += dy_ptr[ph * pool_w + pw];
                }
            }
        }
        dx[idx] = grad;
    }
}
/*! Max pooling backward, NHWC. One thread per input element; it sums
 *  dy over the (possibly overlapping) windows whose recorded argmax
 *  in `mask` is this element's absolute x index.
 *
 *  Change: x_idx depends only on (n, h, w, c), so it is computed once
 *  before the window loops instead of being recomputed per (ph, pw). */
template<typename T>
__global__ void _MAXPooling2dGrad_NHWC(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const T* dy,
    const int* mask,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decode (n, h, w, c) from the flat NHWC input index.
        const int c = idx % C;
        const int w = (idx / C) % W;
        const int h = (idx / C / W) % H;
        const int n = idx / C / W / H;
        // allow overlapping
        const int start_ph = (h + pad_h < kernel_h) ?
            0 : (h + pad_h - kernel_h) / stride_h + 1;
        const int start_pw = (w + pad_w < kernel_w) ?
            0 : (w + pad_w - kernel_w) / stride_w + 1;
        // allow clip
        const int end_ph = min((h + pad_h) / stride_h + 1, pool_h);
        const int end_pw = min((w + pad_w) / stride_w + 1, pool_w);
        // Loop-invariant: absolute index of this input element.
        const int x_idx = ((n * H + h) * W + w) * C + c;
        T grad = 0;
        for (int ph = start_ph; ph < end_ph; ++ph) {
            for (int pw = start_pw; pw < end_pw; ++pw) {
                const int y_idx = ((n * pool_h + ph) * pool_w + pw) * C + c;
                if (mask[y_idx] == x_idx) grad += dy[y_idx];
            }
        }
        dx[idx] = grad;
    }
}
/*! MAXPooling2dGrad<float, CUDAContext>
 *  Launches the max-pooling backward kernel for the given layout,
 *  routing gradients via the `mask` recorded by the forward pass. */
template<> void MAXPooling2dGrad<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const string& data_format,
    const float* dy,
    const int* mask,
    float* dx,
    CUDAContext* ctx) {
    if (data_format == "NHWC") {
        _MAXPooling2dGrad_NHWC<float>
            <<< CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >>>(count,
            N, C, H, W, pool_h, pool_w, kernel_h, kernel_w,
                stride_h, stride_w, pad_h, pad_w, dy, mask, dx);
    } else if (data_format == "NCHW") {
        _MAXPooling2dGrad_NCHW<float>
            <<< CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >>>(count,
            N, C, H, W, pool_h, pool_w, kernel_h, kernel_w,
                stride_h, stride_w, pad_h, pad_w, dy, mask, dx);
    } else {
        LOG(FATAL) << "Unknown data format: " << data_format;
    }
}
/*! Average pooling backward, NCHW. One thread per input pixel; it sums
 *  dy / pool_area over every pooling window that covers the pixel.
 *
 *  Fixes vs. the previous revision:
 *    - end_ph / end_pw ignored the padding (used h / stride_h instead of
 *      (h + pad_h) / stride_h), dropping windows that begin inside the
 *      top/left padding; cf. _MAXPooling2dGrad_NCHW above, which already
 *      included the pad.
 *    - pool_area is now clamped to the valid image region exactly as the
 *      forward kernel _AVGPooling2d_NCHW does, so forward and backward
 *      divide by the same window size at the borders. */
template<typename T>
__global__ void _AVGPooling2dGrad_NCHW(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decode (n, c, h, w) from the flat NCHW input index.
        const int w = idx % W;
        const int h = (idx / W) % H;
        const int c = (idx / W / H) % C;
        const int n = idx / W / H / C;
        // First / one-past-last pooling window covering (h, w).
        const int start_ph = (h + pad_h < kernel_h) ?
            0 : (h + pad_h - kernel_h) / stride_h + 1;
        const int start_pw = (w + pad_w < kernel_w) ?
            0 : (w + pad_w - kernel_w) / stride_w + 1;
        const int end_ph = min((h + pad_h) / stride_h + 1, pool_h);
        const int end_pw = min((w + pad_w) / stride_w + 1, pool_w);
        T grad = 0;
        // Base of the (n, c) output-gradient plane.
        const T* dy_ptr = dy + (n * C + c) * pool_h * pool_w;
        for (int ph = start_ph; ph < end_ph; ++ph) {
            for (int pw = start_pw; pw < end_pw; ++pw) {
                // Recompute this window's extent the same way the
                // forward pass does, clamped to the image.
                int start_h = ph * stride_h - pad_h;
                int start_w = pw * stride_w - pad_w;
                int end_h = min(start_h + kernel_h, H + pad_h);
                int end_w = min(start_w + kernel_w, W + pad_w);
                start_h = max(start_h, 0);
                start_w = max(start_w, 0);
                end_h = min(end_h, H);
                end_w = min(end_w, W);
                const int pool_area = (end_h - start_h) * (end_w - start_w);
                grad += (dy_ptr[ph * pool_w + pw] / pool_area);
            }
        }
        dx[idx] = grad;
    }
}
/*! Average pooling backward, NHWC. One thread per input element; it
 *  sums dy / pool_area over every pooling window covering the element.
 *
 *  Fixes vs. the previous revision (same as the NCHW variant):
 *    - end_ph / end_pw ignored the padding (used h / stride_h instead of
 *      (h + pad_h) / stride_h), dropping windows that begin inside the
 *      top/left padding; cf. _MAXPooling2dGrad_NHWC above.
 *    - pool_area is now clamped to the valid image region exactly as
 *      the forward kernel _AVGPooling2d_NHWC does. */
template<typename T>
__global__ void _AVGPooling2dGrad_NHWC(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const T* dy,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decode (n, h, w, c) from the flat NHWC input index.
        const int c = idx % C;
        const int w = (idx / C) % W;
        const int h = (idx / C / W) % H;
        const int n = idx / C / W / H;
        // First / one-past-last pooling window covering (h, w).
        const int start_ph = (h + pad_h < kernel_h) ?
            0 : (h + pad_h - kernel_h) / stride_h + 1;
        const int start_pw = (w + pad_w < kernel_w) ?
            0 : (w + pad_w - kernel_w) / stride_w + 1;
        const int end_ph = min((h + pad_h) / stride_h + 1, pool_h);
        const int end_pw = min((w + pad_w) / stride_w + 1, pool_w);
        T grad = 0;
        for (int ph = start_ph; ph < end_ph; ++ph) {
            for (int pw = start_pw; pw < end_pw; ++pw) {
                // Recompute this window's extent the same way the
                // forward pass does, clamped to the image.
                int start_h = ph * stride_h - pad_h;
                int start_w = pw * stride_w - pad_w;
                int end_h = min(start_h + kernel_h, H + pad_h);
                int end_w = min(start_w + kernel_w, W + pad_w);
                start_h = max(start_h, 0);
                start_w = max(start_w, 0);
                end_h = min(end_h, H);
                end_w = min(end_w, W);
                const int pool_area = (end_h - start_h) * (end_w - start_w);
                const int y_idx = ((n * pool_h + ph) * pool_w + pw) * C + c;
                grad += (dy[y_idx] / pool_area);
            }
        }
        dx[idx] = grad;
    }
}
/*! AVGPooling2dGrad<float, CUDAContext>
 *  Launches the average-pooling backward kernel for the given layout. */
template<> void AVGPooling2dGrad<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const string& data_format,
    const float* dy,
    float* dx,
    CUDAContext* ctx) {
    if (data_format == "NHWC") {
        _AVGPooling2dGrad_NHWC<float>
            <<< CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >>>(count,
            N, C, H, W, pool_h, pool_w, kernel_h, kernel_w,
                stride_h, stride_w, pad_h, pad_w, dy, dx);
    } else if (data_format == "NCHW") {
        _AVGPooling2dGrad_NCHW<float>
            <<< CUDA_BLOCKS(count), CUDA_THREADS,
                0, ctx->cuda_stream() >>>(count,
            N, C, H, W, pool_h, pool_w, kernel_h, kernel_w,
                stride_h, stride_w, pad_h, pad_w, dy, dx);
    } else {
        LOG(FATAL) << "Unknown data format: " << data_format;
    }
}
/******************** vision.roi_pooling ********************/
/*! ROI max pooling (Fast R-CNN style), NCHW. Each output element takes
 *  the max over its bin of the spatially-scaled ROI; `mask` records the
 *  argmax offset within the (batch, channel) plane for the backward
 *  pass. rois holds 5 values per ROI: (batch_ind, x1, y1, x2, y2).
 *  ROIs with a negative batch index produce zero output and mask = -1.
 *
 *  Fix: the previous revision advanced the `x` parameter itself
 *  (x += plane_offset) inside CUDA_1D_KERNEL_LOOP. That macro is used
 *  as a grid-stride loop throughout this file, so the offset
 *  accumulated across iterations and every iteration after the first
 *  read from the wrong plane. A per-iteration local pointer is used
 *  instead (same pattern as offset_Xdata in _ROIAlign below). */
template <typename T>
__global__ void _ROIPooling(
    const int count,
    const T spatial_scale,
    const int channels,
    const int height,
    const int width,
    const int pool_h,
    const int pool_w,
    const T* x,
    const T* rois,
    int* mask,
    T* y) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        int pw = idx % pool_w;
        int ph = (idx / pool_w) % pool_h;
        int c = (idx / pool_w / pool_h) % channels;
        int n = idx / pool_w / pool_h / channels;
        const T* offset_rois = rois + n * 5;
        int roi_batch_ind = offset_rois[0];
        // Negative batch index marks an invalid / dummy ROI.
        if (roi_batch_ind < 0) {
            y[idx] = 0;
            mask[idx] = -1;
            continue;
        }
        // ROI corners in feature-map coordinates.
        int roi_start_w = round(offset_rois[1] * spatial_scale);
        int roi_start_h = round(offset_rois[2] * spatial_scale);
        int roi_end_w = round(offset_rois[3] * spatial_scale);
        int roi_end_h = round(offset_rois[4] * spatial_scale);
        // Force at least 1x1 so bin sizes stay positive.
        int roi_width = max(roi_end_w - roi_start_w + 1, 1);
        int roi_height = max(roi_end_h - roi_start_h + 1, 1);
        const T bin_size_h = (T)roi_height / (T)pool_h;
        const T bin_size_w = (T)roi_width / (T)pool_w;
        // Bin (ph, pw) extent, clamped to the feature map.
        int hstart = floor(bin_size_h * ph);
        int wstart = floor(bin_size_w * pw);
        int hend = ceil(bin_size_h * (ph + 1));
        int wend = ceil(bin_size_w * (pw + 1));
        hstart = min(max(hstart + roi_start_h, 0), height);
        hend = min(max(hend + roi_start_h, 0), height);
        wstart = min(max(wstart + roi_start_w, 0), width);
        wend = min(max(wend + roi_start_w, 0), width);
        bool is_empty = (hend <= hstart) || (wend <= wstart);
        float max_val = is_empty ? 0 : -FLT_MAX;
        int max_idx = -1;
        // Local pointer: do NOT mutate the kernel parameter `x`,
        // this loop body runs multiple times per thread.
        const T* offset_x = x + ((roi_batch_ind * channels + c) * height * width);
        for (int h = hstart; h < hend; ++h) {
            for (int w = wstart; w < wend; ++w) {
                const int x_idx = h * width + w;
                if (offset_x[x_idx] > max_val) {
                    max_val = offset_x[x_idx];
                    max_idx = x_idx;
                }
            }
        }
        y[idx] = max_val;
        mask[idx] = max_idx;
    }
}
/*! ROIPooling<float, CUDAContext>
 *  Launches the ROI max-pooling forward kernel; one thread per output
 *  element, `mask` receives the argmax for the backward pass. */
template<> void ROIPooling<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int num_rois,
    const float spatial_scale,
    const float* x,
    const float* rois,
    int* mask,
    float* y,
    CUDAContext* ctx) {
    const int nblocks = CUDA_BLOCKS(count);
    _ROIPooling<float>
        <<< nblocks, CUDA_THREADS, 0, ctx->cuda_stream() >>>(
            count, spatial_scale, C, H, W,
            pool_h, pool_w, x, rois, mask, y);
}
/*! ROI max pooling backward, NCHW. One thread per input element; it
 *  scans every ROI from the same batch image whose box covers the
 *  element, finds the output bins that could have sampled it, and
 *  accumulates dy where the recorded argmax matches. */
template <typename T>
__global__ void _ROIPoolingGrad(
    const int count,
    const int num_rois,
    const T spatial_scale,
    const int channels,
    const int height,
    const int width,
    const int pool_h,
    const int pool_w,
    const T* dy,
    const T* rois,
    const int* mask,
    T* dx) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decode (n, c, h, w) from the flat NCHW input index.
        int w = idx % width;
        int h = (idx / width) % height;
        int c = (idx / width / height) % channels;
        int n = idx / width / height / channels;
        T gradient = 0;
        for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
            // rois: 5 values per ROI (batch_ind, x1, y1, x2, y2).
            const T* offset_rois = rois + roi_n * 5;
            int roi_batch_ind = offset_rois[0];
            // Only ROIs taken from this batch image can contribute.
            if (n != roi_batch_ind) continue;
            int roi_start_w = round(offset_rois[1] * spatial_scale);
            int roi_start_h = round(offset_rois[2] * spatial_scale);
            int roi_end_w = round(offset_rois[3] * spatial_scale);
            int roi_end_h = round(offset_rois[4] * spatial_scale);
            // Skip ROIs whose box does not cover this pixel.
            const bool in_roi = (w >= roi_start_w &&
                                 w <= roi_end_w &&
                                 h >= roi_start_h &&
                                 h <= roi_end_h);
            if (!in_roi) continue;
            // Base of this ROI's (c) output / mask plane.
            int y_offset = (roi_n * channels + c) * pool_h * pool_w;
            const T* offset_dy = dy + y_offset;
            const int* offset_mask = mask + y_offset;
            int roi_width = max(roi_end_w - roi_start_w + 1, 1);
            int roi_height = max(roi_end_h - roi_start_h + 1, 1);
            const T bin_size_h = (T)roi_height / (T)pool_h;
            const T bin_size_w = (T)roi_width / (T)pool_w;
            // Range of output bins whose window may include (h, w),
            // clamped to the pooled grid.
            int phstart = floor(static_cast<T>(h - roi_start_h) / bin_size_h);
            int phend = ceil(static_cast<T>(h - roi_start_h + 1) / bin_size_h);
            int pwstart = floor(static_cast<T>(w - roi_start_w) / bin_size_w);
            int pwend = ceil(static_cast<T>(w - roi_start_w + 1) / bin_size_w);
            phstart = min(max(phstart, 0), pool_h);
            phend = min(max(phend, 0), pool_h);
            pwstart = min(max(pwstart, 0), pool_w);
            pwend = min(max(pwend, 0), pool_w);
            for (int ph = phstart; ph < phend; ++ph) {
                for (int pw = pwstart; pw < pwend; ++pw) {
                    int pool_idx = ph * pool_w + pw;
                    // Route gradient only through recorded argmax positions.
                    if (offset_mask[pool_idx] == (h * width + w)) {
                        gradient += offset_dy[pool_idx];
                    }
                }
            }
        }
        dx[idx] = gradient;
    }
}
/*! ROIPoolingGrad<float, CUDAContext>
 *  Launches the ROI max-pooling backward kernel; one thread per input
 *  element, gradients routed via the forward `mask`. */
template<> void ROIPoolingGrad<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int num_rois,
    const float spatial_scale,
    const float* dy,
    const float* rois,
    const int* mask,
    float* dx,
    CUDAContext* ctx) {
    const int nblocks = CUDA_BLOCKS(count);
    _ROIPoolingGrad<float>
        <<< nblocks, CUDA_THREADS, 0, ctx->cuda_stream() >>>(
            count, num_rois, spatial_scale, C, H, W,
            pool_h, pool_w, dy, rois, mask, dx);
}
/******************** vision.roi_align ********************/
/*! Bilinearly sample Xdata (one height x width plane) at real
 *  coordinates (y, x). Samples more than one pixel outside the image
 *  return 0; otherwise the coordinates are clamped to the border and
 *  the 4-tap weighted sum of the surrounding pixels is returned. */
template <typename T>
__device__ T _ROIAlignInterpolate(
    const T* Xdata,
    const int height,
    const int width,
    T y,
    T x) {
    // Out of the valid sampling range entirely: contribute nothing.
    if (y < -1.0 || y > height || x < -1.0 || x > width) return 0;
    // Clamp slightly-outside samples onto the border.
    if (y <= 0) y = 0;
    if (x <= 0) x = 0;
    int y_low = (int)y;
    int x_low = (int)x;
    int y_high;
    int x_high;
    // At the bottom/right edge both taps collapse onto the last row/col.
    if (y_low >= height - 1) {
        y_high = y_low = height - 1;
        y = (T)y_low;
    } else {
        y_high = y_low + 1;
    }
    if (x_low >= width - 1) {
        x_high = x_low = width - 1;
        x = (T)x_low;
    } else {
        x_high = x_low + 1;
    }
    // Fractional offsets (l*) and their complements (h*).
    T ly = y - y_low;
    T lx = x - x_low;
    T hy = 1. - ly, hx = 1. - lx;
    // Four corner samples and their bilinear weights.
    T v1 = Xdata[y_low * width + x_low];
    T v2 = Xdata[y_low * width + x_high];
    T v3 = Xdata[y_high * width + x_low];
    T v4 = Xdata[y_high * width + x_high];
    T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
    T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
    return val;
}
/*! ROI align forward (Mask R-CNN style), NCHW. Each output element
 *  averages roi_bin_grid_h * roi_bin_grid_w bilinear samples taken
 *  inside its bin. rois holds 5 values per ROI:
 *  (batch_ind, x1, y1, x2, y2); a negative batch index yields 0. */
template <typename T>
__global__ void _ROIAlign(
    const int count,
    const float spatial_scale,
    const int channels,
    const int height,
    const int width,
    const int pool_h,
    const int pool_w,
    const int sampling_ratio,
    const T* Xdata,
    const T* rois,
    T* Ydata) {
    CUDA_1D_KERNEL_LOOP(idx, count) {
        // Decode (n, c, ph, pw) from the flat output index.
        int pw = idx % pool_w;
        int ph = (idx / pool_w) % pool_h;
        int c = (idx / pool_w / pool_h) % channels;
        int n = idx / pool_w / pool_h / channels;
        const T* offset_rois = rois + n * 5;
        int roi_batch_ind = offset_rois[0];
        // Negative batch index marks an invalid / dummy ROI.
        if (roi_batch_ind < 0) {
            Ydata[idx] = 0;
            continue;
        }
        // ROI box in feature-map coordinates (no rounding, unlike
        // _ROIPooling), with a minimum extent of 1.
        T roi_start_w = offset_rois[1] * spatial_scale;
        T roi_start_h = offset_rois[2] * spatial_scale;
        T roi_end_w = offset_rois[3] * spatial_scale;
        T roi_end_h = offset_rois[4] * spatial_scale;
        T roi_width = max(roi_end_w - roi_start_w, (T)1.);
        T roi_height = max(roi_end_h - roi_start_h, (T)1.);
        T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pool_h);
        T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pool_w);
        // Base of the (roi_batch_ind, c) input plane.
        const T* offset_Xdata = Xdata +(roi_batch_ind * channels + c) * height * width;
        // Sampling grid per bin: fixed if sampling_ratio > 0, else adaptive.
        int roi_bin_grid_h = (sampling_ratio > 0) ?
            sampling_ratio : ceil(roi_height / pool_h);
        int roi_bin_grid_w = (sampling_ratio > 0) ?
            sampling_ratio : ceil(roi_width / pool_w);
        const T num_bin_grids = roi_bin_grid_h * roi_bin_grid_w;
        T output_val = 0.;
        for (int iy = 0; iy < roi_bin_grid_h; iy++) {
            // Sample at the center of each sub-cell of the bin.
            const T y = roi_start_h + ph * bin_size_h +
                static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h);
            for (int ix = 0; ix < roi_bin_grid_w; ix++) {
                const T x = roi_start_w + pw * bin_size_w +
                    static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
                T val = _ROIAlignInterpolate(offset_Xdata, height, width, y, x);
                output_val += val;
            }
        }
        // Average over all samples in the bin.
        output_val /= num_bin_grids;
        Ydata[idx] = output_val;
    }
}
// Host-side launcher for the ROIAlign forward kernel (float specialization).
// One thread per output element, enqueued on the context's CUDA stream.
template<> void ROIAlign<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int num_rois,
    const float spatial_scale,
    const int sampling_ratio,
    const float* x,
    const float* rois,
    float* y,
    CUDAContext* ctx) {
  _ROIAlign<float>
      <<<CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
          count, spatial_scale, C, H, W, pool_h, pool_w,
          sampling_ratio, x, rois, y);
}
// Computes the four bilinear-interpolation weights (w1..w4) and the integer
// neighbor coordinates (x_low/x_high, y_low/y_high) for scattering a
// gradient contribution at continuous position (x, y).
// Samples outside [-1, height] x [-1, width] produce zero weights and
// sentinel indices of -1; callers must check the indices before use.
template <typename T>
__device__ void _ROIAlignInterpolateGrad(
    const int height,
    const int width,
    T y,
    T x,
    T& w1,
    T& w2,
    T& w3,
    T& w4,
    int& x_low,
    int& x_high,
    int& y_low,
    int& y_high) {
  // Entirely out of range: no neighbors receive gradient.
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    w1 = w2 = w3 = w4 = 0.;
    x_low = x_high = y_low = y_high = -1;
    return;
  }
  // Clamp slightly-negative coordinates onto the border.
  if (y <= 0) y = 0;
  if (x <= 0) x = 0;
  y_low = (int)y;
  x_low = (int)x;
  // Pick the high neighbor, collapsing onto the last cell at the far edge.
  if (y_low < height - 1) {
    y_high = y_low + 1;
  } else {
    y_high = y_low = height - 1;
    y = (T)y_low;
  }
  if (x_low < width - 1) {
    x_high = x_low + 1;
  } else {
    x_high = x_low = width - 1;
    x = (T)x_low;
  }
  // Fractional offsets and the resulting bilinear weights.
  const T ly = y - y_low;
  const T lx = x - x_low;
  const T hy = 1. - ly;
  const T hx = 1. - lx;
  w1 = hy * hx;
  w2 = hy * lx;
  w3 = ly * hx;
  w4 = ly * lx;
  return;
}
// ROIAlign backward kernel (NCHW layout).
// One thread per element of the output gradient dYdata; each thread
// redistributes its gradient to the four bilinear neighbors of every
// sampling point via atomicAdd into dXdata. dXdata is assumed to be
// zero-initialized by the caller — TODO confirm at call sites.
// NOTE(review): many threads may hit the same dXdata element, so atomics
// are required; contention is expected for overlapping RoIs.
template <typename T>
__global__ void _ROIAlignGrad(
    const int count,
    const int num_rois,
    const T spatial_scale,
    const int channels,
    const int height,
    const int width,
    const int pool_h,
    const int pool_w,
    const int sampling_ratio,
    const T* dYdata,
    const T* rois,
    T* dXdata) {
  CUDA_1D_KERNEL_LOOP(idx, count) {
    // Decompose the flat index into (n, c, ph, pw).
    int pw = idx % pool_w;
    int ph = (idx / pool_w) % pool_h;
    int c = (idx / pool_w / pool_h) % channels;
    int n = idx / pool_w / pool_h / channels;
    const T* offset_rois = rois + n * 5;
    int roi_batch_ind = offset_rois[0];
    // Padded/invalid RoI: no gradient flows.
    if (roi_batch_ind < 0) continue;
    // Same RoI geometry as the forward kernel.
    T roi_start_w = offset_rois[1] * spatial_scale;
    T roi_start_h = offset_rois[2] * spatial_scale;
    T roi_end_w = offset_rois[3] * spatial_scale;
    T roi_end_h = offset_rois[4] * spatial_scale;
    T roi_width = max(roi_end_w - roi_start_w, (T)1.);
    T roi_height = max(roi_end_h - roi_start_h, (T)1.);
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pool_h);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pool_w);
    // Channel plane of dX that receives this thread's scattered gradient.
    T* offset_dXdata = dXdata +
        (roi_batch_ind * channels + c) * height * width;
    int y_offset = (n * channels + c) * pool_h * pool_w;
    const T* offset_dYdata = dYdata + y_offset;
    // Incoming gradient for this pooled bin.
    const T dYdata_this_bin = offset_dYdata[ph * pool_w + pw];
    // Sampling grid must match the forward pass exactly.
    int roi_bin_grid_h = (sampling_ratio > 0) ?
        sampling_ratio : ceil(roi_height / pool_h);
    int roi_bin_grid_w = (sampling_ratio > 0) ?
        sampling_ratio : ceil(roi_width / pool_w);
    // >= 1 by construction; divides each sample's share of the gradient.
    const T num_bin_grids = roi_bin_grid_h * roi_bin_grid_w;
    for (int iy = 0; iy < roi_bin_grid_h; iy++) {
      const T y = roi_start_h + ph * bin_size_h +
          static_cast<T>(iy + .5f) * bin_size_h /
              static_cast<T>(roi_bin_grid_h);
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        const T x = roi_start_w + pw * bin_size_w +
            static_cast<T>(ix + .5f) * bin_size_w /
                static_cast<T>(roi_bin_grid_w);
        // Bilinear weights and neighbor indices for this sampling point.
        T w1, w2, w3, w4;
        int x_low, x_high, y_low, y_high;
        _ROIAlignInterpolateGrad(
            height, width, y, x, w1, w2, w3, w4,
            x_low, x_high, y_low, y_high);
        T g1 = dYdata_this_bin * w1 / num_bin_grids;
        T g2 = dYdata_this_bin * w2 / num_bin_grids;
        T g3 = dYdata_this_bin * w3 / num_bin_grids;
        T g4 = dYdata_this_bin * w4 / num_bin_grids;
        // Indices of -1 mean the sample fell outside the feature map.
        if (x_low >= 0 && x_high >= 0
            && y_low >= 0 && y_high >= 0) {
          atomicAdd(
              offset_dXdata + y_low * width + x_low,
              static_cast<T>(g1));
          atomicAdd(
              offset_dXdata + y_low * width + x_high,
              static_cast<T>(g2));
          atomicAdd(
              offset_dXdata + y_high * width + x_low,
              static_cast<T>(g3));
          atomicAdd(
              offset_dXdata + y_high * width + x_high,
              static_cast<T>(g4));
        }
      }
    }
  }
}
// Host-side launcher for the ROIAlign backward kernel (float specialization).
// One thread per dY element; gradients are scattered into dx atomically.
template<> void ROIAlignGrad<float, CUDAContext>(
    const int count,
    const int N,
    const int C,
    const int H,
    const int W,
    const int pool_h,
    const int pool_w,
    const int num_rois,
    const float spatial_scale,
    const int sampling_ratio,
    const float* dy,
    const float* rois,
    float* dx,
    CUDAContext* ctx) {
  _ROIAlignGrad<float>
      <<<CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
          count, num_rois, spatial_scale, C, H, W,
          pool_h, pool_w, sampling_ratio, dy, rois, dx);
}
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA |
e74e27d6c3168eb35f2e5252e78a85691d900d76.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
// im2col kernel with a custom "jump" addressing scheme layered on top of
// the standard dilation/stride layout. One thread per element of the
// column-buffer spatial grid (channels * height_col * width_col threads).
// NOTE(review): h_offset/w_offset divide by jump_period, so this kernel
// assumes jump_period > 0 even when jump_step == 0 — confirm at call sites.
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int jump_period, const int jump_step,
    const int height_col, const int width_col,
    Dtype* data_col) {
  CUDA_KERNEL_LOOP(index, n) {
    // Decompose the flat index into (c_im, h_col, w_col).
    const int h_index = index / width_col;
    const int h_col = h_index % height_col;
    const int w_col = index % width_col;
    const int c_im = h_index / height_col;
    // First column-buffer channel written by this thread.
    const int c_col = c_im * kernel_h * kernel_w;
    // Standard im2col would use:
    //   h_offset = h_col * stride_h - pad_h;
    //   w_offset = w_col * stride_w - pad_w;
    // This variant remaps through dilation groups and then skips
    // jump_step input rows/cols after every jump_period of them.
    int h_offset = (h_col / dilation_h) * (dilation_h * stride_h) + (h_col % dilation_h) - pad_h;
    h_offset += (h_offset / jump_period) * jump_step;
    int w_offset = (w_col / dilation_w) * (dilation_w * stride_w) + (w_col % dilation_w) - pad_w;
    w_offset += (w_offset / jump_period) * jump_step;
    Dtype* data_col_ptr = data_col;
    data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
    const Dtype* data_im_ptr = data_im;
    data_im_ptr += (c_im * height + h_offset) * width + w_offset;
    // Copy the kernel_h x kernel_w patch; out-of-bounds taps write 0
    // (implicit zero padding).
    for (int i = 0; i < kernel_h; ++i) {
      for (int j = 0; j < kernel_w; ++j) {
        int h_im = h_offset + i * dilation_h;
        int w_im = w_offset + j * dilation_w;
        *data_col_ptr =
            (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
            data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
        // Successive taps land in successive column-buffer channels.
        data_col_ptr += height_col * width_col;
      }
    }
  }
}
// Host-side launcher for im2col_gpu_kernel (HIP build).
// Computes the column-buffer spatial extent — which differs from standard
// Caffe im2col because of the jump_period/jump_step scheme — and launches
// one thread per column-buffer element.
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int jump_period, const int jump_step,
    Dtype* data_col) {
  int height_col;
  int width_col;
  if (jump_step == 0) {
    height_col = (height + 2 * pad_h - kernel_h * dilation_h) / stride_h + dilation_h;
    width_col = (width + 2 * pad_w - kernel_w * dilation_w) / stride_w + dilation_w;
  } else {
    // With jumps, only jump_period of every (jump_period + jump_step)
    // input rows/cols contribute output positions.
    height_col = height * jump_period / (jump_period + jump_step);
    width_col = width * jump_period / (jump_period + jump_step);
  }
  const int num_kernels = channels * height_col * width_col;
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( im2col_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
      pad_w, stride_h, stride_w, dilation_h, dilation_w, jump_period, jump_step,
      height_col, width_col, data_col);
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int jump_period, const int jump_step, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int jump_period, const int jump_step, double* data_col);
// N-dimensional im2col kernel (HIP build). The axis count is a compile-time
// template parameter so the per-thread index arrays have fixed size.
// Shape/stride/pad/dilation vectors are staged into shared memory by the
// first num_axes(+1) threads of each block; the __syncthreads() below must
// therefore be reached by all threads — it is outside any divergent branch.
// Requires blockDim.x > num_axes (checked by the host wrapper's DCHECK_LT).
template <typename Dtype, int num_axes>
__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, Dtype* data_col) {
  int d_temp[num_axes];  // NOLINT(runtime/arrays)
  int d_iter[num_axes];  // NOLINT(runtime/arrays)
  __shared__ int shared_dilation[num_axes];
  __shared__ int shared_kernel_shape[num_axes];
  __shared__ int shared_pad[num_axes];
  __shared__ int shared_stride[num_axes];
  __shared__ int shared_col_shape[num_axes + 1];
  __shared__ int shared_im_shape[num_axes + 1];
  if (threadIdx.x < num_axes) {
    shared_dilation[threadIdx.x] = dilation[threadIdx.x];
    shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
    shared_pad[threadIdx.x] = pad[threadIdx.x];
    shared_stride[threadIdx.x] = stride[threadIdx.x];
  }
  if (threadIdx.x < num_axes + 1) {
    shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
    shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
  }
  __syncthreads();
  int i;
  CUDA_KERNEL_LOOP(index, n) {
    // Initialize channel_in, computed in the loop below, with intermediate
    // computations used to compute the spatial indices.
    int channel_in = index;
    int channel_out = 1;
    // Peel off the spatial coordinates (into d_temp) and accumulate the
    // kernel volume into channel_out.
    for (i = num_axes - 1; i >= 0; --i) {
      d_temp[i] = channel_in % shared_col_shape[i + 1];
      channel_in /= shared_col_shape[i + 1];
      channel_out *= shared_kernel_shape[i];
    }
    channel_out *= channel_in;
    int data_col_inc = 1;
    // Convert d_temp from output coords to padded input-start coords and
    // build the flat base offsets for both buffers.
    for (i = 0; i < num_axes; ++i) {
      channel_out *= shared_col_shape[i + 1];
      channel_out += d_temp[i];
      d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];
      channel_in *= shared_im_shape[i + 1];
      channel_in += d_temp[i];
      data_col_inc *= shared_col_shape[i + 1];
      d_iter[i] = 0;
    }
    Dtype* data_col_ptr = data_col + channel_out;
    const Dtype* data_im_ptr = data_im + channel_in;
    bool incremented;
    // Odometer-style iteration over every tap of the N-D kernel window.
    do {
      bool in_range = true;
      for (i = 0; i < num_axes; ++i) {
        const int d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];
        in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];
        if (!in_range) { break; }
      }
      if (in_range) {
        int data_im_offset = d_iter[0] * shared_dilation[0];
        for (i = 1; i < num_axes; ++i) {
          data_im_offset *= shared_im_shape[i + 1];
          data_im_offset += d_iter[i] * shared_dilation[i];
        }
        *data_col_ptr = data_im_ptr[data_im_offset];
      } else {
        // Implicit zero padding for taps outside the image.
        *data_col_ptr = 0;
      }
      data_col_ptr += data_col_inc;
      incremented = false;
      // Advance the odometer: increment the last axis, carrying leftward.
      for (i = num_axes - 1; i >= 0; --i) {
        const int d_max = shared_kernel_shape[i];
        if (d_iter[i] == d_max - 1) {
          d_iter[i] = 0;
        } else {  // d_iter[i] < d_max - 1
          ++d_iter[i];
          incremented = true;
          break;
        }
      }  // for (int i = num_axes - 1; i >= 0; --i)
    } while (incremented);  // do
  }  // CUDA_KERNEL_LOOP(index, n)
}
// Host-side dispatcher for im2col_nd_gpu_kernel: selects the compile-time
// axis count (1..10) matching the runtime num_spatial_axes and launches
// one thread per column-buffer element.
//
// Fix: the hipify translation had mangled every launch — the
// "hipLaunchKernelGGL((" token was spliced into the trailing NOLINT
// comment, leaving a dangling ", dim3(...), ..., 0, 0," expression that is
// not valid C++. Restored proper hipLaunchKernelGGL launches; the extra
// parentheses around the kernel name keep the template-argument comma from
// splitting the macro arguments.
template <typename Dtype>
void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes,
    const int num_kernels, const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, Dtype* data_col) {
  // num_axes should be smaller than block size (kernel stages per-axis
  // metadata into shared memory using threadIdx.x as the axis index).
  DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
#define IM2COL_ND_GPU_LAUNCH(AXES)                                           \
  case AXES:                                                                 \
    hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, AXES>),                  \
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS),   \
        0, 0,                                                                \
        num_kernels, data_im, im_shape, col_shape,                           \
        kernel_shape, pad, stride, dilation, data_col);                      \
    break;
  switch (num_spatial_axes) {
    IM2COL_ND_GPU_LAUNCH(1)
    IM2COL_ND_GPU_LAUNCH(2)
    IM2COL_ND_GPU_LAUNCH(3)
    IM2COL_ND_GPU_LAUNCH(4)
    IM2COL_ND_GPU_LAUNCH(5)
    IM2COL_ND_GPU_LAUNCH(6)
    IM2COL_ND_GPU_LAUNCH(7)
    IM2COL_ND_GPU_LAUNCH(8)
    IM2COL_ND_GPU_LAUNCH(9)
    IM2COL_ND_GPU_LAUNCH(10)
    default:
      LOG(FATAL) << "im2col_nd_gpu does not support computation with "
                 << num_spatial_axes << " spatial axes";
  }
#undef IM2COL_ND_GPU_LAUNCH
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_nd_gpu<float>(const float* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_col);
template void im2col_nd_gpu<double>(const double* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_col);
// col2im kernel with the custom jump_period/jump_step addressing scheme.
// One thread per input-image element; each thread gathers (sums) every
// column-buffer entry whose receptive field covers its pixel, so no
// atomics are needed.
// NOTE(review): jj = jump_period + jump_step is used as a divisor in both
// branches below, so jump_period > 0 is assumed even when jump_step == 0 —
// confirm at call sites.
// (The upstream Caffe reference implementation — a plain dilation/stride
// gather with no jump scheme — was kept here as a block comment in the
// original; it has been condensed to this note.)
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
    const int height, const int width, const int channels,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int jump_period, const int jump_step,
    const int height_col, const int width_col,
    Dtype* data_im) {
  CUDA_KERNEL_LOOP(index, n) {
    Dtype val = 0;
    // Padded coordinates of this thread's image pixel.
    const int w_im = index % width + pad_w;
    const int h_im = (index / width) % height + pad_h;
    const int c_im = index / (width * height);
    // Compute the start and end of the contributing output range.
    int w_col_start, h_col_start, w_col_end, h_col_end;
    int jj = jump_period + jump_step;
    if (jump_step != 0) {
      // Jump mode: map through the jump grouping to find which output
      // columns sampled this pixel.
      w_col_start =
          w_im / jj * jump_period + max(-1, (w_im % jj)-kernel_w) + 1;
      w_col_end = min(w_im / jj * jump_period + (w_im % jj) + 1, width_col);
      h_col_start =
          h_im / jj * jump_period + max(-1, (h_im % jj)-kernel_h) + 1;
      h_col_end = min(h_im / jj * jump_period + (h_im % jj) + 1, height_col);
    }
    else {
      // No jumps: dilation-group-aware bounds (note jj == jump_period
      // here, so the h_im/jj*jump_period + h_im%jj terms below reduce to
      // h_im / w_im).
      w_col_start = (w_im < dilation_w * kernel_w) ? (w_im % dilation_w) :
          ((w_im - dilation_w*kernel_w) / (dilation_w*stride_w) + 1) * dilation_w + (w_im % dilation_w);
      w_col_end =
          min((w_im / (dilation_w*stride_w)+1) * dilation_w + (w_im % dilation_w), width_col);
      h_col_start = (h_im < dilation_h * kernel_h) ? (h_im % dilation_h) :
          ((h_im - dilation_h*kernel_h) / (dilation_h*stride_h) + 1) * dilation_h + (h_im % dilation_h);
      h_col_end =
          min((h_im / (dilation_h*stride_h)+1) * dilation_h + (h_im % dilation_h), height_col);
    }
    // Accumulate every column-buffer entry that read this pixel.
    for (int h_col = h_col_start; h_col < h_col_end; h_col += dilation_h) {
      for (int w_col = w_col_start; w_col < w_col_end; w_col += dilation_w) {
        // The col channel: base channel plus the (row, col) tap index
        // within the kernel window that produced this entry.
        int c_col = c_im * kernel_h * kernel_w
            + ((h_im/jj*jump_period + h_im%jj) - h_col * stride_h)/dilation_h * kernel_w
            + ((w_im/jj*jump_period + w_im%jj) - w_col * stride_w)/dilation_w;
        val += data_col[(c_col * height_col + h_col) * width_col + w_col];
      }
    }
    data_im[index] = val;
  }
}
// Host-side launcher for col2im_gpu_kernel (HIP build).
// The column-buffer extent must match im2col_gpu's computation exactly;
// one thread is launched per image element so the kernel can gather
// without atomic operations.
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    const int jump_period, const int jump_step,
    Dtype* data_im) {
  int height_col;
  int width_col;
  if (jump_step == 0) {
    height_col = (height + 2 * pad_h - kernel_h * dilation_h) / stride_h + dilation_h;
    width_col = (width + 2 * pad_w - kernel_w * dilation_w) / stride_w + dilation_w;
  } else {
    height_col = height * jump_period / (jump_period + jump_step);
    width_col = width * jump_period / (jump_period + jump_step);
  }
  const int num_kernels = channels * height * width;
  // One kernel thread per bottom (image) element; the kernel itself sums
  // over the contributing top (column) entries, avoiding atomics.
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
      pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, jump_period, jump_step,
      height_col, width_col, data_im);
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int jump_period, const int jump_step,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int jump_period, const int jump_step,
double* data_im);
// N-dimensional col2im kernel (HIP build). One thread per image element;
// each thread gathers (sums) all column-buffer entries whose kernel window
// covers it, so no atomics are needed. The axis count is a compile-time
// template parameter; per-axis metadata is staged into shared memory by the
// first num_axes(+1) threads (__syncthreads() is outside divergent flow).
// Requires blockDim.x > num_axes (checked by the host wrapper's DCHECK_LT).
template <typename Dtype, int num_axes>
__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, Dtype* data_im) {
  int d_im[num_axes];  // NOLINT(runtime/arrays)
  int d_col_iter[num_axes];  // NOLINT(runtime/arrays)
  int d_col_start[num_axes];  // NOLINT(runtime/arrays)
  int d_col_end[num_axes];  // NOLINT(runtime/arrays)
  __shared__ int shared_dilation[num_axes];
  __shared__ int shared_kernel_shape[num_axes];
  __shared__ int shared_pad[num_axes];
  __shared__ int shared_stride[num_axes];
  __shared__ int shared_col_shape[num_axes + 1];
  __shared__ int shared_im_shape[num_axes + 1];
  if (threadIdx.x < num_axes) {
    shared_dilation[threadIdx.x] = dilation[threadIdx.x];
    shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
    shared_pad[threadIdx.x] = pad[threadIdx.x];
    shared_stride[threadIdx.x] = stride[threadIdx.x];
  }
  if (threadIdx.x < num_axes + 1) {
    shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
    shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
  }
  __syncthreads();
  CUDA_KERNEL_LOOP(index, n) {
    // Initialize channel_in, computed in the loop below, with intermediate
    // computations used to compute the spatial indices.
    int c_im = index;
    // Calculate d_im (padded image coordinates); c_im ends as the channel.
    for (int i = num_axes - 1; i >= 0; --i) {
      d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];
      c_im /= shared_im_shape[i + 1];
    }
    // Calculate the col start/end range covering this pixel on each axis.
    bool done = false;
    for (int i = 0; i < num_axes; ++i) {
      const int kernel_extent =
          shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1;
      d_col_start[i] = d_col_iter[i] =
          (d_im[i] < kernel_extent) ? 0 :
          (d_im[i] - kernel_extent) / shared_stride[i] + 1;
      d_col_end[i] =
          min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]);
      if (d_col_start[i] >= d_col_end[i]) {
        // Skip computation if the dimension is 0 at any spatial axis --
        // final val will be 0.
        data_im[index] = 0;
        done = true;
        break;  // for (int i = 0; i < num_axes; ++i)
      }
    }
    if (done) {
      continue;  // CUDA_KERNEL_LOOP(index, n)
    }
    // Odometer-style loop over the contributing col positions.
    Dtype val = 0;
    bool incremented = true;
    bool skip = false;
    do {
      // Compute the flat col-buffer offset; a tap is skipped when the
      // displacement is not a multiple of the dilation on some axis.
      int final_offset = 0;
      int kernel_shape_prod = 1;
      int kernel_index;
      for (int i = num_axes - 1; i >= 0; --i) {
        kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];
        if (kernel_index % shared_dilation[i]) {
          skip = true;
          break;
        } else {
          kernel_index /= shared_dilation[i];
          final_offset += kernel_index * kernel_shape_prod;
          kernel_shape_prod *= shared_kernel_shape[i];
        }
      }
      if (!skip) {
        final_offset += kernel_shape_prod * c_im;
        for (int i = 0; i < num_axes; ++i) {
          final_offset *= shared_col_shape[i + 1];
          final_offset += d_col_iter[i];
        }
        val += data_col[final_offset];
      }
      skip = false;
      incremented = false;
      // Advance the odometer within [d_col_start, d_col_end) per axis.
      for (int i = num_axes - 1; i >= 0; --i) {
        const int d_max = d_col_end[i];
        if (d_col_iter[i] == d_max - 1) {
          d_col_iter[i] = d_col_start[i];
        } else {  // d_col_iter[i] < d_max - 1
          ++d_col_iter[i];
          incremented = true;
          break;  // for (int i = num_axes - 1; i >= 0; --i)
        }
      }  // for (int i = num_axes - 1; i >= 0; --i)
    } while (incremented);
    data_im[index] = val;
  }  // CUDA_KERNEL_LOOP(index, n)
}
// Host-side dispatcher for col2im_nd_gpu_kernel: selects the compile-time
// axis count (1..10) matching the runtime num_spatial_axes and launches
// one thread per image element.
//
// Fix: the hipify translation had mangled every launch — the
// "hipLaunchKernelGGL((" token was spliced into the trailing NOLINT
// comment, leaving a dangling ", dim3(...), ..., 0, 0," expression that is
// not valid C++. Restored proper hipLaunchKernelGGL launches; the extra
// parentheses around the kernel name keep the template-argument comma from
// splitting the macro arguments.
template <typename Dtype>
void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes,
    const int im_size, const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, Dtype* data_im) {
  // num_axes should be smaller than block size (kernel stages per-axis
  // metadata into shared memory using threadIdx.x as the axis index).
  DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
#define COL2IM_ND_GPU_LAUNCH(AXES)                                           \
  case AXES:                                                                 \
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, AXES>),                  \
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS),       \
        0, 0,                                                                \
        im_size, data_col, im_shape, col_shape,                              \
        kernel_shape, pad, stride, dilation, data_im);                       \
    break;
  switch (num_spatial_axes) {
    COL2IM_ND_GPU_LAUNCH(1)
    COL2IM_ND_GPU_LAUNCH(2)
    COL2IM_ND_GPU_LAUNCH(3)
    COL2IM_ND_GPU_LAUNCH(4)
    COL2IM_ND_GPU_LAUNCH(5)
    COL2IM_ND_GPU_LAUNCH(6)
    COL2IM_ND_GPU_LAUNCH(7)
    COL2IM_ND_GPU_LAUNCH(8)
    COL2IM_ND_GPU_LAUNCH(9)
    COL2IM_ND_GPU_LAUNCH(10)
    default:
      LOG(FATAL) << "col2im_nd_gpu does not support computation with "
                 << num_spatial_axes << " spatial axes";
  }
#undef COL2IM_ND_GPU_LAUNCH
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_nd_gpu<float>(const float* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_im);
template void col2im_nd_gpu<double>(const double* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_im);
} // namespace caffe
| e74e27d6c3168eb35f2e5252e78a85691d900d76.cu | #include <algorithm>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
// im2col kernel (CUDA original of the hipified copy earlier in this file)
// with a custom "jump" addressing scheme layered on top of the standard
// dilation/stride layout. One thread per element of the column-buffer
// spatial grid (channels * height_col * width_col threads).
// NOTE(review): h_offset/w_offset divide by jump_period, so this kernel
// assumes jump_period > 0 even when jump_step == 0 — confirm at call sites.
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int jump_period, const int jump_step,
    const int height_col, const int width_col,
    Dtype* data_col) {
  CUDA_KERNEL_LOOP(index, n) {
    // Decompose the flat index into (c_im, h_col, w_col).
    const int h_index = index / width_col;
    const int h_col = h_index % height_col;
    const int w_col = index % width_col;
    const int c_im = h_index / height_col;
    // First column-buffer channel written by this thread.
    const int c_col = c_im * kernel_h * kernel_w;
    // Standard im2col would use:
    //   h_offset = h_col * stride_h - pad_h;
    //   w_offset = w_col * stride_w - pad_w;
    // This variant remaps through dilation groups and then skips
    // jump_step input rows/cols after every jump_period of them.
    int h_offset = (h_col / dilation_h) * (dilation_h * stride_h) + (h_col % dilation_h) - pad_h;
    h_offset += (h_offset / jump_period) * jump_step;
    int w_offset = (w_col / dilation_w) * (dilation_w * stride_w) + (w_col % dilation_w) - pad_w;
    w_offset += (w_offset / jump_period) * jump_step;
    Dtype* data_col_ptr = data_col;
    data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
    const Dtype* data_im_ptr = data_im;
    data_im_ptr += (c_im * height + h_offset) * width + w_offset;
    // Copy the kernel_h x kernel_w patch; out-of-bounds taps write 0
    // (implicit zero padding).
    for (int i = 0; i < kernel_h; ++i) {
      for (int j = 0; j < kernel_w; ++j) {
        int h_im = h_offset + i * dilation_h;
        int w_im = w_offset + j * dilation_w;
        *data_col_ptr =
            (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
            data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
        // Successive taps land in successive column-buffer channels.
        data_col_ptr += height_col * width_col;
      }
    }
  }
}
// Host-side launcher for im2col_gpu_kernel (CUDA build).
// Computes the column-buffer spatial extent — which differs from standard
// Caffe im2col because of the jump_period/jump_step scheme — and launches
// one thread per column-buffer element.
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int jump_period, const int jump_step,
    Dtype* data_col) {
  int height_col;
  int width_col;
  if (jump_step == 0) {
    height_col = (height + 2 * pad_h - kernel_h * dilation_h) / stride_h + dilation_h;
    width_col = (width + 2 * pad_w - kernel_w * dilation_w) / stride_w + dilation_w;
  } else {
    // With jumps, only jump_period of every (jump_period + jump_step)
    // input rows/cols contribute output positions.
    height_col = height * jump_period / (jump_period + jump_step);
    width_col = width * jump_period / (jump_period + jump_step);
  }
  const int num_kernels = channels * height_col * width_col;
  // NOLINT_NEXT_LINE(whitespace/operators)
  im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                             CAFFE_CUDA_NUM_THREADS>>>(
      num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
      pad_w, stride_h, stride_w, dilation_h, dilation_w, jump_period, jump_step,
      height_col, width_col, data_col);
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int jump_period, const int jump_step, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int jump_period, const int jump_step, double* data_col);
// N-dimensional im2col kernel (CUDA original of the hipified copy earlier
// in this file). The axis count is a compile-time template parameter so the
// per-thread index arrays have fixed size. Shape/stride/pad/dilation
// vectors are staged into shared memory by the first num_axes(+1) threads;
// the __syncthreads() below is outside any divergent branch.
// Requires blockDim.x > num_axes (checked by the host wrapper's DCHECK_LT).
template <typename Dtype, int num_axes>
__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, Dtype* data_col) {
  int d_temp[num_axes];  // NOLINT(runtime/arrays)
  int d_iter[num_axes];  // NOLINT(runtime/arrays)
  __shared__ int shared_dilation[num_axes];
  __shared__ int shared_kernel_shape[num_axes];
  __shared__ int shared_pad[num_axes];
  __shared__ int shared_stride[num_axes];
  __shared__ int shared_col_shape[num_axes + 1];
  __shared__ int shared_im_shape[num_axes + 1];
  if (threadIdx.x < num_axes) {
    shared_dilation[threadIdx.x] = dilation[threadIdx.x];
    shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
    shared_pad[threadIdx.x] = pad[threadIdx.x];
    shared_stride[threadIdx.x] = stride[threadIdx.x];
  }
  if (threadIdx.x < num_axes + 1) {
    shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
    shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
  }
  __syncthreads();
  int i;
  CUDA_KERNEL_LOOP(index, n) {
    // Initialize channel_in, computed in the loop below, with intermediate
    // computations used to compute the spatial indices.
    int channel_in = index;
    int channel_out = 1;
    // Peel off the spatial coordinates (into d_temp) and accumulate the
    // kernel volume into channel_out.
    for (i = num_axes - 1; i >= 0; --i) {
      d_temp[i] = channel_in % shared_col_shape[i + 1];
      channel_in /= shared_col_shape[i + 1];
      channel_out *= shared_kernel_shape[i];
    }
    channel_out *= channel_in;
    int data_col_inc = 1;
    // Convert d_temp from output coords to padded input-start coords and
    // build the flat base offsets for both buffers.
    for (i = 0; i < num_axes; ++i) {
      channel_out *= shared_col_shape[i + 1];
      channel_out += d_temp[i];
      d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];
      channel_in *= shared_im_shape[i + 1];
      channel_in += d_temp[i];
      data_col_inc *= shared_col_shape[i + 1];
      d_iter[i] = 0;
    }
    Dtype* data_col_ptr = data_col + channel_out;
    const Dtype* data_im_ptr = data_im + channel_in;
    bool incremented;
    // Odometer-style iteration over every tap of the N-D kernel window.
    do {
      bool in_range = true;
      for (i = 0; i < num_axes; ++i) {
        const int d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];
        in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];
        if (!in_range) { break; }
      }
      if (in_range) {
        int data_im_offset = d_iter[0] * shared_dilation[0];
        for (i = 1; i < num_axes; ++i) {
          data_im_offset *= shared_im_shape[i + 1];
          data_im_offset += d_iter[i] * shared_dilation[i];
        }
        *data_col_ptr = data_im_ptr[data_im_offset];
      } else {
        // Implicit zero padding for taps outside the image.
        *data_col_ptr = 0;
      }
      data_col_ptr += data_col_inc;
      incremented = false;
      // Advance the odometer: increment the last axis, carrying leftward.
      for (i = num_axes - 1; i >= 0; --i) {
        const int d_max = shared_kernel_shape[i];
        if (d_iter[i] == d_max - 1) {
          d_iter[i] = 0;
        } else {  // d_iter[i] < d_max - 1
          ++d_iter[i];
          incremented = true;
          break;
        }
      }  // for (int i = num_axes - 1; i >= 0; --i)
    } while (incremented);  // do
  }  // CUDA_KERNEL_LOOP(index, n)
}
// Host launcher for the N-dimensional im2col kernel.
// The kernel templates on the spatial-axis count, so the runtime value is
// dispatched onto a compile-time constant (1..10) through the switch below;
// a local macro keeps the ten identical launch cases in one place.
template <typename Dtype>
void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes,
    const int num_kernels, const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, Dtype* data_col) {
  // num_axes should be smaller than block size
  DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
#define IM2COL_ND_GPU_LAUNCH_CASE(AXES) \
  case AXES: \
    im2col_nd_gpu_kernel<Dtype, AXES> /* NOLINT(whitespace/operators) */ \
        <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( \
        num_kernels, data_im, im_shape, col_shape, \
        kernel_shape, pad, stride, dilation, data_col); \
    break
  switch (num_spatial_axes) {
    IM2COL_ND_GPU_LAUNCH_CASE(1);
    IM2COL_ND_GPU_LAUNCH_CASE(2);
    IM2COL_ND_GPU_LAUNCH_CASE(3);
    IM2COL_ND_GPU_LAUNCH_CASE(4);
    IM2COL_ND_GPU_LAUNCH_CASE(5);
    IM2COL_ND_GPU_LAUNCH_CASE(6);
    IM2COL_ND_GPU_LAUNCH_CASE(7);
    IM2COL_ND_GPU_LAUNCH_CASE(8);
    IM2COL_ND_GPU_LAUNCH_CASE(9);
    IM2COL_ND_GPU_LAUNCH_CASE(10);
  default:
    LOG(FATAL) << "im2col_nd_gpu does not support computation with "
               << num_spatial_axes << " spatial axes";
  }
#undef IM2COL_ND_GPU_LAUNCH_CASE
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation of im2col_nd_gpu for float and double.
template void im2col_nd_gpu<float>(const float* data_im,
    const int num_spatial_axes, const int col_size,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, float* data_col);
template void im2col_nd_gpu<double>(const double* data_im,
    const int num_spatial_axes, const int col_size,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, double* data_col);
// 2-D col2im: one thread per image element; each thread gathers (sums) every
// column-buffer entry that mapped onto its pixel, so no atomics are needed.
// This variant supports a "jumping" sampling scheme controlled by
// jump_period/jump_step in addition to the usual stride/dilation; the index
// algebra below presumably mirrors the matching im2col_gpu kernel — the
// closed-form start/end bounds have no derivation here, so verify against
// that kernel before touching them. (NOTE(review): when jump_step == 0,
// jj == jump_period and h_im/jj*jump_period + h_im%jj reduces to h_im only if
// the arithmetic cancels; confirm intended behavior for that branch.)
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
    const int height, const int width, const int channels,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int jump_period, const int jump_step,
    const int height_col, const int width_col,
    Dtype* data_im) {
  CUDA_KERNEL_LOOP(index, n) {
    // Reference: the stock Caffe col2im implementation this was derived from.
    /*Dtype val = 0;
    const int w_im = index % width + pad_w;
    const int h_im = (index / width) % height + pad_h;
    const int c_im = index / (width * height);
    int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
    int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
    // compute the start and end of the output
    const int w_col_start =
        (w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
    const int w_col_end = min(w_im / stride_w + 1, width_col);
    const int h_col_start =
        (h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
    const int h_col_end = min(h_im / stride_h + 1, height_col);
    // TODO: use LCM of stride and dilation to avoid unnecessary loops
    for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
      for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
        int h_k = (h_im - h_col * stride_h);
        int w_k = (w_im - w_col * stride_w);
        if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
          h_k /= dilation_h;
          w_k /= dilation_w;
          int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
                                height_col + h_col) * width_col + w_col;
          val += data_col[data_col_index];
        }
      }
    }*/
    Dtype val = 0;
    // Padded image coordinates and channel of the pixel this thread owns.
    const int w_im = index % width + pad_w;
    const int h_im = (index / width) % height + pad_h;
    const int c_im = index / (width * height);
    // compute the start and end of the output columns overlapping this pixel
    int w_col_start, h_col_start, w_col_end, h_col_end;
    int jj = jump_period + jump_step;  // length of one jump cycle
    if (jump_step != 0) {
      // Jumping mode: only jump_period positions out of every jj produce
      // columns; map the pixel into that compressed column space.
      w_col_start =
          w_im / jj * jump_period + max(-1, (w_im % jj)-kernel_w) + 1;
      w_col_end = min(w_im / jj * jump_period + (w_im % jj) + 1, width_col);
      h_col_start =
          h_im / jj * jump_period + max(-1, (h_im % jj)-kernel_h) + 1;
      h_col_end = min(h_im / jj * jump_period + (h_im % jj) + 1, height_col);
    }
    else {
      // Dilated mode: columns with the same residue modulo dilation as the
      // pixel are the only ones that can have sampled it.
      w_col_start = (w_im < dilation_w * kernel_w) ? (w_im % dilation_w) :
          ((w_im - dilation_w*kernel_w) / (dilation_w*stride_w) + 1) * dilation_w + (w_im % dilation_w);
      w_col_end =
          min((w_im / (dilation_w*stride_w)+1) * dilation_w + (w_im % dilation_w), width_col);
      h_col_start = (h_im < dilation_h * kernel_h) ? (h_im % dilation_h) :
          ((h_im - dilation_h*kernel_h) / (dilation_h*stride_h) + 1) * dilation_h + (h_im % dilation_h);
      h_col_end =
          min((h_im / (dilation_h*stride_h)+1) * dilation_h + (h_im % dilation_h), height_col);
    }
    // Accumulate every overlapping column entry; c_col recovers which kernel
    // tap (h_k, w_k) of which channel produced the entry.
    for (int h_col = h_col_start; h_col < h_col_end; h_col += dilation_h) {
      for (int w_col = w_col_start; w_col < w_col_end; w_col += dilation_w) {
        // the col location: [c * width * height + h_out, w_out]
        int c_col = c_im * kernel_h * kernel_w
            + ((h_im/jj*jump_period + h_im%jj) - h_col * stride_h)/dilation_h * kernel_w
            + ((w_im/jj*jump_period + w_im%jj) - w_col * stride_w)/dilation_w;
        val += data_col[(c_col * height_col + h_col) * width_col + w_col];
      }
    }
    data_im[index] = val;
  }
}
// Host launcher for the 2-D col2im kernel: computes the column-buffer spatial
// extent that matches the forward im2col pass, then runs one thread per image
// element (gather formulation, so no atomics are required).
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    const int jump_period, const int jump_step,
    Dtype* data_im) {
  int height_col;
  int width_col;
  if (jump_step == 0) {
    // Dilated convolution output extent (must mirror im2col_gpu).
    height_col = (height + 2 * pad_h - kernel_h * dilation_h) / stride_h + dilation_h;
    width_col = (width + 2 * pad_w - kernel_w * dilation_w) / stride_w + dilation_w;
  } else {
    // Jumping mode: only jump_period of every (jump_period + jump_step)
    // positions contribute columns.
    height_col = height * jump_period / (jump_period + jump_step);
    width_col = width * jump_period / (jump_period + jump_step);
  }
  const int num_kernels = channels * height * width;
  // To avoid involving atomic operations, we will launch one kernel per
  // bottom dimension, and then in the kernel add up the top dimensions.
  // NOLINT_NEXT_LINE(whitespace/operators)
  col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                             CAFFE_CUDA_NUM_THREADS>>>(
      num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
      pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
      jump_period, jump_step, height_col, width_col, data_im);
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation of col2im_gpu for float and double.
template void col2im_gpu<float>(const float* data_col, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    const int jump_period, const int jump_step,
    float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    const int jump_period, const int jump_step,
    double* data_im);
// N-dimensional col2im: one thread per image element; each thread sums every
// column-buffer entry whose kernel window covered it (gather formulation, no
// atomics). `num_axes` is a compile-time spatial-axis count; shape arrays
// carry the channel axis at index 0, so spatial axis i maps to index i + 1.
template <typename Dtype, int num_axes>
__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, Dtype* data_im) {
  int d_im[num_axes];  // NOLINT(runtime/arrays)
  int d_col_iter[num_axes];  // NOLINT(runtime/arrays)
  int d_col_start[num_axes];  // NOLINT(runtime/arrays)
  int d_col_end[num_axes];  // NOLINT(runtime/arrays)
  // Stage the per-axis parameter arrays into shared memory (first num_axes
  // (+1) threads copy, everyone syncs below).
  __shared__ int shared_dilation[num_axes];
  __shared__ int shared_kernel_shape[num_axes];
  __shared__ int shared_pad[num_axes];
  __shared__ int shared_stride[num_axes];
  __shared__ int shared_col_shape[num_axes + 1];
  __shared__ int shared_im_shape[num_axes + 1];
  if (threadIdx.x < num_axes) {
    shared_dilation[threadIdx.x] = dilation[threadIdx.x];
    shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
    shared_pad[threadIdx.x] = pad[threadIdx.x];
    shared_stride[threadIdx.x] = stride[threadIdx.x];
  }
  if (threadIdx.x < num_axes + 1) {
    shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
    shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
  }
  __syncthreads();
  CUDA_KERNEL_LOOP(index, n) {
    // Initialize channel_in, computed in the loop below, with intermediate
    // computations used to compute the spatial indices.
    int c_im = index;
    // Calculate d_im (padded image coordinates); c_im ends as the channel.
    for (int i = num_axes - 1; i >= 0; --i) {
      d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];
      c_im /= shared_im_shape[i + 1];
    }
    // Calculate, per axis, the [start, end) range of column positions whose
    // kernel window can overlap this image element.
    bool done = false;
    for (int i = 0; i < num_axes; ++i) {
      const int kernel_extent =
          shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1;
      d_col_start[i] = d_col_iter[i] =
          (d_im[i] < kernel_extent) ? 0 :
          (d_im[i] - kernel_extent) / shared_stride[i] + 1;
      d_col_end[i] =
          min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]);
      if (d_col_start[i] >= d_col_end[i]) {
        // Skip computation if the dimension is 0 at any spatial axis --
        // final val will be 0.
        data_im[index] = 0;
        done = true;
        break;  // for (int i = 0; i < num_axes; ++i)
      }
    }
    if (done) {
      continue;  // CUDA_KERNEL_LOOP(index, n)
    }
    // Odometer-style walk over every overlapping column position, summing
    // the corresponding column-buffer entries.
    Dtype val = 0;
    bool incremented = true;
    bool skip = false;
    do {
      // Compute the flat column-buffer offset for the current position.
      int final_offset = 0;
      int kernel_shape_prod = 1;
      int kernel_index;
      for (int i = num_axes - 1; i >= 0; --i) {
        kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];
        if (kernel_index % shared_dilation[i]) {
          // Not on the dilated sampling grid along this axis: no entry exists.
          skip = true;
          break;
        } else {
          kernel_index /= shared_dilation[i];
          final_offset += kernel_index * kernel_shape_prod;
          kernel_shape_prod *= shared_kernel_shape[i];
        }
      }
      if (!skip) {
        final_offset += kernel_shape_prod * c_im;
        for (int i = 0; i < num_axes; ++i) {
          final_offset *= shared_col_shape[i + 1];
          final_offset += d_col_iter[i];
        }
        val += data_col[final_offset];
      }
      skip = false;
      // Advance the odometer over column positions (carry on overflow).
      incremented = false;
      for (int i = num_axes - 1; i >= 0; --i) {
        const int d_max = d_col_end[i];
        if (d_col_iter[i] == d_max - 1) {
          d_col_iter[i] = d_col_start[i];
        } else {  // d_col_iter[i] < d_max - 1
          ++d_col_iter[i];
          incremented = true;
          break;  // for (int i = num_axes - 1; i >= 0; --i)
        }
      }  // for (int i = num_axes - 1; i >= 0; --i)
    } while (incremented);
    data_im[index] = val;
  }  // CUDA_KERNEL_LOOP(index, n)
}
// Host launcher for the N-dimensional col2im kernel.
// The kernel templates on the spatial-axis count, so the runtime value is
// dispatched onto a compile-time constant (1..10); a local macro keeps the
// ten identical launch cases in one place.
template <typename Dtype>
void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes,
    const int im_size, const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, Dtype* data_im) {
  // num_axes should be smaller than block size
  DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
#define COL2IM_ND_GPU_LAUNCH_CASE(AXES) \
  case AXES: \
    col2im_nd_gpu_kernel<Dtype, AXES> /* NOLINT(whitespace/operators) */ \
        <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( \
        im_size, data_col, im_shape, col_shape, \
        kernel_shape, pad, stride, dilation, data_im); \
    break
  switch (num_spatial_axes) {
    COL2IM_ND_GPU_LAUNCH_CASE(1);
    COL2IM_ND_GPU_LAUNCH_CASE(2);
    COL2IM_ND_GPU_LAUNCH_CASE(3);
    COL2IM_ND_GPU_LAUNCH_CASE(4);
    COL2IM_ND_GPU_LAUNCH_CASE(5);
    COL2IM_ND_GPU_LAUNCH_CASE(6);
    COL2IM_ND_GPU_LAUNCH_CASE(7);
    COL2IM_ND_GPU_LAUNCH_CASE(8);
    COL2IM_ND_GPU_LAUNCH_CASE(9);
    COL2IM_ND_GPU_LAUNCH_CASE(10);
  default:
    LOG(FATAL) << "col2im_nd_gpu does not support computation with "
               << num_spatial_axes << " spatial axes";
  }
#undef COL2IM_ND_GPU_LAUNCH_CASE
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation of col2im_nd_gpu for float and double.
template void col2im_nd_gpu<float>(const float* data_col,
    const int num_spatial_axes, const int im_size,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, float* data_im);
template void col2im_nd_gpu<double>(const double* data_col,
    const int num_spatial_axes, const int im_size,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, double* data_im);
} // namespace caffe
|
8bff4da2c744ecc12e7af6f47d261a283e3f2c97.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file batch_norm.cu
* \brief CUDA Batch Normalization code
* \author Chris Olivier, Bing Xu, Da Zheng
* Adapted from Torch
*/
#include <hip/hip_runtime_api.h>
#include <algorithm>
#include "batch_norm-inl.h"
#include "batch_norm_add_relu-inl.h"
// Bit flags packed into the `flags` argument of the batch-norm kernels below.
#define WRITE_DATA_FLAG 1         // write the input gradient
#define WRITE_GAMMA_FLAG 2        // write the gamma (scale) gradient / reset gamma
#define WRITE_BETA_FLAG 4         // write the beta (shift) gradient
#define FIX_GAMMA_FLAG 8          // treat gamma as fixed at 1
#define IS_TRAINING_FLAG 16       // forward pass uses batch statistics
#define USE_GLOBAL_STATS_FLAG 32  // force running statistics even in training
#if MXNET_USE_CUDNN == 1
#include "./cudnn/nhwc_batch_norm_add_relu-inl.h"
#endif
#include "../../common/cuda/utils.h"
#include "../../../include/mxnet/tensor_blob.h"
using namespace mxnet;
/*! \brief inverse standard deviation <-> variance */
#define VARIANCE_TO_INVSTD(__var$, __eps$) (1.0/sqrt((__var$) + DType(__eps$)))
#define INVSTD_TO_VARIANCE(__invstd$, __eps$) ((1.0 / ((__invstd$) * (__invstd$))) - (__eps$))
namespace mxnet {
namespace op {
namespace batchnormaddrelu {
namespace cuda {
// Threads per warp on all CUDA architectures.
static const unsigned WARP_SIZE = 32;
// The maximum number of threads in a block
static const unsigned MAX_BLOCK_SIZE = 512U;
// Numeric cast helper usable from both host and device code.
template<typename In, typename Out>
struct ScalarConvert {
  // Convert `v` to the destination scalar type.
  static __host__ __device__ __forceinline__ Out to(const In v) {
    return static_cast<Out>(v);
  }
};
// Pick a warp-aligned power-of-two block size large enough to cover `nElem`,
// capped at MAX_BLOCK_SIZE (or at half of it when `smaller` is requested).
static unsigned getNumThreads(int nElem, const bool smaller) {
  const unsigned candidates[5] = {32, 64, 128, 256, MAX_BLOCK_SIZE};
  const int limit = smaller ? 4 : 5;
  for (int i = 0; i < limit; ++i) {
    if (static_cast<unsigned>(nElem) <= candidates[i]) {
      return candidates[i];
    }
  }
  // nElem exceeds every candidate: fall back to the cap.
  return smaller ? (MAX_BLOCK_SIZE >> 1) : MAX_BLOCK_SIZE;
}
// Returns the index of the most significant 1 bit in `val`.
// (__clz counts leading zeros of the 32-bit value; result is 31 - that count.)
__device__ __forceinline__ int getMSB(int val) {
  return 31 - __clz(val);
}
// Pair of accumulator-typed values reduced together (used below to carry a
// sum and a dot-product through one reduction pass).
template<typename DType, typename AccReal>
struct Float2 {
  AccReal v1, v2;
  __device__ Float2() {}
  // Widen two input-typed values into the accumulator type.
  __device__ Float2(DType first, DType second)
    : v1(ScalarConvert<DType, AccReal>::to(first))
    , v2(ScalarConvert<DType, AccReal>::to(second)) {}
  // Broadcast a single input value into both slots.
  __device__ Float2(DType v)
    : v1(ScalarConvert<DType, AccReal>::to(v))
    , v2(ScalarConvert<DType, AccReal>::to(v)) {}
  // Broadcast an integer (e.g. the literal 0) into both slots.
  __device__ Float2(int v)
    : v1(ScalarConvert<int, AccReal>::to(v))
    , v2(ScalarConvert<int, AccReal>::to(v)) {}
  // Component-wise accumulation.
  __device__ Float2 &operator+=(const Float2 &a) {
    v1 += a.v1;
    v2 += a.v2;
    return *this;
  }
};
// Pointwise functor for reduce(): yields one element widened to the
// accumulator type (used to compute the per-plane mean).
template<typename DType, typename AccReal, typename DeviceTensor>
struct SumOp {
  __device__ SumOp(const DeviceTensor src) : tensor(src) {}
  // Read element (batch, plane, n) and widen it.
  __device__ __forceinline__ AccReal operator()(int batch, int plane, int n) {
    return ScalarConvert<DType, AccReal>::to(tensor.get_ref(batch, plane, n));
  }
  const DeviceTensor tensor;
};
// Pointwise functor for reduce(): yields the squared deviation of one element
// from a fixed mean (used to compute the per-plane variance numerator).
template<typename DType, typename AccReal, typename DeviceTensor>
struct VarOp {
  __device__ VarOp(AccReal plane_mean, const DeviceTensor src)
    : mean(plane_mean)
    , tensor(src) {
  }
  // (x - mean)^2 for element (batch, plane, n).
  __device__ __forceinline__ AccReal operator()(int batch, int plane, int n) {
    DType val = tensor.get_ref(batch, plane, n);
    return (val - mean) * (val - mean);
  }
  const AccReal mean;
  const DeviceTensor tensor;
};
// Pointwise functor for reduce(): yields the pair
// (gradOutput, gradOutput * (input - mean)) so the backward pass can obtain
// the gradient sum and the centered dot-product in a single reduction.
template<typename DType, typename AccReal, typename DeviceTensor>
struct GradOp {
  __device__ GradOp(AccReal plane_mean, const DeviceTensor in, const DeviceTensor grad)
    : mean(plane_mean), input(in), gradOutput(grad) {}
  __device__ __forceinline__ Float2<DType, AccReal> operator()(int batch, int plane, int n) {
    const DType g = gradOutput.get_ref(batch, plane, n);
    const DType c = ScalarConvert<AccReal, DType>::to(input.get_ref(batch, plane, n) - mean);
    return Float2<DType, AccReal>(g, g * c);
  }
  const AccReal mean;
  const DeviceTensor input;
  const DeviceTensor gradOutput;
};
// CUDA 9+ removed the implicitly-synchronizing warp shuffles; map the legacy
// __shfl_xor spelling used below onto the *_sync variant with a full-warp
// mask. (TORCH_HIP_VERSION is the hipify translation of CUDA_VERSION.)
#if TORCH_HIP_VERSION >= 9000
#define FULLMASK 0xFFFFFFFF
#define __shfl_xor(...) __shfl_xor_sync(FULLMASK, __VA_ARGS__)
#endif
// Sum across all threads within a warp.
// On SM30+ this is a butterfly (xor) shuffle reduction: after log2(32) steps
// every lane holds the full warp sum. The pre-SM30 fallback stages values in
// shared memory and lets each lane sum its warp's 32 slots.
// NOTE(review): the fallback relies on __threadfence_block() rather than a
// barrier between write and read; presumably safe only because pre-SM30
// warps execute in lockstep — confirm before reusing elsewhere.
template<typename T>
static __device__ __forceinline__ T warpSum(T val) {
#if __CUDA_ARCH__ >= 300
  for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
    val += __shfl_xor(val, 1 << i, WARP_SIZE);
  }
#else
  __shared__ T values[MAX_BLOCK_SIZE];
  values[threadIdx.x] = val;
  __threadfence_block();
  const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE;
  for (int i = 1; i < WARP_SIZE; i++) {
    val += values[base + ((i + threadIdx.x) % WARP_SIZE)];
  }
#endif
  return val;
}
// Warp-sum overload for Float2: reduce each component independently.
template<typename DType, typename AccReal>
static __device__ __forceinline__ Float2<DType, AccReal> warpSum(Float2<DType, AccReal> pair) {
  pair.v1 = warpSum(pair.v1);
  pair.v2 = warpSum(pair.v2);
  return pair;
}
// Sum across (batch, x/y/z) applying Op() pointwise, block-wide.
// Stage 1: each thread strides over the (batch, inner) extent accumulating
// its partial. Stage 2: warp-level reduction, then per-warp partials go
// through shared memory and warp 0 reduces them. The final value is left in
// shared[0] so every thread of the block returns the same result.
// Assumes blockDim.x <= 32 * 32 (at most 32 warps, matching shared[32]).
template<typename T, typename Op, typename DeviceTensor>
static __device__ T reduce(Op op, DeviceTensor tensor, int plane) {
  T sum = (T) 0;
  for (int batch = 0; batch < tensor.OuterSize(); ++batch) {
    for (int x = threadIdx.x; x < tensor.InnerSize(); x += blockDim.x) {
      sum += op(batch, plane, x);
    }
  }
  // sum over NumThreads within a warp
  sum = warpSum(sum);
  // 'transpose', and reduce within warp again
  __shared__ T shared[32];
  __syncthreads();
  if (threadIdx.x % WARP_SIZE == 0) {
    // Lane 0 of each warp publishes its warp's partial.
    shared[threadIdx.x / WARP_SIZE] = sum;
  }
  if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) {
    // zero out the other entries in shared so warp 0 can sum all 32 slots
    shared[threadIdx.x] = (T) 0;
  }
  __syncthreads();
  if (threadIdx.x / WARP_SIZE == 0) {
    sum = warpSum(shared[threadIdx.x]);
    if (threadIdx.x == 0) {
      shared[0] = sum;
    }
  }
  __syncthreads();
  // Everyone picks it up, should be broadcast into the whole gradInput
  return shared[0];
}
// Inference-mode batch-norm forward: one block per channel (plane), threads
// stride over the (batch, inner) extent. Normalizes with the running
// statistics and also copies them into saveMean/saveInvStd.
// NOTE(review): despite the "AddRelu" naming, no add or ReLU is applied in
// this visible body — output is plain gamma*(x-mean)*invstd + beta.
template <typename DType, typename AccReal, typename DeviceTensor1, typename DeviceTensor>
__global__ void BatchNormAddRelualizationUpdateOutputInferenceKernel(
    DeviceTensor input,
    DeviceTensor output,
    DeviceTensor1 runningMean,
    DeviceTensor1 runningVar,
    DeviceTensor1 saveMean,
    DeviceTensor1 saveInvStd,
    DeviceTensor1 weight,
    DeviceTensor1 bias,
    const DType epsilon,
    const uint32_t flags) {
  int plane = blockIdx.x;  // one block per channel
  AccReal invstd = VARIANCE_TO_INVSTD(runningVar[plane], epsilon);
  AccReal mean = ScalarConvert<DType, AccReal>::to(runningMean[plane]);
  // gamma defaults to 1 when fixed or absent; beta defaults to 0 when absent.
  AccReal gamma = ((flags & FIX_GAMMA_FLAG) == 0 && weight.numElements() > 0)
                  ? ScalarConvert<DType, AccReal>::to(weight[plane])
                  : ScalarConvert<int, AccReal>::to(1);
  AccReal beta = bias.numElements() > 0 ? ScalarConvert<DType, AccReal>::to(bias[plane])
                                        : ScalarConvert<int, AccReal>::to(0);
  if (threadIdx.x == 0) {
    // Mirror the running statistics into the save outputs; optionally reset
    // gamma to 1 when it is fixed but its gradient slot is written.
    saveMean[plane] = runningMean[plane];
    saveInvStd[plane] = VARIANCE_TO_INVSTD(runningVar[plane], epsilon);
    if ((flags & WRITE_GAMMA_FLAG) != 0 && (flags & FIX_GAMMA_FLAG) != 0
        && weight.numElements() > 0) {
      weight[plane] = AccReal(1);
    }
  }
  // Write normalized and update the output
  for (int batch = 0, nbatch = input.OuterSize(); batch < nbatch; ++batch) {
    for (int x = threadIdx.x, nx = input.InnerSize(); x < nx; x += blockDim.x) {
      const DType inp = input.get_ref(batch, plane, x);
      output.get_ref(batch, plane, x) =
          ScalarConvert<AccReal, DType>::to(gamma * (inp - mean) * invstd + beta);
    }
  }
}
// Training-mode batch-norm forward: one block per channel. The block first
// reduces the batch mean and variance across (batch, inner), saves them for
// the backward pass, then normalizes the plane.
// Note: `momentum` and the running stats are parameters here, but the
// moving-average update itself happens in the backward kernel below.
// NOTE(review): despite the "AddRelu" naming, no add or ReLU is applied in
// this visible body.
template<typename DType, typename AccReal, typename DeviceTensor1, typename DeviceTensor>
__global__ void BatchNormAddRelualizationUpdateOutputKernel(
    DeviceTensor input,
    DeviceTensor output,
    DeviceTensor1 weight,
    DeviceTensor1 bias,
    const AccReal epsilon,
    const AccReal momentum,
    DeviceTensor1 runningMean,
    DeviceTensor1 runningVar,
    DeviceTensor1 saveMean,
    DeviceTensor1 saveInvStd,
    const uint32_t flags) {
  const int plane = blockIdx.x;  // one block per channel
  const int N = input.OuterSize() * input.InnerSize();
  const AccReal norm = AccReal(1) / N;
  // Compute the mean and variance across (batch, x/y/z)
  const AccReal mean = reduce<AccReal>(
      SumOp<DType, AccReal, DeviceTensor>(input), input, plane) * norm;
  __syncthreads();
  const AccReal varN = reduce<AccReal>(VarOp<DType, AccReal, DeviceTensor>(mean, input),
                                       input, plane);
  AccReal invStd = 0;
  // Guard against a zero denominator when both variance and epsilon are 0.
  if (varN != AccReal(0) || epsilon != AccReal(0)) {
    invStd = AccReal(1.0) / sqrt(varN * norm + epsilon);
  }
  // Save the mean, variance, and moving averages
  if (threadIdx.x == 0) {
    // For one item (0th) per plane (channel), write the per-channel data (ie mean, variance, etc)
    saveMean[plane] = ScalarConvert<AccReal, DType>::to(mean);
    saveInvStd[plane] = invStd;
    if ((flags & WRITE_GAMMA_FLAG) != 0 && (flags & FIX_GAMMA_FLAG) != 0
        && weight.numElements() > 0) {
      weight[plane] = AccReal(1);
    }
  }
  // Write normalized and update the output. gamma defaults to 1 when fixed
  // or absent; beta defaults to 0 when absent.
  const AccReal gamma = ((flags & FIX_GAMMA_FLAG) == 0 && weight.numElements() > 0)
                        ? ScalarConvert<DType, AccReal>::to(weight[plane])
                        : ScalarConvert<int, AccReal>::to(1);
  const AccReal beta = bias.numElements() > 0 ? ScalarConvert<DType, AccReal>::to(bias[plane])
                                              : ScalarConvert<int, AccReal>::to(0);
  for (int batch = 0, nbatch = input.OuterSize(); batch < nbatch; ++batch) {
    for (int x = threadIdx.x, nx = input.InnerSize(); x < nx; x += blockDim.x) {
      const DType inp = input.get_ref(batch, plane, x);
      output.get_ref(batch, plane, x) =
          ScalarConvert<AccReal, DType>::to(gamma * (inp - mean) * invStd + beta);
    }
  }
}
// Bundle of the per-channel 1-D tensors the backward kernel needs, so they
// can be passed as a single argument.
template<typename DeviceTensor1>
struct CUDATensors {
  DeviceTensor1 gradWeight;   // output: gamma gradient
  DeviceTensor1 gradBias;     // output: beta gradient
  DeviceTensor1 weight;       // gamma (scale) parameter
  DeviceTensor1 runningMean;  // moving mean (read, and updated in training)
  DeviceTensor1 runningVar;   // moving variance (read, and updated in training)
  DeviceTensor1 saveMean;     // batch mean saved by the forward pass
  DeviceTensor1 saveInvStd;   // batch inverse std saved by the forward pass
};
// Batch-norm backward: one block per channel. Reduces Sum(gradOutput) and
// DotProduct(input - mean, gradOutput) in one pass, then (depending on
// `flags`) writes gradInput, gradWeight, gradBias, and — in training mode —
// updates the running statistics with `momentum`.
template<typename DType, typename AccReal, typename DeviceTensor1, typename DeviceTensor>
static __global__ void BatchNormAddRelualizationBackwardKernel(
    const DeviceTensor input,
    const DeviceTensor gradOutput,
    DeviceTensor gradInput,
    CUDATensors<DeviceTensor1> tensors,
    const uint32_t flags,
    const AccReal momentum,
    const double eps) {
  int plane = blockIdx.x;  // one block per channel
  int N = gradOutput.OuterSize() * gradOutput.InnerSize();
  const bool is_train_and_not_global_stats =
      (flags & IS_TRAINING_FLAG) != 0 && (flags & USE_GLOBAL_STATS_FLAG) == 0;
  // Pick the statistics the forward pass used: batch stats in training,
  // running stats otherwise.
  AccReal mean, invstd;
  if (is_train_and_not_global_stats) {
    mean = ScalarConvert<DType, AccReal>::to(tensors.saveMean[plane]);
    invstd = tensors.saveInvStd[plane];
  } else {
    mean = ScalarConvert<DType, AccReal>::to(tensors.runningMean[plane]);
    invstd = VARIANCE_TO_INVSTD(tensors.runningVar[plane], eps);
  }
  const AccReal weightVal = ((flags & FIX_GAMMA_FLAG) == 0 && tensors.weight.numElements() > 0) ?
                            ScalarConvert<DType, AccReal>::to(tensors.weight[plane]) : AccReal(1);
  const AccReal norm = AccReal(1) / N;
  // Compute two values across (batch, x/y/z) in one pass:
  // 1. Sum(gradOutput)
  // 2. DotProduct(input - mean, gradOutput)
  GradOp<DType, AccReal, DeviceTensor> g(mean, input, gradOutput);
  Float2< DType, AccReal > res = reduce < Float2 < DType, AccReal >,
      GradOp< DType, AccReal, DeviceTensor >, DeviceTensor > (g, gradOutput, plane);
  const AccReal gradOutputSum = res.v1;
  const AccReal dotP = res.v2;
  const AccReal gradMean = gradOutputSum * norm;
  const AccReal projScale = dotP * norm * invstd * invstd;
  const AccReal gradScale = invstd * weightVal;
  if (threadIdx.x == 0 && is_train_and_not_global_stats) {
    // Recover the batch variance from the saved inverse std and fold both
    // batch statistics into the running averages.
    const AccReal localVariance = INVSTD_TO_VARIANCE(tensors.saveInvStd[plane], eps);
    const AccReal localMean = tensors.saveMean[plane];
    // update running averages
    tensors.runningMean[plane] = tensors.runningMean[plane]
                                 * momentum + localMean * (AccReal(1) - momentum);
    tensors.runningVar[plane] = tensors.runningVar[plane]
                                * momentum + localVariance * (AccReal(1) - momentum);
  }
  if (gradInput.Size() > 0 && (flags & WRITE_DATA_FLAG) != 0) {
    for (int batch = 0, nbatch = gradOutput.OuterSize(); batch < nbatch; ++batch) {
      for (int x = threadIdx.x, nx = gradOutput.InnerSize(); x < nx; x += blockDim.x) {
        const DType gradOut = gradOutput.get_ref(batch, plane, x);
        if (is_train_and_not_global_stats) {
          // Full chain rule through the batch statistics: subtract the
          // projection onto (x - mean) and the mean-gradient term.
          const DType inp = input.get_ref(batch, plane, x);
          const AccReal proj = (inp - mean) * projScale;
          gradInput.get_ref(batch, plane, x) =
              ScalarConvert<AccReal, DType>::to((gradOut - proj - gradMean) * gradScale);
        } else {
          // Statistics are constants here, so the gradient is a plain scale.
          gradInput.get_ref(batch, plane, x) = ScalarConvert<AccReal, DType>::to(
              gradOut * gradScale);
        }
      }
    }
  }
  if (tensors.gradWeight.numElements() > 0 && threadIdx.x == 0 && (flags & WRITE_GAMMA_FLAG) != 0) {
    if ((flags & FIX_GAMMA_FLAG) == 0) {
      tensors.gradWeight[plane] = ScalarConvert<AccReal, DType>::to(dotP * invstd);
    } else {
      tensors.gradWeight[plane] = DType(0);  // gamma is fixed: zero gradient
    }
  }
  if (tensors.gradBias.numElements() > 0 && threadIdx.x == 0 && (flags & WRITE_BETA_FLAG) != 0) {
    tensors.gradBias[plane] = ScalarConvert<AccReal, DType>::to(gradOutputSum);
  }
}
// Lightweight non-owning view of a Dim-dimensional row-major tensor, usable
// on host and device (MSHADOW_XINLINE). Holds a raw data pointer plus the
// per-axis sizes; axis 1 is treated as the channel axis by the callers.
template<typename DType, int Dim>
struct DeviceTensor {
 public:
  inline DeviceTensor() {}
  // Wrap `p` with the given sizes; a null `size` zero-fills all extents.
  inline DeviceTensor(DType *p, const int *size)
    : dptr_(p) {
    for (int axis = 0; axis < Dim; ++axis) {
      size_[axis] = size ? size[axis] : 0;
    }
  }
  // Extent of axis `i`.
  MSHADOW_XINLINE unsigned getSize(const int i) const {
    return size_[i];
  }
  // Product of all extents.
  MSHADOW_XINLINE int numElements() const {
    int count = 1;
    for (int axis = 0; axis < Dim; ++axis) {
      count *= size_[axis];
    }
    return count;
  }
  // Element access over the first three axes (batch, plane, inner).
  MSHADOW_XINLINE DType &operator()(const size_t batch,
                                    const size_t plane,
                                    const size_t x) const {
    // Row-major offset: ((batch * size_[1]) + plane) * size_[2] + x.
    const size_t offset = (batch * size_[1] + plane) * size_[2] + x;
    return *(const_cast<DType *>(dptr_ + offset));
  }
  // Flat element access.
  MSHADOW_XINLINE DType &operator[](const size_t x) const {
    return *(dptr_ + x);
  }
  // Product of the extents after the (batch, channel) axes.
  MSHADOW_XINLINE size_t InnerSize() const {
    size_t sz = 1;
    for (size_t axis = 2; axis < Dim; ++axis) {
      sz *= size_[axis];
    }
    return sz;
  }
  // Extent of the channel axis (axis 1).
  MSHADOW_XINLINE size_t ChannelCount() const {
    return size_[1];
  }
  DType *dptr_;     // non-owning data pointer
  int size_[Dim];   // per-axis extents
};
// Build a DeviceTensor<Dim> view over `blob`, reconciling dimensionality:
// if the blob already has Dim axes the sizes are copied verbatim; otherwise
// missing trailing axes are filled with 1 and extra trailing axes are
// collapsed (multiplied) into the last view axis.
template<typename DType, int Dim>
static DeviceTensor<DType, Dim> devicetensor(const TBlob &blob) {
  // The blob's element type must match DType.
  CHECK_EQ(blob.type_flag_, mshadow::DataType<DType>::kFlag);
  DType *data = blob.dptr<DType>();
  const int inDim = blob.shape_.ndim();
  if (inDim == Dim) {
    // Exact rank match: copy sizes directly.
    DeviceTensor<DType, Dim> tensor(data, nullptr);
    for (int i = 0; i < Dim; ++i) {
      tensor.size_[i] = blob.size(i);
    }
    return tensor;
  }
  // View in which the last dimensions are collapsed or expanded as needed.
  // Loop runs to max(Dim, inDim): the first branch copies matching axes, the
  // second pads with 1 (expand), the third folds surplus input axes into the
  // last view axis (collapse).
  int size[Dim];
  for (int i = 0; i < Dim || i < inDim; ++i) {
    if (i < Dim && i < inDim) {
      size[i] = blob.size(i);
    } else if (i < Dim) {
      size[i] = 1;
    } else {
      size[Dim - 1] *= blob.size(i);
    }
  }
  return DeviceTensor<DType, Dim>(data, &size[0]);
}
#define DeviceTensor1 DeviceTensor<AccReal, 1>
using namespace mxnet::op;
// Host-side forward dispatch: wraps the input/output/parameter blobs, then
// launches the inference kernel (running statistics) or the training kernel
// (batch statistics) with one block per channel.
// Fix: the training-branch launch was left in raw `<< < ... >> >` chevron
// syntax by the hipify conversion while the inference branch used
// hipLaunchKernelGGL; both launches now use hipLaunchKernelGGL so the file
// compiles consistently under HIP. The duplicated blocks/threads computation
// is hoisted out of the branches (both branches computed identical values).
template<typename DType, typename AccReal>
static void BatchNormAddRelualizationUpdateOutput(mshadow::Stream<gpu> *s,
                                                  const OpContext &ctx,
                                                  const BatchNormAddReluParam& param,
                                                  const std::vector<TBlob> &in_data,
                                                  const std::vector<TBlob> &out_data,
                                                  const std::vector<TBlob> &aux_states,
                                                  const uint32_t flags,
                                                  double momentum,
                                                  double eps) {
  batchnormaddrelu::BNTensor3<DType> input = batchnormaddrelu::BNTensor3<DType>(
      in_data[batchnormaddrelu::kData], param.axis);
  batchnormaddrelu::BNTensor3<DType> output = batchnormaddrelu::BNTensor3<DType>(
      out_data[batchnormaddrelu::kOut], param.axis);
  DeviceTensor1 weight = devicetensor<AccReal, 1>(in_data[batchnormaddrelu::kGamma]);
  DeviceTensor1 bias = devicetensor<AccReal, 1>(in_data[batchnormaddrelu::kBeta]);
  DeviceTensor1 runningMean = devicetensor<AccReal, 1>(aux_states[batchnormaddrelu::kMovingMean]);
  DeviceTensor1 runningVar = devicetensor<AccReal, 1>(aux_states[batchnormaddrelu::kMovingVar]);
  DeviceTensor1 saveMean = devicetensor<AccReal, 1>(out_data[batchnormaddrelu::kMean]);
  DeviceTensor1 saveInvStd = devicetensor<AccReal, 1>(out_data[batchnormaddrelu::kVar]);
  DCHECK_GT(weight.numElements(), 0);
  // One block per channel; block size covers the per-channel inner extent.
  dim3 blocks(input.ChannelCount());
  dim3 threads(batchnormaddrelu::cuda::getNumThreads(input.InnerSize(), false));
  if ((flags & IS_TRAINING_FLAG) == 0 || (flags & USE_GLOBAL_STATS_FLAG) != 0) {
    // Inference (or forced global stats): normalize with running mean/var.
    hipLaunchKernelGGL(( BatchNormAddRelualizationUpdateOutputInferenceKernel<DType, AccReal, DeviceTensor1,
        batchnormaddrelu::BNTensor3<DType>>)
        , dim3(blocks), dim3(threads), 0, mshadow::Stream<gpu>::GetStream(s) ,
        input, output, runningMean, runningVar, saveMean,
        saveInvStd, weight, bias, eps, flags);
  } else {
    // Training: compute batch statistics inside the kernel.
    hipLaunchKernelGGL(( BatchNormAddRelualizationUpdateOutputKernel<DType, AccReal, DeviceTensor1,
        batchnormaddrelu::BNTensor3<DType>>)
        , dim3(blocks), dim3(threads), 0, mshadow::Stream<gpu>::GetStream(s) ,
        input, output, weight, bias, eps, momentum, runningMean, runningVar,
        saveMean, saveInvStd, flags);
  }
  MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormAddRelualizationUpdateOutput);
}
/*!
 * \brief Host-side launcher for the backward pass: builds device views of all
 *        gradient/state tensors and launches the backward kernel with one
 *        block per channel on the stream owned by `s`.
 */
template<typename DType, typename AccReal>
static void BatchNormAddRelualizationBackward(mshadow::Stream<gpu> *s,
                                              const OpContext &ctx,
                                              const BatchNormAddReluParam& param,
                                              const std::vector<TBlob> &out_grad,
                                              const std::vector<TBlob> &in_data,
                                              const std::vector<TBlob> &out_data,
                                              const std::vector<TBlob> &in_grad,
                                              const std::vector<TBlob> &aux_states,
                                              const uint32_t flags,
                                              double momentum,
                                              double eps) {
  batchnormaddrelu::BNTensor3<DType> input = batchnormaddrelu::BNTensor3<DType>(
    in_data[batchnormaddrelu::kData], param.axis);
  batchnormaddrelu::BNTensor3<DType>gradOutput = batchnormaddrelu::BNTensor3<DType>(
    out_grad[batchnormaddrelu::kOut], param.axis);
  batchnormaddrelu::BNTensor3<DType>gradInput = batchnormaddrelu::BNTensor3<DType>(
    in_grad[batchnormaddrelu::kData], param.axis);
  CHECK_EQ(gradOutput.Size(), gradInput.Size());
  CUDATensors<DeviceTensor1> tensors;
  tensors.gradWeight = devicetensor<AccReal, 1>(in_grad[batchnormaddrelu::kGamma]);
  tensors.gradBias = devicetensor<AccReal, 1>(in_grad[batchnormaddrelu::kBeta]);
  tensors.weight = devicetensor<AccReal, 1>(in_data[batchnormaddrelu::kGamma]);
  tensors.runningMean = devicetensor<AccReal, 1>(aux_states[batchnormaddrelu::kMovingMean]);
  tensors.runningVar = devicetensor<AccReal, 1>(aux_states[batchnormaddrelu::kMovingVar]);
  tensors.saveMean = devicetensor<AccReal, 1>(out_data[batchnormaddrelu::kMean]);
  tensors.saveInvStd = devicetensor<AccReal, 1>(out_data[batchnormaddrelu::kVar]);
  DCHECK_GT(tensors.weight.numElements(), 0);
  // Debug builds cap the block size lower to ease stepping/inspection.
#ifdef NDEBUG
  constexpr bool SMALLER_THREADS = false;
#else
  constexpr bool SMALLER_THREADS = true;
#endif
  dim3 blocks(gradOutput.ChannelCount());
  dim3 threads(batchnormaddrelu::cuda::getNumThreads(gradOutput.InnerSize(), SMALLER_THREADS));
  hipLaunchKernelGGL(( BatchNormAddRelualizationBackwardKernel<DType, AccReal, DeviceTensor1,
    batchnormaddrelu::BNTensor3<DType>>)
    , dim3(blocks), dim3(threads), 0, mshadow::Stream<gpu>::GetStream(s) ,
    input, gradOutput, gradInput, tensors, flags, momentum, eps);
  MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormAddRelualizationBackward);
}
} // namespace cuda
} // namespace batchnormaddrelu
/*!
 * \brief Pack the training state, parameter options and per-output write
 *        requests into the bit mask consumed by the batchnorm-add-relu kernels.
 */
template<typename xpu, typename DType, typename AccReal>
static inline uint32_t SetupFlags(const OpContext &ctx,
                                  const BatchNormAddReluParam& params,
                                  const std::vector<OpReqType> &req) {
  uint32_t flags = 0;
  if (ctx.is_train)
    flags |= IS_TRAINING_FLAG;
  if (params.fix_gamma)
    flags |= FIX_GAMMA_FLAG;
  if (params.use_global_stats)
    flags |= USE_GLOBAL_STATS_FLAG;
  // Only request kernel-side writes for outputs the graph actually wants.
  if (IsBNAddReluWriting(req[batchnormaddrelu::kData]))
    flags |= WRITE_DATA_FLAG;
  if (IsBNAddReluWriting(req[batchnormaddrelu::kGamma]))
    flags |= WRITE_GAMMA_FLAG;
  if (IsBNAddReluWriting(req[batchnormaddrelu::kBeta]))
    flags |= WRITE_BETA_FLAG;
  return flags;
}
/*! \brief Forward batch-norm pass on GPU: thin adapter that computes the
 *  flag mask from the op context/request vector and forwards all tensors to
 *  the CUDA implementation. */
template<typename xpu, typename DType, typename AccReal>
void BatchNormAddReluForwardImpl(mshadow::Stream<gpu> *stream,
                                 const OpContext &ctx, const BatchNormAddReluParam& param_,
                                 const std::vector<TBlob> &in_data,
                                 const std::vector<OpReqType> &req,
                                 const std::vector<TBlob> &out_data,
                                 const std::vector<TBlob> &aux_states) {
  batchnormaddrelu::cuda::BatchNormAddRelualizationUpdateOutput<DType, AccReal>(
    stream,
    ctx,
    param_,
    in_data,
    out_data,
    aux_states,
    SetupFlags<xpu, DType, AccReal>(ctx, param_, req),
    param_.momentum,
    param_.eps);
  MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormAddReluOp_DoForward_gpu);
}
/*! \brief Backward batch-norm pass on GPU: thin adapter that computes the
 *  flag mask from the op context/request vector and forwards all tensors to
 *  the CUDA implementation. */
template<typename xpu, typename DType, typename AccReal>
void BatchNormAddReluBackwardImpl(mshadow::Stream<gpu> *stream,
                                  const OpContext &ctx, const BatchNormAddReluParam& param_,
                                  const std::vector<TBlob> &out_grad,
                                  const std::vector<TBlob> &in_data,
                                  const std::vector<TBlob> &out_data,
                                  const std::vector<OpReqType> &req,
                                  const std::vector<TBlob> &in_grad,
                                  const std::vector<TBlob> &aux_states) {
  batchnormaddrelu::cuda::BatchNormAddRelualizationBackward<DType, AccReal>(
    stream,
    ctx,
    param_,
    out_grad,
    in_data,
    out_data,
    in_grad,
    aux_states,
    SetupFlags<xpu, DType, AccReal>(ctx, param_, req),
    param_.momentum,
    param_.eps);
  MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormAddReluOp_DoBackward_gpu);
}
#if MXNET_USE_CUDNN == 1
// Return the thread-local NHWC batchnorm-add-relu operator singleton,
// (re)initialized with `param` on every call. Thread-local storage keeps
// per-thread operator state from being shared across threads.
template<typename DType>
static NhwcBatchNormAddReluOp<DType> &GetNhwcBNAddReluOp(const BatchNormAddReluParam& param) {
#if DMLC_CXX11_THREAD_LOCAL
  static thread_local NhwcBatchNormAddReluOp<DType> op;
#else
  static MX_THREAD_LOCAL NhwcBatchNormAddReluOp<DType> op;
#endif
  op.Init(param);
  return op;
}
#endif
// GPU FCompute entry for the forward pass. Splits the 6 inputs into
// data inputs (everything before the moving-mean slot, plus the addend
// appended at the end) and aux state (moving mean/var), then dispatches to
// the fused NHWC cuDNN-era op when supported, else the generic kernels.
template<>
void BatchNormAddReluCompute<gpu>(const nnvm::NodeAttrs& attrs,
                                  const OpContext& ctx, const std::vector<TBlob>& inputs,
                                  const std::vector<OpReqType>& req,
                                  const std::vector<TBlob>& outputs) {
  BatchNormAddReluParam param = nnvm::get<BatchNormAddReluParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), 6U);
  std::vector<TBlob> in_data(inputs.begin(), inputs.begin() + batchnormaddrelu::kInMovingMean);
  in_data.push_back(inputs[batchnormaddrelu::kAddend]);
  std::vector<TBlob> aux_states(inputs.begin() + batchnormaddrelu::kInMovingMean,
                                inputs.begin() + batchnormaddrelu::kAddend);
  int dtype = inputs[0].type_flag_;
  mxnet::TShape shape = inputs[0].shape_;
  // Normalize a possibly-negative axis against the input rank.
  param.axis = mxnet::op::batchnormaddrelu::GetRealAxis(shape, param.axis);
#if MXNET_USE_CUDNN == 1
  // We enable the discrete NHWC cuda kernels by the same 'cudnn_off' flag.
  MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DType, AccReal, {
    if (!param.cudnn_off &&
        NhwcBatchNormAddReluOp<DType>::Supports(param, dtype, shape, ctx.run_ctx.ctx))
      GetNhwcBNAddReluOp<DType>(param).Forward(ctx, in_data, req, outputs, aux_states);
    else
      BatchNormAddReluForward<gpu, DType, AccReal>(ctx, param, in_data, req, outputs, aux_states);
  });
#else
  MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DType, AccReal, {
    BatchNormAddReluForward<gpu, DType, AccReal>(ctx, param, in_data, req, outputs, aux_states);
  });
#endif
}
// GPU FCompute entry for the backward pass: dispatches the 9 gradient/state
// inputs to the fused NHWC implementation when supported, else the generic
// kernels.
template<>
void BatchNormAddReluGradCompute<gpu>(const nnvm::NodeAttrs& attrs,
                                      const OpContext& ctx, const std::vector<TBlob>& inputs,
                                      const std::vector<OpReqType>& req,
                                      const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 9U);
  BatchNormAddReluParam param = nnvm::get<BatchNormAddReluParam>(attrs.parsed);
  int dtype = inputs[0].type_flag_;
  mxnet::TShape shape = inputs[0].shape_;
  // Normalize a possibly-negative axis against the input rank.
  param.axis = mxnet::op::batchnormaddrelu::GetRealAxis(shape, param.axis);
#if MXNET_USE_CUDNN == 1
  // We enable the discrete NHWC cuda kernels by the same 'cudnn_off' flag.
  MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DType, AccReal, {
    if (!param.cudnn_off &&
        NhwcBatchNormAddReluOp<DType>::Supports(param, dtype, shape, ctx.run_ctx.ctx))
      GetNhwcBNAddReluOp<DType>(param).Backward(ctx, inputs, req, outputs);
    else
      BatchNormAddReluBackward<gpu, DType, AccReal>(ctx, param, inputs, req, outputs);
  });
#else
  MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DType, AccReal, {
    BatchNormAddReluBackward<gpu, DType, AccReal>(ctx, param, inputs, req, outputs);
  });
#endif
}
namespace batchnormaddrelu {
// Can the function of this batch norm op node be handled by norm convolution?
// Returns true only for a plain BatchNorm node (no fused activation) that the
// NHWC implementation supports; always false when cuDNN is disabled.
//
// Fix: the original placed `#endif` between the MSHADOW_REAL_TYPE_SWITCH body
// and its closing `});`, so builds with MXNET_USE_CUDNN != 1 kept a stray
// `});` without the macro opener — a compile error. The closer now lives
// inside the guarded region.
bool IsCompatibleBatchNorm(const nnvm::ObjectPtr& node, const int& dtype,
                           const TShape& shape, const Context& ctx) {
  if (node->op() != Op::Get("BatchNorm"))
    return false;
  auto param = nnvm::get<BatchNormParam>(node->attrs.parsed);
  bool is_compatible = false;
#if MXNET_USE_CUDNN == 1
  MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
    // TODO(cfujitsang): add condition if using very specific BatchNorm parameters
    is_compatible = !param.act_type &&
                    NhwcBatchNormAddReluOp<DType>::Supports(param, dtype, shape, ctx);
  });
#endif  // MXNET_USE_CUDNN
  return is_compatible;
}
} // namespace batchnormaddrelu
NNVM_REGISTER_OP(BatchNormAddRelu)
.set_attr<FCompute>("FCompute<gpu>", BatchNormAddReluCompute<gpu>);
NNVM_REGISTER_OP(_backward_BatchNormAddRelu)
.set_attr<FCompute>("FCompute<gpu>", BatchNormAddReluGradCompute<gpu>);
} // namespace op
} // namespace mxnet
| 8bff4da2c744ecc12e7af6f47d261a283e3f2c97.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file batch_norm.cu
* \brief CUDA Batch Normalization code
* \author Chris Olivier, Bing Xu, Da Zheng
* Adapted from Torch
*/
#include <cuda_runtime_api.h>
#include <algorithm>
#include "batch_norm-inl.h"
#include "batch_norm_add_relu-inl.h"
#define WRITE_DATA_FLAG 1
#define WRITE_GAMMA_FLAG 2
#define WRITE_BETA_FLAG 4
#define FIX_GAMMA_FLAG 8
#define IS_TRAINING_FLAG 16
#define USE_GLOBAL_STATS_FLAG 32
#if MXNET_USE_CUDNN == 1
#include "./cudnn/nhwc_batch_norm_add_relu-inl.h"
#endif
#include "../../common/cuda/utils.h"
#include "../../../include/mxnet/tensor_blob.h"
using namespace mxnet;
/*! \brief inverse standard deviation <-> variance */
#define VARIANCE_TO_INVSTD(__var$, __eps$) (1.0/sqrt((__var$) + DType(__eps$)))
#define INVSTD_TO_VARIANCE(__invstd$, __eps$) ((1.0 / ((__invstd$) * (__invstd$))) - (__eps$))
namespace mxnet {
namespace op {
namespace batchnormaddrelu {
namespace cuda {
static const unsigned WARP_SIZE = 32;
// The maximum number of threads in a block
static const unsigned MAX_BLOCK_SIZE = 512U;
// Static-cast helper usable from both host and device code.
template<typename In, typename Out>
struct ScalarConvert {
  static __host__ __device__ __forceinline__ Out to(const In v) { return (Out) v; }
};
// Number of threads in a block given an input size up to MAX_BLOCK_SIZE.
// Picks the smallest power-of-two candidate (>= 32) covering nElem; when
// `smaller` is set the top candidate is excluded and the fallback is halved.
static unsigned getNumThreads(int nElem, const bool smaller) {
  const unsigned candidates[5] = {32, 64, 128, 256, MAX_BLOCK_SIZE};
  const int limit = smaller ? 4 : 5;
  for (int idx = 0; idx < limit; ++idx) {
    if (static_cast<unsigned>(nElem) <= candidates[idx])
      return candidates[idx];
  }
  return smaller ? MAX_BLOCK_SIZE / 2 : MAX_BLOCK_SIZE;
}
// Returns the index of the most significant 1 bit in `val`.
// (__clz counts leading zeros; for val == 0 this yields -1.)
__device__ __forceinline__ int getMSB(int val) {
  return 31 - __clz(val);
}
// Pair of accumulation-type values, used to reduce two quantities
// (e.g. gradient sum and dot-product) in a single reduction pass.
template<typename DType, typename AccReal>
struct Float2 {
  AccReal v1, v2;
  __device__ Float2() {}
  // NOTE: constructor parameters intentionally shadow the members;
  // the mem-initializer list still binds the members correctly.
  __device__ Float2(DType v1, DType v2)
    : v1(ScalarConvert<DType, AccReal>::to(v1))
    , v2(ScalarConvert<DType, AccReal>::to(v2)) {}
  // Broadcast a single scalar into both slots.
  __device__ Float2(DType v)
    : v1(ScalarConvert<DType, AccReal>::to(v))
    , v2(ScalarConvert<DType, AccReal>::to(v)) {}
  __device__ Float2(int v)
    : v1(ScalarConvert<int, AccReal>::to(v))
    , v2(ScalarConvert<int, AccReal>::to(v)) {}
  // Component-wise accumulation.
  __device__ Float2 &operator+=(const Float2 &a) {
    v1 += a.v1;
    v2 += a.v2;
    return *this;
  }
};
// Reduction functor: yields the element at (batch, plane, n), converted to
// the accumulation type. Used with reduce() to compute per-channel sums.
template<typename DType, typename AccReal, typename DeviceTensor>
struct SumOp {
  __device__ SumOp(const DeviceTensor t) : tensor(t) {}
  __device__ __forceinline__ AccReal operator()(int batch, int plane, int n) {
    return ScalarConvert<DType, AccReal>::to(tensor.get_ref(batch, plane, n));
  }
  const DeviceTensor tensor;
};
// Reduction functor: yields squared deviation from a precomputed mean.
// Used with reduce() to compute the (unnormalized) per-channel variance.
template<typename DType, typename AccReal, typename DeviceTensor>
struct VarOp {
  __device__ VarOp(AccReal m, const DeviceTensor t)
    : mean(m)
    , tensor(t) {
  }
  __device__ __forceinline__ AccReal operator()(int batch, int plane, int n) {
    DType val = tensor.get_ref(batch, plane, n);
    return (val - mean) * (val - mean);
  }
  const AccReal mean;
  const DeviceTensor tensor;
};
// Reduction functor producing both quantities the backward pass needs in one
// sweep: v1 = gradOutput, v2 = gradOutput * (input - mean).
template<typename DType, typename AccReal, typename DeviceTensor>
struct GradOp {
  __device__ GradOp(AccReal m, const DeviceTensor i, const DeviceTensor g)
    : mean(m), input(i), gradOutput(g) {}
  __device__ __forceinline__ Float2<DType, AccReal> operator()(int batch, int plane, int n) {
    const DType g = gradOutput.get_ref(batch, plane, n);
    const DType c = ScalarConvert<AccReal, DType>::to(input.get_ref(batch, plane, n) - mean);
    return Float2<DType, AccReal>(g, g * c);
  }
  const AccReal mean;
  const DeviceTensor input;
  const DeviceTensor gradOutput;
};
#if CUDA_VERSION >= 9000
#define FULLMASK 0xFFFFFFFF
#define __shfl_xor(...) __shfl_xor_sync(FULLMASK, __VA_ARGS__)
#endif
// Sum across all threads within a warp.
// On SM30+ this is a log2(32)-step butterfly via __shfl_xor (mapped to
// __shfl_xor_sync with a full mask when CUDA >= 9; see the #define above).
// The pre-SM30 fallback stages values through a shared-memory array.
// NOTE(review): the fallback uses only __threadfence_block(), relying on
// warp-synchronous execution — verify before compiling for arch < 300.
template<typename T>
static __device__ __forceinline__ T warpSum(T val) {
#if __CUDA_ARCH__ >= 300
  for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
    val += __shfl_xor(val, 1 << i, WARP_SIZE);
  }
#else
  __shared__ T values[MAX_BLOCK_SIZE];
  values[threadIdx.x] = val;
  __threadfence_block();
  const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE;
  for (int i = 1; i < WARP_SIZE; i++) {
    val += values[base + ((i + threadIdx.x) % WARP_SIZE)];
  }
#endif
  return val;
}
// Component-wise warp sum for the Float2 pair type.
template<typename DType, typename AccReal>
static __device__ __forceinline__ Float2<DType, AccReal> warpSum(Float2<DType, AccReal> value) {
  value.v1 = warpSum(value.v1);
  value.v2 = warpSum(value.v2);
  return value;
}
// Sum across (batch, x/y/z) applying Op() pointwise
// Block-wide reduction for one channel (`plane`): each thread strides over
// the (batch, inner) extent accumulating op(...), then warp sums are staged
// through a 32-slot shared array and reduced again by the first warp.
// All threads of the block must reach the __syncthreads() calls.
template<typename T, typename Op, typename DeviceTensor>
static __device__ T reduce(Op op, DeviceTensor tensor, int plane) {
  T sum = (T) 0;
  for (int batch = 0; batch < tensor.OuterSize(); ++batch) {
    for (int x = threadIdx.x; x < tensor.InnerSize(); x += blockDim.x) {
      sum += op(batch, plane, x);
    }
  }
  // sum over NumThreads within a warp
  sum = warpSum(sum);
  // 'transpose', and reduce within warp again
  __shared__ T shared[32];
  __syncthreads();
  if (threadIdx.x % WARP_SIZE == 0) {
    shared[threadIdx.x / WARP_SIZE] = sum;
  }
  if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) {
    // zero out the other entries in shared
    shared[threadIdx.x] = (T) 0;
  }
  __syncthreads();
  if (threadIdx.x / WARP_SIZE == 0) {
    sum = warpSum(shared[threadIdx.x]);
    if (threadIdx.x == 0) {
      shared[0] = sum;
    }
  }
  __syncthreads();
  // Everyone picks it up, should be broadcast into the whole gradInput
  return shared[0];
}
// Forward pass in inference mode (or training with use_global_stats):
// normalizes with the *running* mean/variance rather than batch statistics.
// Launch: one block per channel (plane == blockIdx.x); threads stride the
// (batch, spatial) extent. Thread 0 also mirrors running stats into
// saveMean/saveInvStd and resets gamma to 1 when both WRITE_GAMMA and
// FIX_GAMMA are requested.
template <typename DType, typename AccReal, typename DeviceTensor1, typename DeviceTensor>
__global__ void BatchNormAddRelualizationUpdateOutputInferenceKernel(
  DeviceTensor input,
  DeviceTensor output,
  DeviceTensor1 runningMean,
  DeviceTensor1 runningVar,
  DeviceTensor1 saveMean,
  DeviceTensor1 saveInvStd,
  DeviceTensor1 weight,
  DeviceTensor1 bias,
  const DType epsilon,
  const uint32_t flags) {
  int plane = blockIdx.x;
  AccReal invstd = VARIANCE_TO_INVSTD(runningVar[plane], epsilon);
  AccReal mean = ScalarConvert<DType, AccReal>::to(runningMean[plane]);
  // gamma is forced to 1 when FIX_GAMMA is set or no weight is present.
  AccReal gamma = ((flags & FIX_GAMMA_FLAG) == 0 && weight.numElements() > 0)
                  ? ScalarConvert<DType, AccReal>::to(weight[plane])
                  : ScalarConvert<int, AccReal>::to(1);
  AccReal beta = bias.numElements() > 0 ? ScalarConvert<DType, AccReal>::to(bias[plane])
                 : ScalarConvert<int, AccReal>::to(0);
  if (threadIdx.x == 0) {
    saveMean[plane] = runningMean[plane];
    saveInvStd[plane] = VARIANCE_TO_INVSTD(runningVar[plane], epsilon);
    if ((flags & WRITE_GAMMA_FLAG) != 0 && (flags & FIX_GAMMA_FLAG) != 0
        && weight.numElements() > 0) {
      weight[plane] = AccReal(1);
    }
  }
  // Write normalized and update the output
  for (int batch = 0, nbatch = input.OuterSize(); batch < nbatch; ++batch) {
    for (int x = threadIdx.x, nx = input.InnerSize(); x < nx; x += blockDim.x) {
      const DType inp = input.get_ref(batch, plane, x);
      output.get_ref(batch, plane, x) =
        ScalarConvert<AccReal, DType>::to(gamma * (inp - mean) * invstd + beta);
    }
  }
}
// Forward pass in training mode: computes batch mean/variance with two
// block-wide reductions, then normalizes. One block per channel.
// Thread 0 stores the batch statistics into saveMean/saveInvStd; the
// running averages are NOT updated here (see the backward kernel, which
// applies the momentum update — `momentum` is unused in this kernel).
template<typename DType, typename AccReal, typename DeviceTensor1, typename DeviceTensor>
__global__ void BatchNormAddRelualizationUpdateOutputKernel(
  DeviceTensor input,
  DeviceTensor output,
  DeviceTensor1 weight,
  DeviceTensor1 bias,
  const AccReal epsilon,
  const AccReal momentum,
  DeviceTensor1 runningMean,
  DeviceTensor1 runningVar,
  DeviceTensor1 saveMean,
  DeviceTensor1 saveInvStd,
  const uint32_t flags) {
  const int plane = blockIdx.x;
  const int N = input.OuterSize() * input.InnerSize();
  const AccReal norm = AccReal(1) / N;
  // Compute the mean and variance across (batch, x/y/z)
  const AccReal mean = reduce<AccReal>(
    SumOp<DType, AccReal, DeviceTensor>(input), input, plane) * norm;
  __syncthreads();
  const AccReal varN = reduce<AccReal>(VarOp<DType, AccReal, DeviceTensor>(mean, input),
                                       input, plane);
  AccReal invStd = 0;
  // Guard the rsqrt: invStd stays 0 only when both variance and eps are 0.
  if (varN != AccReal(0) || epsilon != AccReal(0)) {
    invStd = AccReal(1.0) / sqrt(varN * norm + epsilon);
  }
  // Save the mean, variance, and moving averages
  if (threadIdx.x == 0) {
    // For one item (0th) per plane (channel), write the per-channel data (ie mean, variance, etc)
    // Momentum based writeback
    saveMean[plane] = ScalarConvert<AccReal, DType>::to(mean);
    saveInvStd[plane] = invStd;
    if ((flags & WRITE_GAMMA_FLAG) != 0 && (flags & FIX_GAMMA_FLAG) != 0
        && weight.numElements() > 0) {
      weight[plane] = AccReal(1);
    }
  }
  // Write normalized and update the output
  const AccReal gamma = ((flags & FIX_GAMMA_FLAG) == 0 && weight.numElements() > 0)
                        ? ScalarConvert<DType, AccReal>::to(weight[plane])
                        : ScalarConvert<int, AccReal>::to(1);
  const AccReal beta = bias.numElements() > 0 ? ScalarConvert<DType, AccReal>::to(bias[plane])
                       : ScalarConvert<int, AccReal>::to(0);
  for (int batch = 0, nbatch = input.OuterSize(); batch < nbatch; ++batch) {
    for (int x = threadIdx.x, nx = input.InnerSize(); x < nx; x += blockDim.x) {
      const DType inp = input.get_ref(batch, plane, x);
      output.get_ref(batch, plane, x) =
        ScalarConvert<AccReal, DType>::to(gamma * (inp - mean) * invStd + beta);
    }
  }
}
// Bundle of the per-channel 1-D tensors the backward kernel needs, passed
// by value into the kernel to stay within the launch-argument limit.
template<typename DeviceTensor1>
struct CUDATensors {
  DeviceTensor1 gradWeight;   // dL/dgamma output
  DeviceTensor1 gradBias;     // dL/dbeta output
  DeviceTensor1 weight;       // gamma input
  DeviceTensor1 runningMean;  // running mean (read, and updated in training)
  DeviceTensor1 runningVar;   // running variance (read, and updated in training)
  DeviceTensor1 saveMean;     // batch mean saved by the forward pass
  DeviceTensor1 saveInvStd;   // batch inverse std-dev saved by the forward pass
};
// Backward pass: one block per channel. Computes Sum(gradOutput) and
// DotProduct(input - mean, gradOutput) in a single fused reduction, then
// derives gradInput / gradWeight / gradBias per the requested write flags.
// In training mode (and not use_global_stats) thread 0 also applies the
// momentum update to the running mean/variance here.
template<typename DType, typename AccReal, typename DeviceTensor1, typename DeviceTensor>
static __global__ void BatchNormAddRelualizationBackwardKernel(
  const DeviceTensor input,
  const DeviceTensor gradOutput,
  DeviceTensor gradInput,
  CUDATensors<DeviceTensor1> tensors,
  const uint32_t flags,
  const AccReal momentum,
  const double eps) {
  int plane = blockIdx.x;
  int N = gradOutput.OuterSize() * gradOutput.InnerSize();
  const bool is_train_and_not_global_stats =
    (flags & IS_TRAINING_FLAG) != 0 && (flags & USE_GLOBAL_STATS_FLAG) == 0;
  // Use batch statistics saved by the forward pass in training, otherwise
  // the running statistics.
  AccReal mean, invstd;
  if (is_train_and_not_global_stats) {
    mean = ScalarConvert<DType, AccReal>::to(tensors.saveMean[plane]);
    invstd = tensors.saveInvStd[plane];
  } else {
    mean = ScalarConvert<DType, AccReal>::to(tensors.runningMean[plane]);
    invstd = VARIANCE_TO_INVSTD(tensors.runningVar[plane], eps);
  }
  const AccReal weightVal = ((flags & FIX_GAMMA_FLAG) == 0 && tensors.weight.numElements() > 0) ?
                            ScalarConvert<DType, AccReal>::to(tensors.weight[plane]) : AccReal(1);
  const AccReal norm = AccReal(1) / N;
  // Compute two values across (batch, x/y/z) in one pass:
  // 1. Sum(gradOutput)
  // 2. DotProduct(input - mean, gradOutput)
  GradOp<DType, AccReal, DeviceTensor> g(mean, input, gradOutput);
  Float2< DType, AccReal > res = reduce < Float2 < DType, AccReal >,
    GradOp< DType, AccReal, DeviceTensor >, DeviceTensor > (g, gradOutput, plane);
  const AccReal gradOutputSum = res.v1;
  const AccReal dotP = res.v2;
  const AccReal gradMean = gradOutputSum * norm;
  const AccReal projScale = dotP * norm * invstd * invstd;
  const AccReal gradScale = invstd * weightVal;
  if (threadIdx.x == 0 && is_train_and_not_global_stats) {
    const AccReal localVariance = INVSTD_TO_VARIANCE(tensors.saveInvStd[plane], eps);
    const AccReal localMean = tensors.saveMean[plane];
    // update running averages
    tensors.runningMean[plane] = tensors.runningMean[plane]
                                 * momentum + localMean * (AccReal(1) - momentum);
    tensors.runningVar[plane] = tensors.runningVar[plane]
                                * momentum + localVariance * (AccReal(1) - momentum);
  }
  if (gradInput.Size() > 0 && (flags & WRITE_DATA_FLAG) != 0) {
    for (int batch = 0, nbatch = gradOutput.OuterSize(); batch < nbatch; ++batch) {
      for (int x = threadIdx.x, nx = gradOutput.InnerSize(); x < nx; x += blockDim.x) {
        const DType gradOut = gradOutput.get_ref(batch, plane, x);
        if (is_train_and_not_global_stats) {
          // Full gradient: subtract mean-gradient and projection terms.
          const DType inp = input.get_ref(batch, plane, x);
          const AccReal proj = (inp - mean) * projScale;
          gradInput.get_ref(batch, plane, x) =
            ScalarConvert<AccReal, DType>::to((gradOut - proj - gradMean) * gradScale);
        } else {
          // Fixed statistics: gradient is just a rescale.
          gradInput.get_ref(batch, plane, x) = ScalarConvert<AccReal, DType>::to(
            gradOut * gradScale);
        }
      }
    }
  }
  if (tensors.gradWeight.numElements() > 0 && threadIdx.x == 0 && (flags & WRITE_GAMMA_FLAG) != 0) {
    if ((flags & FIX_GAMMA_FLAG) == 0) {
      tensors.gradWeight[plane] = ScalarConvert<AccReal, DType>::to(dotP * invstd);
    } else {
      tensors.gradWeight[plane] = DType(0);
    }
  }
  if (tensors.gradBias.numElements() > 0 && threadIdx.x == 0 && (flags & WRITE_BETA_FLAG) != 0) {
    tensors.gradBias[plane] = ScalarConvert<AccReal, DType>::to(gradOutputSum);
  }
}
// Lightweight non-owning Dim-dimensional view over device memory, passable
// by value into kernels. dptr_ points at row-major data; size_ holds the
// per-dimension extents.
template<typename DType, int Dim>
struct DeviceTensor {
 public:
  // Default-construct with unset pointer/extents; caller fills them in.
  inline DeviceTensor() {}
  // Wrap pointer `p` with extents `size`; null `size` zero-fills extents.
  inline DeviceTensor(DType *p, const int *size)
    : dptr_(p) {
    for (int i = 0; i < Dim; ++i) {
      size_[i] = size ? size[i] : 0;
    }
  }
  // Extent of dimension `i` (no bounds checking).
  MSHADOW_XINLINE unsigned getSize(const int i) const {
    return size_[i];
  }
  // Total element count: product of all extents.
  MSHADOW_XINLINE int numElements() const {
    int n = 1;
    for (int i = 0; i < Dim; ++i) {
      n *= size_[i];
    }
    return n;
  }
  // Row-major (batch, plane, x) access for a 3-D view.
  // NOTE(review): offset accumulates in an int — confirm callers stay
  // below 2^31 elements.
  MSHADOW_XINLINE DType &operator()(const size_t batch,
                                    const size_t plane,
                                    const size_t x) const {
    int offset = 0;
    offset *= size_[0];  // no-op while offset == 0; kept for scheme symmetry
    offset += batch;
    offset *= size_[1];
    offset += plane;
    offset *= size_[2];
    offset += x;
    return *(const_cast<DType *>(dptr_ + offset));
  }
  // Flat element access by linear index.
  MSHADOW_XINLINE DType &operator[](const size_t x) const {
    return *(dptr_ + x);
  }
  // Product of all extents after the first two (batch, channel) dimensions.
  MSHADOW_XINLINE size_t InnerSize() const {
    size_t sz = 1;
    for (size_t i = 2; i < Dim; ++i) {
      sz *= size_[i];
    }
    return sz;
  }
  // Number of channels, i.e. the extent of dimension 1.
  MSHADOW_XINLINE size_t ChannelCount() const {
    return size_[1];
  }
  DType *dptr_;     // non-owning device pointer
  int size_[Dim];   // per-dimension extents
};
// Build a Dim-dimensional DeviceTensor view over `blob`'s data.
// If the blob's rank differs from Dim, trailing dimensions are either padded
// with extent 1 (blob has fewer dims) or folded into the last dim (more dims).
template<typename DType, int Dim>
static DeviceTensor<DType, Dim> devicetensor(const TBlob &blob) {
  CHECK_EQ(blob.type_flag_, mshadow::DataType<DType>::kFlag);
  DType *data = blob.dptr<DType>();
  const int inDim = blob.shape_.ndim();
  if (inDim == Dim) {
    // Ranks match: copy the shape through unchanged.
    DeviceTensor<DType, Dim> tensor(data, nullptr);
    for (int i = 0; i < Dim; ++i) {
      tensor.size_[i] = blob.size(i);
    }
    return tensor;
  }
  // View in which the last dimensions are collapsed or expanded as needed
  int size[Dim];
  for (int i = 0; i < Dim || i < inDim; ++i) {
    if (i < Dim && i < inDim) {
      size[i] = blob.size(i);
    } else if (i < Dim) {
      size[i] = 1;  // expand: pad missing trailing dims with extent 1
    } else {
      size[Dim - 1] *= blob.size(i);  // collapse: fold extra dims into the last
    }
  }
  return DeviceTensor<DType, Dim>(data, &size[0]);
}
#define DeviceTensor1 DeviceTensor<AccReal, 1>
using namespace mxnet::op;
/*!
 * \brief Host-side launcher for the forward pass. Chooses the inference
 *        kernel (running statistics) when not training or when global stats
 *        are forced, otherwise the training kernel (batch statistics).
 *        Launches one block per channel on the stream owned by `s`.
 *        (The spaced "<< < ... >> >" chevrons below are an nvcc-accepted
 *        spelling of the launch syntax.)
 */
template<typename DType, typename AccReal>
static void BatchNormAddRelualizationUpdateOutput(mshadow::Stream<gpu> *s,
                                                  const OpContext &ctx,
                                                  const BatchNormAddReluParam& param,
                                                  const std::vector<TBlob> &in_data,
                                                  const std::vector<TBlob> &out_data,
                                                  const std::vector<TBlob> &aux_states,
                                                  const uint32_t flags,
                                                  double momentum,
                                                  double eps) {
  batchnormaddrelu::BNTensor3<DType> input = batchnormaddrelu::BNTensor3<DType>(
    in_data[batchnormaddrelu::kData], param.axis);
  batchnormaddrelu::BNTensor3<DType> output = batchnormaddrelu::BNTensor3<DType>(
    out_data[batchnormaddrelu::kOut], param.axis);
  DeviceTensor1 weight = devicetensor<AccReal, 1>(in_data[batchnormaddrelu::kGamma]);
  DeviceTensor1 bias = devicetensor<AccReal, 1>(in_data[batchnormaddrelu::kBeta]);
  DeviceTensor1 runningMean = devicetensor<AccReal, 1>(aux_states[batchnormaddrelu::kMovingMean]);
  DeviceTensor1 runningVar = devicetensor<AccReal, 1>(aux_states[batchnormaddrelu::kMovingVar]);
  DeviceTensor1 saveMean = devicetensor<AccReal, 1>(out_data[batchnormaddrelu::kMean]);
  DeviceTensor1 saveInvStd = devicetensor<AccReal, 1>(out_data[batchnormaddrelu::kVar]);
  DCHECK_GT(weight.numElements(), 0);
  if ((flags & IS_TRAINING_FLAG) == 0 || (flags & USE_GLOBAL_STATS_FLAG) != 0) {
    // Inference (or forced global stats): normalize with running statistics.
    dim3 blocks(input.ChannelCount());
    dim3 threads(batchnormaddrelu::cuda::getNumThreads(input.InnerSize(), false));
    BatchNormAddRelualizationUpdateOutputInferenceKernel<DType, AccReal, DeviceTensor1,
      batchnormaddrelu::BNTensor3<DType>>
      <<< blocks, threads, 0, mshadow::Stream<gpu>::GetStream(s) >>> (
      input, output, runningMean, runningVar, saveMean,
      saveInvStd, weight, bias, eps, flags);
  } else {
    // Training: compute batch statistics inside the kernel.
    dim3 blocks(input.ChannelCount());
    dim3 threads(batchnormaddrelu::cuda::getNumThreads(input.InnerSize(), false));
    BatchNormAddRelualizationUpdateOutputKernel<DType, AccReal, DeviceTensor1,
      batchnormaddrelu::BNTensor3<DType>>
      << < blocks, threads, 0, mshadow::Stream<gpu>::GetStream(s) >> > (
      input, output, weight, bias, eps, momentum, runningMean, runningVar,
      saveMean, saveInvStd, flags);
  }
  MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormAddRelualizationUpdateOutput);
}
/*!
 * \brief Host-side launcher for the backward pass: builds device views of all
 *        gradient/state tensors and launches the backward kernel with one
 *        block per channel on the stream owned by `s`.
 */
template<typename DType, typename AccReal>
static void BatchNormAddRelualizationBackward(mshadow::Stream<gpu> *s,
                                              const OpContext &ctx,
                                              const BatchNormAddReluParam& param,
                                              const std::vector<TBlob> &out_grad,
                                              const std::vector<TBlob> &in_data,
                                              const std::vector<TBlob> &out_data,
                                              const std::vector<TBlob> &in_grad,
                                              const std::vector<TBlob> &aux_states,
                                              const uint32_t flags,
                                              double momentum,
                                              double eps) {
  batchnormaddrelu::BNTensor3<DType> input = batchnormaddrelu::BNTensor3<DType>(
    in_data[batchnormaddrelu::kData], param.axis);
  batchnormaddrelu::BNTensor3<DType>gradOutput = batchnormaddrelu::BNTensor3<DType>(
    out_grad[batchnormaddrelu::kOut], param.axis);
  batchnormaddrelu::BNTensor3<DType>gradInput = batchnormaddrelu::BNTensor3<DType>(
    in_grad[batchnormaddrelu::kData], param.axis);
  CHECK_EQ(gradOutput.Size(), gradInput.Size());
  CUDATensors<DeviceTensor1> tensors;
  tensors.gradWeight = devicetensor<AccReal, 1>(in_grad[batchnormaddrelu::kGamma]);
  tensors.gradBias = devicetensor<AccReal, 1>(in_grad[batchnormaddrelu::kBeta]);
  tensors.weight = devicetensor<AccReal, 1>(in_data[batchnormaddrelu::kGamma]);
  tensors.runningMean = devicetensor<AccReal, 1>(aux_states[batchnormaddrelu::kMovingMean]);
  tensors.runningVar = devicetensor<AccReal, 1>(aux_states[batchnormaddrelu::kMovingVar]);
  tensors.saveMean = devicetensor<AccReal, 1>(out_data[batchnormaddrelu::kMean]);
  tensors.saveInvStd = devicetensor<AccReal, 1>(out_data[batchnormaddrelu::kVar]);
  DCHECK_GT(tensors.weight.numElements(), 0);
  // Debug builds cap the block size lower to ease stepping/inspection.
#ifdef NDEBUG
  constexpr bool SMALLER_THREADS = false;
#else
  constexpr bool SMALLER_THREADS = true;
#endif
  dim3 blocks(gradOutput.ChannelCount());
  dim3 threads(batchnormaddrelu::cuda::getNumThreads(gradOutput.InnerSize(), SMALLER_THREADS));
  BatchNormAddRelualizationBackwardKernel<DType, AccReal, DeviceTensor1,
    batchnormaddrelu::BNTensor3<DType>>
    <<< blocks, threads, 0, mshadow::Stream<gpu>::GetStream(s) >>> (
    input, gradOutput, gradInput, tensors, flags, momentum, eps);
  MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormAddRelualizationBackward);
}
} // namespace cuda
} // namespace batchnormaddrelu
/*!
 * \brief Pack the training state, parameter options and per-output write
 *        requests into the bit mask consumed by the batchnorm-add-relu kernels.
 */
template<typename xpu, typename DType, typename AccReal>
static inline uint32_t SetupFlags(const OpContext &ctx,
                                  const BatchNormAddReluParam& params,
                                  const std::vector<OpReqType> &req) {
  uint32_t flags = 0;
  if (ctx.is_train)
    flags |= IS_TRAINING_FLAG;
  if (params.fix_gamma)
    flags |= FIX_GAMMA_FLAG;
  if (params.use_global_stats)
    flags |= USE_GLOBAL_STATS_FLAG;
  // Only request kernel-side writes for outputs the graph actually wants.
  if (IsBNAddReluWriting(req[batchnormaddrelu::kData]))
    flags |= WRITE_DATA_FLAG;
  if (IsBNAddReluWriting(req[batchnormaddrelu::kGamma]))
    flags |= WRITE_GAMMA_FLAG;
  if (IsBNAddReluWriting(req[batchnormaddrelu::kBeta]))
    flags |= WRITE_BETA_FLAG;
  return flags;
}
/*! \brief Forward batch-norm pass on GPU: thin adapter that computes the
 *  flag mask from the op context/request vector and forwards all tensors to
 *  the CUDA implementation. */
template<typename xpu, typename DType, typename AccReal>
void BatchNormAddReluForwardImpl(mshadow::Stream<gpu> *stream,
                                 const OpContext &ctx, const BatchNormAddReluParam& param_,
                                 const std::vector<TBlob> &in_data,
                                 const std::vector<OpReqType> &req,
                                 const std::vector<TBlob> &out_data,
                                 const std::vector<TBlob> &aux_states) {
  batchnormaddrelu::cuda::BatchNormAddRelualizationUpdateOutput<DType, AccReal>(
    stream,
    ctx,
    param_,
    in_data,
    out_data,
    aux_states,
    SetupFlags<xpu, DType, AccReal>(ctx, param_, req),
    param_.momentum,
    param_.eps);
  MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormAddReluOp_DoForward_gpu);
}
/*! \brief Backward batch-norm pass on GPU: thin adapter that computes the
 *  flag mask from the op context/request vector and forwards all tensors to
 *  the CUDA implementation. */
template<typename xpu, typename DType, typename AccReal>
void BatchNormAddReluBackwardImpl(mshadow::Stream<gpu> *stream,
                                  const OpContext &ctx, const BatchNormAddReluParam& param_,
                                  const std::vector<TBlob> &out_grad,
                                  const std::vector<TBlob> &in_data,
                                  const std::vector<TBlob> &out_data,
                                  const std::vector<OpReqType> &req,
                                  const std::vector<TBlob> &in_grad,
                                  const std::vector<TBlob> &aux_states) {
  batchnormaddrelu::cuda::BatchNormAddRelualizationBackward<DType, AccReal>(
    stream,
    ctx,
    param_,
    out_grad,
    in_data,
    out_data,
    in_grad,
    aux_states,
    SetupFlags<xpu, DType, AccReal>(ctx, param_, req),
    param_.momentum,
    param_.eps);
  MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormAddReluOp_DoBackward_gpu);
}
#if MXNET_USE_CUDNN == 1
// Return the thread-local NHWC batchnorm-add-relu operator singleton,
// (re)initialized with `param` on every call. Thread-local storage keeps
// per-thread operator state from being shared across threads.
template<typename DType>
static NhwcBatchNormAddReluOp<DType> &GetNhwcBNAddReluOp(const BatchNormAddReluParam& param) {
#if DMLC_CXX11_THREAD_LOCAL
  static thread_local NhwcBatchNormAddReluOp<DType> op;
#else
  static MX_THREAD_LOCAL NhwcBatchNormAddReluOp<DType> op;
#endif
  op.Init(param);
  return op;
}
#endif
// GPU FCompute entry for the forward pass. Splits the 6 inputs into
// data inputs (everything before the moving-mean slot, plus the addend
// appended at the end) and aux state (moving mean/var), then dispatches to
// the fused NHWC implementation when supported, else the generic kernels.
template<>
void BatchNormAddReluCompute<gpu>(const nnvm::NodeAttrs& attrs,
                                  const OpContext& ctx, const std::vector<TBlob>& inputs,
                                  const std::vector<OpReqType>& req,
                                  const std::vector<TBlob>& outputs) {
  BatchNormAddReluParam param = nnvm::get<BatchNormAddReluParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), 6U);
  std::vector<TBlob> in_data(inputs.begin(), inputs.begin() + batchnormaddrelu::kInMovingMean);
  in_data.push_back(inputs[batchnormaddrelu::kAddend]);
  std::vector<TBlob> aux_states(inputs.begin() + batchnormaddrelu::kInMovingMean,
                                inputs.begin() + batchnormaddrelu::kAddend);
  int dtype = inputs[0].type_flag_;
  mxnet::TShape shape = inputs[0].shape_;
  // Normalize a possibly-negative axis against the input rank.
  param.axis = mxnet::op::batchnormaddrelu::GetRealAxis(shape, param.axis);
#if MXNET_USE_CUDNN == 1
  // We enable the discrete NHWC cuda kernels by the same 'cudnn_off' flag.
  MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DType, AccReal, {
    if (!param.cudnn_off &&
        NhwcBatchNormAddReluOp<DType>::Supports(param, dtype, shape, ctx.run_ctx.ctx))
      GetNhwcBNAddReluOp<DType>(param).Forward(ctx, in_data, req, outputs, aux_states);
    else
      BatchNormAddReluForward<gpu, DType, AccReal>(ctx, param, in_data, req, outputs, aux_states);
  });
#else
  MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DType, AccReal, {
    BatchNormAddReluForward<gpu, DType, AccReal>(ctx, param, in_data, req, outputs, aux_states);
  });
#endif
}
// GPU backward dispatch for the fused BatchNorm+Add+ReLU operator.
// Mirrors the forward dispatch: when the NHWC kernels support this
// configuration and `cudnn_off` is not set, use the cached NHWC operator;
// otherwise fall back to the generic BatchNormAddReluBackward implementation.
template<>
void BatchNormAddReluGradCompute<gpu>(const nnvm::NodeAttrs& attrs,
                               const OpContext& ctx, const std::vector<TBlob>& inputs,
                               const std::vector<OpReqType>& req,
                               const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 9U);
  BatchNormAddReluParam param = nnvm::get<BatchNormAddReluParam>(attrs.parsed);
  int dtype = inputs[0].type_flag_;
  mxnet::TShape shape = inputs[0].shape_;

  // Normalize a possibly-negative axis against the input rank.
  param.axis = mxnet::op::batchnormaddrelu::GetRealAxis(shape, param.axis);
#if MXNET_USE_CUDNN == 1
  // We enable the discrete NHWC cuda kernels by the same 'cudnn_off' flag.
  MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DType, AccReal, {
    if (!param.cudnn_off &&
        NhwcBatchNormAddReluOp<DType>::Supports(param, dtype, shape, ctx.run_ctx.ctx))
      GetNhwcBNAddReluOp<DType>(param).Backward(ctx, inputs, req, outputs);
    else
      BatchNormAddReluBackward<gpu, DType, AccReal>(ctx, param, inputs, req, outputs);
  });
#else
  MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DType, AccReal, {
    BatchNormAddReluBackward<gpu, DType, AccReal>(ctx, param, inputs, req, outputs);
  });
#endif
}
namespace batchnormaddrelu {
// Can the function of this batch norm op node be handled by norm convolution?
// Returns true only for a plain BatchNorm node (no fused activation) whose
// dtype/shape/context combination is supported by the NHWC kernels. Always
// false when CUDNN support is compiled out.
bool IsCompatibleBatchNorm(const nnvm::ObjectPtr& node, const int& dtype,
                           const TShape& shape, const Context& ctx) {
  if (node->op() != Op::Get("BatchNorm"))
    return false;
  auto param = nnvm::get<BatchNormParam>(node->attrs.parsed);
  bool is_compatible = false;
#if MXNET_USE_CUDNN == 1
  MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
    // TODO(cfujitsang): add condition if using very specific BatchNorm parameters
    is_compatible = !param.act_type &&
                    NhwcBatchNormAddReluOp<DType>::Supports(param, dtype, shape, ctx);
  });
  // Fix: the closing '});' of MSHADOW_REAL_TYPE_SWITCH previously sat below
  // the #endif, leaving an unmatched '});' (a compile error) whenever
  // MXNET_USE_CUDNN != 1. It must stay inside the conditional block.
#endif  // MXNET_USE_CUDNN
  return is_compatible;
}
}  // namespace batchnormaddrelu
// Register the GPU forward and backward implementations for the fused
// BatchNorm+Add+ReLU operator with NNVM.
NNVM_REGISTER_OP(BatchNormAddRelu)
.set_attr<FCompute>("FCompute<gpu>", BatchNormAddReluCompute<gpu>);

NNVM_REGISTER_OP(_backward_BatchNormAddRelu)
.set_attr<FCompute>("FCompute<gpu>", BatchNormAddReluGradCompute<gpu>);
} // namespace op
} // namespace mxnet
|
006ad9b6ced50d07d828a3460178e7e7f6f0df63.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// PAZAM: A CUDA Music Identification Tool
// Michael Wilner - Cody Van Etten - Ahmed Suhyl
// ...
// GPU Accelerated Version
#include <stdio.h>
#include <math.h>
#include "tinydir.h"
#include "kernel.hip"
#include <time.h>
#define BYTES_PER_SAMPLE 2
#define MAXSONGS 10
#define COLSPERSONG 10
#define FREQBANDWIDTH 50
#define MAXCOLS (MAXSONGS*COLSPERSONG)
#define THREADS_PER_BLOCK N
#define FATAL(msg, ...) \
do {\
fprintf(stderr, "[%s:%d] "msg"\n", __FILE__, __LINE__, ##__VA_ARGS__);\
exit(-1);\
} while(0)
//static float A[2*N]; /* available for modifying transform */
//static float Z[2*N];
int FUZ_FACTOR = 2;

/* Pack four peak values into a single fingerprint hash.
 * Each value is first quantized down to a multiple of FUZ_FACTOR so that
 * small measurement jitter produces the same hash, then the four quantized
 * values occupy disjoint decimal digit ranges of the result. */
long hash1(long p1, long p2, long p3, long p4) {
	long q1 = p1 - (p1 % FUZ_FACTOR);
	long q2 = p2 - (p2 % FUZ_FACTOR);
	long q3 = p3 - (p3 % FUZ_FACTOR);
	long q4 = p4 - (p4 % FUZ_FACTOR);
	return q4 * 100000000 + q3 * 100000 + q2 * 100 + q1;
}
// Parse a 16-bit PCM WAV file, upload its samples (as interleaved
// real/imaginary float pairs) to the device, and run the `parallelhash`
// kernel to record this song's fingerprint hashes into `hash_songs`.
// mysongid selects the row of hash_songs this song writes to (0 = query).
// Returns csize>>6 as a length proxy (see final comment), or 0 on open failure.
// NOTE(review): relies on N, BLOCK_SIZE, THREADS_PER_BLOCK, FATAL and the
// parallelhash kernel from kernel.hip — not visible here.
int generatehashes(char *input_file, int mysongid, int * hash_songs)
{
//float* Z = (float*) malloc(sizeof(float) * N * 2);
int i, sect, sectcnt;
FILE * inp;
char riff[4];
int sread; /* bytes read/written */
int fsize;
char wave[4];
char fmt[4];
int nbytes;
short ccode;
short channels;
int rate;
int avgrate; /* average rate in samples per second */
short blockalign;
short bps; /* bits per sample */
char data[4];
int csize;
char stuf;
short soundin; /* sample of sound */
int bad; /* flags bad data in read */
//int nbread; /* number of bytes read */
inp = fopen(input_file, "rb");
if(inp == NULL)
{
printf("can not open %s for reading. \n", input_file);
return 0;
}
// Walk the RIFF/WAVE header field by field. Read results are not validated;
// a malformed file will silently produce garbage field values.
//printf("reading %s \n", input_file);
sread = fread(&riff[0], 1, 4, inp);
//printf("first 4 bytes should be RIFF, <%c%c%c%c>\n", riff[0],riff[1],riff[2],riff[3]);
sread = fread(&fsize, 1, 4, inp);
//printf("file has %d +8 bytes \n", fsize);
sread = fread(&wave[0], 1, 4, inp);
//printf("should be WAVE, <%c%c%c%c>\n",wave[0],wave[1],wave[2],wave[3]);
sread = fread(&fmt[0], 1, 4, inp);
//printf("should be fmt, <%c%c%c%c>\n",fmt[0],fmt[1],fmt[2],fmt[3]);
sread = fread(&nbytes, 1, 4, inp);
//printf("block has %d more bytes \n", nbytes);
sread = fread(&ccode, 1, 2, inp);
//printf("compression code = %d \n", ccode);
nbytes = nbytes-2;
sread = fread(&channels, 1, 2, inp);
//printf("channels = %d \n", channels);
nbytes = nbytes-2;
sread = fread(&rate, 1, 4, inp);
//printf("rate = %d \n", rate);
nbytes = nbytes-4;
sread = fread(&avgrate, 1, 4, inp);
//printf("avg rate = %d \n", avgrate);
nbytes = nbytes-4;
sread = fread(&blockalign, 1, 2, inp);
//printf("blockalign = %d \n", blockalign);
nbytes = nbytes-2;
sread = fread(&bps, 1, 2, inp);
//printf("bits per sample = %d \n", bps);
nbytes = nbytes-2;
//printf("bytes left in fmt = %d \n", nbytes);
// Skip any extra fmt-chunk bytes beyond the standard 16.
for(i=0; i<nbytes; i++) sread = fread(&stuf, 1, 1, inp);
sread = fread(&data[0], 1, 4, inp);
//printf("should be data, <%c%c%c%c>\n",data[0],data[1],data[2],data[3]);
sread = fread(&csize, 1, 4, inp);
//printf("chunk has %d more bytes \n", csize);
//nbread = 44+nbytes;
//printf("%d bytes read so far \n", nbread);
bad = 0;
sect = 0;
sectcnt = 0;
// G holds N complex samples per section: [re, im, re, im, ...].
// NOTE(review): with BYTES_PER_SAMPLE==2 each 2-byte sample expands to two
// floats, so the highest index written (sectcnt*2*N + 2*sect+1) can reach
// csize — i.e. the csize-float allocation is exactly filled. Verify if
// BYTES_PER_SAMPLE ever changes.
float* G = (float*) malloc(sizeof(float) * csize);
for(i=0; i<csize; i+=BYTES_PER_SAMPLE)
{
if(sect<N)
{
sread = fread(&soundin, 1, BYTES_PER_SAMPLE, inp); //We have to make sure we're reading both channels
if(sread != BYTES_PER_SAMPLE && bad==0) { bad=1; printf("no read on byte %d \n", i); }
G[sectcnt*2*N + 2*sect] = (float)soundin;
G[sectcnt*2*N + 2*sect+1] = 0.0; /* no complex component */
sect++;
}
else
{
// Section full: advance to the next section without consuming a sample
// (i is rewound so this iteration doesn't count against csize).
sectcnt++;
i-=BYTES_PER_SAMPLE;
sect = 0;
}
}
float *in_h;
float *in_d;
hipError_t cuda_ret;
dim3 dim_grid, dim_block;
in_h = G;
// NOTE(review): only sectcnt*N floats are allocated/copied although G was
// populated with 2*N floats per section — confirm against the layout
// parallelhash expects in kernel.hip.
cuda_ret = hipMalloc((void**)&in_d, sectcnt * N * sizeof(float));
if(cuda_ret != hipSuccess) FATAL("Unable to allocate input device memory");
hipDeviceSynchronize();
cuda_ret = hipMemcpy(in_d, in_h, sectcnt * N * sizeof(float),hipMemcpyHostToDevice);
if(cuda_ret != hipSuccess) FATAL("Unable to copy input audio to device");
hipDeviceSynchronize();
/* Launch the song process kernel */
dim_block.x = BLOCK_SIZE; dim_block.y = dim_block.z = 1;
dim_grid.x = (sectcnt - 1)/THREADS_PER_BLOCK + 1; dim_grid.y = dim_grid.z = 1;
hipLaunchKernelGGL(( parallelhash), dim3(dim_grid), dim3(dim_block), 0, 0, in_d, sectcnt, hash_songs, mysongid);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess) FATAL("Kernel failed");
hipFree(in_d);
fclose(inp);
free(G);
return csize>>6; //Since we can't count the hashes directly, we just use the song length
}
// PAZAM driver: fingerprint every WAV under ../data plus the query excerpt
// (argv[1]), score each library song against the query on the GPU, and print
// the best length-normalized match.
int main(int argc, char * argv[])
{
int i = 0, n=0;
float count = 0;            /* best (highest) normalized score seen so far */
int numsongs = 0;
char filenames [MAXSONGS+1][_TINYDIR_FILENAME_MAX];
int filesizes [MAXSONGS+1];  /* length proxy returned by generatehashes */
int songscores [MAXSONGS+1];
float songmatch [MAXSONGS+1];
clock_t start, diff;
clock_t start_total, diff_total;
int msec;
int * hash_songs;            /* device table: MAXELEMS ints per song slot */
printf("GPU Pazam running... \n");
if(argc<2)
{
printf("no excerpt file to open \n");
exit(1);
}
start_total = clock();
hipError_t cuda_ret;
cuda_ret = hipMalloc((void**)&hash_songs, MAXELEMS*(MAXSONGS+1)*sizeof(int));
if(cuda_ret != hipSuccess) FATAL("Unable to allocate output device memory");
cuda_ret = hipMemset(hash_songs, 0, MAXELEMS*(MAXSONGS+1)*sizeof(int));
if(cuda_ret != hipSuccess) FATAL("Unable to zero out device memory");
printf("Generating hashes for original files.. \n");
// Index every regular file in ../data as a library song (ids 1..numsongs).
// NOTE(review): more than MAXSONGS files would overflow the fixed arrays —
// no bound check here; confirm the data directory size is controlled.
tinydir_dir dir;
tinydir_open(&dir, "../data");
while (dir.has_next)
{
tinydir_file file;
tinydir_readfile(&dir, &file);
if (file.is_reg)
{
numsongs++;
start = clock();
filesizes[numsongs] = generatehashes(file.path, numsongs, hash_songs);
diff = clock() - start;
msec = diff * 1000 / CLOCKS_PER_SEC;
printf("%d:%d length for %s\n", numsongs, filesizes[numsongs], file.path);
printf("Time taken: %d seconds %d milliseconds\n", msec/1000, msec%1000);
strcpy(filenames[numsongs],file.name);
}
tinydir_next(&dir);
}
tinydir_close(&dir);
printf("Generating hashes for recorded file.. \n");
// Song id 0 is reserved for the query excerpt.
generatehashes(argv[1], 0, hash_songs);
printf("Calculating score.. \n");
int * songscores_d;
cuda_ret = hipMalloc((void**)&songscores_d, (MAXSONGS+1)*sizeof(int));
if(cuda_ret != hipSuccess) FATAL("Unable to allocate output device memory");
cuda_ret = hipMemset(songscores_d, 0, (MAXSONGS+1)*sizeof(int));
if(cuda_ret != hipSuccess) FATAL("Unable to zero out device memory");
dim3 dim_grid, dim_block;
dim_block.x = BLOCK_SIZE; dim_block.y = dim_block.z = 1;
dim_grid.x = (MAXELEMS - 1)/THREADS_PER_BLOCK + 1; dim_grid.y = dim_grid.z = 1;
hipLaunchKernelGGL(( calc_scores), dim3(dim_grid), dim3(dim_block), 0, 0, hash_songs, songscores_d);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess) FATAL("Kernel failed");
cuda_ret = hipMemcpy(songscores, songscores_d, (MAXSONGS+1)*sizeof(int), hipMemcpyDeviceToHost);
if(cuda_ret != hipSuccess) FATAL("Unable to copy output to host");
hipDeviceSynchronize();
hipFree(songscores_d);
hipFree(hash_songs);
// Normalize each raw score by the song-length proxy and track the best.
// NOTE(review): filesizes[i] of 0 (unreadable file) would divide by zero.
for(i = 1; i<=numsongs; i++){
songmatch[i] = ((float)songscores[i])/((float)filesizes[i]);
printf("Score for %s = %f\n", filenames[i], songmatch[i]);
if(songmatch[i]>count){
count = songmatch[i];
n = i;
}
}
printf("Best Score: %s\n", filenames[n]);
diff_total = clock() - start_total;
msec = diff_total * 1000 / CLOCKS_PER_SEC;
printf("Total time taken: %d seconds %d milliseconds\n", msec/1000, msec%1000);
return 0;
}
| 006ad9b6ced50d07d828a3460178e7e7f6f0df63.cu | // PAZAM: A CUDA Music Identification Tool
// Michael Wilner - Cody Van Etten - Ahmed Suhyl
// ...
// GPU Accelerated Version
#include <stdio.h>
#include <math.h>
#include "tinydir.h"
#include "kernel.cu"
#include <time.h>
#define BYTES_PER_SAMPLE 2
#define MAXSONGS 10
#define COLSPERSONG 10
#define FREQBANDWIDTH 50
#define MAXCOLS (MAXSONGS*COLSPERSONG)
#define THREADS_PER_BLOCK N
#define FATAL(msg, ...) \
do {\
fprintf(stderr, "[%s:%d] "msg"\n", __FILE__, __LINE__, ##__VA_ARGS__);\
exit(-1);\
} while(0)
//static float A[2*N]; /* available for modifying transform */
//static float Z[2*N];
int FUZ_FACTOR = 2;

/* Pack four peak values into a single fingerprint hash.
 * Each value is first quantized down to a multiple of FUZ_FACTOR so that
 * small measurement jitter produces the same hash, then the four quantized
 * values occupy disjoint decimal digit ranges of the result. */
long hash1(long p1, long p2, long p3, long p4) {
	long q1 = p1 - (p1 % FUZ_FACTOR);
	long q2 = p2 - (p2 % FUZ_FACTOR);
	long q3 = p3 - (p3 % FUZ_FACTOR);
	long q4 = p4 - (p4 % FUZ_FACTOR);
	return q4 * 100000000 + q3 * 100000 + q2 * 100 + q1;
}
// Parse a 16-bit PCM WAV file, upload its samples (as interleaved
// real/imaginary float pairs) to the device, and run the `parallelhash`
// kernel to record this song's fingerprint hashes into `hash_songs`.
// mysongid selects the row of hash_songs this song writes to (0 = query).
// Returns csize>>6 as a length proxy (see final comment), or 0 on open failure.
// NOTE(review): relies on N, BLOCK_SIZE, THREADS_PER_BLOCK, FATAL and the
// parallelhash kernel from kernel.cu — not visible here.
int generatehashes(char *input_file, int mysongid, int * hash_songs)
{
//float* Z = (float*) malloc(sizeof(float) * N * 2);
int i, sect, sectcnt;
FILE * inp;
char riff[4];
int sread; /* bytes read/written */
int fsize;
char wave[4];
char fmt[4];
int nbytes;
short ccode;
short channels;
int rate;
int avgrate; /* average rate in samples per second */
short blockalign;
short bps; /* bits per sample */
char data[4];
int csize;
char stuf;
short soundin; /* sample of sound */
int bad; /* flags bad data in read */
//int nbread; /* number of bytes read */
inp = fopen(input_file, "rb");
if(inp == NULL)
{
printf("can not open %s for reading. \n", input_file);
return 0;
}
// Walk the RIFF/WAVE header field by field. Read results are not validated;
// a malformed file will silently produce garbage field values.
//printf("reading %s \n", input_file);
sread = fread(&riff[0], 1, 4, inp);
//printf("first 4 bytes should be RIFF, <%c%c%c%c>\n", riff[0],riff[1],riff[2],riff[3]);
sread = fread(&fsize, 1, 4, inp);
//printf("file has %d +8 bytes \n", fsize);
sread = fread(&wave[0], 1, 4, inp);
//printf("should be WAVE, <%c%c%c%c>\n",wave[0],wave[1],wave[2],wave[3]);
sread = fread(&fmt[0], 1, 4, inp);
//printf("should be fmt, <%c%c%c%c>\n",fmt[0],fmt[1],fmt[2],fmt[3]);
sread = fread(&nbytes, 1, 4, inp);
//printf("block has %d more bytes \n", nbytes);
sread = fread(&ccode, 1, 2, inp);
//printf("compression code = %d \n", ccode);
nbytes = nbytes-2;
sread = fread(&channels, 1, 2, inp);
//printf("channels = %d \n", channels);
nbytes = nbytes-2;
sread = fread(&rate, 1, 4, inp);
//printf("rate = %d \n", rate);
nbytes = nbytes-4;
sread = fread(&avgrate, 1, 4, inp);
//printf("avg rate = %d \n", avgrate);
nbytes = nbytes-4;
sread = fread(&blockalign, 1, 2, inp);
//printf("blockalign = %d \n", blockalign);
nbytes = nbytes-2;
sread = fread(&bps, 1, 2, inp);
//printf("bits per sample = %d \n", bps);
nbytes = nbytes-2;
//printf("bytes left in fmt = %d \n", nbytes);
// Skip any extra fmt-chunk bytes beyond the standard 16.
for(i=0; i<nbytes; i++) sread = fread(&stuf, 1, 1, inp);
sread = fread(&data[0], 1, 4, inp);
//printf("should be data, <%c%c%c%c>\n",data[0],data[1],data[2],data[3]);
sread = fread(&csize, 1, 4, inp);
//printf("chunk has %d more bytes \n", csize);
//nbread = 44+nbytes;
//printf("%d bytes read so far \n", nbread);
bad = 0;
sect = 0;
sectcnt = 0;
// G holds N complex samples per section: [re, im, re, im, ...].
// NOTE(review): with BYTES_PER_SAMPLE==2 each 2-byte sample expands to two
// floats, so the highest index written (sectcnt*2*N + 2*sect+1) can reach
// csize — i.e. the csize-float allocation is exactly filled. Verify if
// BYTES_PER_SAMPLE ever changes.
float* G = (float*) malloc(sizeof(float) * csize);
for(i=0; i<csize; i+=BYTES_PER_SAMPLE)
{
if(sect<N)
{
sread = fread(&soundin, 1, BYTES_PER_SAMPLE, inp); //We have to make sure we're reading both channels
if(sread != BYTES_PER_SAMPLE && bad==0) { bad=1; printf("no read on byte %d \n", i); }
G[sectcnt*2*N + 2*sect] = (float)soundin;
G[sectcnt*2*N + 2*sect+1] = 0.0; /* no complex component */
sect++;
}
else
{
// Section full: advance to the next section without consuming a sample
// (i is rewound so this iteration doesn't count against csize).
sectcnt++;
i-=BYTES_PER_SAMPLE;
sect = 0;
}
}
float *in_h;
float *in_d;
cudaError_t cuda_ret;
dim3 dim_grid, dim_block;
in_h = G;
// NOTE(review): only sectcnt*N floats are allocated/copied although G was
// populated with 2*N floats per section — confirm against the layout
// parallelhash expects in kernel.cu.
cuda_ret = cudaMalloc((void**)&in_d, sectcnt * N * sizeof(float));
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate input device memory");
cudaDeviceSynchronize();
cuda_ret = cudaMemcpy(in_d, in_h, sectcnt * N * sizeof(float),cudaMemcpyHostToDevice);
if(cuda_ret != cudaSuccess) FATAL("Unable to copy input audio to device");
cudaDeviceSynchronize();
/* Launch the song process kernel */
dim_block.x = BLOCK_SIZE; dim_block.y = dim_block.z = 1;
dim_grid.x = (sectcnt - 1)/THREADS_PER_BLOCK + 1; dim_grid.y = dim_grid.z = 1;
parallelhash<<<dim_grid, dim_block>>>(in_d, sectcnt, hash_songs, mysongid);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Kernel failed");
cudaFree(in_d);
fclose(inp);
free(G);
return csize>>6; //Since we can't count the hashes directly, we just use the song length
}
// PAZAM driver: fingerprint every WAV under ../data plus the query excerpt
// (argv[1]), score each library song against the query on the GPU, and print
// the best length-normalized match.
int main(int argc, char * argv[])
{
int i = 0, n=0;
float count = 0;            /* best (highest) normalized score seen so far */
int numsongs = 0;
char filenames [MAXSONGS+1][_TINYDIR_FILENAME_MAX];
int filesizes [MAXSONGS+1];  /* length proxy returned by generatehashes */
int songscores [MAXSONGS+1];
float songmatch [MAXSONGS+1];
clock_t start, diff;
clock_t start_total, diff_total;
int msec;
int * hash_songs;            /* device table: MAXELEMS ints per song slot */
printf("GPU Pazam running... \n");
if(argc<2)
{
printf("no excerpt file to open \n");
exit(1);
}
start_total = clock();
cudaError_t cuda_ret;
cuda_ret = cudaMalloc((void**)&hash_songs, MAXELEMS*(MAXSONGS+1)*sizeof(int));
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate output device memory");
cuda_ret = cudaMemset(hash_songs, 0, MAXELEMS*(MAXSONGS+1)*sizeof(int));
if(cuda_ret != cudaSuccess) FATAL("Unable to zero out device memory");
printf("Generating hashes for original files.. \n");
// Index every regular file in ../data as a library song (ids 1..numsongs).
// NOTE(review): more than MAXSONGS files would overflow the fixed arrays —
// no bound check here; confirm the data directory size is controlled.
tinydir_dir dir;
tinydir_open(&dir, "../data");
while (dir.has_next)
{
tinydir_file file;
tinydir_readfile(&dir, &file);
if (file.is_reg)
{
numsongs++;
start = clock();
filesizes[numsongs] = generatehashes(file.path, numsongs, hash_songs);
diff = clock() - start;
msec = diff * 1000 / CLOCKS_PER_SEC;
printf("%d:%d length for %s\n", numsongs, filesizes[numsongs], file.path);
printf("Time taken: %d seconds %d milliseconds\n", msec/1000, msec%1000);
strcpy(filenames[numsongs],file.name);
}
tinydir_next(&dir);
}
tinydir_close(&dir);
printf("Generating hashes for recorded file.. \n");
// Song id 0 is reserved for the query excerpt.
generatehashes(argv[1], 0, hash_songs);
printf("Calculating score.. \n");
int * songscores_d;
cuda_ret = cudaMalloc((void**)&songscores_d, (MAXSONGS+1)*sizeof(int));
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate output device memory");
cuda_ret = cudaMemset(songscores_d, 0, (MAXSONGS+1)*sizeof(int));
if(cuda_ret != cudaSuccess) FATAL("Unable to zero out device memory");
dim3 dim_grid, dim_block;
dim_block.x = BLOCK_SIZE; dim_block.y = dim_block.z = 1;
dim_grid.x = (MAXELEMS - 1)/THREADS_PER_BLOCK + 1; dim_grid.y = dim_grid.z = 1;
calc_scores<<<dim_grid, dim_block>>>(hash_songs, songscores_d);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Kernel failed");
cuda_ret = cudaMemcpy(songscores, songscores_d, (MAXSONGS+1)*sizeof(int), cudaMemcpyDeviceToHost);
if(cuda_ret != cudaSuccess) FATAL("Unable to copy output to host");
cudaDeviceSynchronize();
cudaFree(songscores_d);
cudaFree(hash_songs);
// Normalize each raw score by the song-length proxy and track the best.
// NOTE(review): filesizes[i] of 0 (unreadable file) would divide by zero.
for(i = 1; i<=numsongs; i++){
songmatch[i] = ((float)songscores[i])/((float)filesizes[i]);
printf("Score for %s = %f\n", filenames[i], songmatch[i]);
if(songmatch[i]>count){
count = songmatch[i];
n = i;
}
}
printf("Best Score: %s\n", filenames[n]);
diff_total = clock() - start_total;
msec = diff_total * 1000 / CLOCKS_PER_SEC;
printf("Total time taken: %d seconds %d milliseconds\n", msec/1000, msec%1000);
return 0;
}
|
67c85663a344a5b6a7d3f2a3bb314acab61c9c93.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "tensor.h"
#include <assert.h>
#include "t_contract_d.h"
#include "../common.h"
#include <hip/hip_runtime.h>
#include "rocblas.h"
#define BLOCKSIZE 32
/* CPU reference GEMM: C = A * B.
 * A is M x K, B is K x N, C is M x N; all matrices row-major. */
void matMul(double *A,double *B,int M,int N, int K,double *C)
{
    for (int row = 0; row < M; row++) {
        for (int col = 0; col < N; col++) {
            double acc = 0.0;
            for (int k = 0; k < K; k++) {
                acc += A[row * K + k] * B[k * N + col];
            }
            C[row * N + col] = acc;
        }
    }
}
// Element-wise transpose of a rows x cols row-major matrix:
// mat_out[c * rows + r] = mat_in[r * cols + c].
// One thread per element; threads outside the matrix do nothing.
__global__ void mat_transpose_kernel(const double *mat_in, double *mat_out, int rows, int cols)
{
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= cols || row >= rows)
        return;
    mat_out[col * rows + row] = mat_in[row * cols + col];
}
// Host wrapper: transpose a rows x cols row-major matrix on the device.
// Allocates device buffers, copies the input up, launches
// mat_transpose_kernel with a 2D BLOCKSIZE x BLOCKSIZE grid, and copies the
// rows x cols result (now cols x rows logically) back into mat_out.
void mat_transpose_gpu(const double* mat_in, double* mat_out, int rows, int cols){
double *d_mat_in,*d_mat_out;
CHECK(hipMalloc((void**)&d_mat_in,rows*cols*sizeof(double)));
CHECK(hipMalloc((void**)&d_mat_out,rows*cols*sizeof(double)));
CHECK(hipMemcpy(d_mat_in, mat_in, rows*cols*sizeof(double), hipMemcpyHostToDevice));
dim3 block (BLOCKSIZE,BLOCKSIZE);
// Ceiling division so partial tiles at the edges are still covered.
dim3 grid ((cols+block.x-1)/block.x,(rows+block.y-1)/block.y);
// printf("M %d, N %d grid %d %d\n",rows,cols,grid.y,grid.x );
// Invoke kernel
hipLaunchKernelGGL(( mat_transpose_kernel), dim3(grid), dim3(block), 0, 0, d_mat_in,d_mat_out,rows,cols);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
// Read C from device memory
hipMemcpy(mat_out, d_mat_out, rows*cols*sizeof(double),hipMemcpyDeviceToHost);
CHECK(hipGetLastError());
// Free device memory
hipFree(d_mat_in);
hipFree(d_mat_out);
}
// Get a matrix element
// Reads (row, col) of a row-major matrix whose leading dimension (elements
// per row) is `stride`.
__device__ double GetElement(const double *A,int stride, int row, int col)
{
    const int offset = row * stride + col;
    return A[offset];
}
// Set a matrix element
// Writes `value` at (row, col) of a row-major matrix whose leading dimension
// (elements per row) is `stride`.
__device__ void SetElement(double *A, int row, int col,int stride,
                           double value)
{
    const int offset = row * stride + col;
    A[offset] = value;
}
// Naive GEMM kernel: one thread computes one element of C = A * B
// (A: MxK, B: KxN, C: MxN, all row-major). Out-of-range threads exit early.
__global__ void matMul_kernel1(double *d_A,double *d_B,int M, int N,int K,double *d_C){
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= M || col >= N)
        return;
    double acc = 0.0;
    for (int k = 0; k < K; k++) {
        acc += d_A[row * K + k] * d_B[k * N + col];
    }
    d_C[row * N + col] = acc;
}
// Host wrapper for the naive GEMM kernel: C = A * B with A (MxK), B (KxN),
// C (MxN), all row-major. Handles device allocation, transfers, a
// BLOCKSIZE x BLOCKSIZE 2D launch, and cleanup.
void matMul_gpu1(const double *A, const double *B, int M,int N,int K,double *C)
{
double *d_A,*d_B,*d_C;
CHECK(hipMalloc((void**)&d_A,M*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_B,N*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_C,M*N*sizeof(double)));
CHECK(hipMemcpy(d_A, A, M*K*sizeof(double), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, B, K*N*sizeof(double), hipMemcpyHostToDevice));
dim3 block (BLOCKSIZE,BLOCKSIZE);
// Ceiling division: grid covers the full MxN output including edge tiles.
dim3 grid ((N+block.x-1)/block.x,(M+block.y-1)/block.y);
// printf("M %d, N %d grid %d %d\n",M,N,grid.y,grid.x );
// Invoke kernel
hipLaunchKernelGGL(( matMul_kernel1), dim3(grid), dim3(block), 0, 0, d_A, d_B,M,N,K,d_C);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
// Read C from device memory
hipMemcpy(C, d_C, M*N*sizeof(double),hipMemcpyDeviceToHost);
CHECK(hipGetLastError());
// Free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
// GEMM kernel variant that reads A through its transpose: d_A_T is the KxM
// transpose of A, so element A[row][k] is fetched as d_A_T[k*M + row].
// One thread per element of C (MxN, row-major); out-of-range threads exit.
__global__ void matMul_kernel2(const double *d_A_T,const double *d_B,int M, int N,int K,double *d_C){
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= M || col >= N)
        return;
    double acc = 0.0;
    for (int k = 0; k < K; k++) {
        acc += d_A_T[k * M + row] * d_B[k * N + col];
    }
    d_C[row * N + col] = acc;
}
// Host wrapper: C = A * B computed via a transposed copy of A.
// First transposes A (MxK -> KxM) on the device with mat_transpose_kernel,
// then runs matMul_kernel2 which consumes the transposed layout.
// Fix: d_A_T was allocated but never freed, leaking M*K doubles of device
// memory per call (the analogous matMul_gpu_sm_coa does free its d_B_T).
void matMul_gpu2(const double *A, const double *B, int M,int N,int K,double *C)
{
double *d_A,*d_B,*d_A_T,*d_C;
CHECK(hipMalloc((void**)&d_A,M*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_B,N*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_C,M*N*sizeof(double)));
CHECK(hipMalloc((void**)&d_A_T,M*K*sizeof(double)));
CHECK(hipMemcpy(d_A, A, M*K*sizeof(double), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, B, K*N*sizeof(double), hipMemcpyHostToDevice));
dim3 block (BLOCKSIZE,BLOCKSIZE);
dim3 grid ((N+block.x-1)/block.x,(M+block.y-1)/block.y);
// Transpose launch covers the K x M output of the transpose.
dim3 grid_A_T ((K+block.x-1)/block.x,(M+block.y-1)/block.y);
// Invoke kernel
hipLaunchKernelGGL(( mat_transpose_kernel), dim3(grid_A_T), dim3(block), 0, 0, d_A, d_A_T, M, K);
CHECK(hipDeviceSynchronize());
hipLaunchKernelGGL(( matMul_kernel2), dim3(grid), dim3(block), 0, 0, d_A_T, d_B,M,N,K,d_C);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
// Read C from device memory
hipMemcpy(C, d_C, M*N*sizeof(double),hipMemcpyDeviceToHost);
CHECK(hipGetLastError());
// Free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipFree(d_A_T);  // was leaked before this fix
}
// Tiled shared-memory GEMM kernel: C = A * B (A: MxK, B: KxN, C: MxN,
// row-major). Each BLOCKSIZE x BLOCKSIZE thread block stages one tile of A
// and one tile of B in shared memory per K-step, zero-padding tiles that
// extend past the matrix edges so the inner product loop needs no bounds
// checks. Requires blockDim == (BLOCKSIZE, BLOCKSIZE).
__global__ void matMul_kernel_sm(double *d_A,double *d_B,int M, int N,int K,double *d_C){
double CValue = 0;
int row = blockIdx.y*BLOCKSIZE + threadIdx.y;
int col = blockIdx.x*BLOCKSIZE + threadIdx.x;
__shared__ double As[BLOCKSIZE][BLOCKSIZE];
__shared__ double Bs[BLOCKSIZE][BLOCKSIZE];
// March the tile window across the K dimension (ceiling division).
for (int kk = 0; kk < (BLOCKSIZE + K - 1)/BLOCKSIZE; kk++) {
if (kk*BLOCKSIZE + threadIdx.x < K && row < M)
As[threadIdx.y][threadIdx.x] = d_A[row*K + kk*BLOCKSIZE + threadIdx.x];
else
As[threadIdx.y][threadIdx.x] = 0.0;
if (kk*BLOCKSIZE + threadIdx.y < K && col < N)
Bs[threadIdx.y][threadIdx.x] = d_B[(kk*BLOCKSIZE + threadIdx.y)*N + col];
else
Bs[threadIdx.y][threadIdx.x] = 0.0;
// Barrier: tiles fully loaded before any thread reads them.
__syncthreads();
for (int n = 0; n < BLOCKSIZE; ++n) CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
// Barrier: all reads done before the next iteration overwrites the tiles.
__syncthreads();
}
if (row < M && col < N) d_C[(row*N) +col] = CValue;
}
// Host wrapper for the static shared-memory tiled GEMM kernel.
// Same interface and transfer pattern as matMul_gpu1 but launches
// matMul_kernel_sm.
void matMul_gpu_sm(const double *A, const double *B, int M,int N,int K,double *C)
{
double *d_A,*d_B,*d_C;
CHECK(hipMalloc((void**)&d_A,M*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_B,N*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_C,M*N*sizeof(double)));
CHECK(hipMemcpy(d_A, A, M*K*sizeof(double), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, B, K*N*sizeof(double), hipMemcpyHostToDevice));
dim3 block (BLOCKSIZE,BLOCKSIZE);
// Ceiling division so the grid covers partial edge tiles of the MxN output.
dim3 grid ((N+block.x-1)/block.x,(M+block.y-1)/block.y);
// Invoke kernel
hipLaunchKernelGGL(( matMul_kernel_sm), dim3(grid), dim3(block), 0, 0, d_A, d_B,M,N,K,d_C);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
// Read C from device memory
hipMemcpy(C, d_C, M*N*sizeof(double),hipMemcpyDeviceToHost);
CHECK(hipGetLastError());
// Free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
// Tiled shared-memory GEMM variant reading B through its transpose:
// d_B_T is the NxK transpose of B, so B[k][col] is fetched as
// d_B_T[col*K + k]. Otherwise identical tiling/padding scheme to
// matMul_kernel_sm; requires blockDim == (BLOCKSIZE, BLOCKSIZE).
__global__ void matMul_kernel_sm_coa(double *d_A,double *d_B_T,int M, int N,int K,double *d_C){
double CValue = 0;
int row = blockIdx.y*BLOCKSIZE + threadIdx.y;
int col = blockIdx.x*BLOCKSIZE + threadIdx.x;
__shared__ double As[BLOCKSIZE][BLOCKSIZE];
__shared__ double Bs[BLOCKSIZE][BLOCKSIZE];
// March the tile window across the K dimension (ceiling division).
for (int kk = 0; kk < (BLOCKSIZE + K - 1)/BLOCKSIZE; kk++) {
if (kk*BLOCKSIZE + threadIdx.x < K && row < M)
As[threadIdx.y][threadIdx.x] = d_A[row*K + kk*BLOCKSIZE + threadIdx.x];
else
As[threadIdx.y][threadIdx.x] = 0.0;
if (kk*BLOCKSIZE + threadIdx.y < K && col < N)
Bs[threadIdx.y][threadIdx.x] = d_B_T[col*K + (kk*BLOCKSIZE + threadIdx.y)];
else
Bs[threadIdx.y][threadIdx.x] = 0.0;
// Barrier: tiles fully loaded before any thread reads them.
__syncthreads();
for (int n = 0; n < BLOCKSIZE; ++n) CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
// Barrier: all reads done before the next iteration overwrites the tiles.
__syncthreads();
}
if (row < M && col < N) d_C[(row*N) +col] = CValue;
}
// Host wrapper: C = A * B where B is first transposed on the device
// (KxN -> NxK) and the product is computed by matMul_kernel_sm_coa, which
// consumes the transposed B layout.
void matMul_gpu_sm_coa(const double *A, const double *B, int M,int N,int K,double *C)
{
double *d_A,*d_B_T,*d_B,*d_C;
CHECK(hipMalloc((void**)&d_A,M*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_B,N*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_B_T,N*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_C,M*N*sizeof(double)));
CHECK(hipMemcpy(d_A, A, M*K*sizeof(double), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, B, K*N*sizeof(double), hipMemcpyHostToDevice));
dim3 block (BLOCKSIZE,BLOCKSIZE);
dim3 grid ((N+block.x-1)/block.x,(M+block.y-1)/block.y);
// Transpose launch covers the N x K output of the transpose.
dim3 grid_B_T ((N+block.x-1)/block.x,(K+block.y-1)/block.y);
// printf("M %d, N %d, K %d grid %d %d\n",M,N,K,grid.y,grid.x );
// Invoke kernel
hipLaunchKernelGGL(( mat_transpose_kernel), dim3(grid_B_T), dim3(block), 0, 0, d_B, d_B_T, K, N);
CHECK(hipDeviceSynchronize());
hipLaunchKernelGGL(( matMul_kernel_sm_coa), dim3(grid), dim3(block), 0, 0, d_A, d_B_T,M,N,K,d_C);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
// Read C from device memory
hipMemcpy(C, d_C, M*N*sizeof(double),hipMemcpyDeviceToHost);
CHECK(hipGetLastError());
// Free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipFree(d_B_T);
}
// Host wrapper: C = A * B via hipBLAS Dgemm. Because BLAS is column-major
// and our buffers are row-major, the call computes B^T * A^T = (A*B)^T in
// column-major terms, which is exactly row-major C (standard trick).
// Fix: the hipblasDgemm status was previously discarded; it is now checked
// and reported like the hipblasCreate status (behavior on success unchanged).
void matMul_cublas(const double *A, const double *B, int M,int N,int K,double *C)
{
double *d_A,*d_B,*d_C;
CHECK(hipMalloc((void**)&d_A,M*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_B,N*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_C,M*N*sizeof(double)));
CHECK(hipMemcpy(d_A, A, M*K*sizeof(double), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, B, K*N*sizeof(double), hipMemcpyHostToDevice));
hipblasStatus_t stat;
hipblasHandle_t handle;
stat = hipblasCreate(&handle);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
}
const double alpha=1.0;
const double beta=0.0;
// Invoke kernel
stat = hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, M, K,&alpha,(const double *)d_B, N,(const double *)d_A, K,&beta,(double *)d_C, N);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("CUBLAS Dgemm failed\n");
}
CHECK(hipDeviceSynchronize());
hipblasDestroy(handle);
// Read C from device memory
hipMemcpy(C, d_C, M*N*sizeof(double),hipMemcpyDeviceToHost);
CHECK(hipGetLastError());
// Free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
// Tiled GEMM kernel using dynamically-sized shared memory: the launch must
// supply 2 * blockDim.x * blockDim.y * sizeof(double) bytes, which are split
// into an A tile followed by a B tile. Requires a square block
// (blockDim.x == blockDim.y), enforced by the assert. Tiling/padding scheme
// matches matMul_kernel_sm but with a runtime tile size.
__global__ void matMul_kernel_dsm(double *d_A,double *d_B,int M, int N,int K,double *d_C){
double CValue = 0;
// should be the same
assert(blockDim.x==blockDim.y);
const int block_dim=blockDim.x;
int row = blockIdx.y*block_dim + threadIdx.y;
int col = blockIdx.x*block_dim + threadIdx.x;
// Dynamic shared memory: first block_dim^2 doubles are As, the rest Bs.
extern __shared__ double Sm[];
double *As=Sm;
double *Bs=(double *)&Sm[block_dim*block_dim];
// March the tile window across the K dimension (ceiling division).
for (int kk = 0; kk < (block_dim + K - 1)/block_dim; kk++) {
if (kk*block_dim + threadIdx.x < K && row < M)
As[threadIdx.y*block_dim+threadIdx.x] = d_A[row*K + kk*block_dim + threadIdx.x];
else
As[threadIdx.y*block_dim+threadIdx.x] = 0.0;
if (kk*block_dim + threadIdx.y < K && col < N)
Bs[threadIdx.y*block_dim+threadIdx.x] = d_B[(kk*block_dim + threadIdx.y)*N + col];
else
Bs[threadIdx.y*block_dim+threadIdx.x] = 0.0;
// Barrier: tiles fully loaded before any thread reads them.
__syncthreads();
for (int n = 0; n < block_dim; ++n) CValue += As[threadIdx.y*block_dim+n] * Bs[n*block_dim+threadIdx.x];
// Barrier: all reads done before the next iteration overwrites the tiles.
__syncthreads();
}
if (row < M && col < N) d_C[(row*N) +col] = CValue;
}
// Host wrapper for the dynamic shared-memory tiled GEMM kernel.
// threads_block is a requested threads-per-block budget; it is rounded down
// to the largest perfect square (block_dim^2) and the kernel is launched
// with 2 * block_dim^2 doubles of dynamic shared memory (A tile + B tile).
void matMul_gpu_dsm(const double *A, const double *B, int M,int N,int K,double *C,int threads_block)
{
double *d_A,*d_B,*d_C;
CHECK(hipMalloc((void**)&d_A,M*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_B,N*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_C,M*N*sizeof(double)));
CHECK(hipMemcpy(d_A, A, M*K*sizeof(double), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, B, K*N*sizeof(double), hipMemcpyHostToDevice));
// Square block: block_dim = floor(sqrt(threads_block)).
int block_dim=(int)sqrt(threads_block);
threads_block=block_dim*block_dim;
dim3 block (block_dim,block_dim);
dim3 grid ((N+block.x-1)/block.x,(M+block.y-1)/block.y);
// Invoke kernel
hipLaunchKernelGGL(( matMul_kernel_dsm), dim3(grid), dim3(block),2*threads_block*sizeof(double), 0, d_A, d_B,M,N,K,d_C);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
// Read C from device memory
hipMemcpy(C, d_C, M*N*sizeof(double),hipMemcpyDeviceToHost);
CHECK(hipGetLastError());
// Free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
| 67c85663a344a5b6a7d3f2a3bb314acab61c9c93.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "tensor.h"
#include <assert.h>
#include "t_contract_d.h"
#include "../common.h"
#include <cuda_runtime.h>
#include "cublas_v2.h"
#define BLOCKSIZE 32
/* CPU reference GEMM: C = A * B.
 * A is M x K, B is K x N, C is M x N; all matrices row-major. */
void matMul(double *A,double *B,int M,int N, int K,double *C)
{
    for (int row = 0; row < M; row++) {
        for (int col = 0; col < N; col++) {
            double acc = 0.0;
            for (int k = 0; k < K; k++) {
                acc += A[row * K + k] * B[k * N + col];
            }
            C[row * N + col] = acc;
        }
    }
}
// Element-wise transpose of a rows x cols row-major matrix:
// mat_out[c * rows + r] = mat_in[r * cols + c].
// One thread per element; threads outside the matrix do nothing.
__global__ void mat_transpose_kernel(const double *mat_in, double *mat_out, int rows, int cols)
{
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= cols || row >= rows)
        return;
    mat_out[col * rows + row] = mat_in[row * cols + col];
}
// Host wrapper: transpose a rows x cols row-major matrix on the device.
// Allocates device buffers, copies the input up, launches
// mat_transpose_kernel with a 2D BLOCKSIZE x BLOCKSIZE grid, and copies the
// rows x cols result (now cols x rows logically) back into mat_out.
void mat_transpose_gpu(const double* mat_in, double* mat_out, int rows, int cols){
double *d_mat_in,*d_mat_out;
CHECK(cudaMalloc((void**)&d_mat_in,rows*cols*sizeof(double)));
CHECK(cudaMalloc((void**)&d_mat_out,rows*cols*sizeof(double)));
CHECK(cudaMemcpy(d_mat_in, mat_in, rows*cols*sizeof(double), cudaMemcpyHostToDevice));
dim3 block (BLOCKSIZE,BLOCKSIZE);
// Ceiling division so partial tiles at the edges are still covered.
dim3 grid ((cols+block.x-1)/block.x,(rows+block.y-1)/block.y);
// printf("M %d, N %d grid %d %d\n",rows,cols,grid.y,grid.x );
// Invoke kernel
mat_transpose_kernel<<<grid, block>>>(d_mat_in,d_mat_out,rows,cols);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
// Read C from device memory
cudaMemcpy(mat_out, d_mat_out, rows*cols*sizeof(double),cudaMemcpyDeviceToHost);
CHECK(cudaGetLastError());
// Free device memory
cudaFree(d_mat_in);
cudaFree(d_mat_out);
}
// Get a matrix element
// Reads (row, col) of a row-major matrix whose leading dimension (elements
// per row) is `stride`.
__device__ double GetElement(const double *A,int stride, int row, int col)
{
    const int offset = row * stride + col;
    return A[offset];
}
// Set a matrix element
// Writes `value` at (row, col) of a row-major matrix whose leading dimension
// (elements per row) is `stride`.
__device__ void SetElement(double *A, int row, int col,int stride,
                           double value)
{
    const int offset = row * stride + col;
    A[offset] = value;
}
// Naive GEMM kernel: one thread computes one element of C = A * B
// (A: MxK, B: KxN, C: MxN, all row-major). Out-of-range threads exit early.
__global__ void matMul_kernel1(double *d_A,double *d_B,int M, int N,int K,double *d_C){
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= M || col >= N)
        return;
    double acc = 0.0;
    for (int k = 0; k < K; k++) {
        acc += d_A[row * K + k] * d_B[k * N + col];
    }
    d_C[row * N + col] = acc;
}
// Host wrapper for the naive GEMM kernel: C = A * B with A (MxK), B (KxN),
// C (MxN), all row-major. Handles device allocation, transfers, a
// BLOCKSIZE x BLOCKSIZE 2D launch, and cleanup.
void matMul_gpu1(const double *A, const double *B, int M,int N,int K,double *C)
{
double *d_A,*d_B,*d_C;
CHECK(cudaMalloc((void**)&d_A,M*K*sizeof(double)));
CHECK(cudaMalloc((void**)&d_B,N*K*sizeof(double)));
CHECK(cudaMalloc((void**)&d_C,M*N*sizeof(double)));
CHECK(cudaMemcpy(d_A, A, M*K*sizeof(double), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, B, K*N*sizeof(double), cudaMemcpyHostToDevice));
dim3 block (BLOCKSIZE,BLOCKSIZE);
// Ceiling division: grid covers the full MxN output including edge tiles.
dim3 grid ((N+block.x-1)/block.x,(M+block.y-1)/block.y);
// printf("M %d, N %d grid %d %d\n",M,N,grid.y,grid.x );
// Invoke kernel
matMul_kernel1<<<grid, block>>>(d_A, d_B,M,N,K,d_C);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
// Read C from device memory
cudaMemcpy(C, d_C, M*N*sizeof(double),cudaMemcpyDeviceToHost);
CHECK(cudaGetLastError());
// Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
// GEMM kernel variant that reads A through its transpose: d_A_T is the KxM
// transpose of A, so element A[row][k] is fetched as d_A_T[k*M + row].
// One thread per element of C (MxN, row-major); out-of-range threads exit.
__global__ void matMul_kernel2(const double *d_A_T,const double *d_B,int M, int N,int K,double *d_C){
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= M || col >= N)
        return;
    double acc = 0.0;
    for (int k = 0; k < K; k++) {
        acc += d_A_T[k * M + row] * d_B[k * N + col];
    }
    d_C[row * N + col] = acc;
}
// Host wrapper: C = A * B computed via a transposed copy of A.
// First transposes A (MxK -> KxM) on the device with mat_transpose_kernel,
// then runs matMul_kernel2 which consumes the transposed layout.
// Fix: d_A_T was allocated but never freed, leaking M*K doubles of device
// memory per call (the analogous matMul_gpu_sm_coa does free its d_B_T).
void matMul_gpu2(const double *A, const double *B, int M,int N,int K,double *C)
{
double *d_A,*d_B,*d_A_T,*d_C;
CHECK(cudaMalloc((void**)&d_A,M*K*sizeof(double)));
CHECK(cudaMalloc((void**)&d_B,N*K*sizeof(double)));
CHECK(cudaMalloc((void**)&d_C,M*N*sizeof(double)));
CHECK(cudaMalloc((void**)&d_A_T,M*K*sizeof(double)));
CHECK(cudaMemcpy(d_A, A, M*K*sizeof(double), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, B, K*N*sizeof(double), cudaMemcpyHostToDevice));
dim3 block (BLOCKSIZE,BLOCKSIZE);
dim3 grid ((N+block.x-1)/block.x,(M+block.y-1)/block.y);
// Transpose launch covers the K x M output of the transpose.
dim3 grid_A_T ((K+block.x-1)/block.x,(M+block.y-1)/block.y);
// Invoke kernel
mat_transpose_kernel<<<grid_A_T, block>>>(d_A, d_A_T, M, K);
CHECK(cudaDeviceSynchronize());
matMul_kernel2<<<grid, block>>>(d_A_T, d_B,M,N,K,d_C);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
// Read C from device memory
cudaMemcpy(C, d_C, M*N*sizeof(double),cudaMemcpyDeviceToHost);
CHECK(cudaGetLastError());
// Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaFree(d_A_T);  // was leaked before this fix
}
// Shared-memory tiled matrix multiply: C = A * B with A (M x K), B (K x N),
// C (M x N), row-major. Each BLOCKSIZE x BLOCKSIZE thread block walks the K
// dimension one tile at a time, staging a tile of A and a tile of B in
// shared memory per step. Requires blockDim == (BLOCKSIZE, BLOCKSIZE).
__global__ void matMul_kernel_sm(double *d_A,double *d_B,int M, int N,int K,double *d_C){
double CValue = 0;
int row = blockIdx.y*BLOCKSIZE + threadIdx.y;
int col = blockIdx.x*BLOCKSIZE + threadIdx.x;
__shared__ double As[BLOCKSIZE][BLOCKSIZE];
__shared__ double Bs[BLOCKSIZE][BLOCKSIZE];
// ceil(K / BLOCKSIZE) tiles along the shared dimension
for (int kk = 0; kk < (BLOCKSIZE + K - 1)/BLOCKSIZE; kk++) {
// load this thread's element of the A tile, zero-padded outside the matrix
if (kk*BLOCKSIZE + threadIdx.x < K && row < M)
As[threadIdx.y][threadIdx.x] = d_A[row*K + kk*BLOCKSIZE + threadIdx.x];
else
As[threadIdx.y][threadIdx.x] = 0.0;
// load this thread's element of the B tile, zero-padded likewise
if (kk*BLOCKSIZE + threadIdx.y < K && col < N)
Bs[threadIdx.y][threadIdx.x] = d_B[(kk*BLOCKSIZE + threadIdx.y)*N + col];
else
Bs[threadIdx.y][threadIdx.x] = 0.0;
// both tiles must be fully written before any thread reads them
__syncthreads();
for (int n = 0; n < BLOCKSIZE; ++n) CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
// keep fast threads from overwriting tiles still being read
__syncthreads();
}
if (row < M && col < N) d_C[(row*N) +col] = CValue;
}
// Host driver for the static shared-memory tiled kernel. C = A * B with
// A (M x K), B (K x N), C (M x N), row-major. All CUDA API calls are now
// checked (the original left the D2H copy and the frees unchecked).
void matMul_gpu_sm(const double *A, const double *B, int M,int N,int K,double *C)
{
    double *d_A,*d_B,*d_C;
    CHECK(cudaMalloc((void**)&d_A,M*K*sizeof(double)));
    CHECK(cudaMalloc((void**)&d_B,N*K*sizeof(double)));
    CHECK(cudaMalloc((void**)&d_C,M*N*sizeof(double)));
    CHECK(cudaMemcpy(d_A, A, M*K*sizeof(double), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, B, K*N*sizeof(double), cudaMemcpyHostToDevice));
    dim3 block (BLOCKSIZE,BLOCKSIZE);
    dim3 grid ((N+block.x-1)/block.x,(M+block.y-1)/block.y);
    // Invoke kernel
    matMul_kernel_sm<<<grid, block>>>(d_A, d_B,M,N,K,d_C);
    CHECK(cudaGetLastError());        // launch-configuration errors
    CHECK(cudaDeviceSynchronize());   // asynchronous execution errors
    // Read C from device memory (previously unchecked)
    CHECK(cudaMemcpy(C, d_C, M*N*sizeof(double),cudaMemcpyDeviceToHost));
    // Free device memory (previously unchecked)
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_C));
}
// Tiled shared-memory matmul reading B from its transpose: C = A * B where
// d_B_T holds B^T (N x K, row-major). Element B(k, col) is read as
// d_B_T[col*K + k]. Zero-padding and barrier structure as in matMul_kernel_sm.
__global__ void matMul_kernel_sm_coa(double *d_A,double *d_B_T,int M, int N,int K,double *d_C){
double CValue = 0;
int row = blockIdx.y*BLOCKSIZE + threadIdx.y;
int col = blockIdx.x*BLOCKSIZE + threadIdx.x;
__shared__ double As[BLOCKSIZE][BLOCKSIZE];
__shared__ double Bs[BLOCKSIZE][BLOCKSIZE];
// walk the K dimension one BLOCKSIZE-wide tile at a time
for (int kk = 0; kk < (BLOCKSIZE + K - 1)/BLOCKSIZE; kk++) {
if (kk*BLOCKSIZE + threadIdx.x < K && row < M)
As[threadIdx.y][threadIdx.x] = d_A[row*K + kk*BLOCKSIZE + threadIdx.x];
else
As[threadIdx.y][threadIdx.x] = 0.0;
// B element (kk*BLOCKSIZE+ty, col) fetched from the transposed layout
if (kk*BLOCKSIZE + threadIdx.y < K && col < N)
Bs[threadIdx.y][threadIdx.x] = d_B_T[col*K + (kk*BLOCKSIZE + threadIdx.y)];
else
Bs[threadIdx.y][threadIdx.x] = 0.0;
// tiles fully written before use
__syncthreads();
for (int n = 0; n < BLOCKSIZE; ++n) CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
// tiles fully consumed before the next load overwrites them
__syncthreads();
}
if (row < M && col < N) d_C[(row*N) +col] = CValue;
}
// Host driver for the tiled kernel that consumes B^T: transposes B on the
// device first, then multiplies. C = A * B with A (M x K), B (K x N),
// C (M x N), row-major. All CUDA API calls are now checked (the original
// left the D2H copy and the frees unchecked).
void matMul_gpu_sm_coa(const double *A, const double *B, int M,int N,int K,double *C)
{
    double *d_A,*d_B_T,*d_B,*d_C;
    CHECK(cudaMalloc((void**)&d_A,M*K*sizeof(double)));
    CHECK(cudaMalloc((void**)&d_B,N*K*sizeof(double)));
    CHECK(cudaMalloc((void**)&d_B_T,N*K*sizeof(double)));
    CHECK(cudaMalloc((void**)&d_C,M*N*sizeof(double)));
    CHECK(cudaMemcpy(d_A, A, M*K*sizeof(double), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, B, K*N*sizeof(double), cudaMemcpyHostToDevice));
    dim3 block (BLOCKSIZE,BLOCKSIZE);
    dim3 grid ((N+block.x-1)/block.x,(M+block.y-1)/block.y);
    // separate grid for the transpose of B (K x N input)
    dim3 grid_B_T ((N+block.x-1)/block.x,(K+block.y-1)/block.y);
    // Invoke kernels: transpose B, then multiply
    mat_transpose_kernel<<<grid_B_T, block>>>(d_B, d_B_T, K, N);
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());
    matMul_kernel_sm_coa<<<grid, block>>>(d_A, d_B_T,M,N,K,d_C);
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());
    // Read C from device memory (previously unchecked)
    CHECK(cudaMemcpy(C, d_C, M*N*sizeof(double),cudaMemcpyDeviceToHost));
    // Free device memory (previously unchecked)
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_C));
    CHECK(cudaFree(d_B_T));
}
// Computes C = A * B via cuBLAS Dgemm. A (M x K), B (K x N), C (M x N),
// row-major on the host. cuBLAS is column-major, so we compute
// C^T = B^T * A^T by passing the operands swapped with no transposition;
// the result then lands in C in row-major order.
// Fix: the Dgemm status was previously ignored; the D2H copy and the frees
// are now checked as well.
void matMul_cublas(const double *A, const double *B, int M,int N,int K,double *C)
{
    double *d_A,*d_B,*d_C;
    CHECK(cudaMalloc((void**)&d_A,M*K*sizeof(double)));
    CHECK(cudaMalloc((void**)&d_B,N*K*sizeof(double)));
    CHECK(cudaMalloc((void**)&d_C,M*N*sizeof(double)));
    CHECK(cudaMemcpy(d_A, A, M*K*sizeof(double), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, B, K*N*sizeof(double), cudaMemcpyHostToDevice));
    cublasStatus_t stat;
    cublasHandle_t handle;
    stat = cublasCreate(&handle);
    if (stat != CUBLAS_STATUS_SUCCESS) {
    printf ("CUBLAS initialization failed\n");
    }
    const double alpha=1.0;
    const double beta=0.0;
    // Invoke kernel: C^T (N x M) = B^T (N x K, ld N) * A^T (K x M, ld K)
    stat = cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, M, K,&alpha,(const double *)d_B, N,(const double *)d_A, K,&beta,(double *)d_C, N);
    if (stat != CUBLAS_STATUS_SUCCESS) {
    printf ("CUBLAS Dgemm failed\n");
    }
    CHECK(cudaDeviceSynchronize());
    cublasDestroy(handle);
    // Read C from device memory (previously unchecked)
    CHECK(cudaMemcpy(C, d_C, M*N*sizeof(double),cudaMemcpyDeviceToHost));
    // Free device memory (previously unchecked)
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_C));
}
// Tiled matmul with dynamically sized shared memory. Tiles are
// block_dim x block_dim where block_dim == blockDim.x == blockDim.y
// (asserted below). Launch with 2*blockDim.x*blockDim.y*sizeof(double)
// bytes of dynamic shared memory: the A tile followed by the B tile.
__global__ void matMul_kernel_dsm(double *d_A,double *d_B,int M, int N,int K,double *d_C){
double CValue = 0;
// block must be square for the tile indexing below
assert(blockDim.x==blockDim.y);
const int block_dim=blockDim.x;
int row = blockIdx.y*block_dim + threadIdx.y;
int col = blockIdx.x*block_dim + threadIdx.x;
// single extern buffer partitioned into the A tile then the B tile
extern __shared__ double Sm[];
double *As=Sm;
double *Bs=(double *)&Sm[block_dim*block_dim];
// ceil(K / block_dim) tiles along the shared dimension
for (int kk = 0; kk < (block_dim + K - 1)/block_dim; kk++) {
// stage one zero-padded tile of A and one of B
if (kk*block_dim + threadIdx.x < K && row < M)
As[threadIdx.y*block_dim+threadIdx.x] = d_A[row*K + kk*block_dim + threadIdx.x];
else
As[threadIdx.y*block_dim+threadIdx.x] = 0.0;
if (kk*block_dim + threadIdx.y < K && col < N)
Bs[threadIdx.y*block_dim+threadIdx.x] = d_B[(kk*block_dim + threadIdx.y)*N + col];
else
Bs[threadIdx.y*block_dim+threadIdx.x] = 0.0;
// tiles fully written before any thread reads them
__syncthreads();
for (int n = 0; n < block_dim; ++n) CValue += As[threadIdx.y*block_dim+n] * Bs[n*block_dim+threadIdx.x];
// tiles fully consumed before the next iteration overwrites them
__syncthreads();
}
if (row < M && col < N) d_C[(row*N) +col] = CValue;
}
// Host driver for the dynamic shared-memory kernel. threads_block is rounded
// down to the nearest perfect square so the block is square, as the kernel
// asserts. C = A * B with A (M x K), B (K x N), C (M x N), row-major.
// All CUDA API calls are now checked (the original left the D2H copy and
// the frees unchecked).
void matMul_gpu_dsm(const double *A, const double *B, int M,int N,int K,double *C,int threads_block)
{
    double *d_A,*d_B,*d_C;
    CHECK(cudaMalloc((void**)&d_A,M*K*sizeof(double)));
    CHECK(cudaMalloc((void**)&d_B,N*K*sizeof(double)));
    CHECK(cudaMalloc((void**)&d_C,M*N*sizeof(double)));
    CHECK(cudaMemcpy(d_A, A, M*K*sizeof(double), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, B, K*N*sizeof(double), cudaMemcpyHostToDevice));
    // largest square block not exceeding the requested thread count
    int block_dim=(int)sqrt(threads_block);
    threads_block=block_dim*block_dim;
    dim3 block (block_dim,block_dim);
    dim3 grid ((N+block.x-1)/block.x,(M+block.y-1)/block.y);
    // Invoke kernel with room for both tiles in dynamic shared memory
    matMul_kernel_dsm<<<grid, block,2*threads_block*sizeof(double)>>>(d_A, d_B,M,N,K,d_C);
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());
    // Read C from device memory (previously unchecked)
    CHECK(cudaMemcpy(C, d_C, M*N*sizeof(double),cudaMemcpyDeviceToHost));
    // Free device memory (previously unchecked)
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_C));
}
|
dab43a6ccf162357dcdb34da0798adad78404181.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <cuda_utils.cuh>
#include <decoupled_lookback.cuh>
#include "test_utils.h"
namespace MLCommon {
// Exercises DecoupledLookBack: each block contributes a count of 1 (carried
// by its last thread); the returned prefix is stored to out[blockIdx.x] by
// that same thread. The test expects out[b] == b (see devArrMatchCustom).
template <int TPB>
__global__ void dlbTestKernel(void *workspace, int len, int *out) {
DecoupledLookBack<int> dlb(workspace);
// only the last thread of the block carries a non-zero count
int count = threadIdx.x == blockDim.x - 1 ? 1 : 0;
auto prefix = dlb(count);
if (threadIdx.x == blockDim.x - 1) out[blockIdx.x] = prefix;
}
// Launches dlbTestKernel with `len` blocks over a zero-initialized
// decoupled-lookback workspace. `out` must be a device buffer of at least
// `len` ints; it receives one prefix per block.
void dlbTest(int len, int *out) {
constexpr int TPB = 256;
int nblks = len;  // one output element per block
size_t workspaceSize = DecoupledLookBack<int>::computeWorkspaceSize(nblks);
char *workspace;
allocate(workspace, workspaceSize);
// the lookback state must start zeroed
CUDA_CHECK(hipMemset(workspace, 0, workspaceSize));
hipLaunchKernelGGL(( dlbTestKernel<TPB>), dim3(nblks), dim3(TPB), 0, 0, workspace, len, out);
CUDA_CHECK(hipPeekAtLastError());
CUDA_CHECK(hipFree(workspace));
}
// Test parameters: number of elements, which is also the number of blocks.
struct DlbInputs {
int len;
};
// Stream operator required by gtest for parameterized types.
// NOTE(review): intentionally prints nothing, presumably to keep test
// listings terse -- consider emitting dims.len for nicer failure output.
::std::ostream &operator<<(::std::ostream &os, const DlbInputs &dims) {
return os;
}
// Fixture: allocates a device buffer of `len` ints and runs dlbTest in
// SetUp; the test body then validates `out`.
class DlbTest : public ::testing::TestWithParam<DlbInputs> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<DlbInputs>::GetParam();
int len = params.len;
allocate(out, len);  // device allocation via project helper
dlbTest(len, out);
}
void TearDown() override { CUDA_CHECK(hipFree(out)); }
protected:
DlbInputs params;
int *out;  // device buffer: one prefix per launched block
};
// Copies `size` elements of the device array `actual` to the host and checks
// via eq_compare that element i equals (T)i (the expected exclusive prefix
// of block i). Returns a gtest assertion result naming the first mismatch.
template <typename T, typename L>
::testing::AssertionResult devArrMatchCustom(const T *actual, size_t size,
L eq_compare,
hipStream_t stream = 0) {
std::vector<T> act_h(size);
updateHost<T>(&(act_h[0]), actual, size, stream);
// the copy is stream-ordered; wait before reading on the host
CUDA_CHECK(hipStreamSynchronize(stream));
for (size_t i(0); i < size; ++i) {
auto act = act_h[i];
auto expected = (T)i;
if (!eq_compare(expected, act)) {
return ::testing::AssertionFailure()
<< "actual=" << act << " != expected=" << expected << " @" << i;
}
}
return ::testing::AssertionSuccess();
}
// Block counts to exercise, from trivial up to multi-partition sizes.
const std::vector<DlbInputs> inputs = {{4}, {16}, {64}, {256}, {2048}};
// out[i] must equal i for every launched block i.
TEST_P(DlbTest, Result) {
ASSERT_TRUE(devArrMatchCustom(out, params.len, Compare<int>()));
}
INSTANTIATE_TEST_CASE_P(DlbTests, DlbTest, ::testing::ValuesIn(inputs));
} // end namespace MLCommon
| dab43a6ccf162357dcdb34da0798adad78404181.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <cuda_utils.cuh>
#include <decoupled_lookback.cuh>
#include "test_utils.h"
namespace MLCommon {
// Exercises DecoupledLookBack: each block contributes a count of 1 (carried
// by its last thread); the returned prefix is stored to out[blockIdx.x] by
// that same thread. The test expects out[b] == b (see devArrMatchCustom).
template <int TPB>
__global__ void dlbTestKernel(void *workspace, int len, int *out) {
DecoupledLookBack<int> dlb(workspace);
// only the last thread of the block carries a non-zero count
int count = threadIdx.x == blockDim.x - 1 ? 1 : 0;
auto prefix = dlb(count);
if (threadIdx.x == blockDim.x - 1) out[blockIdx.x] = prefix;
}
// Launches dlbTestKernel with `len` blocks over a zero-initialized
// decoupled-lookback workspace. `out` must be a device buffer of at least
// `len` ints; it receives one prefix per block.
void dlbTest(int len, int *out) {
constexpr int TPB = 256;
int nblks = len;  // one output element per block
size_t workspaceSize = DecoupledLookBack<int>::computeWorkspaceSize(nblks);
char *workspace;
allocate(workspace, workspaceSize);
// the lookback state must start zeroed
CUDA_CHECK(cudaMemset(workspace, 0, workspaceSize));
dlbTestKernel<TPB><<<nblks, TPB>>>(workspace, len, out);
CUDA_CHECK(cudaPeekAtLastError());
CUDA_CHECK(cudaFree(workspace));
}
// Test parameters: number of elements, which is also the number of blocks.
struct DlbInputs {
int len;
};
// Stream operator required by gtest for parameterized types.
// NOTE(review): intentionally prints nothing, presumably to keep test
// listings terse -- consider emitting dims.len for nicer failure output.
::std::ostream &operator<<(::std::ostream &os, const DlbInputs &dims) {
return os;
}
// Fixture: allocates a device buffer of `len` ints and runs dlbTest in
// SetUp; the test body then validates `out`.
class DlbTest : public ::testing::TestWithParam<DlbInputs> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<DlbInputs>::GetParam();
int len = params.len;
allocate(out, len);  // device allocation via project helper
dlbTest(len, out);
}
void TearDown() override { CUDA_CHECK(cudaFree(out)); }
protected:
DlbInputs params;
int *out;  // device buffer: one prefix per launched block
};
// Copies `size` elements of the device array `actual` to the host and checks
// via eq_compare that element i equals (T)i (the expected exclusive prefix
// of block i). Returns a gtest assertion result naming the first mismatch.
template <typename T, typename L>
::testing::AssertionResult devArrMatchCustom(const T *actual, size_t size,
L eq_compare,
cudaStream_t stream = 0) {
std::vector<T> act_h(size);
updateHost<T>(&(act_h[0]), actual, size, stream);
// the copy is stream-ordered; wait before reading on the host
CUDA_CHECK(cudaStreamSynchronize(stream));
for (size_t i(0); i < size; ++i) {
auto act = act_h[i];
auto expected = (T)i;
if (!eq_compare(expected, act)) {
return ::testing::AssertionFailure()
<< "actual=" << act << " != expected=" << expected << " @" << i;
}
}
return ::testing::AssertionSuccess();
}
// Block counts to exercise, from trivial up to multi-partition sizes.
const std::vector<DlbInputs> inputs = {{4}, {16}, {64}, {256}, {2048}};
// out[i] must equal i for every launched block i.
TEST_P(DlbTest, Result) {
ASSERT_TRUE(devArrMatchCustom(out, params.len, Compare<int>()));
}
INSTANTIATE_TEST_CASE_P(DlbTests, DlbTest, ::testing::ValuesIn(inputs));
} // end namespace MLCommon
|
b2e59b334ed1d0edc10589ba4a25837db040d5fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Descrio: Multiplicao de matrizes em paralelo usando GPU
* Entrada: Dimenso das matrizes e dos blocos de threads
* Sada: Tempos de execuo
*/
#include <stdio.h>
#include <sys/time.h>
#include <iostream>
using namespace std;
/**
* O agumento deve ser double
*/
#define GET_TIME(now) { \
struct timespec time; \
clock_gettime(CLOCK_MONOTONIC_RAW, &time); \
now = time.tv_sec + time.tv_nsec/1000000000.0; \
}
/**
* Para checar erros em chamadas Cuda
*/
#define CUDA_SAFE_CALL(call) { \
hipError_t err = call; \
if(err != hipSuccess) { \
fprintf(stderr,"Erro no arquivo '%s', linha %i: %s.\n",__FILE__, __LINE__,hipGetErrorString(err)); \
exit(EXIT_FAILURE); } \
}
/**
* Funcao para execucao sequencial
*/
/**
 * Sequential CPU reference: c = a * b with a (mA x nAmB), b (nAmB x nB)
 * and c (mA x nB), all row-major.
 */
void multMatSeq(double *a, double *b, double *c, int mA, int nAmB, int nB) {
    for (int row = 0; row < mA; row++) {
        for (int col = 0; col < nB; col++) {
            double acc = 0.0;
            for (int k = 0; k < nAmB; k++)
                acc += a[row * nAmB + k] * b[k * nB + col];
            c[row * nB + col] = acc;
        }
    }
}
/**
* Kernel para execucao paralela em CUDA
*/
// Tiled matmul kernel: c = a * b with a (mA x nAmB), b (nAmB x nB).
// Needs 2*blockDim.x*blockDim.y*sizeof(double) bytes of dynamic shared
// memory (A tile followed by B tile). The launcher always uses square
// blocks; the tile indexing assumes blockDim.x == blockDim.y -- TODO confirm.
__global__ void multMatPar(double *a, double *b, double *c, int mA, int nAmB, int nB) {
// global thread coordinates
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// local (in-block) thread coordinates
int i_bloco = threadIdx.x;
int j_bloco = threadIdx.y;
extern __shared__ double mat_sub[];
// shared memory for the sub-matrix of A
double* Asub = (double*) mat_sub;
// shared memory for the sub-matrix of B
double* Bsub= (double*) &Asub[blockDim.x*blockDim.y];
double valor = 0;
for(int passo=0; passo<nAmB; passo+=blockDim.y) {
// stage zero-padded tiles of A and B
if (i < mA && (passo+j_bloco) < nAmB)
Asub[i_bloco*blockDim.y+j_bloco] = a[i*nAmB+passo+j_bloco];
else
Asub[i_bloco*blockDim.y+j_bloco] = 0;
if ((passo+i_bloco) < nAmB && j < nB)
Bsub[i_bloco*blockDim.y+j_bloco] = b[(passo+i_bloco)*nB+j];
else
Bsub[i_bloco*blockDim.y+j_bloco] = 0;
// tiles fully written before any thread reads them
__syncthreads();
if (i < mA && j < nB)
for (int k = 0; k < blockDim.y; k++) {
valor += Asub[i_bloco*blockDim.y+k] * Bsub[k*blockDim.y+j_bloco];
}
// tiles fully consumed before the next iteration overwrites them
__syncthreads();
}
if (i < mA && j < nB)
c[i*nB+j] = valor;
}
/**
* Funo que aloca espaco para uma matriz e preenche seus valores
* Entrada: matriz de entrada, dimensoes da matriz
* Sada: retorna 1 se a matriz foi preenchida com sucesso e 0 caso contrario
*/
/**
 * Allocates a `linhas` x `colunas` row-major matrix into *mat and fills
 * every entry with 1.5.
 * Returns 1 on success, 0 if the allocation failed.
 */
int preencheMatriz(double **mat, int linhas, int colunas) {
    // allocate memory for the matrix
    *mat = (double*) malloc(sizeof(double) * linhas * colunas);
    // Bug fix: the original tested `mat` (the out-parameter, never NULL here)
    // instead of `*mat` (the malloc result), so allocation failure was
    // silently ignored.
    if (*mat == NULL) return 0;
    // fill the matrix
    for (int i = 0; i < linhas; i++) {
        for (int j = 0; j < colunas; j++) {
            (*mat)[i * colunas + j] = 1.5;
        }
    }
    return 1;
}
// Element-wise comparison of two m x n matrices with absolute tolerance
// 1e-5; prints the first mismatching pair and terminates the program if
// the matrices disagree.
void checkResults(double *mat1, double *mat2, int m, int n){
    for (int row = 0; row < m; row++) {
        for (int col = 0; col < n; col++) {
            double diff = mat1[row*n + col] - mat2[row*n + col];
            if (fabs(diff) > 1e-5) {
                std::cerr << "Resultado incorreto em " << row << " x " << col << " -> " << mat1[row*n+col] << " " << mat2[row*n+col] << std::endl;
                exit(EXIT_FAILURE);
            }
        }
    }
}
/**
* Imprime os resultados do programa
*/
// Prints the run's dimensions and timings, either as one CSV line
// (csv == true, the default) or as a human-readable block.
// delta_eventos is in milliseconds (event timing), hence the /1000;
// all other times are already in seconds.
// NOTE(review): accented characters in the literals below are mojibake-d
// ("Dimenses" etc.) -- a source-encoding issue, left untouched here.
void printResults(unsigned int mA, unsigned int nA, unsigned int mB, unsigned int nB, unsigned int blockLines, unsigned int blockColumns, double tempoSeq, float delta_eventos, double initialParTime, double finalParTime, bool csv = true){
if (csv) {
cout << mA << ";" << nA << ";" << mB << ";" << nB << ";" << blockLines << ";" << blockColumns << ";" << tempoSeq << ";" << delta_eventos/1000 << ";" << initialParTime << ";" << finalParTime << ";" << endl;
} else {
cout << "Dimenses da matriz A = " << mA << " x " << nA << endl
<< "Dimenses da matriz B = " << mB << " x " << nB << endl
<< "Dimenses dos blocos = " << blockLines << " x " << blockColumns << endl
<< "Tempo sequencial = "<< tempoSeq <<" seg" << endl
<< "Tempo paralelo kernel = "<< delta_eventos/1000 << " seg" << endl
<< "Tempo paralelo begin = "<< initialParTime <<" seg" << endl
<< "Tempo paralelo end = "<< finalParTime <<" seg" << endl
<< "Tempo paralelo total = "<< initialParTime+(delta_eventos/1000)+finalParTime <<" seg" << endl;
}
}
//funcao principal
// Entry point: parses matrix and block dimensions from argv, runs the
// sequential reference and the GPU kernel, checks the results agree and
// prints the timing breakdown.
int main(int argc, char** argv) {
double *h_a, *h_b, *h_c, *h_c_seq; // host matrices
double *d_a, *d_b, *d_c; // device matrices
// timing bookkeeping
double begin, end, initialParTime, finalParTime, tempoSeq;
hipEvent_t start, stop;
// input data
unsigned int mA, nA, mB, nB; // dimensions of the input matrices
long int bytesA, bytesB, bytesC; // bytes per matrix
// thread-block dimensions
unsigned int blockLines, blockColumns;
// read and validate the command-line arguments
if(argc < 6) {
cerr << "Digite: "<< argv[0] <<" <n de linhas da matriz A> <n de colunas da matriz A> <n de linhas da matriz B> <n de colunas da matriz B> <n de linhas e colunas dos blocos>" << endl;
exit(EXIT_FAILURE);
}
// matrix dimensions and block size (blocks are square: argv[5] used twice)
mA = atol(argv[1]);
nA = atol(argv[2]);
mB = atol(argv[3]);
nB = atol(argv[4]);
blockLines = atol(argv[5]);
blockColumns = atol(argv[5]);
if (nA != mB) {
cerr << "Impossvel executar multiplicao das matrizes. Nmero de colunas da matriz A ("<< nA <<") no bate com o nmero de colunas da matriz B ("<< mB <<")" << endl;
exit(EXIT_FAILURE);
}
// size of each matrix in bytes
bytesA = mA*nA*sizeof(double);
bytesB = mB*nB*sizeof(double);
bytesC = mA*nB*sizeof(double);
// allocate and fill input matrix A
if (preencheMatriz(&h_a, mA, nA) == 0) {
cerr << "Erro de preenchimento da matriz de entrada A" << endl;
exit(EXIT_FAILURE);
}
// allocate and fill input matrix B
if (preencheMatriz(&h_b, mB, nB) == 0) {
cerr << "Erro de preenchimento da matriz de entrada B" << endl;
exit(EXIT_FAILURE);
}
// allocate the parallel output matrix
h_c = (double*) malloc(bytesC);
if (h_c == NULL) {
cerr << "Erro de alocacao da matriz de saida" << endl;
exit(EXIT_FAILURE);
}
// allocate the sequential output matrix
h_c_seq = (double*) malloc(bytesC);
if (h_c_seq == NULL) {
cerr << "Erro de alocacao da matriz de saida" << endl;
exit(EXIT_FAILURE);
}
//!!! ------------------------ sequential execution ---------------------------------- !!!//
GET_TIME(begin);
multMatSeq(h_a, h_b, h_c_seq, mA, nA, nB);
GET_TIME(end);
tempoSeq = end-begin; // sequential time, in seconds
//!!! ------------------------ parallel execution ------------------------------------ !!!//
GET_TIME(begin);
// allocate the matrices on the GPU
CUDA_SAFE_CALL(hipMalloc((void**) &d_a, bytesA));
CUDA_SAFE_CALL(hipMalloc((void**) &d_b, bytesB));
CUDA_SAFE_CALL(hipMalloc((void**) &d_c, bytesC));
// copy the input matrices from CPU to GPU (host to device)
CUDA_SAFE_CALL(hipMemcpy(d_a, h_a, bytesA, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_b, h_b, bytesB, hipMemcpyHostToDevice));
// launch configuration with fixed-size blocks
dim3 threadsBloco = {blockLines, blockColumns, 1};
dim3 blocosGrade = {(mA + threadsBloco.x - 1)/threadsBloco.x, (nB + threadsBloco.y - 1)/threadsBloco.y, 1};
int tamMemCompartilhada = blockLines*blockColumns*8*2; // two double tiles (8 bytes per element)
GET_TIME(end);
initialParTime = end-begin; // parallel set-up time, in seconds
// launch the kernel, timed with events
CUDA_SAFE_CALL(hipEventCreate(&start));
CUDA_SAFE_CALL(hipEventCreate(&stop));
CUDA_SAFE_CALL(hipEventRecord(start));
hipLaunchKernelGGL(( multMatPar), dim3(blocosGrade), dim3(threadsBloco), tamMemCompartilhada, 0, d_a, d_b, d_c, mA, nA, nB);
CUDA_SAFE_CALL(hipGetLastError());
CUDA_SAFE_CALL(hipEventRecord(stop));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
float delta_eventos = 0;
CUDA_SAFE_CALL(hipEventElapsedTime(&delta_eventos, start, stop));
// copy the result from GPU to CPU (device to host)
GET_TIME(begin);
// NOTE(review): missing ';' below is benign because the macro expands to a block
CUDA_SAFE_CALL(hipMemcpy(h_c, d_c, bytesC, hipMemcpyDeviceToHost))
GET_TIME(end);
finalParTime = end-begin; // parallel tear-down time, in seconds
checkResults(h_c_seq, h_c, mA, nB);
// free GPU memory
CUDA_SAFE_CALL(hipFree(d_a));
CUDA_SAFE_CALL(hipFree(d_b));
CUDA_SAFE_CALL(hipFree(d_c));
// free CPU memory
// NOTE(review): h_c_seq is never freed (leaked until process exit) and the
// events are never destroyed -- worth fixing.
free(h_a);
free(h_b);
free(h_c);
//------------------------------- print the execution times ----------------------//
printResults(mA, nA, mB, nB, blockLines, blockColumns, tempoSeq, delta_eventos, initialParTime, finalParTime);
return 0;
}
| b2e59b334ed1d0edc10589ba4a25837db040d5fc.cu | /**
* Descrição: Multiplicação de matrizes em paralelo usando GPU
* Entrada: Dimensão das matrizes e dos blocos de threads
* Saída: Tempos de execução
*/
#include <stdio.h>
#include <sys/time.h>
#include <iostream>
using namespace std;
/**
* O agumento deve ser double
*/
#define GET_TIME(now) { \
struct timespec time; \
clock_gettime(CLOCK_MONOTONIC_RAW, &time); \
now = time.tv_sec + time.tv_nsec/1000000000.0; \
}
/**
* Para checar erros em chamadas Cuda
*/
#define CUDA_SAFE_CALL(call) { \
cudaError_t err = call; \
if(err != cudaSuccess) { \
fprintf(stderr,"Erro no arquivo '%s', linha %i: %s.\n",__FILE__, __LINE__,cudaGetErrorString(err)); \
exit(EXIT_FAILURE); } \
}
/**
* Funcao para execucao sequencial
*/
/**
 * Sequential CPU reference: c = a * b with a (mA x nAmB), b (nAmB x nB)
 * and c (mA x nB), all row-major.
 */
void multMatSeq(double *a, double *b, double *c, int mA, int nAmB, int nB) {
    for (int row = 0; row < mA; row++) {
        for (int col = 0; col < nB; col++) {
            double acc = 0.0;
            for (int k = 0; k < nAmB; k++)
                acc += a[row * nAmB + k] * b[k * nB + col];
            c[row * nB + col] = acc;
        }
    }
}
/**
* Kernel para execucao paralela em CUDA
*/
// Tiled matmul kernel: c = a * b with a (mA x nAmB), b (nAmB x nB).
// Needs 2*blockDim.x*blockDim.y*sizeof(double) bytes of dynamic shared
// memory (A tile followed by B tile). The launcher always uses square
// blocks; the tile indexing assumes blockDim.x == blockDim.y -- TODO confirm.
__global__ void multMatPar(double *a, double *b, double *c, int mA, int nAmB, int nB) {
// global thread coordinates
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// local (in-block) thread coordinates
int i_bloco = threadIdx.x;
int j_bloco = threadIdx.y;
extern __shared__ double mat_sub[];
// shared memory for the sub-matrix of A
double* Asub = (double*) mat_sub;
// shared memory for the sub-matrix of B
double* Bsub= (double*) &Asub[blockDim.x*blockDim.y];
double valor = 0;
for(int passo=0; passo<nAmB; passo+=blockDim.y) {
// stage zero-padded tiles of A and B
if (i < mA && (passo+j_bloco) < nAmB)
Asub[i_bloco*blockDim.y+j_bloco] = a[i*nAmB+passo+j_bloco];
else
Asub[i_bloco*blockDim.y+j_bloco] = 0;
if ((passo+i_bloco) < nAmB && j < nB)
Bsub[i_bloco*blockDim.y+j_bloco] = b[(passo+i_bloco)*nB+j];
else
Bsub[i_bloco*blockDim.y+j_bloco] = 0;
// tiles fully written before any thread reads them
__syncthreads();
if (i < mA && j < nB)
for (int k = 0; k < blockDim.y; k++) {
valor += Asub[i_bloco*blockDim.y+k] * Bsub[k*blockDim.y+j_bloco];
}
// tiles fully consumed before the next iteration overwrites them
__syncthreads();
}
if (i < mA && j < nB)
c[i*nB+j] = valor;
}
/**
* Função que aloca espaco para uma matriz e preenche seus valores
* Entrada: matriz de entrada, dimensoes da matriz
* Saída: retorna 1 se a matriz foi preenchida com sucesso e 0 caso contrario
*/
/**
 * Allocates a `linhas` x `colunas` row-major matrix into *mat and fills
 * every entry with 1.5.
 * Returns 1 on success, 0 if the allocation failed.
 */
int preencheMatriz(double **mat, int linhas, int colunas) {
    // allocate memory for the matrix
    *mat = (double*) malloc(sizeof(double) * linhas * colunas);
    // Bug fix: the original tested `mat` (the out-parameter, never NULL here)
    // instead of `*mat` (the malloc result), so allocation failure was
    // silently ignored.
    if (*mat == NULL) return 0;
    // fill the matrix
    for (int i = 0; i < linhas; i++) {
        for (int j = 0; j < colunas; j++) {
            (*mat)[i * colunas + j] = 1.5;
        }
    }
    return 1;
}
void checkResults(double *mat1, double *mat2, int m, int n){
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++) {
if (fabs(mat1[i*n+j] - mat2[i*n+j]) > 1e-5) {
cerr << "Resultado incorreto em " << i << " x " << j << " -> " << mat1[i*n+j] << " " << mat2[i*n+j] << endl;
exit(EXIT_FAILURE);
}
}
}
}
/**
* Imprime os resultados do programa
*/
// Prints the run's dimensions and timings, either as one CSV line
// (csv == true, the default) or as a human-readable block.
// delta_eventos is in milliseconds (event timing), hence the /1000;
// all other times are already in seconds.
void printResults(unsigned int mA, unsigned int nA, unsigned int mB, unsigned int nB, unsigned int blockLines, unsigned int blockColumns, double tempoSeq, float delta_eventos, double initialParTime, double finalParTime, bool csv = true){
if (csv) {
cout << mA << ";" << nA << ";" << mB << ";" << nB << ";" << blockLines << ";" << blockColumns << ";" << tempoSeq << ";" << delta_eventos/1000 << ";" << initialParTime << ";" << finalParTime << ";" << endl;
} else {
cout << "Dimensões da matriz A = " << mA << " x " << nA << endl
<< "Dimensões da matriz B = " << mB << " x " << nB << endl
<< "Dimensões dos blocos = " << blockLines << " x " << blockColumns << endl
<< "Tempo sequencial = "<< tempoSeq <<" seg" << endl
<< "Tempo paralelo kernel = "<< delta_eventos/1000 << " seg" << endl
<< "Tempo paralelo begin = "<< initialParTime <<" seg" << endl
<< "Tempo paralelo end = "<< finalParTime <<" seg" << endl
<< "Tempo paralelo total = "<< initialParTime+(delta_eventos/1000)+finalParTime <<" seg" << endl;
}
}
//funcao principal
// Entry point: parses matrix and block dimensions from argv, runs the
// sequential reference and the CUDA kernel, checks the results agree and
// prints the timing breakdown.
// Fixes: h_c_seq is now freed (was leaked), the CUDA events are destroyed,
// and the missing ';' after the D2H memcpy is added.
int main(int argc, char** argv) {
double *h_a, *h_b, *h_c, *h_c_seq; // host matrices
double *d_a, *d_b, *d_c; // device matrices
// timing bookkeeping
double begin, end, initialParTime, finalParTime, tempoSeq;
cudaEvent_t start, stop;
// input data
unsigned int mA, nA, mB, nB; // dimensions of the input matrices
long int bytesA, bytesB, bytesC; // bytes per matrix
// thread-block dimensions
unsigned int blockLines, blockColumns;
// read and validate the command-line arguments
if(argc < 6) {
cerr << "Digite: "<< argv[0] <<" <nº de linhas da matriz A> <nº de colunas da matriz A> <nº de linhas da matriz B> <nº de colunas da matriz B> <nº de linhas e colunas dos blocos>" << endl;
exit(EXIT_FAILURE);
}
// matrix dimensions and block size (blocks are square: argv[5] used twice)
mA = atol(argv[1]);
nA = atol(argv[2]);
mB = atol(argv[3]);
nB = atol(argv[4]);
blockLines = atol(argv[5]);
blockColumns = atol(argv[5]);
if (nA != mB) {
cerr << "Impossível executar multiplicação das matrizes. Número de colunas da matriz A ("<< nA <<") não bate com o número de colunas da matriz B ("<< mB <<")" << endl;
exit(EXIT_FAILURE);
}
// size of each matrix in bytes
bytesA = mA*nA*sizeof(double);
bytesB = mB*nB*sizeof(double);
bytesC = mA*nB*sizeof(double);
// allocate and fill input matrix A
if (preencheMatriz(&h_a, mA, nA) == 0) {
cerr << "Erro de preenchimento da matriz de entrada A" << endl;
exit(EXIT_FAILURE);
}
// allocate and fill input matrix B
if (preencheMatriz(&h_b, mB, nB) == 0) {
cerr << "Erro de preenchimento da matriz de entrada B" << endl;
exit(EXIT_FAILURE);
}
// allocate the parallel output matrix
h_c = (double*) malloc(bytesC);
if (h_c == NULL) {
cerr << "Erro de alocacao da matriz de saida" << endl;
exit(EXIT_FAILURE);
}
// allocate the sequential output matrix
h_c_seq = (double*) malloc(bytesC);
if (h_c_seq == NULL) {
cerr << "Erro de alocacao da matriz de saida" << endl;
exit(EXIT_FAILURE);
}
//!!! ------------------------ sequential execution ---------------------------------- !!!//
GET_TIME(begin);
multMatSeq(h_a, h_b, h_c_seq, mA, nA, nB);
GET_TIME(end);
tempoSeq = end-begin; // sequential time, in seconds
//!!! ------------------------ parallel execution with CUDA -------------------------- !!!//
GET_TIME(begin);
// allocate the matrices on the GPU
CUDA_SAFE_CALL(cudaMalloc((void**) &d_a, bytesA));
CUDA_SAFE_CALL(cudaMalloc((void**) &d_b, bytesB));
CUDA_SAFE_CALL(cudaMalloc((void**) &d_c, bytesC));
// copy the input matrices from CPU to GPU (host to device)
CUDA_SAFE_CALL(cudaMemcpy(d_a, h_a, bytesA, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_b, h_b, bytesB, cudaMemcpyHostToDevice));
// launch configuration with fixed-size blocks
dim3 threadsBloco = {blockLines, blockColumns, 1};
dim3 blocosGrade = {(mA + threadsBloco.x - 1)/threadsBloco.x, (nB + threadsBloco.y - 1)/threadsBloco.y, 1};
int tamMemCompartilhada = blockLines*blockColumns*8*2; // two double tiles (8 bytes per element)
GET_TIME(end);
initialParTime = end-begin; // parallel set-up time, in seconds
// launch the kernel, timed with CUDA events
CUDA_SAFE_CALL(cudaEventCreate(&start));
CUDA_SAFE_CALL(cudaEventCreate(&stop));
CUDA_SAFE_CALL(cudaEventRecord(start));
multMatPar<<<blocosGrade, threadsBloco, tamMemCompartilhada>>>(d_a, d_b, d_c, mA, nA, nB);
CUDA_SAFE_CALL(cudaGetLastError());
CUDA_SAFE_CALL(cudaEventRecord(stop));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
float delta_eventos = 0;
CUDA_SAFE_CALL(cudaEventElapsedTime(&delta_eventos, start, stop));
CUDA_SAFE_CALL(cudaEventDestroy(start));
CUDA_SAFE_CALL(cudaEventDestroy(stop));
// copy the result from GPU to CPU (device to host)
GET_TIME(begin);
CUDA_SAFE_CALL(cudaMemcpy(h_c, d_c, bytesC, cudaMemcpyDeviceToHost));
GET_TIME(end);
finalParTime = end-begin; // parallel tear-down time, in seconds
checkResults(h_c_seq, h_c, mA, nB);
// free GPU memory
CUDA_SAFE_CALL(cudaFree(d_a));
CUDA_SAFE_CALL(cudaFree(d_b));
CUDA_SAFE_CALL(cudaFree(d_c));
// free CPU memory (h_c_seq was previously leaked)
free(h_a);
free(h_b);
free(h_c);
free(h_c_seq);
//------------------------------- print the execution times ----------------------//
printResults(mA, nA, mB, nB, blockLines, blockColumns, tempoSeq, delta_eventos, initialParTime, finalParTime);
return 0;
}
|
6677943ecc3acdb53a0f4d2ecd7f5e34943971b8.hip | // !!! This is a file automatically generated by hipify!!!
/*----------------------------------------------------------------------------------*
* Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, *
* Sergio Losilla, Elias Toivanen, Jonas Juselius *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all*
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
*----------------------------------------------------------------------------------*/
#include "streamcontainer.h"
#include "memory_leak_operators.h"
#include "evaluators.h"
#include "cube.h"
#include "bubbles_cuda.h"
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#define X_ 0
#define Y_ 1
#define Z_ 2
#define NLIP 7
#define BLOCK_SIZE 256
#define FULL_MASK 0xffffffff
extern __shared__ double shared_memory[];
/*
* NOTE: this method assumes that the grid is equidistant (in the sense that all cells have equal length)
*/
/*
 * Locates the cell of an equidistant grid that contains `coordinate`.
 * Outputs:
 *   icell              - index of the cell (each cell spans nlip-1 grid
 *                        steps); -1 if the coordinate lies before the grid
 *   in_cell_coordinate - coordinate relative to the cell centre, scaled by
 *                        1/grid_step
 *   one_per_grid_step  - reciprocal of the grid step
 */
template <int nlip>
__device__ inline
void calculate_icell_equidistant(const Grid1D *grid,
const double coordinate,
int &icell,
double &in_cell_coordinate,
double &one_per_grid_step
) {
double grid_step = grid->h[0];
one_per_grid_step = 1.0 / grid_step;
double start = grid->d[0];
// each cell spans nlip-1 grid steps
double cell_length = (nlip-1) * grid_step;
icell = (int)((coordinate - start) / (cell_length));
// the int cast truncates toward zero, so flag points left of the grid explicitly
if (coordinate - start < 0.0) icell = -1;
double cell_center = start + ((double)icell + 0.5) * cell_length;
in_cell_coordinate = (coordinate - cell_center) * one_per_grid_step;
}
/*
* Read Lagrange interpolation polynomials into the shared memory.
*/
/*
 * Copies the nlip*result_length Lagrange interpolation coefficients from
 * device memory into shared memory. Must be executed by all threads of the
 * block (contains barriers); assumes the block has at least
 * nlip*result_length threads so every coefficient is copied -- TODO confirm.
 */
template <int nlip, int result_length>
__device__ inline
void read_lip(double *device_lip, int thread_id, double *shared_memory_lip) {
// barrier first: the buffer may still be in use from a previous pass
__syncthreads();
// the first nlip*result_length threads each copy one coefficient
if (thread_id < nlip * result_length) {
shared_memory_lip[thread_id] = device_lip[thread_id];
}
// all coefficients must be in place before any thread reads them
__syncthreads();
}
/*
*
* NOTE: polynomials must be an array of size 8
*/
/*
 * Evaluates `result_length` degree-(nlip-1) polynomials at x using Horner's
 * rule; `lip` stores each polynomial's nlip coefficients contiguously,
 * leading coefficient first. `polynomials` must have room for 8 doubles:
 * entries at index >= result_length are zero-filled so callers can consume
 * a fixed 8-wide result.
 */
template <int nlip, int result_length>
__device__ inline
void evaluate_polynomials(double *lip, double x, double *polynomials) {
for (int i = 0; i < result_length; i++) {
// init the polynomial as the first value of the lip
polynomials[i] = lip[i*nlip];
for (int k = 1; k < nlip; k++) {
polynomials[i] = lip[i*nlip + k] + x*polynomials[i];
}
}
// zero the unused tail: index j is untouched by the loop when j >= result_length
if (result_length < 1) polynomials[0] = 0.0;
if (result_length < 2) polynomials[1] = 0.0;
if (result_length < 3) polynomials[2] = 0.0;
if (result_length < 4) polynomials[3] = 0.0;
if (result_length < 5) polynomials[4] = 0.0;
if (result_length < 6) polynomials[5] = 0.0;
if (result_length < 7) polynomials[6] = 0.0;
if (result_length < 8) polynomials[7] = 0.0;
}
/*
* Evaluates sum of 'coefficients[i]' times 'polynomials[i]' for 'coefficients'
* that reside in device memory.
*
* NOTE: polynomials must be an array of size 8
*/
template <int nlip>
__device__ inline
double evaluate_coefficients(double *polynomials, const double* __restrict__ c, int address, int thread_id) {
    const int EVALUATE_BLOCK_SIZE = 8;
    // get the thread rank within its little block of size 'EVALUATE_BLOCK_SIZE'
    int thread_rank = thread_id%EVALUATE_BLOCK_SIZE;
    // let us use the results array as temp array
    double temp_results[EVALUATE_BLOCK_SIZE];
    // TODO: make this more generic
    // Broadcast every lane's 'address' to the whole 8-lane sub-block: after
    // this, addresses[j] points at this lane's element of lane j's coefficient
    // run, so the 8 lanes can load 8 runs cooperatively with coalesced reads.
    // NOTE(review): neither preprocessor branch matches for __CUDA_ARCH__ < 350
    // (or a host compilation pass), leaving 'addresses' uninitialized --
    // confirm such targets are never built.
    int addresses[EVALUATE_BLOCK_SIZE];
#if (__CUDA_ARCH__ >= 350) && (__CUDA_ARCH__ < 700)
    addresses[0] =__shfl(address, 0, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[1] =__shfl(address, 1, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[2] =__shfl(address, 2, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[3] =__shfl(address, 3, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[4] =__shfl(address, 4, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[5] =__shfl(address, 5, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[6] =__shfl(address, 6, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[7] =__shfl(address, 7, EVALUATE_BLOCK_SIZE) + thread_rank;
#elif  (__CUDA_ARCH__ >= 700)
    addresses[0] =__shfl_sync(FULL_MASK, address, 0, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[1] =__shfl_sync(FULL_MASK, address, 1, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[2] =__shfl_sync(FULL_MASK, address, 2, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[3] =__shfl_sync(FULL_MASK, address, 3, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[4] =__shfl_sync(FULL_MASK, address, 4, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[5] =__shfl_sync(FULL_MASK, address, 5, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[6] =__shfl_sync(FULL_MASK, address, 6, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[7] =__shfl_sync(FULL_MASK, address, 7, EVALUATE_BLOCK_SIZE) + thread_rank;
#endif
    // Load phase: each of the first nlip lanes reads one element of every
    // lane's run through the read-only cache. The destination register is
    // rotated (reg decrements mod 8) so that after the rotating shuffles in
    // the reduction phase below, slot order lines up with polynomial order.
    int reg = thread_rank;
    if (thread_rank < nlip) {
        temp_results[reg] = __ldg(&c[addresses[0]]);
        reg = ((reg == 0) ? EVALUATE_BLOCK_SIZE-1 : reg - 1);
        temp_results[reg] = __ldg(&c[addresses[1]]);
        reg = ((reg == 0) ? EVALUATE_BLOCK_SIZE-1 : reg - 1);
        temp_results[reg] = __ldg(&c[addresses[2]]);
        reg = ((reg == 0) ? EVALUATE_BLOCK_SIZE-1 : reg - 1);
        temp_results[reg] = __ldg(&c[addresses[3]]);
        reg = ((reg == 0) ? EVALUATE_BLOCK_SIZE-1 : reg - 1);
        temp_results[reg] = __ldg(&c[addresses[4]]);
        reg = ((reg == 0) ? EVALUATE_BLOCK_SIZE-1 : reg - 1);
        temp_results[reg] = __ldg(&c[addresses[5]]);
        reg = ((reg == 0) ? EVALUATE_BLOCK_SIZE-1 : reg - 1);
        temp_results[reg] = __ldg(&c[addresses[6]]);
        reg = ((reg == 0) ? EVALUATE_BLOCK_SIZE-1 : reg - 1);
        temp_results[reg] = __ldg(&c[addresses[7]]);
    }
    // lanes nlip..7 loaded nothing; zero so they contribute nothing below
    else {
        temp_results[0] = 0.0;
        temp_results[1] = 0.0;
        temp_results[2] = 0.0;
        temp_results[3] = 0.0;
        temp_results[4] = 0.0;
        temp_results[5] = 0.0;
        temp_results[6] = 0.0;
        temp_results[7] = 0.0;
    }
    // Reduction phase: gather this lane's own run back from the other lanes
    // (shuffle offset k fetches the element loaded on its behalf by lane
    // rank+k) and accumulate the dot product with 'polynomials'.
    reg = thread_rank;
    double result = temp_results[0] * polynomials[reg];
#if (__CUDA_ARCH__ >= 350) && (__CUDA_ARCH__ < 700)
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl(temp_results[1], thread_rank+1, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl(temp_results[2], thread_rank+2, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl(temp_results[3], thread_rank+3, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl(temp_results[4], thread_rank+4, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl(temp_results[5], thread_rank+5, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl(temp_results[6], thread_rank+6, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl(temp_results[7], thread_rank+7, EVALUATE_BLOCK_SIZE) * polynomials[reg];
#elif (__CUDA_ARCH__ >= 700)
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl_sync(FULL_MASK, temp_results[1], thread_rank+1, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl_sync(FULL_MASK, temp_results[2], thread_rank+2, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl_sync(FULL_MASK, temp_results[3], thread_rank+3, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl_sync(FULL_MASK, temp_results[4], thread_rank+4, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl_sync(FULL_MASK, temp_results[5], thread_rank+5, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl_sync(FULL_MASK, temp_results[6], thread_rank+6, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl_sync(FULL_MASK, temp_results[7], thread_rank+7, EVALUATE_BLOCK_SIZE) * polynomials[reg];
#endif
    return result;
}
/*
* Evaluates sum of 'coefficients[i]' times 'polynomials[i]' for 'coefficients'
* that reside in registers.
*
* NOTE: 'polynomials' and 'c' must be arrays of size nlip
*/
template <int nlip>
__device__ inline
double evaluate_coefficients_register(double *polynomials, double *c) {
    // Plain dot product of nlip polynomial values with nlip coefficients,
    // both already resident in this thread's registers.
    double sum = 0.0;
    for (int i = 0; i < nlip; i++) sum += polynomials[i] * c[i];
    return sum;
}
/*
* Evaluates sum of 'coefficients[i]' times 'polynomials[i]' for 'coefficients'
* that reside in registers that are spread within the neighbouring threads.
 * Also the 'polynomials' should lie in registers.
*
* NOTE: 'polynomials' and 'c' must be arrays of size nlip
*/
template <int nlip>
__device__ inline
double evaluate_coefficients_shuffle(double *polynomials, double coefficient, int thread_order_number, int x_modulo) {
    double result = 0.0;
    if (nlip == 7) {
        // get the number of thread (lane) holding the first coefficient of
        // the cell this thread belongs to
        int first_of_cell = thread_order_number - x_modulo;
        // do not take the 32:nd thread in to the games because each warp is handling
        // 5 cells, i.e., 6*5 + 1 points
#if (__CUDA_ARCH__ >= 700)
        // Legacy __shfl was removed for compute capability 7.0+; use the
        // _sync variant as elsewhere in this file. The shuffles are hoisted
        // out of the lane-31 guard so that every lane named in FULL_MASK
        // reaches them (required under independent thread scheduling); only
        // the use of the result stays guarded. Out-of-range source lanes
        // wrap modulo the width, so idle lanes are harmless.
        double sum;
        sum  = __shfl_sync(FULL_MASK, coefficient, first_of_cell  , 32) * polynomials[0];
        sum += __shfl_sync(FULL_MASK, coefficient, first_of_cell+1, 32) * polynomials[1];
        sum += __shfl_sync(FULL_MASK, coefficient, first_of_cell+2, 32) * polynomials[2];
        sum += __shfl_sync(FULL_MASK, coefficient, first_of_cell+3, 32) * polynomials[3];
        sum += __shfl_sync(FULL_MASK, coefficient, first_of_cell+4, 32) * polynomials[4];
        sum += __shfl_sync(FULL_MASK, coefficient, first_of_cell+5, 32) * polynomials[5];
        sum += __shfl_sync(FULL_MASK, coefficient, first_of_cell+6, 32) * polynomials[6];
        if (thread_order_number < 31) result = sum;
#else
        if (thread_order_number < 31) {
            result  = __shfl(coefficient, first_of_cell  , 32) * polynomials[0];
            result += __shfl(coefficient, first_of_cell+1, 32) * polynomials[1];
            result += __shfl(coefficient, first_of_cell+2, 32) * polynomials[2];
            result += __shfl(coefficient, first_of_cell+3, 32) * polynomials[3];
            result += __shfl(coefficient, first_of_cell+4, 32) * polynomials[4];
            result += __shfl(coefficient, first_of_cell+5, 32) * polynomials[5];
            result += __shfl(coefficient, first_of_cell+6, 32) * polynomials[6];
        }
#endif
    }
    return result;
}
/*
* Evaluate cube at 'points'
*
* if calling to the version with evaluate_gradients=true, we are also evaluating the
* gradients and storing the results to 'device_gradients'
*/
template <bool evaluate_value, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z>
__global__ void
// NOTE(review): both arch branches request identical bounds; the split only
// exists so the two cases can be tuned separately later.
#if (__CUDA_ARCH__ <= 350)
__launch_bounds__(BLOCK_SIZE)
#else
__launch_bounds__(BLOCK_SIZE)
#endif
CubeEvaluator_evaluate_points(const double* __restrict__ device_cube,
                              const size_t device_pitch,
                              const size_t device_shape_y,
                              const Grid3D* __restrict__ grid,
                              double* __restrict__ result_array,
                              double* __restrict__ device_gradients_x,
                              double* __restrict__ device_gradients_y,
                              double* __restrict__ device_gradients_z,
                              // a 3d array, where the x coordinates are first,
                              // then y coordinates, and finally the z coordinates. This ordering
                              // is selected to get coalesced memory reads
                              const double* __restrict__ points,
                              // total number of points evaluated by this device
                              const int device_number_of_points,
                              // number of points in this kernel call
                              const int point_count,
                              // device_point_offset
                              const int device_point_offset,
                              const double multiplier
                             ) {

    // Get the point order number within this kernel call; one thread handles
    // exactly one evaluation point.
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    double value, gradient[3];
    double in_cell_coordinate_x = 0.0, in_cell_coordinate_y = 0.0, in_cell_coordinate_z = 0.0;
    double one_per_grid_step_x = 0.0, one_per_grid_step_y = 0.0, one_per_grid_step_z = 0.0;
    int icell_x = 0, icell_y = 0, icell_z = 0, ncell_x = 0, ncell_y= 0, ncell_z= 0;
    bool valid_point = true;
    // get the number of cells
    ncell_x = grid->axis[X_]->ncell;
    ncell_y = grid->axis[Y_]->ncell;
    ncell_z = grid->axis[Z_]->ncell;

    if (id + device_point_offset < device_number_of_points && id < point_count ) {
        // get the cell indices and coordinates within cell in grid steps
        calculate_icell_equidistant<NLIP>(
            grid->axis[X_], points[id + device_point_offset], icell_x, in_cell_coordinate_x, one_per_grid_step_x);
        calculate_icell_equidistant<NLIP>(
            grid->axis[Y_], points[id + device_point_offset + device_number_of_points], icell_y, in_cell_coordinate_y, one_per_grid_step_y);
        calculate_icell_equidistant<NLIP>(
            grid->axis[Z_], points[id + device_point_offset + device_number_of_points*2], icell_z, in_cell_coordinate_z, one_per_grid_step_z);
    }
    else {
        valid_point = false;
    }

    // if the result is not within the grid, set the icells to 0 and mark the point to be non-valid.
    // The zero indices keep all memory reads below in bounds; the computed
    // result is simply discarded at the store stage.
    if (icell_x < 0 || icell_x >= ncell_x || icell_y < 0 || icell_y >= ncell_y || icell_z < 0 || icell_z >= ncell_z) {
        icell_x = 0;
        icell_y = 0;
        icell_z = 0;
        valid_point = false;
    }

    // read the LIPs in the shared memory (block-wide cooperative copy)
    __shared__ double lip[NLIP * NLIP];
    read_lip<NLIP, NLIP>(grid->axis[X_]->lip, threadIdx.x, lip);

    // evaluate the polynomials in x and y directions
    // (arrays of size 8, as required by evaluate_polynomials / the shuffles)
    double x_polynomials[8];
    evaluate_polynomials<NLIP, NLIP>(lip, in_cell_coordinate_x, x_polynomials);
    double polynomials[8];
    evaluate_polynomials<NLIP, NLIP>(lip, in_cell_coordinate_y, polynomials);

    double x_values[NLIP], y_values[NLIP];

    // get the address to the first grid point of icell_x, icell_y and icell_z
    int address = icell_x * (NLIP-1)
                  + icell_y * device_pitch / sizeof(double) * (NLIP-1)
                  + icell_z * device_pitch / sizeof(double) * device_shape_y * (NLIP-1);

    // Contract the cube over x, then over y: after this loop y_values[j]
    // holds the value of slice j interpolated at (x, y). The same y_values
    // serve both the value and the z-derivative below.
    if (evaluate_value || evaluate_gradients_z) {
        for (int j = 0; j < NLIP; j++) {
            // add the address by 'j' slices
            int z_address = address + j * device_pitch / sizeof(double) * device_shape_y;
            for (int k = 0; k < NLIP; k++) {
                // add the address by 'k' rows
                int y_address = z_address + k * device_pitch / sizeof(double);
                x_values[k] = evaluate_coefficients<NLIP>(x_polynomials, device_cube, y_address, threadIdx.x);
            }
            y_values[j] = evaluate_coefficients_register<NLIP>(polynomials, x_values);
        }
    }

    if (evaluate_value) {
        // evaluate the polynomials in z-direction.
        // NOTE: reusing the y-direction polynomial registers
        evaluate_polynomials<NLIP, NLIP>(lip, in_cell_coordinate_z, polynomials);

        // evaluate the coefficients
        value = evaluate_coefficients_register<NLIP>(polynomials, y_values);

        // if the point handled is valid, let's add it to the results
        if (valid_point) {
            result_array[id+device_point_offset] += multiplier * value;
        }
    }

    // if we are evaluating the gradients, it is done within the brackets below
    if (evaluate_gradients_x || evaluate_gradients_y || evaluate_gradients_z) {
        __shared__ double derivative_lip[(NLIP-1) * NLIP];
        read_lip<NLIP-1, NLIP>(grid->axis[X_]->derivative_lip, threadIdx.x, derivative_lip);

        if (evaluate_gradients_z) {
            // evaluate the gradient polynomials in z-direction.
            evaluate_polynomials<NLIP-1, NLIP>(derivative_lip, in_cell_coordinate_z, polynomials);

            // multiply the polynomials with 1 / grid_step to convert the
            // derivative from in-cell units to real coordinates
            for (int j = 0; j < NLIP; j++) {
                polynomials[j] *= one_per_grid_step_z;
            }

            // evaluate the derivative coefficients
            // we can reuse the previous y_values, which are the same for this case
            gradient[Z_] = evaluate_coefficients_register<NLIP>(polynomials, y_values);
        }

        // NOTE: at this point the value and the z-derivative are done, but for
        // the remaining y- and x-directions we have to recalculate everything
        // else, as we need to save some registers.
        // If we would have 49*2 extra registers the next loop would be futile.
        if (evaluate_gradients_y) {
            // let's calculate the y-axis derivative polynomials.
            // Note that we are still using the same x-direction polynomials
            evaluate_polynomials<NLIP-1, NLIP>(derivative_lip, in_cell_coordinate_y, polynomials);

            // multiply the polynomials with 1 / grid_step
            for (int j = 0; j < NLIP; j++) {
                polynomials[j] *= one_per_grid_step_y;
            }

            // and let's do the looping again
            for (int j = 0; j < NLIP; j++) {
                // add the address by 'j' slices
                int z_address = address + j * device_pitch / sizeof(double) * device_shape_y;
                for (int k = 0; k < NLIP; k++) {
                    // add the address by 'k' rows
                    int y_address = z_address + k * device_pitch / sizeof(double);
                    x_values[k] = evaluate_coefficients<NLIP>(x_polynomials, device_cube, y_address, threadIdx.x);
                }
                y_values[j] = evaluate_coefficients_register<NLIP>(polynomials, x_values);
            }

            // evaluate the polynomials in z-direction.
            // reusing the y-direction polynomial registers
            evaluate_polynomials<NLIP, NLIP>(lip, in_cell_coordinate_z, polynomials);

            // finally, we can get the derivative in y-direction
            gradient[Y_] = evaluate_coefficients_register<NLIP>(polynomials, y_values);
        }

        if (evaluate_gradients_x) {
            // evaluate the normal polynomials in y-direction
            evaluate_polynomials<NLIP, NLIP>(lip, in_cell_coordinate_y, polynomials);

            // and evaluate the derivative polynomials in x-direction
            evaluate_polynomials<NLIP-1, NLIP>(derivative_lip, in_cell_coordinate_x, x_polynomials);

            // multiply the polynomials with 1 / grid_step
            for (int j = 0; j < NLIP; j++) {
                x_polynomials[j] *= one_per_grid_step_x;
            }

            // and let's do the looping again
            for (int j = 0; j < NLIP; j++) {
                // add the address by 'j' slices
                int z_address = address + j * device_pitch / sizeof(double) * device_shape_y;
                for (int k = 0; k < NLIP; k++) {
                    // add the address by 'k' rows
                    int y_address = z_address + k * device_pitch / sizeof(double);
                    x_values[k] = evaluate_coefficients<NLIP>(x_polynomials, device_cube, y_address, threadIdx.x);
                }
                y_values[j] = evaluate_coefficients_register<NLIP>(polynomials, x_values);
            }

            // evaluate the polynomials in z-direction.
            // reusing the y-direction polynomial registers
            evaluate_polynomials<NLIP, NLIP>(lip, in_cell_coordinate_z, polynomials);

            // finally, we are ready and can get the derivative in x-direction
            gradient[X_] = evaluate_coefficients_register<NLIP>(polynomials, y_values);
        }

        // if the point handled is valid, let's store the gradient to the device_gradients
        if (valid_point) {
            if (evaluate_gradients_x) device_gradients_x[id+device_point_offset] += multiplier * gradient[X_];
            if (evaluate_gradients_y) device_gradients_y[id+device_point_offset] += multiplier * gradient[Y_];
            if (evaluate_gradients_z) device_gradients_z[id+device_point_offset] += multiplier * gradient[Z_];
        }
    }
    return;
}
/*
* Evaluate cube gradients at grid points. The results are stored to 'device_gradients'.
*/
template <int nlip, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z>
__global__ void
// NOTE(review): identical bounds in both arch branches; split kept for
// possible per-arch tuning.
#if (__CUDA_ARCH__ <= 350)
__launch_bounds__(BLOCK_SIZE)
#else
__launch_bounds__(BLOCK_SIZE)
#endif
CubeEvaluator_evaluate_grid_gradients(const double* __restrict__ device_cube,
                                      const size_t device_pitch,
                                      const size_t device_shape_y,
                                      const Grid3D* __restrict__ grid,
                                      double* __restrict__ device_gradients_x,
                                      double* __restrict__ device_gradients_y,
                                      double* __restrict__ device_gradients_z,
                                      // number of slices handled by this device
                                      // in previous calls
                                      int device_slice_offset,
                                      // number of slices handled by all devices
                                      // in previous calls
                                      int slice_offset,
                                      // number of slices handled by this call
                                      int slice_count,
                                      // number of warps in a x-axis row
                                      int warps_per_string,
                                      const double multiplier
                                     ) {

    // Get the point order number within this kernel call.
    // NOTE(review): global_warp_id, cells_per_warp and thread_order_number are
    // only initialized when nlip == 7; other nlip instantiations would read
    // uninitialized registers below -- confirm only nlip == 7 is used.
    int global_warp_id, thread_order_number, cells_per_warp;
    int WARP_SIZE = 32;
    bool valid_point = true;

    // if nlip is 7, each warp of 32 handles 5 cells
    if (nlip == 7) {
        // get the global warp order number
        global_warp_id =   blockIdx.x * blockDim.x / WARP_SIZE
                         + threadIdx. x / WARP_SIZE;
        cells_per_warp = 5;
        // get the order number of thread within the warp
        thread_order_number = threadIdx.x % WARP_SIZE;
        // lane 31 is idle: a warp covers 5 cells = 6*5 + 1 = 31 points
        if (thread_order_number == 31) valid_point = false;
    }
    // get the number of cells
    int ncell_x = grid->axis[X_]->ncell;
    int ncell_y = grid->axis[Y_]->ncell;
    int ncell_z = grid->axis[Z_]->ncell;
    int y_shape = ncell_y * (nlip-1) + 1;

    // get the z and y coordinates: warps are laid out as
    // (z-slice, y-row, warp-within-row)
    int z = global_warp_id / (warps_per_string * y_shape);
    int y = (global_warp_id - z * warps_per_string * y_shape) / warps_per_string;

    // get the warp id withing the x-axis string
    int string_warp_id = (global_warp_id
                          - z * warps_per_string * y_shape
                          - y * warps_per_string);
    int icell_x =  string_warp_id * cells_per_warp + thread_order_number / (nlip-1);
    int x_modulo = thread_order_number % (nlip-1);
    int x = icell_x * (nlip-1) + x_modulo;

    // get the order numbers of cells within this device
    int icell_z = (z + slice_offset) / (nlip-1);
    int icell_y = y / (nlip-1);

    // and get the remainders of the y and z coordinates
    int y_modulo = y % (nlip-1);
    int z_modulo = (z + slice_offset) % (nlip-1);

    // if this thread handles the last cell of the x-axis
    // set the correct icell: a point shared by two cells belongs to the
    // earlier one (6 == nlip-1 for the only supported nlip, 7)
    if (x_modulo == 0 && icell_x > 0) {
        icell_x -= 1;
        x_modulo = 6;
    }

    // if this thread handles data in the last index of the y-axis
    if (y_modulo == 0 && icell_y > 0) {
        icell_y -= 1;
        y_modulo = 6;
    }

    // if this thread handles data in the last index of the z-axis
    if (z_modulo == 0 && icell_z > 0) {
        icell_z -= 1;
        z_modulo = 6;
    }

    // if the result is not within the grid, mark the point to be non-valid.
    // The zeroed indices keep all memory reads in bounds; thread_order_number
    // is parked at 32 so the lane also fails the ownership check below.
    if (   icell_x < 0 || x >= ncell_x * (nlip-1) + 1
        || icell_y < 0 || y >= y_shape
        || icell_z < 0 || z + slice_offset  >= ncell_z * (nlip-1) + 1
        || z >= slice_count) {
        valid_point = false;
        icell_x = 0;
        icell_y = 0;
        icell_z = 0;
        x = 0;
        y = 0;
        z = 0;
        thread_order_number = 32;
    }

    // lane 0 of a continuation warp (x_modulo != 0 after the boundary
    // adjustment) duplicates the last point of the previous warp -- drop it
    if (thread_order_number == 0 && x_modulo != 0) valid_point = false;

    // calculate the 1 / grid steps for all axis
    double one_per_grid_step_x = 1.0 / grid->axis[X_]->h[icell_x];
    double one_per_grid_step_y = 1.0 / grid->axis[Y_]->h[icell_y];
    double one_per_grid_step_z = 1.0 / grid->axis[Z_]->h[icell_z];

    // get the in cell coordinate of x (grid points sit at integer in-cell
    // coordinates -3..3 for nlip == 7)
    double in_cell_coordinate_x = (double)(x_modulo - 3);
    double in_cell_coordinate_y = (double)(y_modulo - 3);
    double in_cell_coordinate_z = (double)(z_modulo - 3);

    // read the LIPs in the shared memory (block-wide cooperative copies)
    __shared__ double lip[nlip * nlip];
    read_lip<nlip, nlip>(grid->axis[X_]->lip, threadIdx.x, lip);
    __shared__ double derivative_lip[(nlip-1) * nlip];
    read_lip<nlip-1, nlip>(grid->axis[X_]->derivative_lip, threadIdx.x, derivative_lip);

    // init the polynomials in x direction
    double x_polynomials[8];
    // init the polynomials in y/z direction
    double polynomials[8];

    double x_values[nlip], y_values[nlip];
    int address;
    double gradient[3];

    // evaluate gradient in z direction: only needs the column of values
    // straight through the z-axis at this (x, y)
    if (evaluate_gradients_z) {
        address =   x
                  + y * device_pitch / sizeof(double)
                  + icell_z * (nlip-1) * device_pitch / sizeof(double) * device_shape_y;
        for (int j = 0; j < nlip; j++) {
            // add the address by 'j' slices
            int z_address = address + j * device_pitch / sizeof(double) * device_shape_y;

            // read the value
            y_values[j] = device_cube[z_address];
        }
        // evaluate the derivative polynomials in z-direction.
        evaluate_polynomials<nlip-1, nlip>(derivative_lip, in_cell_coordinate_z, polynomials);

        // multiply the polynomials with 1 / grid_step
        for (int j = 0; j < nlip; j++) {
            polynomials[j] *= one_per_grid_step_z;
        }

        // Now we have all to evaluate the gradients in z direction. Let's do it.
        gradient[Z_] = evaluate_coefficients_register<nlip>(polynomials, y_values);
    }

    // evaluate gradient in y direction
    if (evaluate_gradients_y) {
        // evaluate the derivative polynomials in y direction
        evaluate_polynomials<nlip-1, nlip>(derivative_lip, in_cell_coordinate_y, polynomials);

        // multiply the polynomials with 1 / grid_step
        for (int j = 0; j < nlip; j++) {
            polynomials[j] *= one_per_grid_step_y;
        }

        // get the address to the first grid point of icell_y and icell_z and to the point x
        address =   x
                  + icell_y * device_pitch / sizeof(double) * (nlip-1)
                  + icell_z * device_pitch / sizeof(double) * device_shape_y * (nlip-1);
        for (int j = 0; j < nlip; j++) {
            // add the address by 'j' slices
            int z_address = address + j * device_pitch / sizeof(double) * device_shape_y;
            for (int k = 0; k < nlip; k++) {
                // add the address by 'k' rows
                int y_address = z_address + k * device_pitch / sizeof(double);

                // read in the x value
                x_values[k] = device_cube[y_address];
            }
            y_values[j] = evaluate_coefficients_register<nlip>(polynomials, x_values);
        }

        // evaluate the polynomials in z-direction.
        // NOTE: reusing the y-direction polynomial registers
        evaluate_polynomials<nlip, nlip>(lip, in_cell_coordinate_z, polynomials);

        // Now we have all to evaluate the gradients in y direction. Let's do it.
        gradient[Y_] = evaluate_coefficients_register<nlip>(polynomials, y_values);
    }

    // evaluate gradient in x direction
    if (evaluate_gradients_x) {
        // evaluate the polynomials in y-direction.
        evaluate_polynomials<nlip, nlip>(lip, in_cell_coordinate_y, polynomials);

        // evaluate the derivative polynomials in x direction
        evaluate_polynomials<nlip-1, nlip>(derivative_lip, in_cell_coordinate_x, x_polynomials);

        // multiply the polynomials with 1 / grid_step
        for (int j = 0; j < nlip; j++) {
            x_polynomials[j] *= one_per_grid_step_x;
        }

        address =   x
                  + icell_y * device_pitch / sizeof(double) * (nlip-1)
                  + icell_z * device_pitch / sizeof(double) * device_shape_y * (nlip-1);
        for (int j = 0; j < nlip; j++) {
            // add the address by 'j' slices
            int z_address = address + j * device_pitch / sizeof(double) * device_shape_y;
            for (int k = 0; k < nlip; k++) {
                // add the address by 'k' rows
                int y_address = z_address + k * device_pitch / sizeof(double);

                // read in the x value
                double x_value = device_cube[y_address];

                // evaluate the derivative value by shuffling the coefficients
                // between the lanes that share this cell
                x_values[k] = evaluate_coefficients_shuffle<nlip>(x_polynomials, x_value, thread_order_number, x_modulo);
            }
            y_values[j] = evaluate_coefficients_register<nlip>(polynomials, x_values);
        }

        // evaluate the polynomials in z-direction.
        // NOTE: reusing the y-direction polynomial registers
        evaluate_polynomials<nlip, nlip>(lip, in_cell_coordinate_z, polynomials);

        // evaluate the coefficients
        gradient[X_] = evaluate_coefficients_register<nlip>(polynomials, y_values);
    }

    address =   x
              + y * device_pitch / sizeof(double)
              + (z + device_slice_offset) * device_pitch / sizeof(double) * device_shape_y;

    // if the point handled is valid, let's add it to the results
    if (valid_point) {
        /*if (x >= ncell_x * (nlip-1) +1 || y >= ncell_y * (nlip-1) + 1 || z + slice_offset >= ncell_z * (nlip-1) + 1 || z >= slice_count) {
            printf("over bounds x: %d/%d, y: %d/%d, z: %d / %d\n", x, ncell_x * (nlip-1) +1, y, ncell_y * (nlip-1) + 1, z, slice_count);
        }*/
        //int max_address = (device_slice_offset + slice_count) * device_shape_y * device_pitch / sizeof(double);
        //if (address >= max_address || address < 0 ) printf("address over bounds: %d / %d", address, max_address);
        if (evaluate_gradients_x) device_gradients_x[address] += multiplier * gradient[X_];
        if (evaluate_gradients_y) device_gradients_y[address] += multiplier * gradient[Y_];
        if (evaluate_gradients_z) device_gradients_z[address] += multiplier * gradient[Z_];
    }
    return;
}
// this is a bit ugly ... ideally fin_diff_order and grid_type should both be
// template parameters, but they should also be user-input ...
template<int finite_diff_order>
__device__ __forceinline__
double evaluate_derivative(const int curr_id,
const int prev_id1, const int prev_id2, const int prev_id3, const int prev_id4, const int prev_id5, const int prev_id6, const int prev_id7, const int prev_id8,
const int next_id1, const int next_id2, const int next_id3, const int next_id4, const int next_id5, const int next_id6, const int next_id7, const int next_id8,
const double* __restrict__ device_cube, const int grid_type, const int local_pos, const double h){
if (curr_id == -1) return 0.0;
// printf("xy: %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i\n", prev_id6, prev_id5, prev_id4, prev_id3, prev_id2, prev_id1, next_id1, next_id2, next_id3, next_id4, next_id5, next_id6);
double curr_value,
prev_value1, prev_value2, prev_value3, prev_value4, prev_value5, prev_value6, prev_value7, prev_value8,
next_value1, next_value2, next_value3, next_value4, next_value5, next_value6, next_value7, next_value8;
if (curr_id > -1) curr_value = __ldg(&device_cube[curr_id]);
if (prev_id1 > -1) prev_value1 = __ldg(&device_cube[prev_id1]);
if (next_id1 > -1) next_value1 = __ldg(&device_cube[next_id1]);
if (finite_diff_order >= 3){
if (prev_id2 > -1) prev_value2 = __ldg(&device_cube[prev_id2]);
if (next_id2 > -1) next_value2 = __ldg(&device_cube[next_id2]);
}
if (finite_diff_order >= 4){
if (prev_id3 > -1) prev_value3 = __ldg(&device_cube[prev_id3]);
if (next_id3 > -1) next_value3 = __ldg(&device_cube[next_id3]);
}
if (finite_diff_order >= 5){
if (prev_id4 > -1) prev_value4 = __ldg(&device_cube[prev_id4]);
if (next_id4 > -1) next_value4 = __ldg(&device_cube[next_id4]);
}
if (finite_diff_order >= 6){
if (prev_id5 > -1) prev_value5 = __ldg(&device_cube[prev_id5]);
if (next_id5 > -1) next_value5 = __ldg(&device_cube[next_id5]);
}
if (finite_diff_order >= 7){
if (prev_id6 > -1) prev_value6 = __ldg(&device_cube[prev_id6]);
if (next_id6 > -1) next_value6 = __ldg(&device_cube[next_id6]);
}
if (finite_diff_order >= 8){
if (prev_id7 > -1) prev_value7 = __ldg(&device_cube[prev_id7]);
if (next_id7 > -1) next_value7 = __ldg(&device_cube[next_id7]);
}
if (finite_diff_order >= 9){
if (prev_id8 > -1) prev_value8 = __ldg(&device_cube[prev_id8]);
if (next_id8 > -1) next_value8 = __ldg(&device_cube[next_id8]);
}
if(grid_type == 1){ // equidistant
if ( finite_diff_order >= 11 && prev_id1 > -1 && next_id1 > -1 && prev_id2 > -1 && next_id2 > -1 && prev_id3 > -1 && next_id3 > -1 && prev_id4 > -1 && next_id4 > -1 && prev_id5 > -1 && next_id5 > -1 ) { // x x x x x o x x x x x
return (-1.0 * prev_value5 + 12.5 * prev_value4 - 75.0 * prev_value3 + 300.0 * prev_value2 - 1050.0 * prev_value1 + 1050.0 * next_value1 - 300.0 * next_value2 + 75.0 * next_value3 - 12.5 * next_value4 + 1.0 * next_value5) / (1260.0*h);
}
else if ( finite_diff_order >= 9 && prev_id1 > -1 && next_id1 > -1 && prev_id2 > -1 && next_id2 > -1 && prev_id3 > -1 && next_id3 > -1 && prev_id4 > -1 && next_id4 > -1 ) { // x x x x o x x x x
return (3.0 * prev_value4 - 32.0 * prev_value3 + 168.0 * prev_value2 - 672.0 * prev_value1 + 672.0 * next_value1 - 168.0 * next_value2 + 32.0 * next_value3 - 3.0 * next_value4 ) / (840.0*h);
}
else if ( finite_diff_order >= 7 && prev_id1 > -1 && next_id1 > -1 && prev_id2 > -1 && next_id2 > -1 && prev_id3 > -1 && next_id3 > -1 ) { // x x x o x x x
return (-1.0 * prev_value3 + 9.0 * prev_value2 - 45.0 * prev_value1 + 45.0 * next_value1 - 9.0 * next_value2 + 1.0 * next_value3) / (60.0*h);
}
else if ( finite_diff_order >= 5 && prev_id1 > -1 && next_id1 > -1 && prev_id2 > -1 && next_id2 > -1 ) { // x x o x x
return (-1.0 * next_value2 + 8.0 * next_value1 - 8.0 * prev_value1 + prev_value2) / (12.0*h);
}
else if ( finite_diff_order >= 3 && prev_id1 > -1 && next_id1 > -1 ) { // x o x
return (next_value1 - prev_value1) / (2.0 * h);
}
else if ( finite_diff_order >= 3 && next_id1 > -1 && next_id2 > -1 ) { // o x x
return (-1.0 * next_value2 + 4.0 * next_value1 - 3.0 * curr_value) / (2.0 * h);
}
else if ( finite_diff_order >= 3 && prev_id1 > -1 && prev_id2 > -1) { // x x o
return (1.0 * prev_value2 - 4.0 * prev_value2 + 3.0 * curr_value) / (2.0 * h);
}
else if ( finite_diff_order >= 2 && next_id1 > -1 ) { // o x
return (next_value1 - curr_value) / h;
}
else if ( finite_diff_order >= 2 && prev_id1 > -1 ) { // x o
return (curr_value - prev_value1) / h;
}
}
else if(grid_type == 2){ // lobatto
if (finite_diff_order == 9){
if ( prev_id4 > -1 && prev_id3 > -1 && prev_id2 > -1 && prev_id1 > -1 && next_id1 > -1 && next_id2 > -1 && next_id3 > -1 && next_id4 > -1){
// centre at pos 0: {0.00019967699748295, -0.0036233997312734, 0.057221995168223, -1.1410916482317, 0, 1.1410916482317, -0.057221995168223, 0.0036233997312734, -0.00019967699748295}
// centre at pos 1: {0.00089464933543953, -0.030021565652814, 0.64613210399319, -2.7472765818353, 1.9243274545541, 0.22575990586671, -0.022628075171264, 0.0031999844358336, -0.000387875525842}
// centre at pos 2: {0.014381546028833, -0.46942683846151, 1.7209311289771, -2.310743566852, 0.78934433020611, 0.31410347917631, -0.080765809769078, 0.032393740023427, -0.010218009329259}
// centre at pos 3: {0.075131403109164, -0.38693923899442, 0.6095587888754, -0.79702587611059, 0, 0.79702587611059, -0.6095587888754, 0.38693923899442, -0.075131403109164}
// centre at pos 4: {0.010218009329259, -0.032393740023427, 0.080765809769078, -0.31410347917631, -0.78934433020611, 2.310743566852, -1.7209311289771, 0.46942683846151, -0.014381546028833}
// centre at pos 5: {0.000387875525842, -0.0031999844358336, 0.022628075171264, -0.22575990586671, -1.9243274545541, 2.7472765818353, -0.64613210399319, 0.030021565652814, -0.00089464933543953}
// centre at pos 6: {0.00019967699748295, -0.0036233997312734, 0.057221995168223, -1.1410916482317, 0, 1.1410916482317, -0.057221995168223, 0.0036233997312734, -0.00019967699748295}
switch(local_pos){
case(0):
return ( 0.00019967699748295 * prev_value4 + -0.0036233997312734 * prev_value3 + 0.057221995168223 * prev_value2 + -1.1410916482317 * prev_value1 + 0 * curr_value + 1.1410916482317 * next_value1 + -0.057221995168223 * next_value2 + 0.0036233997312734 * next_value3 + -0.00019967699748295 * next_value4 )/h;
case(1):
return ( 0.00089464933543953 * prev_value4 + -0.030021565652814 * prev_value3 + 0.64613210399319 * prev_value2 + -2.7472765818353 * prev_value1 + 1.9243274545541 * curr_value + 0.22575990586671 * next_value1 + -0.022628075171264 * next_value2 + 0.0031999844358336 * next_value3 + -0.000387875525842 * next_value4 )/h;
case(2):
return ( 0.014381546028833 * prev_value4 + -0.46942683846151 * prev_value3 + 1.7209311289771 * prev_value2 + -2.310743566852 * prev_value1 + 0.78934433020611 * curr_value + 0.31410347917631 * next_value1 + -0.080765809769078 * next_value2 + 0.032393740023427 * next_value3 + -0.010218009329259 * next_value4 )/h;
case(3):
return ( 0.075131403109164 * prev_value4 + -0.38693923899442 * prev_value3 + 0.6095587888754 * prev_value2 + -0.79702587611059 * prev_value1 + 0 * curr_value + 0.79702587611059 * next_value1 + -0.6095587888754 * next_value2 + 0.38693923899442 * next_value3 + -0.075131403109164 * next_value4 )/h;
case(4):
return ( 0.010218009329259 * prev_value4 + -0.032393740023427 * prev_value3 + 0.080765809769078 * prev_value2 + -0.31410347917631 * prev_value1 + -0.78934433020611 * curr_value + 2.310743566852 * next_value1 + -1.7209311289771 * next_value2 + 0.46942683846151 * next_value3 + -0.014381546028833 * next_value4 )/h;
case(5):
return ( 0.000387875525842 * prev_value4 + -0.0031999844358336 * prev_value3 + 0.022628075171264 * prev_value2 + -0.22575990586671 * prev_value1 + -1.9243274545541 * curr_value + 2.7472765818353 * next_value1 + -0.64613210399319 * next_value2 + 0.030021565652814 * next_value3 + -0.00089464933543953 * next_value4 )/h;
}
}
// centre at pos 0: {-3.7853180645098, 5.504949536976, -3.1667044864103, 3.2707014929785, -5.0400751724465, 10.133071777992, -10.15049044192, 3.3679166521506, -0.13405129481064}
// centre at pos 1: {-0.70024662338511, -0.30782735432573, 1.6600314728931, -1.4050588677272, 2.0324950315796, -3.9835379214689, 3.9560488107718, -1.3031520813358, 0.051247532998119}
// centre at pos 2: {0.12436941987383, -0.5125363975973, -0.37008926442197, 1.3824872733712, -1.5646036607053, 2.8292082985006, -2.7390058952044, 0.88379134357785, -0.033621117394515}
// centre at pos 3: {-0.033971645333468, 0.11472881590315, -0.36562067241713, -0.50265593560168, 1.6092337716972, -2.276606371715, 2.0689731675221, -0.63666554571692, 0.022584415661807}
else if (prev_id1 == -1)
return ( -3.7853180645098 * curr_value + 5.504949536976 * next_value1 + -3.1667044864103 * next_value2 + 3.2707014929785 * next_value3 + -5.0400751724465 * next_value4 + 10.133071777992 * next_value5 + -10.15049044192 * next_value6 + 3.3679166521506 * next_value7 + -0.13405129481064 * next_value8 )/h;
else if (prev_id2 == -1)
return ( -0.70024662338511 * prev_value1 + -0.30782735432573 * curr_value + 1.6600314728931 * next_value1 + -1.4050588677272 * next_value2 + 2.0324950315796 * next_value3 + -3.9835379214689 * next_value4 + 3.9560488107718 * next_value5 + -1.3031520813358 * next_value6 + 0.051247532998119 * next_value7 )/h;
else if (prev_id3 == -1)
return ( 0.12436941987383 * prev_value2 + -0.5125363975973 * prev_value1 + -0.37008926442197 * curr_value + 1.3824872733712 * next_value1 + -1.5646036607053 * next_value2 + 2.8292082985006 * next_value3 + -2.7390058952044 * next_value4 + 0.88379134357785 * next_value5 + -0.033621117394515 * next_value6 )/h;
else if (prev_id4 == -1)
return ( -0.033971645333468 * prev_value3 + 0.11472881590315 * prev_value2 + -0.36562067241713 * prev_value1 + -0.50265593560168 * curr_value + 1.6092337716972 * next_value1 + -2.276606371715 * next_value2 + 2.0689731675221 * next_value3 + -0.63666554571692 * next_value4 + 0.022584415661807 * next_value5 )/h;
// centre at pos 6: {0.13405129481064, -3.3679166521506, 10.15049044192, -10.133071777992, 5.0400751724464, -3.2707014929785, 3.1667044864103, -5.504949536976, 3.7853180645098}
// centre at pos 5: {-0.051247532998119, 1.3031520813358, -3.9560488107718, 3.9835379214689, -2.0324950315796, 1.4050588677272, -1.6600314728931, 0.30782735432572, 0.70024662338511}
// centre at pos 4: {0.033621117394515, -0.88379134357785, 2.7390058952044, -2.8292082985006, 1.5646036607053, -1.3824872733712, 0.37008926442197, 0.5125363975973, -0.12436941987383}
// centre at pos 3: {-0.022584415661807, 0.63666554571692, -2.0689731675221, 2.276606371715, -1.6092337716972, 0.50265593560168, 0.36562067241713, -0.11472881590315, 0.033971645333468}
else if (next_id1 == -1)
return ( 0.13405129481064 * prev_value8 + -3.3679166521506 * prev_value7 + 10.15049044192 * prev_value6 + -10.133071777992 * prev_value5 + 5.0400751724464 * prev_value4 + -3.2707014929785 * prev_value3 + 3.1667044864103 * prev_value2 + -5.504949536976 * prev_value1 + 3.7853180645098 * curr_value )/h;
else if (next_id2 == -1)
return ( -0.051247532998119 * prev_value7 + 1.3031520813358 * prev_value6 + -3.9560488107718 * prev_value5 + 3.9835379214689 * prev_value4 + -2.0324950315796 * prev_value3 + 1.4050588677272 * prev_value2 + -1.6600314728931 * prev_value1 + 0.30782735432572 * curr_value + 0.70024662338511 * next_value1 )/h;
else if (next_id3 == -1)
return ( 0.033621117394515 * prev_value6 + -0.88379134357785 * prev_value5 + 2.7390058952044 * prev_value4 + -2.8292082985006 * prev_value3 + 1.5646036607053 * prev_value2 + -1.3824872733712 * prev_value1 + 0.37008926442197 * curr_value + 0.5125363975973 * next_value1 + -0.12436941987383 * next_value2 )/h;
else if (next_id4 == -1)
return ( -0.022584415661807 * prev_value5 + 0.63666554571692 * prev_value4 + -2.0689731675221 * prev_value3 + 2.276606371715 * prev_value2 + -1.6092337716972 * prev_value1 + 0.50265593560168 * curr_value + 0.36562067241713 * next_value1 + -0.11472881590315 * next_value2 + 0.033971645333468 * next_value3 )/h;
}
else if (finite_diff_order == 7){
if ( prev_id3 > -1 && prev_id2 > -1 && prev_id1 > -1 && next_id1 > -1 && next_id2 > -1 && next_id3 > -1 ){
// some of the following 7 rules are slightly suboptimal in the
// sense that an asymmetric choice of points would have a smaller
// error, but the problem is small and this is easier
// generated using https://github.com/lnw/finite-diff-weights
// assuming the Lobatto grid with points {-3,-2.4906716888357,-1.40654638041214,0,1.40654638041214,2.4906716888357,3}
// centre at pos 0: {-0.0019439691154442, 0.049739522152831, -1.1258469276013, 0, 1.1258469276013, -0.049739522152831, 0.0019439691154442}
// centre at pos 1: {-0.017112093697958, 0.55235536265242, -2.588681589899, 1.8401216993421, 0.23119070988819, -0.019343936928851, 0.0014698486430997}
// centre at pos 2: {-0.23589340925937, 1.1716189825849, -1.899781686814, 0.70249557758477, 0.30822337312933, -0.054985788958057, 0.0083229517325248}
// centre at pos 3: {-0.10416666666667, 0.30251482375627, -0.66898974686292, 0, 0.66898974686292, -0.30251482375627, 0.10416666666667}
// centre at pos 4: {-0.0083229517325248, 0.054985788958057, -0.30822337312933, -0.70249557758477, 1.899781686814, -1.1716189825849, 0.23589340925937}
// centre at pos 5: {-0.0014698486430997, 0.019343936928851, -0.23119070988819, -1.8401216993421, 2.588681589899, -0.55235536265242, 0.017112093697958}
// centre at pos 6: {-0.0019439691154442, 0.049739522152831, -1.1258469276013, 0, 1.1258469276013, -0.049739522152831, 0.0019439691154442}
switch(local_pos){
case(0):
return ( -0.00194396911544 * prev_value3 + 0.0497395221528 * prev_value2 + -1.1258469276 * prev_value1 + 0 * curr_value + 1.1258469276 * next_value1 + -0.0497395221528 * next_value2 + 0.00194396911544 * next_value3 )/h;
case(1):
return ( -0.017112093698 * prev_value3 + 0.552355362652 * prev_value2 + -2.5886815899 * prev_value1 + 1.84012169934 * curr_value + 0.231190709888 * next_value1 + -0.0193439369289 * next_value2 + 0.0014698486431 * next_value3 )/h;
case(2):
return ( -0.235893409259 * prev_value3 + 1.17161898258 * prev_value2 + -1.89978168681 * prev_value1 + 0.702495577585 * curr_value + 0.308223373129 * next_value1 + -0.0549857889581 * next_value2 + 0.00832295173252 * next_value3 )/h;
case(3):
return ( -0.104166666667 * prev_value3 + 0.302514823756 * prev_value2 + -0.668989746863 * prev_value1 + 0 * curr_value + 0.668989746863 * next_value1 + -0.302514823756 * next_value2 + 0.104166666667 * next_value3 )/h;
case(4):
return ( -0.00832295173252 * prev_value3 + 0.0549857889581 * prev_value2 + -0.308223373129 * prev_value1 + -0.702495577585 * curr_value + 1.89978168681 * next_value1 + -1.17161898258 * next_value2 + 0.235893409259 * next_value3 )/h;
case(5):
return ( -0.0014698486431 * prev_value3 + 0.0193439369289 * prev_value2 + -0.231190709888 * prev_value1 + -1.84012169934 * curr_value + 2.5886815899 * next_value1 + -0.552355362652 * next_value2 + 0.017112093698 * next_value3 )/h;
}
}
// generated using https://github.com/lnw/finite-diff-weights
// assuming a the lobatto grid with points {-3,-2.4906716888357,-1.40654638041214,0,1.40654638041214,2.4906716888357,3}
// centre at pos 0: {-3.5, 4.7338588676399, -1.8896617418485, 1.0666666666667, -0.68332160435892, 0.43912447856748, -0.16666666666667 }
// centre at pos 1: {-0.81430867141476, 0, 1.1519427380981, -0.53286889603279, 0.32044659909624, -0.2007490598786, 0.075537290131815 }
// centre at pos 2: {0.20841888850511, -0.73860142772332, 0, 0.75556602902867, -0.35548063466879, 0.20546361183919, -0.075366466980858}
else if (prev_id1 == -1)
return ( -3.5 * curr_value + 4.7338588676399 * next_value1 + -1.8896617418485 * next_value2 + 1.0666666666667 * next_value3 + -0.68332160435892 * next_value4 + 0.43912447856748 * next_value5 + -0.16666666666667 * next_value6)/h;
else if (prev_id2 == -1)
return ( -0.81430867141476 * prev_value1 + 0 * curr_value + 1.1519427380981 * next_value1 + -0.53286889603279 * next_value2 + 0.32044659909624 * next_value3 + -0.2007490598786 * next_value4 + 0.075537290131815 * next_value5)/h;
else if (prev_id3 == -1)
return ( 0.20841888850511 * prev_value2 + -0.73860142772332 * prev_value1 + 0 * curr_value + 0.75556602902867 * next_value1 + -0.35548063466879 * next_value2 + 0.20546361183919 * next_value3 + -0.075366466980858 * next_value4)/h;
// generated using https://github.com/lnw/finite-diff-weights
// assuming a the lobatto grid with points {-3,-2.4906716888357,-1.40654638041214,0,1.40654638041214,2.4906716888357,3}
// centre at pos 6: {0.16666666666667, -0.43912447856748, 0.68332160435892, -1.0666666666667, 1.8896617418485, -4.7338588676399, 3.5 }
// centre at pos 5: {-0.075537290131815, 0.2007490598786, -0.32044659909624, 0.53286889603279, -1.1519427380981, 0, 0.81430867141476 }
// centre at pos 4: {0.075366466980858, -0.20546361183919, 0.35548063466879, -0.75556602902867, 0, 0.73860142772332, -0.20841888850511}
else if (next_id1 == -1)
return ( 0.16666666666667 * prev_value6 + -0.43912447856748 * prev_value5 + 0.68332160435892 * prev_value4 + -1.0666666666667 * prev_value3 + 1.8896617418485 * prev_value2 + -4.7338588676399 * prev_value1 + 3.5 * curr_value )/h;
else if (next_id2 == -1)
return ( -0.075537290131815 * prev_value5 + 0.2007490598786 * prev_value4 + -0.32044659909624 * prev_value3 + 0.53286889603279 * prev_value2 + -1.1519427380981 * prev_value1 + 0 * curr_value + 0.81430867141476 * next_value1)/h;
else if (next_id3 == -1)
return ( 0.075366466980858 * prev_value4 + -0.20546361183919 * prev_value3 + 0.35548063466879 * prev_value2 + -0.75556602902867 * prev_value1 + 0 * curr_value + 0.73860142772332 * next_value1 + -0.20841888850511 * next_value2)/h;
}
else if (finite_diff_order == 5){
if ( prev_id2 > -1 && prev_id1 > -1 && next_id1 > -1 && next_id2 > -1 ){
// generated using https://github.com/lnw/finite-diff-weights
// assuming a the lobatto grid with points {-3,-2.4906716888357,-1.40654638041214,0,1.40654638041214,2.4906716888357,3}
// centre at pos 0: {0.035706928371056, -1.0933955997541, 0, 1.0933955997541, -0.035706928371056}
// centre at pos 1: {0.35921124012101, -2.2180302446983, 1.6211545002871, 0.2529151880879, -0.015250683797656}
// centre at pos 2: {0.3998165330722, -1.1763296553934, 0.48352837852975, 0.32874345055196, -0.035758706760461}
// centre at pos 3: {0.093999911479851, -0.52193296182726, 0, 0.52193296182726, -0.093999911479851}
// centre at pos 4: {0.035758706760461, -0.32874345055196, -0.48352837852975, 1.1763296553934, -0.3998165330722}
// centre at pos 5: {0.015250683797656, -0.2529151880879, -1.6211545002871, 2.2180302446983, -0.35921124012101}
switch(local_pos){
case(0):
return ( 0.035706928371056 * prev_value2 + -1.0933955997541 * prev_value1 + 0 * curr_value + 1.0933955997541 * next_value1 + -0.035706928371056 * next_value2 )/h;
case(1):
return ( 0.35921124012101 * prev_value2 + -2.2180302446983 * prev_value1 + 1.6211545002871 * curr_value + 0.2529151880879 * next_value1 + -0.015250683797656 * next_value2 )/h;
case(2):
return ( 0.3998165330722 * prev_value2 + -1.1763296553934 * prev_value1 + 0.48352837852975 * curr_value + 0.32874345055196 * next_value1 + -0.035758706760461 * next_value2 )/h;
case(3):
return ( 0.093999911479851 * prev_value2 + -0.52193296182726 * prev_value1 + 0 * curr_value + 0.52193296182726 * next_value1 + -0.093999911479851 * next_value2 )/h;
case(4):
return ( 0.035758706760461 * prev_value2 + -0.32874345055196 * prev_value1 + -0.48352837852975 * curr_value + 1.1763296553934 * next_value1 + -0.3998165330722 * next_value2 )/h;
case(5):
return ( 0.015250683797656 * prev_value2 + -0.2529151880879 * prev_value1 + -1.6211545002871 * curr_value + 2.2180302446983 * next_value1 + -0.35921124012101 * next_value2 )/h;
}
}
// generated using https://github.com/lnw/finite-diff-weights
// assuming a the lobatto grid with points {-3,-2.4906716888357,-1.40654638041214,0,1.40654638041214,2.4906716888357,3}
// centre at pos 0: {-3.1512062536842, 3.9301627535249, -0.98505481216702, 0.24193000589467, -0.03583169356836}
// centre at pos 1: {-0.98083020142501, 0.38287613952776, 0.72328921328621, -0.14557478380549, 0.020239632416528}
else if (prev_id1 == -1)
return ( -3.1512062536842 * curr_value + 3.9301627535249 * next_value1 + -0.98505481216702 * next_value2 + 0.24193000589467 * next_value3 + -0.03583169356836 * next_value4 )/h;
else if (prev_id2 == -1)
return ( -0.98083020142501 * prev_value1 + 0.38287613952776 * curr_value + 0.72328921328621 * next_value1 + -0.14557478380549 * next_value2 + 0.020239632416528 * next_value3 )/h;
// generated using https://github.com/lnw/finite-diff-weights
// assuming a the lobatto grid with points {-3,-2.4906716888357,-1.40654638041214,0,1.40654638041214,2.4906716888357,3}
// centre at pos 6: {0.03583169356836, -0.24193000589467, 0.98505481216702, -3.9301627535249, 3.1512062536842}
// centre at pos 5: {-0.020239632416528, 0.14557478380549, -0.72328921328621, -0.38287613952776, 0.98083020142501}
else if (next_id1 == -1)
return ( 0.03583169356836 * prev_value4 + -0.24193000589467 * prev_value3 + 0.98505481216702 * prev_value2 + -3.9301627535249 * prev_value1 + 3.1512062536842 * curr_value )/h;
else if (next_id2 == -1)
return ( -0.020239632416528 * prev_value3 + 0.14557478380549 * prev_value2 + -0.72328921328621 * prev_value1 + -0.38287613952776 * curr_value + 0.98083020142501 * next_value1 )/h;
}
}
return 0.0;
}
/*
* Evaluate cube gradients at grid points for simple equidistant grid. The results are stored to 'device_gradients'.
*
*/
template <int finite_diff_order, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z>
__global__ void
CubeEvaluator_evaluate_simple_grid_gradients(
              const double* __restrict__ device_cube,
              const size_t device_pitch,
              const size_t device_shape_y,
              const Grid3D* __restrict__ grid,
              double* __restrict__ device_gradients_x,
              double* __restrict__ device_gradients_y,
              double* __restrict__ device_gradients_z,
              // number of slices handled by this device
              // in previous calls
              int device_slice_offset,
              // number of slices handled by all devices
              // in previous calls
              int slice_offset,
              // number of slices handled by this call
              int slice_count,
              const double multiplier
              ) {
    // The result array will be in fortran order with indices x, y, z.
    // This means that the x index will be the fastest to change.
    int x, y, z;
    getXYZ(&x, &y, &z);
    const int grid_type_x = grid->axis[X_]->grid_type,
              grid_type_y = grid->axis[Y_]->grid_type,
              grid_type_z = grid->axis[Z_]->grid_type;
    const double h_x = grid->axis[X_]->h[0];
    const double h_y = grid->axis[Y_]->h[0];
    const double h_z = grid->axis[Z_]->h[0];
    // offset of this point in the input cube (global slice numbering)
    int id = getCubeOffset3D(x, y, z+slice_offset, device_pitch, device_shape_y);
    // offset of this point in this device's output arrays
    int local_id = getCubeOffset3D(x, y, z+device_slice_offset, device_pitch, device_shape_y);
    bool valid_point =    x >= 0
                       && y >= 0
                       && z+slice_offset >= 0
                       && z < slice_count
                       && x < grid->shape[X_]
                       && y < grid->shape[Y_]
                       && z+slice_offset < grid->shape[Z_];
    if (!valid_point) id = -1;
    // Position within a cell. This is required because there is no
    // translational symmetry by fractions of a cell, and this is relevant for
    // finite diff weights.
    // FIX(review): previously these were left uninitialized for non-lobatto
    // grids but still passed (by value) to evaluate_derivative below, which
    // is undefined behaviour; initialize to 0 so the values are always defined.
    int local_pos_x = 0, local_pos_y = 0, local_pos_z = 0;
    if (grid_type_x == 2 || grid_type_y == 2 || grid_type_z == 2) { // only meaningful for lobatto grids
        local_pos_x = x%(NLIP-1);
        local_pos_y = y%(NLIP-1);
        local_pos_z = (z+slice_offset)%(NLIP-1);
    }
    // For each direction, collect the offsets of up to 8 neighbours on each
    // side: prev_id[k]/next_id[k] is the point at distance k+1 along the axis,
    // or -1 when the point falls outside the grid or is not needed for the
    // requested finite_diff_order (order >= k+2, matching the original
    // per-offset guards). finite_diff_order is a template constant, so the
    // unrolled loops reduce to the same straight-line code as before.
    // evaluate gradient to z direction
    if (evaluate_gradients_z) {
        int prev_id[8], next_id[8];
        #pragma unroll
        for (int k = 0; k < 8; k++) {
            prev_id[k] = -1;
            next_id[k] = -1;
            if (finite_diff_order >= k+2 && z + slice_offset - (k+1) >= 0) {
                prev_id[k] = getCubeOffset3D(x, y, z+slice_offset-(k+1), device_pitch, device_shape_y);
            }
            if (finite_diff_order >= k+2 && z + slice_offset + (k+1) < grid->shape[Z_]) {
                next_id[k] = getCubeOffset3D(x, y, z+slice_offset+(k+1), device_pitch, device_shape_y);
            }
        }
        const double value = evaluate_derivative<finite_diff_order>(
            id, prev_id[0], prev_id[1], prev_id[2], prev_id[3], prev_id[4], prev_id[5], prev_id[6], prev_id[7],
                next_id[0], next_id[1], next_id[2], next_id[3], next_id[4], next_id[5], next_id[6], next_id[7],
            device_cube, grid_type_z, local_pos_z, h_z);
        if (valid_point) device_gradients_z[local_id] = multiplier * value;
    }
    // evaluate gradient to y direction
    if (evaluate_gradients_y) {
        int prev_id[8], next_id[8];
        #pragma unroll
        for (int k = 0; k < 8; k++) {
            prev_id[k] = -1;
            next_id[k] = -1;
            if (finite_diff_order >= k+2 && y - (k+1) >= 0) {
                prev_id[k] = getCubeOffset3D(x, y-(k+1), z+slice_offset, device_pitch, device_shape_y);
            }
            if (finite_diff_order >= k+2 && y + (k+1) < grid->shape[Y_]) {
                next_id[k] = getCubeOffset3D(x, y+(k+1), z+slice_offset, device_pitch, device_shape_y);
            }
        }
        const double value = evaluate_derivative<finite_diff_order>(
            id, prev_id[0], prev_id[1], prev_id[2], prev_id[3], prev_id[4], prev_id[5], prev_id[6], prev_id[7],
                next_id[0], next_id[1], next_id[2], next_id[3], next_id[4], next_id[5], next_id[6], next_id[7],
            device_cube, grid_type_y, local_pos_y, h_y);
        if (valid_point) device_gradients_y[local_id] = multiplier * value;
    }
    // evaluate gradient to x direction
    if (evaluate_gradients_x) {
        int prev_id[8], next_id[8];
        #pragma unroll
        for (int k = 0; k < 8; k++) {
            prev_id[k] = -1;
            next_id[k] = -1;
            if (finite_diff_order >= k+2 && x - (k+1) >= 0) {
                prev_id[k] = getCubeOffset3D(x-(k+1), y, z+slice_offset, device_pitch, device_shape_y);
            }
            if (finite_diff_order >= k+2 && x + (k+1) < grid->shape[X_]) {
                next_id[k] = getCubeOffset3D(x+(k+1), y, z+slice_offset, device_pitch, device_shape_y);
            }
        }
        const double value = evaluate_derivative<finite_diff_order>(
            id, prev_id[0], prev_id[1], prev_id[2], prev_id[3], prev_id[4], prev_id[5], prev_id[6], prev_id[7],
                next_id[0], next_id[1], next_id[2], next_id[3], next_id[4], next_id[5], next_id[6], next_id[7],
            device_cube, grid_type_x, local_pos_x, h_x);
        if (valid_point) device_gradients_x[local_id] = multiplier * value;
    }
    return;
}
/*
* Evaluate values of the radial gradients at bubbles, i.e.,
* the radial gradients of input bubbles are evaluated to the
* result bubbles values.
*
 * @param nlip - number of lagrange interpolation polynomials per
* cell, i.e., the number of grid points per cell
*/
template <int nlip>
__device__ inline void BubblesEvaluator_evaluate_radial_gradients(
        const Grid1D* __restrict__ grid,
        // maximum quantum number 'l'
        const int lmax,
        // k value for the bubble
        // NOTE(review): 'k' is not referenced anywhere in this function body;
        // possibly kept for interface symmetry — TODO confirm.
        const int &k,
        // constant pointer to a double array representing the
        // input bubbles f_Alm coefficients
        const double* __restrict__ f,
        // pointer to a variable double array representing the
        // output bubbles g_Alm coefficients
        double* result,
        // global offset in warps
        const int warp_offset
    ) {
    int global_warp_id, thread_order_number, cells_per_warp;
    bool valid_point = true;
    const int WARP_SIZE = 32;
    // order number of handled point
    const int id = threadIdx.x + blockIdx.x * blockDim.x;
    // if nlip is 7, each warp of 32 handles 5 cells
    // NOTE(review): global_warp_id, cells_per_warp and thread_order_number are
    // only assigned inside this branch; an instantiation with nlip != 7 would
    // read them uninitialized below — presumably this template is only ever
    // instantiated with nlip == 7, TODO confirm.
    if (nlip == 7) {
        // get the global warp order number
        global_warp_id = id / WARP_SIZE + warp_offset;
        cells_per_warp = 5;
        // get the order number of thread within the warp
        thread_order_number = threadIdx.x % WARP_SIZE;
        // the last lane of the warp never stores a result
        if (thread_order_number == 31) valid_point = false;
    }
    // number of cells
    const int ncell = grid->ncell;
    // get the order number of cell
    int icell = global_warp_id * cells_per_warp + thread_order_number / (nlip-1);
    // order number of point in cell
    int in_cell_point = thread_order_number % (nlip-1);
    // let's set it up so that the nlip:th point in cell belongs to the previous cell
    if (in_cell_point == 0 && icell > 0) {
        icell -= 1;
        in_cell_point = nlip;
    }
    // lane 0 can never take the "last point of previous cell" role
    if (thread_order_number == 0 && in_cell_point != 0) valid_point = false;
    // if the cell number is not within the evaluated range, we do not evaluate the
    // values
    bool participant = true;
    if (icell >= ncell ) {
        participant = false;
    }
    // coordinate of the point within its cell
    // NOTE(review): the constant 3 centres the coordinate for nlip == 7
    // (in-cell points mapped to -3..3); verify if other nlip values are used.
    double in_cell_coordinate = (double)(in_cell_point-3);
    // read the LIPs in the shared memory
    // NOTE(review): 'lip' is loaded but never read again in this function;
    // presumably read_lip also performs any needed block synchronization
    // after filling shared memory — TODO confirm.
    __shared__ double lip[nlip * nlip];
    read_lip<nlip, nlip>(grid->lip, threadIdx.x, lip);
    __shared__ double derivative_lip[(nlip-1) * nlip];
    read_lip<nlip-1, nlip>(grid->derivative_lip, threadIdx.x, derivative_lip);
    if (participant) {
        // evaluate the derivative polynomials at the in-cell coordinate
        double derivative_polynomials[nlip];
        evaluate_polynomials<nlip-1, nlip>(derivative_lip, in_cell_coordinate, derivative_polynomials);
        double one_per_grid_step = 1.0 / grid->h[icell];
        // finally, multiply the derivative polynomials with 1 / grid_step
        // (converts d/dcoordinate to d/dr for this cell)
        for (int j = 0; j < nlip; j++) {
            derivative_polynomials[j] *= one_per_grid_step;
        }
        // get the initial address:
        int address = icell * (nlip-1) + in_cell_point;
        // loop over all (l, m) combinations up to lmax
        for (int n = 0; n < (lmax+1) * (lmax+1); n++) {
            // get the input function values
            double value = f[address];
            // and evaluate the radial coefficients
            double temp = evaluate_coefficients_shuffle<nlip>(derivative_polynomials, value, thread_order_number, in_cell_point);
            // if the point is valid, store the result
            if (valid_point) result[address] = temp;
            // add the address by one n index:
            address += ncell * nlip;
        }
    }
}
/**************************************************************
* Error checking *
**************************************************************/
/*
 * Synchronize the device and abort the process with a diagnostic message
 * if any previous runtime call or kernel launch has failed.
 */
__host__ inline void check_eval_errors(const char *filename, const int line_number) {
    //#ifdef DEBUG_CUDA
    hipDeviceSynchronize();
    //#endif
    const hipError_t status = hipGetLastError();
    if (status == hipSuccess) return;
    printf("CUDA error at %s:%i: %s\n", filename, line_number, hipGetErrorString(status));
    exit(-1);
}
/**************************************************************
* BubblesEvaluator-implementation *
**************************************************************/
/*
 * Construct an evaluator; stores the given stream container for later use.
 */
BubblesEvaluator::BubblesEvaluator(StreamContainer *streamContainer) {
    this->streamContainer = streamContainer;
}
/*
 * Set the bubbles object that subsequent evaluateGrid() calls operate on.
 */
void BubblesEvaluator::setBubbles(Bubbles *bubbles) {
    this->bubbles = bubbles;
}
/*
* Evaluate the bubbles at grid points.
*
* @param bubbles - The bubbles that are evaluated in to the grid
* @param grid - The grid associated with all the output cubes
*/
void BubblesEvaluator::evaluateGrid(Grid3D *grid, CudaCube *result_cube, CudaCube *gradient_cube_x, CudaCube *gradient_cube_y, CudaCube *gradient_cube_z, int gradient_direction, int fin_diff_ord) {
    // Translate the requested direction into the per-axis gradient flags
    // expected by Bubbles::inject (X_ = 0, Y_ = 1, Z_ = 2; 3 = all three,
    // which also evaluates the values themselves).
    // NOTE: fin_diff_ord is currently not used by this method.
    const bool all_directions = (gradient_direction == 3);
    const bool do_x = all_directions || gradient_direction == X_;
    const bool do_y = all_directions || gradient_direction == Y_;
    const bool do_z = all_directions || gradient_direction == Z_;
    if (do_x || do_y || do_z) {
        this->bubbles->inject(grid, result_cube, 0, gradient_cube_x,
                              gradient_cube_y, gradient_cube_z,
                              all_directions, do_x, do_y, do_z);
    }
    else {
        // no recognized gradient direction: plain value evaluation
        this->bubbles->inject(grid, result_cube);
    }
}
/*
* Deallocate the device and host memory allocated for this object.
*/
void BubblesEvaluator::destroy() {
    // drop the references only; the pointed-to objects are not freed here
    this->streamContainer = NULL;
    this->bubbles = NULL;
}
/**************************************************************
* CubeEvaluator-implementation *
**************************************************************/
/*
 * Construct an evaluator; stores the given stream container for later use.
 */
CubeEvaluator::CubeEvaluator(StreamContainer *streamContainer) {
    this->streamContainer = streamContainer;
}
/*
* Deallocate the device and host memory allocated for this object.
*/
void CubeEvaluator::destroy() {
    // drop the references only; the pointed-to objects are not freed here
    this->streamContainer = NULL;
    this->input_cube = NULL;
    this->grid = NULL;
}
/*
* Set the input cube from which the evaluation is performed.
*
* @param input_cube - CudaCube object from which the evaluation is performed. The shape
* of the data should be according to the given grid
*/
void CubeEvaluator::setInputCube(CudaCube *input_cube) {
    // store the cube the evaluation kernels will read from
    this->input_cube = input_cube;
}
/*
* Set the input grid from which the evaluation is performed.
*
* @param input_grid - Grid3D object defining the shape of the cube for which the evaluation is performed.
*/
void CubeEvaluator::setInputGrid(Grid3D *input_grid) {
    // store the grid describing the shape of the input cube
    this->grid = input_grid;
}
/*
* Evaluate the cube at preset points. The results are stored in the device memory.
* @param result_points - Points-object in which the results are stored, if gradient_direction=0-2, the results are stored here
* @param gradient_points_x - Points-object in which the gradiends in x-direction are stored, if gradient_direction=3
* @param gradient_points_y - Points-object in which the gradiends in y-direction are stored, if gradient_direction=3
* @param gradient_points_z - Points-object in which the gradiends in z-direction are stored, if gradient_direction=3
* @param gradient_direction - possible values X_ = 0, Y_ = 1, Z_ = 2, (X_, Y_, Z_) = 3 && this->evaluateGradients
* anything else: no gradients
*/
void CubeEvaluator::evaluatePoints(Points *result_points,
                                   Points *gradient_points_x,
                                   Points *gradient_points_y,
                                   Points *gradient_points_z,
                                   int gradient_direction) {
    int warp_size = 32;
    // round the point count up to an integer number of warps so that every
    // kernel call handles whole warps only
    int total_warp_count = result_points->point_coordinates->number_of_points / warp_size
                           + ((result_points->point_coordinates->number_of_points % warp_size) > 0);
    int point_offset = 0;
    int *cube_memory_shape = this->input_cube->getDeviceMemoryShape();
    check_eval_errors(__FILE__, __LINE__);
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device++) {
        this->streamContainer->setDevice(device);
        // number of warps / points this device is responsible for
        int device_warp_count = total_warp_count / this->streamContainer->getNumberOfDevices()
                                + ((total_warp_count % this->streamContainer->getNumberOfDevices()) > device);
        int device_point_count = device_warp_count * warp_size;
        int device_point_offset = 0;
        // get the order number of 'device' in the input cube's streamcontainer
        int cube_device = this->input_cube->getStreamContainer()->getDeviceOrderNumber(
                              this->streamContainer->getDeviceNumber(device));
        check_eval_errors(__FILE__, __LINE__);
        // get the pointers to the device points & results
        double *device_points_ptr = result_points->point_coordinates->device_coordinates[device];
        double *device_results_ptr = result_points->device_values[device];
        // BUGFIX: initialize the gradient pointers to NULL so that the
        // no-gradient branch below never passes indeterminate pointers to the
        // kernel (the <true,false,false,false> instantiation ignores them, but
        // reading uninitialized locals is undefined behavior)
        double *device_gradients_x_ptr = NULL;
        double *device_gradients_y_ptr = NULL;
        double *device_gradients_z_ptr = NULL;
        if (gradient_direction == 3) {
            // all three gradient components, each stored in its own Points object
            device_gradients_x_ptr = gradient_points_x->device_values[device];
            device_gradients_y_ptr = gradient_points_y->device_values[device];
            device_gradients_z_ptr = gradient_points_z->device_values[device];
        }
        else if (gradient_direction < 3 && gradient_direction >= 0) {
            // a single gradient component, stored directly in the result array
            device_gradients_x_ptr = result_points->device_values[device];
            device_gradients_y_ptr = result_points->device_values[device];
            device_gradients_z_ptr = result_points->device_values[device];
        }
        for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream++) {
            // get the number of points that are in the responsibility of this stream
            int stream_warp_count = device_warp_count / this->streamContainer->getStreamsPerDevice()
                                    + ((device_warp_count % this->streamContainer->getStreamsPerDevice()) > stream);
            int stream_point_count = stream_warp_count * warp_size;
            // make sure that the last stream does not go over board
            if (stream_point_count + point_offset > result_points->point_coordinates->number_of_points) {
                stream_point_count = result_points->point_coordinates->number_of_points - point_offset;
            }
            if (stream_point_count > 0) {
                check_eval_errors(__FILE__, __LINE__);
                int grid_size = (stream_point_count + BLOCK_SIZE - 1) / BLOCK_SIZE;
                // kernel template parameters are
                // <evaluate_value, gradient_x, gradient_y, gradient_z>
                if (gradient_direction == X_) {
                    hipLaunchKernelGGL(( CubeEvaluator_evaluate_points <false, true, false, false>),
                        dim3(grid_size), dim3(BLOCK_SIZE), 0,
                        *this->streamContainer->getStream(device, stream),
                        this->input_cube->getDevicePointer(cube_device),
                        this->input_cube->getDevicePitch(cube_device),
                        cube_memory_shape[Y_],
                        // TODO: replace the cube_device with grid_device in the line
                        // below, probably does not matter but we have to be careful.
                        this->grid->device_copies[cube_device],
                        device_results_ptr,
                        device_gradients_x_ptr,
                        device_gradients_y_ptr,
                        device_gradients_z_ptr,
                        device_points_ptr,
                        device_point_count,
                        stream_point_count,
                        device_point_offset,
                        1.0);
                }
                else if (gradient_direction == Y_) {
                    hipLaunchKernelGGL(( CubeEvaluator_evaluate_points <false, false, true, false>),
                        dim3(grid_size), dim3(BLOCK_SIZE), 0,
                        *this->streamContainer->getStream(device, stream),
                        this->input_cube->getDevicePointer(cube_device),
                        this->input_cube->getDevicePitch(cube_device),
                        cube_memory_shape[Y_],
                        // TODO: replace the cube_device with grid_device in the line
                        // below, probably does not matter but we have to be careful.
                        this->grid->device_copies[cube_device],
                        device_results_ptr,
                        device_gradients_x_ptr,
                        device_gradients_y_ptr,
                        device_gradients_z_ptr,
                        device_points_ptr,
                        device_point_count,
                        stream_point_count,
                        device_point_offset,
                        1.0);
                }
                else if (gradient_direction == Z_) {
                    hipLaunchKernelGGL(( CubeEvaluator_evaluate_points <false, false, false, true>),
                        dim3(grid_size), dim3(BLOCK_SIZE), 0,
                        *this->streamContainer->getStream(device, stream),
                        this->input_cube->getDevicePointer(cube_device),
                        this->input_cube->getDevicePitch(cube_device),
                        cube_memory_shape[Y_],
                        // TODO: replace the cube_device with grid_device in the line
                        // below, probably does not matter but we have to be careful.
                        this->grid->device_copies[cube_device],
                        device_results_ptr,
                        device_gradients_x_ptr,
                        device_gradients_y_ptr,
                        device_gradients_z_ptr,
                        device_points_ptr,
                        device_point_count,
                        stream_point_count,
                        device_point_offset,
                        1.0);
                }
                else if (gradient_direction == 3) {
                    hipLaunchKernelGGL(( CubeEvaluator_evaluate_points <true, true, true, true>),
                        dim3(grid_size), dim3(BLOCK_SIZE), 0,
                        *this->streamContainer->getStream(device, stream),
                        this->input_cube->getDevicePointer(cube_device),
                        this->input_cube->getDevicePitch(cube_device),
                        cube_memory_shape[Y_],
                        // TODO: replace the cube_device with grid_device in the line
                        // below, probably does not matter but we have to be careful.
                        this->grid->device_copies[cube_device],
                        device_results_ptr,
                        device_gradients_x_ptr,
                        device_gradients_y_ptr,
                        device_gradients_z_ptr,
                        device_points_ptr,
                        device_point_count,
                        stream_point_count,
                        device_point_offset,
                        1.0);
                }
                else {
                    // values only; the gradient pointers are NULL and unused
                    hipLaunchKernelGGL(( CubeEvaluator_evaluate_points <true, false, false, false>),
                        dim3(grid_size), dim3(BLOCK_SIZE), 0,
                        *this->streamContainer->getStream(device, stream),
                        this->input_cube->getDevicePointer(cube_device),
                        this->input_cube->getDevicePitch(cube_device),
                        cube_memory_shape[Y_],
                        // TODO: replace the cube_device with grid_device in the line
                        // below, probably does not matter but we have to be careful.
                        this->grid->device_copies[cube_device],
                        device_results_ptr,
                        device_gradients_x_ptr,
                        device_gradients_y_ptr,
                        device_gradients_z_ptr,
                        device_points_ptr,
                        device_point_count,
                        stream_point_count,
                        device_point_offset,
                        1.0);
                }
            }
            check_eval_errors(__FILE__, __LINE__);
            // advance the offsets by the points handled by this stream
            point_offset += stream_point_count;
            device_point_offset += stream_point_count;
        }
        check_eval_errors(__FILE__, __LINE__);
    }
}
/*
* Evaluate the cube at the points of grid. The results are stored in the device memory
 * in the result_cube and gradient_cubes. The latter only occurs if gradient_direction == 3.
*
* @param grid - The grid associated with all the input and output cubes
* @param results_cube - CudaCube where the results are stored, if gradient direction is 0-2, the gradients will be stored here
* @param gradients_cube_x - CudaCube where the x-gradients are stored if the gradient_direction=3
* @param gradients_cube_y - CudaCube where the y-gradients are stored if the gradient_direction=3
* @param gradients_cube_z - CudaCube where the z-gradients are stored if the gradient_direction=3
 * @param gradient_direction - X_ = 0, Y_ = 1, Z_ = 2 evaluate a single gradient
 *                             component; 3 evaluates all three components
 *                             (and requires this->evaluateGradients);
 *                             any other value: no gradients are evaluated
*
*/
void CubeEvaluator::evaluateGrid(Grid3D *grid,
                                 CudaCube *result_cube,
                                 CudaCube *gradient_cube_x,
                                 CudaCube *gradient_cube_y,
                                 CudaCube *gradient_cube_z,
                                 const int gradient_direction,
                                 const int finite_diff_order) {
    check_eval_errors(__FILE__, __LINE__);
    // total number of xy-slices that have to be evaluated
    int total_slice_count = result_cube->getShape(Z_);
    int device_slice_count;
    // get the input cube pointers
    // TODO: we are assuming here that the input and output cubes have the same
    // memory shapes; this is probably not the case in all occasions in the future
    double **device_input_cubes = this->input_cube->getDeviceCubes();
    double **device_gradients_x;
    double **device_gradients_y;
    double **device_gradients_z;
    // select the gradient output arrays: for a single gradient direction (or
    // no gradients) the result cube itself receives the values, for
    // gradient_direction == 3 each component has a dedicated cube
    if (gradient_direction < 3) {
        device_gradients_x = result_cube->getDeviceCubes();
        device_gradients_y = result_cube->getDeviceCubes();
        device_gradients_z = result_cube->getDeviceCubes();
    }
    else {
        device_gradients_x = gradient_cube_x->getDeviceCubes();
        device_gradients_y = gradient_cube_y->getDeviceCubes();
        device_gradients_z = gradient_cube_z->getDeviceCubes();
    }
    size_t *device_pitches = result_cube->getDevicePitches();
    int *device_memory_shape = result_cube->getDeviceMemoryShape();
    int slice_offset = 0;
    // execute the kernels, splitting the slices over devices and streams
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device++) {
        // set the used device (gpu)
        this->streamContainer->setDevice(device);
        double *dev_input_cube = device_input_cubes[device];
        double *dev_gradient_x = device_gradients_x[device];
        double *dev_gradient_y = device_gradients_y[device];
        double *dev_gradient_z = device_gradients_z[device];
        int device_slice_offset = 0;
        // calculate the number of slices this device handles
        device_slice_count = total_slice_count / this->streamContainer->getNumberOfDevices()
                             + ((total_slice_count % this->streamContainer->getNumberOfDevices()) > device);
        for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream++) {
            // determine the count of slices handled by this stream
            int slice_count = device_slice_count / this->streamContainer->getStreamsPerDevice()
                              + ((device_slice_count % this->streamContainer->getStreamsPerDevice()) > stream);
            check_eval_errors(__FILE__, __LINE__);
            if (slice_count > 0) {
                dim3 block, launch_grid;
                result_cube->getLaunchConfiguration(&launch_grid, &block, slice_count, BLOCK_SIZE);
                // dispatch on the compile-time kernel variant:
                // <finite_diff_order, gradient_x, gradient_y, gradient_z>.
                // NOTE: if finite_diff_order is neither 7 nor 9, or
                // gradient_direction is not in {X_, Y_, Z_, 3}, no kernel is
                // launched and the call silently does nothing.
                if (gradient_direction == X_ && finite_diff_order == 7) {
                    hipLaunchKernelGGL(( CubeEvaluator_evaluate_simple_grid_gradients <7, true, false, false>),
                        dim3(launch_grid), dim3(block), 0,
                        *this->streamContainer->getStream(device, stream),
                        dev_input_cube,
                        device_pitches[device],
                        device_memory_shape[Y_],
                        grid->device_copies[device],
                        dev_gradient_x,
                        dev_gradient_y,
                        dev_gradient_z,
                        device_slice_offset,  // slices handled by this device in previous calls
                        slice_offset,         // slices handled by all devices in previous calls
                        slice_count,          // slices handled by this call
                        1.0);
                }
                // BUGFIX(style): this was a plain 'if', breaking the otherwise
                // mutually-exclusive else-if dispatch chain
                else if (gradient_direction == X_ && finite_diff_order == 9) {
                    hipLaunchKernelGGL(( CubeEvaluator_evaluate_simple_grid_gradients <9, true, false, false>),
                        dim3(launch_grid), dim3(block), 0,
                        *this->streamContainer->getStream(device, stream),
                        dev_input_cube,
                        device_pitches[device],
                        device_memory_shape[Y_],
                        grid->device_copies[device],
                        dev_gradient_x,
                        dev_gradient_y,
                        dev_gradient_z,
                        device_slice_offset,
                        slice_offset,
                        slice_count,
                        1.0);
                }
                else if (gradient_direction == Y_ && finite_diff_order == 7) {
                    hipLaunchKernelGGL(( CubeEvaluator_evaluate_simple_grid_gradients <7, false, true, false>),
                        dim3(launch_grid), dim3(block), 0,
                        *this->streamContainer->getStream(device, stream),
                        dev_input_cube,
                        device_pitches[device],
                        device_memory_shape[Y_],
                        grid->device_copies[device],
                        dev_gradient_x,
                        dev_gradient_y,
                        dev_gradient_z,
                        device_slice_offset,
                        slice_offset,
                        slice_count,
                        1.0);
                }
                else if (gradient_direction == Y_ && finite_diff_order == 9) {
                    hipLaunchKernelGGL(( CubeEvaluator_evaluate_simple_grid_gradients <9, false, true, false>),
                        dim3(launch_grid), dim3(block), 0,
                        *this->streamContainer->getStream(device, stream),
                        dev_input_cube,
                        device_pitches[device],
                        device_memory_shape[Y_],
                        grid->device_copies[device],
                        dev_gradient_x,
                        dev_gradient_y,
                        dev_gradient_z,
                        device_slice_offset,
                        slice_offset,
                        slice_count,
                        1.0);
                }
                else if (gradient_direction == Z_ && finite_diff_order == 7) {
                    hipLaunchKernelGGL(( CubeEvaluator_evaluate_simple_grid_gradients <7, false, false, true>),
                        dim3(launch_grid), dim3(block), 0,
                        *this->streamContainer->getStream(device, stream),
                        dev_input_cube,
                        device_pitches[device],
                        device_memory_shape[Y_],
                        grid->device_copies[device],
                        dev_gradient_x,
                        dev_gradient_y,
                        dev_gradient_z,
                        device_slice_offset,
                        slice_offset,
                        slice_count,
                        1.0);
                }
                else if (gradient_direction == Z_ && finite_diff_order == 9) {
                    hipLaunchKernelGGL(( CubeEvaluator_evaluate_simple_grid_gradients <9, false, false, true>),
                        dim3(launch_grid), dim3(block), 0,
                        *this->streamContainer->getStream(device, stream),
                        dev_input_cube,
                        device_pitches[device],
                        device_memory_shape[Y_],
                        grid->device_copies[device],
                        dev_gradient_x,
                        dev_gradient_y,
                        dev_gradient_z,
                        device_slice_offset,
                        slice_offset,
                        slice_count,
                        1.0);
                }
                else if (gradient_direction == 3 && finite_diff_order == 7) {
                    hipLaunchKernelGGL(( CubeEvaluator_evaluate_simple_grid_gradients <7, true, true, true>),
                        dim3(launch_grid), dim3(block), 0,
                        *this->streamContainer->getStream(device, stream),
                        dev_input_cube,
                        device_pitches[device],
                        device_memory_shape[Y_],
                        grid->device_copies[device],
                        dev_gradient_x,
                        dev_gradient_y,
                        dev_gradient_z,
                        device_slice_offset,
                        slice_offset,
                        slice_count,
                        1.0);
                }
                else if (gradient_direction == 3 && finite_diff_order == 9) {
                    hipLaunchKernelGGL(( CubeEvaluator_evaluate_simple_grid_gradients <9, true, true, true>),
                        dim3(launch_grid), dim3(block), 0,
                        *this->streamContainer->getStream(device, stream),
                        dev_input_cube,
                        device_pitches[device],
                        device_memory_shape[Y_],
                        grid->device_copies[device],
                        dev_gradient_x,
                        dev_gradient_y,
                        dev_gradient_z,
                        device_slice_offset,
                        slice_offset,
                        slice_count,
                        1.0);
                }
                check_eval_errors(__FILE__, __LINE__);
                // advance the offsets by the number of slices in this call
                device_slice_offset += slice_count;
                slice_offset += slice_count;
            }
        }
    }
}
/********************************************
* Fortran interfaces for Evaluator *
********************************************/
// Fortran interface: grid evaluation with explicit gradient cubes.
extern "C" void evaluator_evaluate_grid_cuda(Evaluator *eval, Grid3D *grid, CudaCube *result_cube, CudaCube *gradient_cube_x, CudaCube *gradient_cube_y, CudaCube *gradient_cube_z, int gradient_direction, int fin_diff_order) {
    eval->evaluateGrid(grid, result_cube, gradient_cube_x, gradient_cube_y, gradient_cube_z, gradient_direction, fin_diff_order);
}

// Fortran interface: grid evaluation without gradient evaluation.
extern "C" void evaluator_evaluate_grid_without_gradients_cuda(Evaluator *eval, Grid3D *grid, CudaCube *result_cube, int fin_diff_order) {
    eval->evaluateGrid(grid, result_cube, NULL, NULL, NULL, -1, fin_diff_order);
}

// Fortran interface: evaluate only the x-gradients on the grid.
extern "C" void evaluator_evaluate_grid_x_gradients_cuda(Evaluator *eval, Grid3D *grid, CudaCube *result_cube, int fin_diff_order) {
    eval->evaluateGrid(grid, result_cube, NULL, NULL, NULL, X_, fin_diff_order);
}

// Fortran interface: evaluate only the y-gradients on the grid.
extern "C" void evaluator_evaluate_grid_y_gradients_cuda(Evaluator *eval, Grid3D *grid, CudaCube *result_cube, int fin_diff_order) {
    eval->evaluateGrid(grid, result_cube, NULL, NULL, NULL, Y_, fin_diff_order);
}

// Fortran interface: evaluate only the z-gradients on the grid.
extern "C" void evaluator_evaluate_grid_z_gradients_cuda(Evaluator *eval, Grid3D *grid, CudaCube *result_cube, int fin_diff_order) {
    eval->evaluateGrid(grid, result_cube, NULL, NULL, NULL, Z_, fin_diff_order);
}

// Fortran interface: point evaluation with explicit gradient Points objects.
extern "C" void evaluator_evaluate_points_cuda(Evaluator *eval, Points *result_points, Points *gradient_points_x, Points *gradient_points_y, Points *gradient_points_z, int gradient_direction) {
    eval->evaluatePoints(result_points, gradient_points_x, gradient_points_y, gradient_points_z, gradient_direction);
}

// Fortran interface: point evaluation without gradient evaluation.
extern "C" void evaluator_evaluate_points_without_gradients_cuda(Evaluator *eval, Points *result_points) {
    eval->evaluatePoints(result_points, NULL, NULL, NULL, -1);
}

// Fortran interface: evaluate only the x-gradients at the points.
extern "C" void evaluator_evaluate_points_x_gradients_cuda(Evaluator *eval, Points *result_points) {
    eval->evaluatePoints(result_points, NULL, NULL, NULL, X_);
}

// Fortran interface: evaluate only the y-gradients at the points.
extern "C" void evaluator_evaluate_points_y_gradients_cuda(Evaluator *eval, Points *result_points) {
    eval->evaluatePoints(result_points, NULL, NULL, NULL, Y_);
}

// Fortran interface: evaluate only the z-gradients at the points.
extern "C" void evaluator_evaluate_points_z_gradients_cuda(Evaluator *eval, Points *result_points) {
    eval->evaluatePoints(result_points, NULL, NULL, NULL, Z_);
}

// Fortran interface: release the evaluator's resources.
extern "C" void evaluator_destroy_cuda(Evaluator *eval) {
    eval->destroy();
}
/********************************************
* Fortran interfaces for BubblesEvaluator *
********************************************/
// Fortran interface: construct a BubblesEvaluator bound to 'streamContainer'.
extern "C" BubblesEvaluator *bubblesevaluator_init_cuda(StreamContainer *streamContainer) {
    return new BubblesEvaluator(streamContainer);
}

// Fortran interface: set the bubbles that the evaluator operates on.
extern "C" void bubblesevaluator_set_bubbles_cuda(BubblesEvaluator *evaluator, Bubbles *bubbles) {
    evaluator->setBubbles(bubbles);
}
/********************************************
* Fortran interfaces for CubeEvaluator *
********************************************/
// Fortran interface: construct a CubeEvaluator bound to 'streamContainer'.
extern "C" CubeEvaluator *cubeevaluator_init_cuda(StreamContainer *streamContainer) {
    return new CubeEvaluator(streamContainer);
}

// Fortran interface: set the cube that is evaluated.
extern "C" void cubeevaluator_set_input_cube_cuda(CubeEvaluator *evaluator, CudaCube *cube) {
    evaluator->setInputCube(cube);
}

// Fortran interface: set the input grid.
extern "C" void cubeevaluator_set_input_grid_cuda(CubeEvaluator *evaluator, Grid3D *grid) {
    evaluator->setInputGrid(grid);
}
| 6677943ecc3acdb53a0f4d2ecd7f5e34943971b8.cu | /*----------------------------------------------------------------------------------*
* Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, *
* Sergio Losilla, Elias Toivanen, Jonas Juselius *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all*
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
*----------------------------------------------------------------------------------*/
#include "streamcontainer.h"
#include "memory_leak_operators.h"
#include "evaluators.h"
#include "cube.h"
#include "bubbles_cuda.h"
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#define X_ 0
#define Y_ 1
#define Z_ 2
#define NLIP 7
#define BLOCK_SIZE 256
#define FULL_MASK 0xffffffff
extern __shared__ double shared_memory[];
/*
* NOTE: this method assumes that the grid is equidistant (in the sense that all cells have equal length)
*/
template <int nlip>
__device__ inline
void calculate_icell_equidistant(const Grid1D *grid,
                                 const double coordinate,
                                 int &icell,
                                 double &in_cell_coordinate,
                                 double &one_per_grid_step
                                 ) {
    // equidistant grid: every cell spans (nlip-1) grid steps of equal length
    const double step = grid->h[0];
    const double start = grid->d[0];
    const double cell_length = (nlip - 1) * step;
    one_per_grid_step = 1.0 / step;
    // integer division truncates toward zero, so coordinates left of the grid
    // start are explicitly flagged with icell = -1
    icell = (int)((coordinate - start) / (cell_length));
    if (coordinate - start < 0.0) icell = -1;
    // coordinate relative to the cell center, measured in grid steps
    const double cell_center = start + ((double)icell + 0.5) * cell_length;
    in_cell_coordinate = (coordinate - cell_center) * one_per_grid_step;
}
/*
* Read Lagrange interpolation polynomials into the shared memory.
*/
template <int nlip, int result_length>
__device__ inline
void read_lip(double *device_lip, int thread_id, double *shared_memory_lip) {
    // NOTE: contains block-wide barriers, so every thread of the block must
    // reach this call
    __syncthreads();
    const int element_count = nlip * result_length;
    if (thread_id < element_count) {
        shared_memory_lip[thread_id] = device_lip[thread_id];
    }
    __syncthreads();
}
/*
*
* NOTE: polynomials must be an array of size 8
*/
template <int nlip, int result_length>
__device__ inline
void evaluate_polynomials(double *lip, double x, double *polynomials) {
    // Horner evaluation of 'result_length' polynomials at 'x'; 'lip' stores
    // 'nlip' coefficients per polynomial, leading coefficient first
    for (int i = 0; i < result_length; i++) {
        const int base = i * nlip;
        double acc = lip[base];
        for (int k = 1; k < nlip; k++) {
            acc = lip[base + k] + x * acc;
        }
        polynomials[i] = acc;
    }
    // callers provide an array of (at least) 8 entries: zero the unused tail
    for (int i = result_length; i < 8; i++) {
        polynomials[i] = 0.0;
    }
}
/*
* Evaluates sum of 'coefficients[i]' times 'polynomials[i]' for 'coefficients'
* that reside in device memory.
*
* NOTE: polynomials must be an array of size 8
*/
template <int nlip>
__device__ inline
double evaluate_coefficients(double *polynomials, const double* __restrict__ c, int address, int thread_id) {
    const int EVALUATE_BLOCK_SIZE = 8;
    // get the thread rank within its little block of size 'EVALUATE_BLOCK_SIZE'
    int thread_rank = thread_id%EVALUATE_BLOCK_SIZE;
    // let us use the results array as temp array
    double temp_results[EVALUATE_BLOCK_SIZE];
    // TODO: make this more generic
    int addresses[EVALUATE_BLOCK_SIZE];
    // Broadcast every lane's base 'address' to the whole 8-thread group:
    // addresses[j] is lane j's base address plus this lane's rank, i.e. the
    // element this lane fetches on lane j's behalf. The loads below thus stay
    // coalesced across the group.
    // NOTE(review): for __CUDA_ARCH__ < 350 neither branch is compiled and
    // 'addresses' stays uninitialized -- this function assumes SM 3.5+.
#if (__CUDA_ARCH__ >= 350) && (__CUDA_ARCH__ < 700)
    addresses[0] =__shfl(address, 0, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[1] =__shfl(address, 1, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[2] =__shfl(address, 2, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[3] =__shfl(address, 3, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[4] =__shfl(address, 4, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[5] =__shfl(address, 5, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[6] =__shfl(address, 6, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[7] =__shfl(address, 7, EVALUATE_BLOCK_SIZE) + thread_rank;
#elif (__CUDA_ARCH__ >= 700)
    addresses[0] =__shfl_sync(FULL_MASK, address, 0, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[1] =__shfl_sync(FULL_MASK, address, 1, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[2] =__shfl_sync(FULL_MASK, address, 2, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[3] =__shfl_sync(FULL_MASK, address, 3, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[4] =__shfl_sync(FULL_MASK, address, 4, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[5] =__shfl_sync(FULL_MASK, address, 5, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[6] =__shfl_sync(FULL_MASK, address, 6, EVALUATE_BLOCK_SIZE) + thread_rank;
    addresses[7] =__shfl_sync(FULL_MASK, address, 7, EVALUATE_BLOCK_SIZE) + thread_rank;
#endif
    // Load one coefficient per group lane through the read-only cache,
    // rotating the destination register so that lane j's value ends up in
    // temp_results[(thread_rank - j) mod 8]; lanes with rank >= nlip only
    // assist with loads implicitly and contribute zeros.
    int reg = thread_rank;
    if (thread_rank < nlip) {
        temp_results[reg] = __ldg(&c[addresses[0]]);
        reg = ((reg == 0) ? EVALUATE_BLOCK_SIZE-1 : reg - 1);
        temp_results[reg] = __ldg(&c[addresses[1]]);
        reg = ((reg == 0) ? EVALUATE_BLOCK_SIZE-1 : reg - 1);
        temp_results[reg] = __ldg(&c[addresses[2]]);
        reg = ((reg == 0) ? EVALUATE_BLOCK_SIZE-1 : reg - 1);
        temp_results[reg] = __ldg(&c[addresses[3]]);
        reg = ((reg == 0) ? EVALUATE_BLOCK_SIZE-1 : reg - 1);
        temp_results[reg] = __ldg(&c[addresses[4]]);
        reg = ((reg == 0) ? EVALUATE_BLOCK_SIZE-1 : reg - 1);
        temp_results[reg] = __ldg(&c[addresses[5]]);
        reg = ((reg == 0) ? EVALUATE_BLOCK_SIZE-1 : reg - 1);
        temp_results[reg] = __ldg(&c[addresses[6]]);
        reg = ((reg == 0) ? EVALUATE_BLOCK_SIZE-1 : reg - 1);
        temp_results[reg] = __ldg(&c[addresses[7]]);
    }
    else {
        temp_results[0] = 0.0;
        temp_results[1] = 0.0;
        temp_results[2] = 0.0;
        temp_results[3] = 0.0;
        temp_results[4] = 0.0;
        temp_results[5] = 0.0;
        temp_results[6] = 0.0;
        temp_results[7] = 0.0;
    }
    // Shuffle the distributed coefficients back: lane 'thread_rank' collects
    // the values the other group lanes loaded for it and accumulates the dot
    // product with its own 'polynomials'.
    reg = thread_rank;
    double result = temp_results[0] * polynomials[reg];
#if (__CUDA_ARCH__ >= 350) && (__CUDA_ARCH__ < 700)
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl(temp_results[1], thread_rank+1, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl(temp_results[2], thread_rank+2, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl(temp_results[3], thread_rank+3, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl(temp_results[4], thread_rank+4, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl(temp_results[5], thread_rank+5, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl(temp_results[6], thread_rank+6, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl(temp_results[7], thread_rank+7, EVALUATE_BLOCK_SIZE) * polynomials[reg];
#elif (__CUDA_ARCH__ >= 700)
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl_sync(FULL_MASK, temp_results[1], thread_rank+1, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl_sync(FULL_MASK, temp_results[2], thread_rank+2, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl_sync(FULL_MASK, temp_results[3], thread_rank+3, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl_sync(FULL_MASK, temp_results[4], thread_rank+4, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl_sync(FULL_MASK, temp_results[5], thread_rank+5, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl_sync(FULL_MASK, temp_results[6], thread_rank+6, EVALUATE_BLOCK_SIZE) * polynomials[reg];
    reg = ((reg == EVALUATE_BLOCK_SIZE-1) ? 0 : reg + 1);
    result += __shfl_sync(FULL_MASK, temp_results[7], thread_rank+7, EVALUATE_BLOCK_SIZE) * polynomials[reg];
#endif
    return result;
}
/*
* Evaluates sum of 'coefficients[i]' times 'polynomials[i]' for 'coefficients'
* that reside in registers.
*
* NOTE: 'polynomials' and 'c' must be arrays of size nlip
*/
template <int nlip>
__device__ inline
double evaluate_coefficients_register(double *polynomials, double *c) {
    // plain dot product of the nlip polynomial values and coefficients
    double sum = 0.0;
    for (int i = 0; i < nlip; i++) {
        sum += polynomials[i] * c[i];
    }
    return sum;
}
/*
* Evaluates sum of 'coefficients[i]' times 'polynomials[i]' for 'coefficients'
* that reside in registers that are spread within the neighbouring threads.
* Also the 'polynomials should lie in registers'
*
* NOTE: 'polynomials' and 'c' must be arrays of size nlip
*/
template <int nlip>
__device__ inline
double evaluate_coefficients_shuffle(double *polynomials, double coefficient, int thread_order_number, int x_modulo) {
    double result = 0.0;
    if (nlip == 7) {
        // order number (within the warp) of the thread that holds the first
        // coefficient of this thread's cell
        int first_of_cell = thread_order_number - x_modulo;
        // each warp handles 5 cells, i.e., 6*5 + 1 points: thread 31 only
        // provides its coefficient and does not evaluate a point itself
#if (__CUDA_ARCH__ >= 700)
        // BUGFIX: the mask-less __shfl was removed for compute capability 7.0+
        // (independent thread scheduling); derive the participation mask from
        // the entry predicate *before* diverging, consistent with the
        // *_sync usage elsewhere in this file.
        // NOTE(review): assumes all threads of the warp call this function
        // together, as the legacy warp-synchronous code already did -- confirm
        // against the call sites.
        unsigned int mask = __ballot_sync(FULL_MASK, thread_order_number < 31);
        if (thread_order_number < 31) {
            result  = __shfl_sync(mask, coefficient, first_of_cell    , 32) * polynomials[0];
            result += __shfl_sync(mask, coefficient, first_of_cell + 1, 32) * polynomials[1];
            result += __shfl_sync(mask, coefficient, first_of_cell + 2, 32) * polynomials[2];
            result += __shfl_sync(mask, coefficient, first_of_cell + 3, 32) * polynomials[3];
            result += __shfl_sync(mask, coefficient, first_of_cell + 4, 32) * polynomials[4];
            result += __shfl_sync(mask, coefficient, first_of_cell + 5, 32) * polynomials[5];
            result += __shfl_sync(mask, coefficient, first_of_cell + 6, 32) * polynomials[6];
        }
#else
        if (thread_order_number < 31) {
            result  = __shfl(coefficient, first_of_cell    , 32) * polynomials[0];
            result += __shfl(coefficient, first_of_cell + 1, 32) * polynomials[1];
            result += __shfl(coefficient, first_of_cell + 2, 32) * polynomials[2];
            result += __shfl(coefficient, first_of_cell + 3, 32) * polynomials[3];
            result += __shfl(coefficient, first_of_cell + 4, 32) * polynomials[4];
            result += __shfl(coefficient, first_of_cell + 5, 32) * polynomials[5];
            result += __shfl(coefficient, first_of_cell + 6, 32) * polynomials[6];
        }
#endif
    }
    return result;
}
/*
* Evaluate cube at 'points'
*
* if calling to the version with evaluate_gradients=true, we are also evaluating the
* gradients and storing the results to 'device_gradients'
*/
template <bool evaluate_value, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z>
__global__ void
#if (__CUDA_ARCH__ <= 350)
__launch_bounds__(BLOCK_SIZE)
#else
__launch_bounds__(BLOCK_SIZE)
#endif
CubeEvaluator_evaluate_points(const double* __restrict__ device_cube,
const size_t device_pitch,
const size_t device_shape_y,
const Grid3D* __restrict__ grid,
double* __restrict__ result_array,
double* __restrict__ device_gradients_x,
double* __restrict__ device_gradients_y,
double* __restrict__ device_gradients_z,
// a 3d array, where the x coordinates are first,
// then y coordinates, and finally the z coordinates. This ordering
// is selected to get coalesced memory reads
const double* __restrict__ points,
// total number of points evaluated by this device
const int device_number_of_points,
// number of points in this kernel call
const int point_count,
// device_point_offset
const int device_point_offset,
const double multiplier
) {
// Get the point order number within this kernel call
int id = blockIdx.x * blockDim.x + threadIdx.x;
double value, gradient[3];
double in_cell_coordinate_x = 0.0, in_cell_coordinate_y = 0.0, in_cell_coordinate_z = 0.0;
double one_per_grid_step_x = 0.0, one_per_grid_step_y = 0.0, one_per_grid_step_z = 0.0;
int icell_x = 0, icell_y = 0, icell_z = 0, ncell_x = 0, ncell_y= 0, ncell_z= 0;
bool valid_point = true;
// get the number of cells
ncell_x = grid->axis[X_]->ncell;
ncell_y = grid->axis[Y_]->ncell;
ncell_z = grid->axis[Z_]->ncell;
if (id + device_point_offset < device_number_of_points && id < point_count ) {
// get the cell indices and coordinates within cell in grid steps
calculate_icell_equidistant<NLIP>(
grid->axis[X_], points[id + device_point_offset], icell_x, in_cell_coordinate_x, one_per_grid_step_x);
calculate_icell_equidistant<NLIP>(
grid->axis[Y_], points[id + device_point_offset + device_number_of_points], icell_y, in_cell_coordinate_y, one_per_grid_step_y);
calculate_icell_equidistant<NLIP>(
grid->axis[Z_], points[id + device_point_offset + device_number_of_points*2], icell_z, in_cell_coordinate_z, one_per_grid_step_z);
}
else {
valid_point = false;
}
// if the result is not within the grid, set the icells to 0 and mark the point to be non-valid
if (icell_x < 0 || icell_x >= ncell_x || icell_y < 0 || icell_y >= ncell_y || icell_z < 0 || icell_z >= ncell_z) {
icell_x = 0;
icell_y = 0;
icell_z = 0;
valid_point = false;
}
// read the LIPs in the shared memory
__shared__ double lip[NLIP * NLIP];
read_lip<NLIP, NLIP>(grid->axis[X_]->lip, threadIdx.x, lip);
// evaluate the polynomials in x and y directions
double x_polynomials[8];
evaluate_polynomials<NLIP, NLIP>(lip, in_cell_coordinate_x, x_polynomials);
double polynomials[8];
evaluate_polynomials<NLIP, NLIP>(lip, in_cell_coordinate_y, polynomials);
double x_values[NLIP], y_values[NLIP];
// get the address to the first grid point of icell_x, icell_y and icell_z
int address = icell_x * (NLIP-1)
+ icell_y * device_pitch / sizeof(double) * (NLIP-1)
+ icell_z * device_pitch / sizeof(double) * device_shape_y * (NLIP-1);
if (evaluate_value || evaluate_gradients_z) {
for (int j = 0; j < NLIP; j++) {
// add the address by 'j' slices
int z_address = address + j * device_pitch / sizeof(double) * device_shape_y;
for (int k = 0; k < NLIP; k++) {
// add the address by 'k' rows
int y_address = z_address + k * device_pitch / sizeof(double);
x_values[k] = evaluate_coefficients<NLIP>(x_polynomials, device_cube, y_address, threadIdx.x);
}
y_values[j] = evaluate_coefficients_register<NLIP>(polynomials, x_values);
}
}
if (evaluate_value) {
// evaluate the polynomials in z-direction.
// NOTE: reusing the y-direction polynomial registers
evaluate_polynomials<NLIP, NLIP>(lip, in_cell_coordinate_z, polynomials);
// evaluate the coefficients
value = evaluate_coefficients_register<NLIP>(polynomials, y_values);
// if the point handled is valid, let's add it to the results
if (valid_point) {
result_array[id+device_point_offset] += multiplier * value;
}
}
// if we are evaluating the gradients, it is done within the brackets below
if (evaluate_gradients_x || evaluate_gradients_y || evaluate_gradients_z) {
__shared__ double derivative_lip[(NLIP-1) * NLIP];
read_lip<NLIP-1, NLIP>(grid->axis[X_]->derivative_lip, threadIdx.x, derivative_lip);
if (evaluate_gradients_z) {
// evaluate the gradient polynomials in z-direction.
evaluate_polynomials<NLIP-1, NLIP>(derivative_lip, in_cell_coordinate_z, polynomials);
// multiply the polynomials with 1 / grid_step
for (int j = 0; j < NLIP; j++) {
polynomials[j] *= one_per_grid_step_z;
}
// evaluate the derivative coefficients
// we can reuse the previous y_values, which are the same for this case
gradient[Z_] = evaluate_coefficients_register<NLIP>(polynomials, y_values);
}
// NOTE: we now have the derivatives in x-direction, but for the rest y- and z- directions,
// we have to recalculate everything else, as we need to save some registers.
// If we would have 49*2 extra registers the next loop would be futile.
if (evaluate_gradients_y) {
// let's calculate the y-axis derivative polynomials.
// Note that we are still using the same x-direction polynomials
evaluate_polynomials<NLIP-1, NLIP>(derivative_lip, in_cell_coordinate_y, polynomials);
// multiply the polynomials with 1 / grid_step
for (int j = 0; j < NLIP; j++) {
polynomials[j] *= one_per_grid_step_y;
}
// and let's do the looping again
for (int j = 0; j < NLIP; j++) {
// add the address by 'j' slices
int z_address = address + j * device_pitch / sizeof(double) * device_shape_y;
for (int k = 0; k < NLIP; k++) {
// add the address by 'k' rows
int y_address = z_address + k * device_pitch / sizeof(double);
x_values[k] = evaluate_coefficients<NLIP>(x_polynomials, device_cube, y_address, threadIdx.x);
}
y_values[j] = evaluate_coefficients_register<NLIP>(polynomials, x_values);
}
// evaluate the polynomials in z-direction.
// reusing the y-direction polynomial registers
evaluate_polynomials<NLIP, NLIP>(lip, in_cell_coordinate_z, polynomials);
// finally, we can get the derivative in y-direction
gradient[Y_] = evaluate_coefficients_register<NLIP>(polynomials, y_values);
}
if (evaluate_gradients_x) {
// evaluate the normal polynomials in y-direction
evaluate_polynomials<NLIP, NLIP>(lip, in_cell_coordinate_y, polynomials);
// and evaluate the derivative polynomials in x-direction
evaluate_polynomials<NLIP-1, NLIP>(derivative_lip, in_cell_coordinate_x, x_polynomials);
// multiply the polynomials with 1 / grid_step
for (int j = 0; j < NLIP; j++) {
x_polynomials[j] *= one_per_grid_step_x;
}
// and let's do the looping again
for (int j = 0; j < NLIP; j++) {
// add the address by 'j' slices
int z_address = address + j * device_pitch / sizeof(double) * device_shape_y;
for (int k = 0; k < NLIP; k++) {
// add the address by 'k' rows
int y_address = z_address + k * device_pitch / sizeof(double);
x_values[k] = evaluate_coefficients<NLIP>(x_polynomials, device_cube, y_address, threadIdx.x);
}
y_values[j] = evaluate_coefficients_register<NLIP>(polynomials, x_values);
}
// evaluate the polynomials in z-direction.
// reusing the y-direction polynomial registers
evaluate_polynomials<NLIP, NLIP>(lip, in_cell_coordinate_z, polynomials);
// finally, we are ready and can get the derivative in z-direction
gradient[X_] = evaluate_coefficients_register<NLIP>(polynomials, y_values);
}
// if the point handled is valid, let's store the gradient to the device_gradients
if (valid_point) {
if (evaluate_gradients_x) device_gradients_x[id+device_point_offset] += multiplier * gradient[X_];
if (evaluate_gradients_y) device_gradients_y[id+device_point_offset] += multiplier * gradient[Y_];
if (evaluate_gradients_z) device_gradients_z[id+device_point_offset] += multiplier * gradient[Z_];
}
}
return;
}
/*
* Evaluate cube gradients at grid points. The results are stored to 'device_gradients'.
*/
/*
 * Kernel: evaluate the selected gradient components (chosen via the boolean
 * template flags) of 'device_cube' at the grid points of 'grid' and accumulate
 * 'multiplier' * gradient into device_gradients_x / _y / _z.
 *
 * Work distribution: each warp handles 'cells_per_warp' consecutive cells
 * along the x axis of one (y, z) row; 'warps_per_string' warps cover a full
 * x row.  The kernel processes 'slice_count' z slices starting at the global
 * slice offset 'slice_offset'; 'device_slice_offset' locates this device's
 * portion of the output buffers.
 *
 * NOTE(review): the warp bookkeeping below is only initialized in the
 * 'nlip == 7' branch; any other nlip instantiation would read uninitialized
 * values -- presumably the template is only instantiated with nlip = 7
 * (several '= 6' constants below also hard-code nlip-1 = 6); verify.
 */
template <int nlip, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z>
__global__ void
#if (__CUDA_ARCH__ <= 350)
__launch_bounds__(BLOCK_SIZE)
#else
__launch_bounds__(BLOCK_SIZE)
#endif
CubeEvaluator_evaluate_grid_gradients(const double* __restrict__ device_cube,
                                      const size_t device_pitch,
                                      const size_t device_shape_y,
                                      const Grid3D* __restrict__ grid,
                                      double* __restrict__ device_gradients_x,
                                      double* __restrict__ device_gradients_y,
                                      double* __restrict__ device_gradients_z,
                                      // number of slices handled by this device
                                      // in previous calls
                                      int device_slice_offset,
                                      // number of slices handled by all devices
                                      // in previous calls
                                      int slice_offset,
                                      // number of slices handled by this call
                                      int slice_count,
                                      // number of warps in a x-axis row
                                      int warps_per_string,
                                      const double multiplier
                                     ) {
    // Get the point order number within this kernel call
    int global_warp_id, thread_order_number, cells_per_warp;
    int WARP_SIZE = 32;
    bool valid_point = true;
    // if nlip is 7, each warp of 32 threads handles 5 cells
    // (nlip-1 = 6 points per cell -> 30 threads mapped to points)
    if (nlip == 7) {
        // get the global warp order number
        global_warp_id = blockIdx.x * blockDim.x / WARP_SIZE
                       + threadIdx. x / WARP_SIZE;
        cells_per_warp = 5;
        // get the order number of thread within the warp
        thread_order_number = threadIdx.x % WARP_SIZE;
        // the last thread of the warp has no grid point of its own
        if (thread_order_number == 31) valid_point = false;
    }
    // get the number of cells along each axis
    int ncell_x = grid->axis[X_]->ncell;
    int ncell_y = grid->axis[Y_]->ncell;
    int ncell_z = grid->axis[Z_]->ncell;
    // number of grid points along the y axis
    int y_shape = ncell_y * (nlip-1) + 1;
    // get the z and y coordinates of this warp's row
    int z = global_warp_id / (warps_per_string * y_shape);
    int y = (global_warp_id - z * warps_per_string * y_shape) / warps_per_string;
    // get the warp id withing the x-axis string
    int string_warp_id = (global_warp_id
                          - z * warps_per_string * y_shape
                          - y * warps_per_string);
    // cell index, in-cell point index, and grid-point index along x
    int icell_x = string_warp_id * cells_per_warp + thread_order_number / (nlip-1);
    int x_modulo = thread_order_number % (nlip-1);
    int x = icell_x * (nlip-1) + x_modulo;
    // get the order numbers of cells within this device
    int icell_z = (z + slice_offset) / (nlip-1);
    int icell_y = y / (nlip-1);
    // and get the remainders of the y and z coordinates
    int y_modulo = y % (nlip-1);
    int z_modulo = (z + slice_offset) % (nlip-1);
    // a point on a cell boundary belongs to two cells: remap the first point
    // of a cell (other than cell 0) to the last point of the previous cell
    // NOTE(review): '= 6' hard-codes nlip-1 = 6, i.e. nlip = 7
    if (x_modulo == 0 && icell_x > 0) {
        icell_x -= 1;
        x_modulo = 6;
    }
    // same remapping for the y-axis cell boundary
    if (y_modulo == 0 && icell_y > 0) {
        icell_y -= 1;
        y_modulo = 6;
    }
    // same remapping for the z-axis cell boundary
    if (z_modulo == 0 && icell_z > 0) {
        icell_z -= 1;
        z_modulo = 6;
    }
    // if the result is not within the grid, mark the point to be non-valid;
    // the indices are zeroed so the address arithmetic below stays in bounds
    if ( icell_x < 0 || x >= ncell_x * (nlip-1) + 1
        || icell_y < 0 || y >= y_shape
        || icell_z < 0 || z + slice_offset >= ncell_z * (nlip-1) + 1
        || z >= slice_count) {
        valid_point = false;
        icell_x = 0;
        icell_y = 0;
        icell_z = 0;
        x = 0;
        y = 0;
        z = 0;
        thread_order_number = 32;
    }
    // a thread remapped at the x cell boundary duplicates a point handled by
    // another thread; invalidate it so the value is accumulated only once
    if (thread_order_number == 0 && x_modulo != 0) valid_point = false;
    // calculate the 1 / grid steps for all axis
    double one_per_grid_step_x = 1.0 / grid->axis[X_]->h[icell_x];
    double one_per_grid_step_y = 1.0 / grid->axis[Y_]->h[icell_y];
    double one_per_grid_step_z = 1.0 / grid->axis[Z_]->h[icell_z];
    // get the in-cell coordinates (point index shifted to be centered at 0)
    double in_cell_coordinate_x = (double)(x_modulo - 3);
    double in_cell_coordinate_y = (double)(y_modulo - 3);
    double in_cell_coordinate_z = (double)(z_modulo - 3);
    // read the LIPs (Lagrange interpolation polynomial coefficients) and
    // their derivatives into shared memory
    __shared__ double lip[nlip * nlip];
    read_lip<nlip, nlip>(grid->axis[X_]->lip, threadIdx.x, lip);
    __shared__ double derivative_lip[(nlip-1) * nlip];
    read_lip<nlip-1, nlip>(grid->axis[X_]->derivative_lip, threadIdx.x, derivative_lip);
    // polynomial values in x direction
    double x_polynomials[8];
    // polynomial values in y/z direction (registers reused between phases)
    double polynomials[8];
    double x_values[nlip], y_values[nlip];
    int address;
    double gradient[3];
    // evaluate gradient in z direction
    if (evaluate_gradients_z) {
        // address of point (x, y) in the first slice of cell icell_z
        address = x
                  + y * device_pitch / sizeof(double)
                  + icell_z * (nlip-1) * device_pitch / sizeof(double) * device_shape_y;
        for (int j = 0; j < nlip; j++) {
            // add the address by 'j' slices
            int z_address = address + j * device_pitch / sizeof(double) * device_shape_y;
            // read the value
            y_values[j] = device_cube[z_address];
        }
        // evaluate the derivative polynomials in z-direction.
        evaluate_polynomials<nlip-1, nlip>(derivative_lip, in_cell_coordinate_z, polynomials);
        // multiply the polynomials with 1 / grid_step
        // (chain rule: derivative is taken in the cell-local coordinate)
        for (int j = 0; j < nlip; j++) {
            polynomials[j] *= one_per_grid_step_z;
        }
        // Now we have all to evaluate the gradients in z direction. Let's do it.
        gradient[Z_] = evaluate_coefficients_register<nlip>(polynomials, y_values);
    }
    // evaluate gradient in y direction
    if (evaluate_gradients_y) {
        // evaluate the derivative polynomials in y direction
        evaluate_polynomials<nlip-1, nlip>(derivative_lip, in_cell_coordinate_y, polynomials);
        // multiply the polynomials with 1 / grid_step
        for (int j = 0; j < nlip; j++) {
            polynomials[j] *= one_per_grid_step_y;
        }
        // get the address to the first grid point of icell_y and icell_z and to the point x
        address = x
                  + icell_y * device_pitch / sizeof(double) * (nlip-1)
                  + icell_z * device_pitch / sizeof(double) * device_shape_y * (nlip-1);
        for (int j = 0; j < nlip; j++) {
            // add the address by 'j' slices
            int z_address = address + j * device_pitch / sizeof(double) * device_shape_y;
            for (int k = 0; k < nlip; k++) {
                // add the address by 'k' rows
                int y_address = z_address + k * device_pitch / sizeof(double);
                // read in the x value
                x_values[k] = device_cube[y_address];
            }
            // contract the row of values with the y-derivative polynomials
            y_values[j] = evaluate_coefficients_register<nlip>(polynomials, x_values);
        }
        // evaluate the polynomials in z-direction.
        // NOTE: reusing the y-direction polynomial registers
        evaluate_polynomials<nlip, nlip>(lip, in_cell_coordinate_z, polynomials);
        // Now we have all to evaluate the gradients in y direction. Let's do it.
        gradient[Y_] = evaluate_coefficients_register<nlip>(polynomials, y_values);
    }
    // evaluate gradient in x direction
    if (evaluate_gradients_x) {
        // evaluate the polynomials in y-direction.
        evaluate_polynomials<nlip, nlip>(lip, in_cell_coordinate_y, polynomials);
        // evaluate the derivative polynomials in x direction
        evaluate_polynomials<nlip-1, nlip>(derivative_lip, in_cell_coordinate_x, x_polynomials);
        // multiply the polynomials with 1 / grid_step
        for (int j = 0; j < nlip; j++) {
            x_polynomials[j] *= one_per_grid_step_x;
        }
        address = x
                  + icell_y * device_pitch / sizeof(double) * (nlip-1)
                  + icell_z * device_pitch / sizeof(double) * device_shape_y * (nlip-1);
        for (int j = 0; j < nlip; j++) {
            // add the address by 'j' slices
            int z_address = address + j * device_pitch / sizeof(double) * device_shape_y;
            for (int k = 0; k < nlip; k++) {
                // add the address by 'k' rows
                int y_address = z_address + k * device_pitch / sizeof(double);
                // read in the x value
                double x_value = device_cube[y_address];
                // evaluate the derivative value
                // (presumably combines the x-cell values across warp lanes --
                // helper defined elsewhere; verify)
                x_values[k] = evaluate_coefficients_shuffle<nlip>(x_polynomials, x_value, thread_order_number, x_modulo);
            }
            y_values[j] = evaluate_coefficients_register<nlip>(polynomials, x_values);
        }
        // evaluate the polynomials in z-direction.
        // NOTE: reusing the y-direction polynomial registers
        evaluate_polynomials<nlip, nlip>(lip, in_cell_coordinate_z, polynomials);
        // evaluate the coefficients to get the derivative in x direction
        gradient[X_] = evaluate_coefficients_register<nlip>(polynomials, y_values);
    }
    // address of the output point in this device's gradient buffers
    address = x
              + y * device_pitch / sizeof(double)
              + (z + device_slice_offset) * device_pitch / sizeof(double) * device_shape_y;
    // if the point handled is valid, let's add it to the results
    if (valid_point) {
        /*if (x >= ncell_x * (nlip-1) +1 || y >= ncell_y * (nlip-1) + 1 || z + slice_offset >= ncell_z * (nlip-1) + 1 || z >= slice_count) {
            printf("over bounds x: %d/%d, y: %d/%d, z: %d / %d\n", x, ncell_x * (nlip-1) +1, y, ncell_y * (nlip-1) + 1, z, slice_count);
        }*/
        //int max_address = (device_slice_offset + slice_count) * device_shape_y * device_pitch / sizeof(double);
        //if (address >= max_address || address < 0 ) printf("address over bounds: %d / %d", address, max_address);
        if (evaluate_gradients_x) device_gradients_x[address] += multiplier * gradient[X_];
        if (evaluate_gradients_y) device_gradients_y[address] += multiplier * gradient[Y_];
        if (evaluate_gradients_z) device_gradients_z[address] += multiplier * gradient[Z_];
    }
    return;
}
// this is a bit ugly ... ideally fin_diff_order and grid_type should both be
// template parameters, but they should also be user-input ...
template<int finite_diff_order>
__device__ __forceinline__
double evaluate_derivative(const int curr_id,
const int prev_id1, const int prev_id2, const int prev_id3, const int prev_id4, const int prev_id5, const int prev_id6, const int prev_id7, const int prev_id8,
const int next_id1, const int next_id2, const int next_id3, const int next_id4, const int next_id5, const int next_id6, const int next_id7, const int next_id8,
const double* __restrict__ device_cube, const int grid_type, const int local_pos, const double h){
if (curr_id == -1) return 0.0;
// printf("xy: %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i\n", prev_id6, prev_id5, prev_id4, prev_id3, prev_id2, prev_id1, next_id1, next_id2, next_id3, next_id4, next_id5, next_id6);
double curr_value,
prev_value1, prev_value2, prev_value3, prev_value4, prev_value5, prev_value6, prev_value7, prev_value8,
next_value1, next_value2, next_value3, next_value4, next_value5, next_value6, next_value7, next_value8;
if (curr_id > -1) curr_value = __ldg(&device_cube[curr_id]);
if (prev_id1 > -1) prev_value1 = __ldg(&device_cube[prev_id1]);
if (next_id1 > -1) next_value1 = __ldg(&device_cube[next_id1]);
if (finite_diff_order >= 3){
if (prev_id2 > -1) prev_value2 = __ldg(&device_cube[prev_id2]);
if (next_id2 > -1) next_value2 = __ldg(&device_cube[next_id2]);
}
if (finite_diff_order >= 4){
if (prev_id3 > -1) prev_value3 = __ldg(&device_cube[prev_id3]);
if (next_id3 > -1) next_value3 = __ldg(&device_cube[next_id3]);
}
if (finite_diff_order >= 5){
if (prev_id4 > -1) prev_value4 = __ldg(&device_cube[prev_id4]);
if (next_id4 > -1) next_value4 = __ldg(&device_cube[next_id4]);
}
if (finite_diff_order >= 6){
if (prev_id5 > -1) prev_value5 = __ldg(&device_cube[prev_id5]);
if (next_id5 > -1) next_value5 = __ldg(&device_cube[next_id5]);
}
if (finite_diff_order >= 7){
if (prev_id6 > -1) prev_value6 = __ldg(&device_cube[prev_id6]);
if (next_id6 > -1) next_value6 = __ldg(&device_cube[next_id6]);
}
if (finite_diff_order >= 8){
if (prev_id7 > -1) prev_value7 = __ldg(&device_cube[prev_id7]);
if (next_id7 > -1) next_value7 = __ldg(&device_cube[next_id7]);
}
if (finite_diff_order >= 9){
if (prev_id8 > -1) prev_value8 = __ldg(&device_cube[prev_id8]);
if (next_id8 > -1) next_value8 = __ldg(&device_cube[next_id8]);
}
if(grid_type == 1){ // equidistant
if ( finite_diff_order >= 11 && prev_id1 > -1 && next_id1 > -1 && prev_id2 > -1 && next_id2 > -1 && prev_id3 > -1 && next_id3 > -1 && prev_id4 > -1 && next_id4 > -1 && prev_id5 > -1 && next_id5 > -1 ) { // x x x x x o x x x x x
return (-1.0 * prev_value5 + 12.5 * prev_value4 - 75.0 * prev_value3 + 300.0 * prev_value2 - 1050.0 * prev_value1 + 1050.0 * next_value1 - 300.0 * next_value2 + 75.0 * next_value3 - 12.5 * next_value4 + 1.0 * next_value5) / (1260.0*h);
}
else if ( finite_diff_order >= 9 && prev_id1 > -1 && next_id1 > -1 && prev_id2 > -1 && next_id2 > -1 && prev_id3 > -1 && next_id3 > -1 && prev_id4 > -1 && next_id4 > -1 ) { // x x x x o x x x x
return (3.0 * prev_value4 - 32.0 * prev_value3 + 168.0 * prev_value2 - 672.0 * prev_value1 + 672.0 * next_value1 - 168.0 * next_value2 + 32.0 * next_value3 - 3.0 * next_value4 ) / (840.0*h);
}
else if ( finite_diff_order >= 7 && prev_id1 > -1 && next_id1 > -1 && prev_id2 > -1 && next_id2 > -1 && prev_id3 > -1 && next_id3 > -1 ) { // x x x o x x x
return (-1.0 * prev_value3 + 9.0 * prev_value2 - 45.0 * prev_value1 + 45.0 * next_value1 - 9.0 * next_value2 + 1.0 * next_value3) / (60.0*h);
}
else if ( finite_diff_order >= 5 && prev_id1 > -1 && next_id1 > -1 && prev_id2 > -1 && next_id2 > -1 ) { // x x o x x
return (-1.0 * next_value2 + 8.0 * next_value1 - 8.0 * prev_value1 + prev_value2) / (12.0*h);
}
else if ( finite_diff_order >= 3 && prev_id1 > -1 && next_id1 > -1 ) { // x o x
return (next_value1 - prev_value1) / (2.0 * h);
}
else if ( finite_diff_order >= 3 && next_id1 > -1 && next_id2 > -1 ) { // o x x
return (-1.0 * next_value2 + 4.0 * next_value1 - 3.0 * curr_value) / (2.0 * h);
}
else if ( finite_diff_order >= 3 && prev_id1 > -1 && prev_id2 > -1) { // x x o
return (1.0 * prev_value2 - 4.0 * prev_value2 + 3.0 * curr_value) / (2.0 * h);
}
else if ( finite_diff_order >= 2 && next_id1 > -1 ) { // o x
return (next_value1 - curr_value) / h;
}
else if ( finite_diff_order >= 2 && prev_id1 > -1 ) { // x o
return (curr_value - prev_value1) / h;
}
}
else if(grid_type == 2){ // lobatto
if (finite_diff_order == 9){
if ( prev_id4 > -1 && prev_id3 > -1 && prev_id2 > -1 && prev_id1 > -1 && next_id1 > -1 && next_id2 > -1 && next_id3 > -1 && next_id4 > -1){
// centre at pos 0: {0.00019967699748295, -0.0036233997312734, 0.057221995168223, -1.1410916482317, 0, 1.1410916482317, -0.057221995168223, 0.0036233997312734, -0.00019967699748295}
// centre at pos 1: {0.00089464933543953, -0.030021565652814, 0.64613210399319, -2.7472765818353, 1.9243274545541, 0.22575990586671, -0.022628075171264, 0.0031999844358336, -0.000387875525842}
// centre at pos 2: {0.014381546028833, -0.46942683846151, 1.7209311289771, -2.310743566852, 0.78934433020611, 0.31410347917631, -0.080765809769078, 0.032393740023427, -0.010218009329259}
// centre at pos 3: {0.075131403109164, -0.38693923899442, 0.6095587888754, -0.79702587611059, 0, 0.79702587611059, -0.6095587888754, 0.38693923899442, -0.075131403109164}
// centre at pos 4: {0.010218009329259, -0.032393740023427, 0.080765809769078, -0.31410347917631, -0.78934433020611, 2.310743566852, -1.7209311289771, 0.46942683846151, -0.014381546028833}
// centre at pos 5: {0.000387875525842, -0.0031999844358336, 0.022628075171264, -0.22575990586671, -1.9243274545541, 2.7472765818353, -0.64613210399319, 0.030021565652814, -0.00089464933543953}
// centre at pos 6: {0.00019967699748295, -0.0036233997312734, 0.057221995168223, -1.1410916482317, 0, 1.1410916482317, -0.057221995168223, 0.0036233997312734, -0.00019967699748295}
switch(local_pos){
case(0):
return ( 0.00019967699748295 * prev_value4 + -0.0036233997312734 * prev_value3 + 0.057221995168223 * prev_value2 + -1.1410916482317 * prev_value1 + 0 * curr_value + 1.1410916482317 * next_value1 + -0.057221995168223 * next_value2 + 0.0036233997312734 * next_value3 + -0.00019967699748295 * next_value4 )/h;
case(1):
return ( 0.00089464933543953 * prev_value4 + -0.030021565652814 * prev_value3 + 0.64613210399319 * prev_value2 + -2.7472765818353 * prev_value1 + 1.9243274545541 * curr_value + 0.22575990586671 * next_value1 + -0.022628075171264 * next_value2 + 0.0031999844358336 * next_value3 + -0.000387875525842 * next_value4 )/h;
case(2):
return ( 0.014381546028833 * prev_value4 + -0.46942683846151 * prev_value3 + 1.7209311289771 * prev_value2 + -2.310743566852 * prev_value1 + 0.78934433020611 * curr_value + 0.31410347917631 * next_value1 + -0.080765809769078 * next_value2 + 0.032393740023427 * next_value3 + -0.010218009329259 * next_value4 )/h;
case(3):
return ( 0.075131403109164 * prev_value4 + -0.38693923899442 * prev_value3 + 0.6095587888754 * prev_value2 + -0.79702587611059 * prev_value1 + 0 * curr_value + 0.79702587611059 * next_value1 + -0.6095587888754 * next_value2 + 0.38693923899442 * next_value3 + -0.075131403109164 * next_value4 )/h;
case(4):
return ( 0.010218009329259 * prev_value4 + -0.032393740023427 * prev_value3 + 0.080765809769078 * prev_value2 + -0.31410347917631 * prev_value1 + -0.78934433020611 * curr_value + 2.310743566852 * next_value1 + -1.7209311289771 * next_value2 + 0.46942683846151 * next_value3 + -0.014381546028833 * next_value4 )/h;
case(5):
return ( 0.000387875525842 * prev_value4 + -0.0031999844358336 * prev_value3 + 0.022628075171264 * prev_value2 + -0.22575990586671 * prev_value1 + -1.9243274545541 * curr_value + 2.7472765818353 * next_value1 + -0.64613210399319 * next_value2 + 0.030021565652814 * next_value3 + -0.00089464933543953 * next_value4 )/h;
}
}
// centre at pos 0: {-3.7853180645098, 5.504949536976, -3.1667044864103, 3.2707014929785, -5.0400751724465, 10.133071777992, -10.15049044192, 3.3679166521506, -0.13405129481064}
// centre at pos 1: {-0.70024662338511, -0.30782735432573, 1.6600314728931, -1.4050588677272, 2.0324950315796, -3.9835379214689, 3.9560488107718, -1.3031520813358, 0.051247532998119}
// centre at pos 2: {0.12436941987383, -0.5125363975973, -0.37008926442197, 1.3824872733712, -1.5646036607053, 2.8292082985006, -2.7390058952044, 0.88379134357785, -0.033621117394515}
// centre at pos 3: {-0.033971645333468, 0.11472881590315, -0.36562067241713, -0.50265593560168, 1.6092337716972, -2.276606371715, 2.0689731675221, -0.63666554571692, 0.022584415661807}
else if (prev_id1 == -1)
return ( -3.7853180645098 * curr_value + 5.504949536976 * next_value1 + -3.1667044864103 * next_value2 + 3.2707014929785 * next_value3 + -5.0400751724465 * next_value4 + 10.133071777992 * next_value5 + -10.15049044192 * next_value6 + 3.3679166521506 * next_value7 + -0.13405129481064 * next_value8 )/h;
else if (prev_id2 == -1)
return ( -0.70024662338511 * prev_value1 + -0.30782735432573 * curr_value + 1.6600314728931 * next_value1 + -1.4050588677272 * next_value2 + 2.0324950315796 * next_value3 + -3.9835379214689 * next_value4 + 3.9560488107718 * next_value5 + -1.3031520813358 * next_value6 + 0.051247532998119 * next_value7 )/h;
else if (prev_id3 == -1)
return ( 0.12436941987383 * prev_value2 + -0.5125363975973 * prev_value1 + -0.37008926442197 * curr_value + 1.3824872733712 * next_value1 + -1.5646036607053 * next_value2 + 2.8292082985006 * next_value3 + -2.7390058952044 * next_value4 + 0.88379134357785 * next_value5 + -0.033621117394515 * next_value6 )/h;
else if (prev_id4 == -1)
return ( -0.033971645333468 * prev_value3 + 0.11472881590315 * prev_value2 + -0.36562067241713 * prev_value1 + -0.50265593560168 * curr_value + 1.6092337716972 * next_value1 + -2.276606371715 * next_value2 + 2.0689731675221 * next_value3 + -0.63666554571692 * next_value4 + 0.022584415661807 * next_value5 )/h;
// centre at pos 6: {0.13405129481064, -3.3679166521506, 10.15049044192, -10.133071777992, 5.0400751724464, -3.2707014929785, 3.1667044864103, -5.504949536976, 3.7853180645098}
// centre at pos 5: {-0.051247532998119, 1.3031520813358, -3.9560488107718, 3.9835379214689, -2.0324950315796, 1.4050588677272, -1.6600314728931, 0.30782735432572, 0.70024662338511}
// centre at pos 4: {0.033621117394515, -0.88379134357785, 2.7390058952044, -2.8292082985006, 1.5646036607053, -1.3824872733712, 0.37008926442197, 0.5125363975973, -0.12436941987383}
// centre at pos 3: {-0.022584415661807, 0.63666554571692, -2.0689731675221, 2.276606371715, -1.6092337716972, 0.50265593560168, 0.36562067241713, -0.11472881590315, 0.033971645333468}
else if (next_id1 == -1)
return ( 0.13405129481064 * prev_value8 + -3.3679166521506 * prev_value7 + 10.15049044192 * prev_value6 + -10.133071777992 * prev_value5 + 5.0400751724464 * prev_value4 + -3.2707014929785 * prev_value3 + 3.1667044864103 * prev_value2 + -5.504949536976 * prev_value1 + 3.7853180645098 * curr_value )/h;
else if (next_id2 == -1)
return ( -0.051247532998119 * prev_value7 + 1.3031520813358 * prev_value6 + -3.9560488107718 * prev_value5 + 3.9835379214689 * prev_value4 + -2.0324950315796 * prev_value3 + 1.4050588677272 * prev_value2 + -1.6600314728931 * prev_value1 + 0.30782735432572 * curr_value + 0.70024662338511 * next_value1 )/h;
else if (next_id3 == -1)
return ( 0.033621117394515 * prev_value6 + -0.88379134357785 * prev_value5 + 2.7390058952044 * prev_value4 + -2.8292082985006 * prev_value3 + 1.5646036607053 * prev_value2 + -1.3824872733712 * prev_value1 + 0.37008926442197 * curr_value + 0.5125363975973 * next_value1 + -0.12436941987383 * next_value2 )/h;
else if (next_id4 == -1)
return ( -0.022584415661807 * prev_value5 + 0.63666554571692 * prev_value4 + -2.0689731675221 * prev_value3 + 2.276606371715 * prev_value2 + -1.6092337716972 * prev_value1 + 0.50265593560168 * curr_value + 0.36562067241713 * next_value1 + -0.11472881590315 * next_value2 + 0.033971645333468 * next_value3 )/h;
}
else if (finite_diff_order == 7){
if ( prev_id3 > -1 && prev_id2 > -1 && prev_id1 > -1 && next_id1 > -1 && next_id2 > -1 && next_id3 > -1 ){
// some of the following 7 rules are slightly subobtimal in the
// sense that an asymmetric choice of points would have a smaller
// error, but the problem is small and this is easier
// generated using https://github.com/lnw/finite-diff-weights
// assuming a the lobatto grid with points {-3,-2.4906716888357,-1.40654638041214,0,1.40654638041214,2.4906716888357,3}
// centre at pos 0: {-0.0019439691154442, 0.049739522152831, -1.1258469276013, 0, 1.1258469276013, -0.049739522152831, 0.0019439691154442}
// centre at pos 1: {-0.017112093697958, 0.55235536265242, -2.588681589899, 1.8401216993421, 0.23119070988819, -0.019343936928851, 0.0014698486430997}
// centre at pos 2: {-0.23589340925937, 1.1716189825849, -1.899781686814, 0.70249557758477, 0.30822337312933, -0.054985788958057, 0.0083229517325248}
// centre at pos 3: {-0.10416666666667, 0.30251482375627, -0.66898974686292, 0, 0.66898974686292, -0.30251482375627, 0.10416666666667}
// centre at pos 4: {-0.0083229517325248, 0.054985788958057, -0.30822337312933, -0.70249557758477, 1.899781686814, -1.1716189825849, 0.23589340925937}
// centre at pos 5: {-0.0014698486430997, 0.019343936928851, -0.23119070988819, -1.8401216993421, 2.588681589899, -0.55235536265242, 0.017112093697958}
// centre at pos 6: {-0.0019439691154442, 0.049739522152831, -1.1258469276013, 0, 1.1258469276013, -0.049739522152831, 0.0019439691154442}
switch(local_pos){
case(0):
return ( -0.00194396911544 * prev_value3 + 0.0497395221528 * prev_value2 + -1.1258469276 * prev_value1 + 0 * curr_value + 1.1258469276 * next_value1 + -0.0497395221528 * next_value2 + 0.00194396911544 * next_value3 )/h;
case(1):
return ( -0.017112093698 * prev_value3 + 0.552355362652 * prev_value2 + -2.5886815899 * prev_value1 + 1.84012169934 * curr_value + 0.231190709888 * next_value1 + -0.0193439369289 * next_value2 + 0.0014698486431 * next_value3 )/h;
case(2):
return ( -0.235893409259 * prev_value3 + 1.17161898258 * prev_value2 + -1.89978168681 * prev_value1 + 0.702495577585 * curr_value + 0.308223373129 * next_value1 + -0.0549857889581 * next_value2 + 0.00832295173252 * next_value3 )/h;
case(3):
return ( -0.104166666667 * prev_value3 + 0.302514823756 * prev_value2 + -0.668989746863 * prev_value1 + 0 * curr_value + 0.668989746863 * next_value1 + -0.302514823756 * next_value2 + 0.104166666667 * next_value3 )/h;
case(4):
return ( -0.00832295173252 * prev_value3 + 0.0549857889581 * prev_value2 + -0.308223373129 * prev_value1 + -0.702495577585 * curr_value + 1.89978168681 * next_value1 + -1.17161898258 * next_value2 + 0.235893409259 * next_value3 )/h;
case(5):
return ( -0.0014698486431 * prev_value3 + 0.0193439369289 * prev_value2 + -0.231190709888 * prev_value1 + -1.84012169934 * curr_value + 2.5886815899 * next_value1 + -0.552355362652 * next_value2 + 0.017112093698 * next_value3 )/h;
}
}
// generated using https://github.com/lnw/finite-diff-weights
// assuming a the lobatto grid with points {-3,-2.4906716888357,-1.40654638041214,0,1.40654638041214,2.4906716888357,3}
// centre at pos 0: {-3.5, 4.7338588676399, -1.8896617418485, 1.0666666666667, -0.68332160435892, 0.43912447856748, -0.16666666666667 }
// centre at pos 1: {-0.81430867141476, 0, 1.1519427380981, -0.53286889603279, 0.32044659909624, -0.2007490598786, 0.075537290131815 }
// centre at pos 2: {0.20841888850511, -0.73860142772332, 0, 0.75556602902867, -0.35548063466879, 0.20546361183919, -0.075366466980858}
else if (prev_id1 == -1)
return ( -3.5 * curr_value + 4.7338588676399 * next_value1 + -1.8896617418485 * next_value2 + 1.0666666666667 * next_value3 + -0.68332160435892 * next_value4 + 0.43912447856748 * next_value5 + -0.16666666666667 * next_value6)/h;
else if (prev_id2 == -1)
return ( -0.81430867141476 * prev_value1 + 0 * curr_value + 1.1519427380981 * next_value1 + -0.53286889603279 * next_value2 + 0.32044659909624 * next_value3 + -0.2007490598786 * next_value4 + 0.075537290131815 * next_value5)/h;
else if (prev_id3 == -1)
return ( 0.20841888850511 * prev_value2 + -0.73860142772332 * prev_value1 + 0 * curr_value + 0.75556602902867 * next_value1 + -0.35548063466879 * next_value2 + 0.20546361183919 * next_value3 + -0.075366466980858 * next_value4)/h;
// generated using https://github.com/lnw/finite-diff-weights
// assuming a the lobatto grid with points {-3,-2.4906716888357,-1.40654638041214,0,1.40654638041214,2.4906716888357,3}
// centre at pos 6: {0.16666666666667, -0.43912447856748, 0.68332160435892, -1.0666666666667, 1.8896617418485, -4.7338588676399, 3.5 }
// centre at pos 5: {-0.075537290131815, 0.2007490598786, -0.32044659909624, 0.53286889603279, -1.1519427380981, 0, 0.81430867141476 }
// centre at pos 4: {0.075366466980858, -0.20546361183919, 0.35548063466879, -0.75556602902867, 0, 0.73860142772332, -0.20841888850511}
else if (next_id1 == -1)
return ( 0.16666666666667 * prev_value6 + -0.43912447856748 * prev_value5 + 0.68332160435892 * prev_value4 + -1.0666666666667 * prev_value3 + 1.8896617418485 * prev_value2 + -4.7338588676399 * prev_value1 + 3.5 * curr_value )/h;
else if (next_id2 == -1)
return ( -0.075537290131815 * prev_value5 + 0.2007490598786 * prev_value4 + -0.32044659909624 * prev_value3 + 0.53286889603279 * prev_value2 + -1.1519427380981 * prev_value1 + 0 * curr_value + 0.81430867141476 * next_value1)/h;
else if (next_id3 == -1)
return ( 0.075366466980858 * prev_value4 + -0.20546361183919 * prev_value3 + 0.35548063466879 * prev_value2 + -0.75556602902867 * prev_value1 + 0 * curr_value + 0.73860142772332 * next_value1 + -0.20841888850511 * next_value2)/h;
}
else if (finite_diff_order == 5){
if ( prev_id2 > -1 && prev_id1 > -1 && next_id1 > -1 && next_id2 > -1 ){
// generated using https://github.com/lnw/finite-diff-weights
// assuming a the lobatto grid with points {-3,-2.4906716888357,-1.40654638041214,0,1.40654638041214,2.4906716888357,3}
// centre at pos 0: {0.035706928371056, -1.0933955997541, 0, 1.0933955997541, -0.035706928371056}
// centre at pos 1: {0.35921124012101, -2.2180302446983, 1.6211545002871, 0.2529151880879, -0.015250683797656}
// centre at pos 2: {0.3998165330722, -1.1763296553934, 0.48352837852975, 0.32874345055196, -0.035758706760461}
// centre at pos 3: {0.093999911479851, -0.52193296182726, 0, 0.52193296182726, -0.093999911479851}
// centre at pos 4: {0.035758706760461, -0.32874345055196, -0.48352837852975, 1.1763296553934, -0.3998165330722}
// centre at pos 5: {0.015250683797656, -0.2529151880879, -1.6211545002871, 2.2180302446983, -0.35921124012101}
switch(local_pos){
case(0):
return ( 0.035706928371056 * prev_value2 + -1.0933955997541 * prev_value1 + 0 * curr_value + 1.0933955997541 * next_value1 + -0.035706928371056 * next_value2 )/h;
case(1):
return ( 0.35921124012101 * prev_value2 + -2.2180302446983 * prev_value1 + 1.6211545002871 * curr_value + 0.2529151880879 * next_value1 + -0.015250683797656 * next_value2 )/h;
case(2):
return ( 0.3998165330722 * prev_value2 + -1.1763296553934 * prev_value1 + 0.48352837852975 * curr_value + 0.32874345055196 * next_value1 + -0.035758706760461 * next_value2 )/h;
case(3):
return ( 0.093999911479851 * prev_value2 + -0.52193296182726 * prev_value1 + 0 * curr_value + 0.52193296182726 * next_value1 + -0.093999911479851 * next_value2 )/h;
case(4):
return ( 0.035758706760461 * prev_value2 + -0.32874345055196 * prev_value1 + -0.48352837852975 * curr_value + 1.1763296553934 * next_value1 + -0.3998165330722 * next_value2 )/h;
case(5):
return ( 0.015250683797656 * prev_value2 + -0.2529151880879 * prev_value1 + -1.6211545002871 * curr_value + 2.2180302446983 * next_value1 + -0.35921124012101 * next_value2 )/h;
}
}
// generated using https://github.com/lnw/finite-diff-weights
    // assuming the Lobatto grid with points {-3,-2.4906716888357,-1.40654638041214,0,1.40654638041214,2.4906716888357,3}
// centre at pos 0: {-3.1512062536842, 3.9301627535249, -0.98505481216702, 0.24193000589467, -0.03583169356836}
// centre at pos 1: {-0.98083020142501, 0.38287613952776, 0.72328921328621, -0.14557478380549, 0.020239632416528}
else if (prev_id1 == -1)
return ( -3.1512062536842 * curr_value + 3.9301627535249 * next_value1 + -0.98505481216702 * next_value2 + 0.24193000589467 * next_value3 + -0.03583169356836 * next_value4 )/h;
else if (prev_id2 == -1)
return ( -0.98083020142501 * prev_value1 + 0.38287613952776 * curr_value + 0.72328921328621 * next_value1 + -0.14557478380549 * next_value2 + 0.020239632416528 * next_value3 )/h;
// generated using https://github.com/lnw/finite-diff-weights
    // assuming the Lobatto grid with points {-3,-2.4906716888357,-1.40654638041214,0,1.40654638041214,2.4906716888357,3}
// centre at pos 6: {0.03583169356836, -0.24193000589467, 0.98505481216702, -3.9301627535249, 3.1512062536842}
// centre at pos 5: {-0.020239632416528, 0.14557478380549, -0.72328921328621, -0.38287613952776, 0.98083020142501}
else if (next_id1 == -1)
return ( 0.03583169356836 * prev_value4 + -0.24193000589467 * prev_value3 + 0.98505481216702 * prev_value2 + -3.9301627535249 * prev_value1 + 3.1512062536842 * curr_value )/h;
else if (next_id2 == -1)
return ( -0.020239632416528 * prev_value3 + 0.14557478380549 * prev_value2 + -0.72328921328621 * prev_value1 + -0.38287613952776 * curr_value + 0.98083020142501 * next_value1 )/h;
}
}
return 0.0;
}
/*
* Evaluate cube gradients at grid points for simple equidistant grid. The results are stored to 'device_gradients'.
*
*/
/*
 * Kernel: evaluate finite-difference gradients of 'device_cube' at the points of
 * a simple equidistant grid. Results are stored to the 'device_gradients_*'
 * arrays selected by the boolean template parameters.
 *
 * Template parameters:
 *   finite_diff_order       - order of the finite-difference stencil (up to 9,
 *                             i.e. up to 8 neighbours per side)
 *   evaluate_gradients_x/y/z - which gradient components to compute
 *
 * One thread handles one grid point; out-of-range threads are flagged via
 * 'valid_point' and write nothing.
 */
template <int finite_diff_order, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z>
__global__ void
CubeEvaluator_evaluate_simple_grid_gradients(
                                 const double* __restrict__ device_cube,
                                 const size_t device_pitch,
                                 const size_t device_shape_y,
                                 const Grid3D* __restrict__ grid,
                                 double* __restrict__ device_gradients_x,
                                 double* __restrict__ device_gradients_y,
                                 double* __restrict__ device_gradients_z,
                                 // number of slices handled by this device
                                 // in previous calls
                                 int device_slice_offset,
                                 // number of slices handled by all devices
                                 // in previous calls
                                 int slice_offset,
                                 // number of slices handled by this call
                                 int slice_count,
                                 const double multiplier
                                ) {

    // The result array will be in fortran order with indices x, y, z.
    // This means that the x index will be the fastest to change.
    int x, y, z;
    getXYZ(&x, &y, &z);

    const int grid_type_x = grid->axis[X_]->grid_type,
              grid_type_y = grid->axis[Y_]->grid_type,
              grid_type_z = grid->axis[Z_]->grid_type;

    // grid step of each (equidistant) axis
    const double h_x = grid->axis[X_]->h[0];
    const double h_y = grid->axis[Y_]->h[0];
    const double h_z = grid->axis[Z_]->h[0];

    // offset from the input cube pointer, and offset into this device's output
    int id       = getCubeOffset3D(x, y, z+slice_offset,        device_pitch, device_shape_y);
    int local_id = getCubeOffset3D(x, y, z+device_slice_offset, device_pitch, device_shape_y);

    bool valid_point =    x >= 0
                       && y >= 0
                       && z+slice_offset >= 0
                       && z < slice_count
                       && x < grid->shape[X_]
                       && y < grid->shape[Y_]
                       && z+slice_offset < grid->shape[Z_];
    // an invalid input offset marks out-of-range threads for evaluate_derivative
    if (!valid_point) id = -1;

    // Position within a cell. This is required because there is no
    // translational symmetry by fractions of a cell, and this is relevant for
    // finite diff weights. Zero-initialized so that non-lobatto grids never
    // pass indeterminate values to evaluate_derivative() (previously these
    // were left uninitialized in that case).
    int local_pos_x = 0, local_pos_y = 0, local_pos_z = 0;
    if (grid_type_x == 2 || grid_type_y == 2 || grid_type_z == 2) { // only needed for lobatto grids
        local_pos_x = x % (NLIP-1);
        local_pos_y = y % (NLIP-1);
        local_pos_z = (z+slice_offset) % (NLIP-1);
    }

    // evaluate gradient to z direction
    if (evaluate_gradients_z) {
        // offsets of up to 8 neighbours on each side; -1 marks a missing
        // neighbour (outside the grid or beyond the stencil order)
        int prev_id[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
        int next_id[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
        // 'finite_diff_order' is a template parameter, so the compiler prunes
        // the unreachable iterations at compile time
        for (int k = 1; k <= 8; k++) {
            if (finite_diff_order >= k+1 && z + slice_offset - k >= 0) {
                prev_id[k-1] = getCubeOffset3D(x, y, z+slice_offset-k, device_pitch, device_shape_y);
            }
            if (finite_diff_order >= k+1 && z + slice_offset + k < grid->shape[Z_]) {
                next_id[k-1] = getCubeOffset3D(x, y, z+slice_offset+k, device_pitch, device_shape_y);
            }
        }
        const double value = evaluate_derivative<finite_diff_order>(id, prev_id[0], prev_id[1], prev_id[2], prev_id[3], prev_id[4], prev_id[5], prev_id[6], prev_id[7],
                                                                    next_id[0], next_id[1], next_id[2], next_id[3], next_id[4], next_id[5], next_id[6], next_id[7],
                                                                    device_cube, grid_type_z, local_pos_z, h_z);
        if (valid_point) device_gradients_z[local_id] = multiplier * value;
    }

    // evaluate gradient to y direction
    if (evaluate_gradients_y) {
        int prev_id[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
        int next_id[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
        for (int k = 1; k <= 8; k++) {
            if (finite_diff_order >= k+1 && y - k >= 0) {
                prev_id[k-1] = getCubeOffset3D(x, y-k, z+slice_offset, device_pitch, device_shape_y);
            }
            if (finite_diff_order >= k+1 && y + k < grid->shape[Y_]) {
                next_id[k-1] = getCubeOffset3D(x, y+k, z+slice_offset, device_pitch, device_shape_y);
            }
        }
        const double value = evaluate_derivative<finite_diff_order>(id, prev_id[0], prev_id[1], prev_id[2], prev_id[3], prev_id[4], prev_id[5], prev_id[6], prev_id[7],
                                                                    next_id[0], next_id[1], next_id[2], next_id[3], next_id[4], next_id[5], next_id[6], next_id[7],
                                                                    device_cube, grid_type_y, local_pos_y, h_y);
        if (valid_point) device_gradients_y[local_id] = multiplier * value;
    }

    // evaluate gradient to x direction
    if (evaluate_gradients_x) {
        int prev_id[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
        int next_id[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
        for (int k = 1; k <= 8; k++) {
            if (finite_diff_order >= k+1 && x - k >= 0) {
                prev_id[k-1] = getCubeOffset3D(x-k, y, z+slice_offset, device_pitch, device_shape_y);
            }
            if (finite_diff_order >= k+1 && x + k < grid->shape[X_]) {
                next_id[k-1] = getCubeOffset3D(x+k, y, z+slice_offset, device_pitch, device_shape_y);
            }
        }
        const double value = evaluate_derivative<finite_diff_order>(id, prev_id[0], prev_id[1], prev_id[2], prev_id[3], prev_id[4], prev_id[5], prev_id[6], prev_id[7],
                                                                    next_id[0], next_id[1], next_id[2], next_id[3], next_id[4], next_id[5], next_id[6], next_id[7],
                                                                    device_cube, grid_type_x, local_pos_x, h_x);
        if (valid_point) device_gradients_x[local_id] = multiplier * value;
    }
    return;
}
/*
* Evaluate values of the radial gradients at bubbles, i.e.,
* the radial gradients of input bubbles are evaluated to the
* result bubbles values.
*
 * @param nlip - number of Lagrange integration polynomials per
* cell, i.e., the number of grid points per cell
*/
// Evaluate radial gradients of the input bubble coefficients 'f' into 'result'
// using the derivative Lagrange interpolation polynomials of 'grid'.
// Uses a warp-cooperative layout: for nlip == 7, each 32-lane warp covers
// 5 cells of (nlip-1) points plus one shared boundary point.
template <int nlip>
__device__ inline void BubblesEvaluator_evaluate_radial_gradients(
              const Grid1D* __restrict__ grid,
              // maximum quantum number 'l'
              const int lmax,
              // k value for the bubble
              const int &k,
              // constant pointer to a double array representing the
              // input bubbles f_Alm coefficients
              const double* __restrict__ f,
              // pointer to a variable double array representing the
              // output bubbles g_Alm coefficients
              double* result,
              // global offset in warps
              const int warp_offset
                                    ) {
    // NOTE(review): these three are only initialized in the nlip == 7 branch
    // below; instantiating this template with any other nlip would read
    // indeterminate values — confirm only nlip == 7 is used.
    int global_warp_id, thread_order_number, cells_per_warp;
    bool valid_point = true;
    const int WARP_SIZE = 32;
    // order number of handled point
    const int id = threadIdx.x + blockIdx.x * blockDim.x;
    // if nlip is 7, each warp of 32 handles 5 cells
    if (nlip == 7) {
        // get the global warp order number
        global_warp_id = id / WARP_SIZE + warp_offset;
        cells_per_warp = 5;
        // get the order number of thread within the warp
        thread_order_number = threadIdx.x % WARP_SIZE;
        // 5 cells * 6 points + 1 shared boundary = 31 points; the 32nd lane
        // has no grid point of its own
        if (thread_order_number == 31) valid_point = false;
    }
    // number of cells
    const int ncell = grid->ncell;
    // get the order number of cell
    int icell = global_warp_id * cells_per_warp + thread_order_number / (nlip-1);
    // order number of point in cell
    int in_cell_point = thread_order_number % (nlip-1);
    // let's set it up so that the nlip:th point in cell belongs to the previous cell
    if (in_cell_point == 0 && icell > 0) {
        icell -= 1;
        in_cell_point = nlip;
    }
    // lane 0 of a warp that starts mid-grid duplicates a boundary point;
    // mark it invalid so the result is written only once
    if (thread_order_number == 0 && in_cell_point != 0) valid_point = false;
    // if the cell number is not within the evaluated range, we do not evaluate the
    // values
    bool participant = true;
    if (icell >= ncell ) {
        participant = false;
    }
    // local coordinate of the point within its cell
    // (NOTE(review): the -3 shift presumably centers the nlip=7 cell at 0 — confirm)
    double in_cell_coordinate = (double)(in_cell_point-3);
    // read the LIPs in the shared memory
    __shared__ double lip[nlip * nlip];
    read_lip<nlip, nlip>(grid->lip, threadIdx.x, lip);
    __shared__ double derivative_lip[(nlip-1) * nlip];
    read_lip<nlip-1, nlip>(grid->derivative_lip, threadIdx.x, derivative_lip);
    if (participant) {
        // evaluate the derivative polynomials
        double derivative_polynomials[nlip];
        evaluate_polynomials<nlip-1, nlip>(derivative_lip, in_cell_coordinate, derivative_polynomials);
        // d/dr scales with the inverse of the local grid step
        double one_per_grid_step = 1.0 / grid->h[icell];
        // finally, multiply the derivative polynomials with 1 / grid_step
        for (int j = 0; j < nlip; j++) {
            derivative_polynomials[j] *= one_per_grid_step;
        }
        // get the initial address:
        int address = icell * (nlip-1) + in_cell_point;
        // loop over all (l, m) channels; each is ncell * nlip values apart
        for (int n = 0; n < (lmax+1) * (lmax+1); n++) {
            // get the input function values
            double value = f[address];
            // and evaluate the radial coefficients
            double temp = evaluate_coefficients_shuffle<nlip>(derivative_polynomials, value, thread_order_number, in_cell_point);
            // if the point is valid, store the result
            if (valid_point) result[address] = temp;
            // add the address by one n index:
            address += ncell * nlip;
        }
    }
}
/**************************************************************
* Error checking *
**************************************************************/
/*
 * Abort the program with a diagnostic message if any CUDA error is pending.
 * Synchronizes the device first so that asynchronous kernel failures are
 * also caught (the synchronization was originally meant to be guarded by
 * a DEBUG_CUDA ifdef, left unconditional here).
 */
__host__ inline void check_eval_errors(const char *filename, const int line_number) {
    //#ifdef DEBUG_CUDA
    cudaDeviceSynchronize();
    //#endif
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess) return;
    printf("CUDA error at %s:%i: %s\n", filename, line_number, cudaGetErrorString(status));
    exit(-1);
}
/**************************************************************
* BubblesEvaluator-implementation *
**************************************************************/
// Construct an evaluator that launches its work on 'streamContainer'.
// The container is referenced, not owned.
BubblesEvaluator::BubblesEvaluator(StreamContainer *streamContainer)
        : streamContainer(streamContainer) {
}
// Register the Bubbles object that subsequent evaluations operate on.
// Only the reference is stored; ownership stays with the caller.
void BubblesEvaluator::setBubbles(Bubbles *bubbles) {
    this->bubbles = bubbles;
}
/*
* Evaluate the bubbles at grid points.
*
* @param bubbles - The bubbles that are evaluated in to the grid
* @param grid - The grid associated with all the output cubes
*/
/*
 * Evaluate the bubbles at grid points by dispatching to Bubbles::inject
 * with the flag combination selected by 'gradient_direction'.
 *
 * @param grid               - the grid associated with all the output cubes
 * @param result_cube        - cube receiving the evaluated values
 * @param gradient_cube_*    - cubes receiving gradients (used per direction)
 * @param gradient_direction - X_/Y_/Z_ for a single component, 3 for all
 *                             three plus values; anything else: values only
 * @param fin_diff_ord       - unused here (bubbles are evaluated analytically)
 */
void BubblesEvaluator::evaluateGrid(Grid3D *grid, CudaCube *result_cube, CudaCube *gradient_cube_x, CudaCube *gradient_cube_y, CudaCube *gradient_cube_z, int gradient_direction, int fin_diff_ord) {
    switch (gradient_direction) {
        case X_:
            this->bubbles->inject(grid, result_cube, 0, gradient_cube_x,
                                  gradient_cube_y, gradient_cube_z, false, true, false, false);
            break;
        case Y_:
            this->bubbles->inject(grid, result_cube, 0, gradient_cube_x,
                                  gradient_cube_y, gradient_cube_z, false, false, true, false);
            break;
        case Z_:
            this->bubbles->inject(grid, result_cube, 0, gradient_cube_x,
                                  gradient_cube_y, gradient_cube_z, false, false, false, true);
            break;
        case 3:
            this->bubbles->inject(grid, result_cube, 0, gradient_cube_x,
                                  gradient_cube_y, gradient_cube_z, true, true, true, true);
            break;
        default:
            this->bubbles->inject(grid, result_cube);
            break;
    }
}
/*
* Deallocate the device and host memory allocated for this object.
*/
/*
 * Drop the references held by this evaluator. Nothing is freed here:
 * the stream container and the bubbles are owned elsewhere.
 */
void BubblesEvaluator::destroy() {
    this->bubbles = NULL;
    this->streamContainer = NULL;
}
/**************************************************************
* CubeEvaluator-implementation *
**************************************************************/
// Construct an evaluator that launches its work on 'streamContainer'.
// The container is referenced, not owned.
CubeEvaluator::CubeEvaluator(StreamContainer *streamContainer)
        : streamContainer(streamContainer) {
}
/*
* Deallocate the device and host memory allocated for this object.
*/
/*
 * Drop the references held by this evaluator. Nothing is freed here:
 * the stream container, input cube and grid are owned elsewhere.
 */
void CubeEvaluator::destroy() {
    this->grid = NULL;
    this->input_cube = NULL;
    this->streamContainer = NULL;
}
/*
* Set the input cube from which the evaluation is performed.
*
* @param input_cube - CudaCube object from which the evaluation is performed. The shape
* of the data should be according to the given grid
*/
// Register the cube from which evaluation is performed. Its data layout
// should match the grid given to setInputGrid. Reference only, not owned.
void CubeEvaluator::setInputCube(CudaCube *input_cube) {
    this->input_cube = input_cube;
}
/*
* Set the input grid from which the evaluation is performed.
*
* @param input_grid - Grid3D object defining the shape of the cube for which the evaluation is performed.
*/
// Register the grid that defines the shape of the input cube.
// Reference only, not owned.
void CubeEvaluator::setInputGrid(Grid3D *input_grid) {
    this->grid = input_grid;
}
/*
* Evaluate the cube at preset points. The results are stored in the device memory.
* @param result_points - Points-object in which the results are stored, if gradient_direction=0-2, the results are stored here
 * @param gradient_points_x  - Points-object in which the gradients in x-direction are stored, if gradient_direction=3
 * @param gradient_points_y  - Points-object in which the gradients in y-direction are stored, if gradient_direction=3
 * @param gradient_points_z  - Points-object in which the gradients in z-direction are stored, if gradient_direction=3
* @param gradient_direction - possible values X_ = 0, Y_ = 1, Z_ = 2, (X_, Y_, Z_) = 3 && this->evaluateGradients
* anything else: no gradients
*/
/*
 * Evaluate the cube at preset points. The results are stored in device memory.
 *
 * Work is split warp-aligned across devices and their streams; each stream
 * launches one kernel for its share of the points.
 *
 * @param result_points      - Points where the results are stored; if
 *                             gradient_direction is 0-2 the single gradient
 *                             component is stored here as well
 * @param gradient_points_x  - Points for x-gradients, used only when gradient_direction == 3
 * @param gradient_points_y  - Points for y-gradients, used only when gradient_direction == 3
 * @param gradient_points_z  - Points for z-gradients, used only when gradient_direction == 3
 * @param gradient_direction - X_ = 0, Y_ = 1, Z_ = 2, all three = 3;
 *                             anything else: values only, no gradients
 */
void CubeEvaluator::evaluatePoints(Points *result_points,
                                   Points *gradient_points_x,
                                   Points *gradient_points_y,
                                   Points *gradient_points_z,
                                   int gradient_direction) {
    int warp_size = 32;
    // total point count rounded up to full warps
    int total_warp_count = result_points->point_coordinates->number_of_points / warp_size + ((result_points->point_coordinates->number_of_points % warp_size) > 0);
    int point_offset = 0;
    int *cube_memory_shape = this->input_cube->getDeviceMemoryShape();
    check_eval_errors(__FILE__, __LINE__);

    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->streamContainer->setDevice(device);

        // number of warps (and thus points) this device is responsible for
        int device_warp_count = total_warp_count / this->streamContainer->getNumberOfDevices()
                                + ((total_warp_count % this->streamContainer->getNumberOfDevices()) > device);
        int device_point_count = device_warp_count * warp_size;
        int device_point_offset = 0;

        // get the order number of 'device' in cube's streamcontainer
        int cube_device = this->input_cube->getStreamContainer()->getDeviceOrderNumber(this->streamContainer->getDeviceNumber(device));
        check_eval_errors(__FILE__, __LINE__);

        // pointers to the device points & results
        double *device_points_ptr  = result_points->point_coordinates->device_coordinates[device];
        double *device_results_ptr = result_points->device_values[device];
        // FIX: initialize to NULL so that an out-of-range gradient_direction
        // never passes indeterminate pointers to the kernel (these were
        // previously left uninitialized in the fall-through case)
        double *device_gradients_x_ptr = NULL;
        double *device_gradients_y_ptr = NULL;
        double *device_gradients_z_ptr = NULL;
        if (gradient_direction == 3) {
            device_gradients_x_ptr = gradient_points_x->device_values[device];
            device_gradients_y_ptr = gradient_points_y->device_values[device];
            device_gradients_z_ptr = gradient_points_z->device_values[device];
        }
        else if (gradient_direction < 3 && gradient_direction >= 0) {
            // a single gradient component is written directly to the results
            device_gradients_x_ptr = result_points->device_values[device];
            device_gradients_y_ptr = result_points->device_values[device];
            device_gradients_z_ptr = result_points->device_values[device];
        }

        for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) {
            // get the number of points that are in the responsibility of this stream
            int stream_warp_count = device_warp_count / this->streamContainer->getStreamsPerDevice()
                                    + ((device_warp_count % streamContainer->getStreamsPerDevice()) > stream);
            int stream_point_count = stream_warp_count * warp_size;

            // make sure that the last stream does not go over board
            if (stream_point_count + point_offset > result_points->point_coordinates->number_of_points) {
                stream_point_count = result_points->point_coordinates->number_of_points - point_offset;
            }

            if (stream_point_count > 0) {
                check_eval_errors(__FILE__, __LINE__);
                int grid_size = (stream_point_count + BLOCK_SIZE - 1) / BLOCK_SIZE;
                // TODO: consider replacing cube_device with a grid_device index
                // when accessing grid->device_copies below; probably does not
                // matter but we have to be careful.
                if (gradient_direction == X_) {
                    CubeEvaluator_evaluate_points <false, true, false, false>
                        <<< grid_size, BLOCK_SIZE, 0,
                            *this->streamContainer->getStream(device, stream) >>>
                                  (this->input_cube->getDevicePointer(cube_device),
                                   this->input_cube->getDevicePitch(cube_device),
                                   cube_memory_shape[Y_],
                                   this->grid->device_copies[cube_device],
                                   device_results_ptr,
                                   device_gradients_x_ptr,
                                   device_gradients_y_ptr,
                                   device_gradients_z_ptr,
                                   device_points_ptr,
                                   device_point_count,
                                   stream_point_count,
                                   device_point_offset,
                                   1.0
                                  );
                }
                else if (gradient_direction == Y_) {
                    CubeEvaluator_evaluate_points <false, false, true, false>
                        <<< grid_size, BLOCK_SIZE, 0,
                            *this->streamContainer->getStream(device, stream) >>>
                                  (this->input_cube->getDevicePointer(cube_device),
                                   this->input_cube->getDevicePitch(cube_device),
                                   cube_memory_shape[Y_],
                                   this->grid->device_copies[cube_device],
                                   device_results_ptr,
                                   device_gradients_x_ptr,
                                   device_gradients_y_ptr,
                                   device_gradients_z_ptr,
                                   device_points_ptr,
                                   device_point_count,
                                   stream_point_count,
                                   device_point_offset,
                                   1.0
                                  );
                }
                else if (gradient_direction == Z_) {
                    CubeEvaluator_evaluate_points <false, false, false, true>
                        <<< grid_size, BLOCK_SIZE, 0,
                            *this->streamContainer->getStream(device, stream) >>>
                                  (this->input_cube->getDevicePointer(cube_device),
                                   this->input_cube->getDevicePitch(cube_device),
                                   cube_memory_shape[Y_],
                                   this->grid->device_copies[cube_device],
                                   device_results_ptr,
                                   device_gradients_x_ptr,
                                   device_gradients_y_ptr,
                                   device_gradients_z_ptr,
                                   device_points_ptr,
                                   device_point_count,
                                   stream_point_count,
                                   device_point_offset,
                                   1.0
                                  );
                }
                else if (gradient_direction == 3) {
                    CubeEvaluator_evaluate_points <true, true, true, true>
                        <<< grid_size, BLOCK_SIZE, 0,
                            *this->streamContainer->getStream(device, stream) >>>
                                  (this->input_cube->getDevicePointer(cube_device),
                                   this->input_cube->getDevicePitch(cube_device),
                                   cube_memory_shape[Y_],
                                   this->grid->device_copies[cube_device],
                                   device_results_ptr,
                                   device_gradients_x_ptr,
                                   device_gradients_y_ptr,
                                   device_gradients_z_ptr,
                                   device_points_ptr,
                                   device_point_count,
                                   stream_point_count,
                                   device_point_offset,
                                   1.0
                                  );
                }
                else {
                    CubeEvaluator_evaluate_points <true, false, false, false>
                        <<< grid_size, BLOCK_SIZE, 0,
                            *this->streamContainer->getStream(device, stream) >>>
                                  (this->input_cube->getDevicePointer(cube_device),
                                   this->input_cube->getDevicePitch(cube_device),
                                   cube_memory_shape[Y_],
                                   this->grid->device_copies[cube_device],
                                   device_results_ptr,
                                   device_gradients_x_ptr,
                                   device_gradients_y_ptr,
                                   device_gradients_z_ptr,
                                   device_points_ptr,
                                   device_point_count,
                                   stream_point_count,
                                   device_point_offset,
                                   1.0
                                  );
                }
            }
            check_eval_errors(__FILE__, __LINE__);

            // advance the offsets by the number of points handled by this stream
            point_offset += stream_point_count;
            device_point_offset += stream_point_count;
        }
        check_eval_errors(__FILE__, __LINE__);
    }
}
/*
* Evaluate the cube at the points of grid. The results are stored in the device memory
 * in the result_cube and gradient_cubes. The latter only occurs if gradient_direction == 3.
*
* @param grid - The grid associated with all the input and output cubes
* @param results_cube - CudaCube where the results are stored, if gradient direction is 0-2, the gradients will be stored here
* @param gradients_cube_x - CudaCube where the x-gradients are stored if the gradient_direction=3
* @param gradients_cube_y - CudaCube where the y-gradients are stored if the gradient_direction=3
* @param gradients_cube_z - CudaCube where the z-gradients are stored if the gradient_direction=3
* @param gradient_direction - possible values X_ = 0, Y_ = 1, Z_ = 2, (X_, Y_, Z_) = 3 && this->evaluateGradients
* anything else: no gradients
*
*/
void CubeEvaluator::evaluateGrid(Grid3D *grid,
CudaCube *result_cube,
CudaCube *gradient_cube_x,
CudaCube *gradient_cube_y,
CudaCube *gradient_cube_z,
const int gradient_direction,
const int finite_diff_order) {
check_eval_errors(__FILE__, __LINE__);
// printf("fin diff order in evaluateGrid: %i, %i \n", finite_diff_order, gradient_direction);
int total_slice_count = result_cube->getShape(Z_);
// the minimum l is 0 always in the multiplication
int device_slice_count;
// get the input cube pointer
// TODO: we are assuming here, that the input and output cubes have the same
// memory shapes, this is probably not the case in all occasions in the future
double **device_input_cubes = this->input_cube->getDeviceCubes();
// get the pointer arrays from the cubes
double **device_cubes = result_cube->getDeviceCubes();
double **device_gradients_x;
double **device_gradients_y;
double **device_gradients_z;
// get the device gradient result pointers
if (gradient_direction < 3) {
device_gradients_x = result_cube->getDeviceCubes();
device_gradients_y = result_cube->getDeviceCubes();
device_gradients_z = result_cube->getDeviceCubes();
}
else {
device_gradients_x = gradient_cube_x->getDeviceCubes();
device_gradients_y = gradient_cube_y->getDeviceCubes();
device_gradients_z = gradient_cube_z->getDeviceCubes();
}
size_t *device_pitches = result_cube->getDevicePitches();
int *device_memory_shape = result_cube->getDeviceMemoryShape();
// init some stuff to help calculate the launch parameters
// NOTE: these are for nlip: 7
//int cells_per_block = BLOCK_SIZE / 32 * 5;
int warps_per_string = grid->axis[X_]->ncell / 5 + 1;
int warps_per_slice = grid->axis[Y_]->ncell * warps_per_string;
int warps_per_block = BLOCK_SIZE / 32;
int slice_offset = 0;
// copy the cubes to the device & execute the kernels
for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
// set the used device (gpu)
this->streamContainer->setDevice(device);
//double *dev_cube = device_cubes[device];
double *dev_input_cube = device_input_cubes[device];
double *dev_gradient_x = device_gradients_x[device];
double *dev_gradient_y = device_gradients_y[device];
double *dev_gradient_z = device_gradients_z[device];
int device_slice_offset = 0;
// calculate the number of vectors this device handles
device_slice_count = total_slice_count / this->streamContainer->getNumberOfDevices()
+ ((total_slice_count % this->streamContainer->getNumberOfDevices()) > device);
for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream++) {
// determine the count of vectors handled by this stream
int slice_count = device_slice_count / this->streamContainer->getStreamsPerDevice()
+ ((device_slice_count % this->streamContainer->getStreamsPerDevice()) > stream);
check_eval_errors(__FILE__, __LINE__);
if (slice_count > 0) {
// calculate the launch configuration for the f1-inject
//int grid_size = warps_per_slice * slice_count * warps_per_block + 1;
dim3 block, launch_grid;
result_cube->getLaunchConfiguration(&launch_grid, &block, slice_count, BLOCK_SIZE);
// call the kernel
if (gradient_direction == X_ && finite_diff_order==7) {
CubeEvaluator_evaluate_simple_grid_gradients <7, true, false, false>
<<< launch_grid, block, 0,
*this->streamContainer->getStream(device, stream) >>>
(dev_input_cube,
device_pitches[device],
device_memory_shape[Y_],
grid->device_copies[device],
dev_gradient_x,
dev_gradient_y,
dev_gradient_z,
// number of slices handled by this device
// in previous calls
device_slice_offset,
// number of slices handled by all devices
// in previous calls
slice_offset,
// number of slices handled by this call
slice_count,
//warps_per_string,
1.0);
}
if (gradient_direction == X_ && finite_diff_order==9) {
CubeEvaluator_evaluate_simple_grid_gradients <9, true, false, false>
<<< launch_grid, block, 0,
*this->streamContainer->getStream(device, stream) >>>
(dev_input_cube,
device_pitches[device],
device_memory_shape[Y_],
grid->device_copies[device],
dev_gradient_x,
dev_gradient_y,
dev_gradient_z,
// number of slices handled by this device
// in previous calls
device_slice_offset,
// number of slices handled by all devices
// in previous calls
slice_offset,
// number of slices handled by this call
slice_count,
//warps_per_string,
1.0);
}
else if (gradient_direction == Y_ && finite_diff_order==7) {
CubeEvaluator_evaluate_simple_grid_gradients <7, false, true, false>
<<< launch_grid, block, 0,
*this->streamContainer->getStream(device, stream) >>>
(dev_input_cube,
device_pitches[device],
device_memory_shape[Y_],
grid->device_copies[device],
dev_gradient_x,
dev_gradient_y,
dev_gradient_z,
// number of slices handled by this device
// in previous calls
device_slice_offset,
// number of slices handled by all devices
// in previous calls
slice_offset,
// number of slices handled by this call
slice_count,
//warps_per_string,
1.0);
}
else if (gradient_direction == Y_ && finite_diff_order==9) {
CubeEvaluator_evaluate_simple_grid_gradients <9, false, true, false>
<<< launch_grid, block, 0,
*this->streamContainer->getStream(device, stream) >>>
(dev_input_cube,
device_pitches[device],
device_memory_shape[Y_],
grid->device_copies[device],
dev_gradient_x,
dev_gradient_y,
dev_gradient_z,
// number of slices handled by this device
// in previous calls
device_slice_offset,
// number of slices handled by all devices
// in previous calls
slice_offset,
// number of slices handled by this call
slice_count,
//warps_per_string,
1.0);
}
else if (gradient_direction == Z_ && finite_diff_order==7) {
CubeEvaluator_evaluate_simple_grid_gradients <7, false, false, true>
<<< launch_grid, block, 0,
*this->streamContainer->getStream(device, stream) >>>
(dev_input_cube,
device_pitches[device],
device_memory_shape[Y_],
grid->device_copies[device],
dev_gradient_x,
dev_gradient_y,
dev_gradient_z,
// number of slices handled by this device
// in previous calls
device_slice_offset,
// number of slices handled by all devices
// in previous calls
slice_offset,
// number of slices handled by this call
slice_count,
//warps_per_string,
1.0);
}
else if (gradient_direction == Z_ && finite_diff_order==9) {
CubeEvaluator_evaluate_simple_grid_gradients <9, false, false, true>
<<< launch_grid, block, 0,
*this->streamContainer->getStream(device, stream) >>>
(dev_input_cube,
device_pitches[device],
device_memory_shape[Y_],
grid->device_copies[device],
dev_gradient_x,
dev_gradient_y,
dev_gradient_z,
// number of slices handled by this device
// in previous calls
device_slice_offset,
// number of slices handled by all devices
// in previous calls
slice_offset,
// number of slices handled by this call
slice_count,
//warps_per_string,
1.0);
}
else if (gradient_direction == 3 && finite_diff_order==7) {
CubeEvaluator_evaluate_simple_grid_gradients <7, true, true, true>
<<< launch_grid, block, 0,
*this->streamContainer->getStream(device, stream) >>>
(dev_input_cube,
device_pitches[device],
device_memory_shape[Y_],
grid->device_copies[device],
dev_gradient_x,
dev_gradient_y,
dev_gradient_z,
// number of slices handled by this device
// in previous calls
device_slice_offset,
// number of slices handled by all devices
// in previous calls
slice_offset,
// number of slices handled by this call
slice_count,
//warps_per_string,
1.0);
}
else if (gradient_direction == 3 && finite_diff_order==9) {
CubeEvaluator_evaluate_simple_grid_gradients <9, true, true, true>
<<< launch_grid, block, 0,
*this->streamContainer->getStream(device, stream) >>>
(dev_input_cube,
device_pitches[device],
device_memory_shape[Y_],
grid->device_copies[device],
dev_gradient_x,
dev_gradient_y,
dev_gradient_z,
// number of slices handled by this device
// in previous calls
device_slice_offset,
// number of slices handled by all devices
// in previous calls
slice_offset,
// number of slices handled by this call
slice_count,
//warps_per_string,
1.0);
}
check_eval_errors(__FILE__, __LINE__);
// increase the address by the number of vectors in this array
device_slice_offset += slice_count;
slice_offset += slice_count;
}
}
}
}
/********************************************
* Fortran interfaces for Evaluator *
********************************************/
// Fortran/C interface: evaluate on a grid with caller-chosen gradient
// direction and finite-difference order; forwards all cubes untouched.
extern "C" void evaluator_evaluate_grid_cuda(Evaluator *evaluator, Grid3D *grid, CudaCube *result_cube, CudaCube *gradient_cube_x, CudaCube *gradient_cube_y, CudaCube *gradient_cube_z, int gradient_direction, int fin_diff_ord) {
    evaluator->evaluateGrid(grid, result_cube, gradient_cube_x, gradient_cube_y, gradient_cube_z, gradient_direction, fin_diff_ord);
}
// Fortran/C interface: plain grid evaluation; gradient_direction == -1
// disables gradient output (gradient cubes are NULL).
extern "C" void evaluator_evaluate_grid_without_gradients_cuda(Evaluator *evaluator, Grid3D *grid, CudaCube *result_cube, int fin_diff_ord) {
    evaluator->evaluateGrid(grid, result_cube, NULL, NULL, NULL, -1, fin_diff_ord);
}
// Fortran/C interface: grid evaluation of X-direction gradients.
// NOTE(review): gradient cubes are NULL while gradient_direction == X_ —
// presumably evaluateGrid writes the X gradient into result_cube in this
// mode; confirm against Evaluator::evaluateGrid.
extern "C" void evaluator_evaluate_grid_x_gradients_cuda(Evaluator *evaluator, Grid3D *grid, CudaCube *result_cube, int fin_diff_ord) {
    evaluator->evaluateGrid(grid, result_cube, NULL, NULL, NULL, X_, fin_diff_ord);
}
// Fortran/C interface: grid evaluation of Y-direction gradients.
// NOTE(review): gradient cubes are NULL while gradient_direction == Y_ —
// presumably the Y gradient lands in result_cube; confirm against
// Evaluator::evaluateGrid.
extern "C" void evaluator_evaluate_grid_y_gradients_cuda(Evaluator *evaluator, Grid3D *grid, CudaCube *result_cube, int fin_diff_ord) {
    evaluator->evaluateGrid(grid, result_cube, NULL, NULL, NULL, Y_, fin_diff_ord);
}
// Fortran/C interface: grid evaluation of Z-direction gradients.
// NOTE(review): gradient cubes are NULL while gradient_direction == Z_ —
// presumably the Z gradient lands in result_cube; confirm against
// Evaluator::evaluateGrid.
extern "C" void evaluator_evaluate_grid_z_gradients_cuda(Evaluator *evaluator, Grid3D *grid, CudaCube *result_cube, int fin_diff_ord) {
    evaluator->evaluateGrid(grid, result_cube, NULL, NULL, NULL, Z_, fin_diff_ord);
}
// Fortran/C interface: evaluate at arbitrary points with caller-chosen
// gradient direction; forwards all point buffers untouched.
extern "C" void evaluator_evaluate_points_cuda(Evaluator *evaluator, Points *result_points, Points *gradient_points_x, Points *gradient_points_y, Points *gradient_points_z, int gradient_direction) {
    evaluator->evaluatePoints(result_points, gradient_points_x, gradient_points_y, gradient_points_z, gradient_direction);
}
// Fortran/C interface: point evaluation without gradients
// (gradient_direction == -1, gradient buffers NULL).
extern "C" void evaluator_evaluate_points_without_gradients_cuda(Evaluator *evaluator, Points *result_points) {
    evaluator->evaluatePoints(result_points, NULL, NULL, NULL, -1);
}
// Fortran/C interface: point evaluation of X-direction gradients
// (result presumably written into result_points; see evaluatePoints).
extern "C" void evaluator_evaluate_points_x_gradients_cuda(Evaluator *evaluator, Points *result_points) {
    evaluator->evaluatePoints(result_points, NULL, NULL, NULL, X_);
}
// Fortran/C interface: point evaluation of Y-direction gradients
// (result presumably written into result_points; see evaluatePoints).
extern "C" void evaluator_evaluate_points_y_gradients_cuda(Evaluator *evaluator, Points *result_points) {
    evaluator->evaluatePoints(result_points, NULL, NULL, NULL, Y_);
}
// Fortran/C interface: point evaluation of Z-direction gradients
// (result presumably written into result_points; see evaluatePoints).
extern "C" void evaluator_evaluate_points_z_gradients_cuda(Evaluator *evaluator, Points *result_points) {
    evaluator->evaluatePoints(result_points, NULL, NULL, NULL, Z_);
}
// Fortran/C interface: forwards to Evaluator::destroy() (resource teardown).
extern "C" void evaluator_destroy_cuda(Evaluator *evaluator) {
    evaluator->destroy();
}
/********************************************
* Fortran interfaces for BubblesEvaluator *
********************************************/
// Fortran/C interface: heap-allocates a BubblesEvaluator bound to the given
// stream container. The caller owns the returned pointer.
extern "C" BubblesEvaluator *bubblesevaluator_init_cuda(StreamContainer *streamContainer) {
    BubblesEvaluator *new_bubbles_evaluator = new BubblesEvaluator(streamContainer);
    return new_bubbles_evaluator;
}
// Fortran/C interface: attaches a Bubbles object to the evaluator.
extern "C" void bubblesevaluator_set_bubbles_cuda(BubblesEvaluator *bubbles_evaluator, Bubbles *bubbles) {
    bubbles_evaluator->setBubbles(bubbles);
}
/********************************************
* Fortran interfaces for CubeEvaluator *
********************************************/
// Fortran/C interface: heap-allocates a CubeEvaluator bound to the given
// stream container. The caller owns the returned pointer.
extern "C" CubeEvaluator *cubeevaluator_init_cuda(StreamContainer *streamContainer) {
    CubeEvaluator *new_cube_evaluator = new CubeEvaluator(streamContainer);
    return new_cube_evaluator;
}
// Fortran/C interface: sets the cube the evaluator interpolates from.
extern "C" void cubeevaluator_set_input_cube_cuda(CubeEvaluator *cube_evaluator, CudaCube *cube) {
    cube_evaluator->setInputCube(cube);
}
// Fortran/C interface: sets the grid describing the input cube's geometry.
extern "C" void cubeevaluator_set_input_grid_cuda(CubeEvaluator *cube_evaluator, Grid3D *grid) {
    cube_evaluator->setInputGrid(grid);
}
|
c5f7884cf22328ef6af1ceb9ce916b5681942515.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "update_part_props.cuh"
#include "fill.cuh"
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/gpu_data/partitions.h>
namespace NKernel {
template <int BlockSize>
// Per-thread partial sum of stat[offset .. offset + size) computed
// cooperatively by `blockCount` thread blocks. `blockIdx` here is a
// *parameter* — the index of this block among the cooperating ones —
// deliberately shadowing the CUDA built-in inside this function.
//
// Strategy: block 0 alone consumes the unaligned head and tail with scalar
// Ldg loads; then all blocks stripe over the middle (a multiple of
// alignSize = 128 floats) with vectorized float4 loads.
// NOTE(review): float4 loads assume the base pointer is at least 16-byte
// aligned so that 128-element boundaries are float4-aligned — confirm for
// all callers.
// Each thread returns only its own partial; callers reduce across the
// block afterwards (see the *Impl kernels below).
__forceinline__ __device__ double ComputeSum(const float* __restrict__ stat, ui32 offset, ui32 size, int blockIdx, int blockCount) {
    float4 sum;
    sum.x = sum.y = sum.z = sum.w = 0;
    stat += offset;
    const int warpSize = 32;
    const int alignSize = 4 * warpSize;  // 128 floats: one float4 per lane of a warp
    {
        // Unaligned head: up to the next alignSize boundary (block 0 only).
        int lastId = min(size, alignSize - (offset % alignSize));
        if (blockIdx == 0) {
            if (threadIdx.x < lastId) {
                sum.x += Ldg(stat + threadIdx.x);
            }
        }
        size = max(size - lastId, 0);
        stat += lastId;
    }
    //now lets align end
    // Unaligned tail: the trailing size % alignSize elements (block 0 only).
    const int unalignedTail = (size % alignSize);
    if (unalignedTail != 0) {
        if (blockIdx == 0) {
            const int tailOffset = size - unalignedTail;
            if (threadIdx.x < unalignedTail) {
                sum.y += Ldg(stat + tailOffset + threadIdx.x);
            }
        }
    }
    size -= unalignedTail;
    // Aligned middle: the warps of all cooperating blocks stripe over the
    // data; each lane loads one float4 (4 consecutive floats) per iteration.
    const int entriesPerWarp = warpSize * 4;
    const int warpsPerBlock = (BlockSize / 32);
    const int globalWarpId = (blockIdx * warpsPerBlock) + (threadIdx.x / 32);
    stat += globalWarpId * entriesPerWarp;
    size = max(size - globalWarpId * entriesPerWarp, 0);
    const int stripeSize = entriesPerWarp * warpsPerBlock * blockCount;
    const int localIdx = (threadIdx.x & 31) * 4;
    const int iterCount = (size - localIdx + stripeSize - 1) / stripeSize;
    stat += localIdx;
    if (size > 0) {
        for (int i = 0; i < iterCount; ++i) {
            const float4* stat4 = (const float4*) stat;
            float4 val = Ldg(stat4);
            sum.x += val.x;
            sum.y += val.y;
            sum.z += val.z;
            sum.w += val.w;
            stat += stripeSize;
        }
    }
    // Combine the four float accumulators in double precision.
    return (double)sum.x + (double)sum.y + (double)sum.z + (double)sum.w;
};
// Accumulates per-partition, per-statistic sums for contiguous partitions
// described by an offsets array: partition y spans rows
// [offsets[y], offsets[y+1]) of each statistic line.
//
// Grid layout: x = cooperating blocks per partition, y = partition index,
// z = statistic index (gridDim.z == statCount). Partial sums are atomically
// added into statSums[blockIdx.y * gridDim.z + statId]; the caller zeroes
// that buffer first (see UpdatePartitionsPropsForOffsets).
template <int BlockSize, class TOutput>
__launch_bounds__(BlockSize, 2)
__global__ void UpdatePartitionsPropsForOffsetsImpl(const ui32* offsets,
                                                    const float* source,
                                                    ui64 statLineSize,
                                                    TOutput* statSums) {
    const ui32 partOffset = __ldg(offsets + blockIdx.y);
    const ui32 partSize = __ldg(offsets + blockIdx.y + 1) - partOffset;
    const ui32 statId = blockIdx.z;
    __shared__ volatile double localBuffer[BlockSize];
    source += statId * statLineSize;   // jump to this statistic's row
    // Use fewer blocks for small partitions so each block has enough work.
    const int minDocsPerBlock = BlockSize * 16;
    const int effectiveBlockCount = min(gridDim.x, (partSize + minDocsPerBlock - 1) / minDocsPerBlock);
    // Uniform per-block exit: the condition depends only on blockIdx, so the
    // __syncthreads() below is still reached by every thread that continues.
    if (blockIdx.x >= effectiveBlockCount) {
        return;
    }
    const int blockId = blockIdx.x % effectiveBlockCount;  // == blockIdx.x here; modulo is defensive
    localBuffer[threadIdx.x] = ComputeSum<BlockSize>(source, partOffset, partSize, blockId, effectiveBlockCount);
    __syncthreads();
    double result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize);
    // Lane 0 publishes; (near-)zero sums are skipped to cut atomic traffic.
    if (threadIdx.x == 0 && abs(result) > 1e-20) {
        TOutput* writeDst = statSums + statId + blockIdx.y * gridDim.z;
        TOutput addVal = (TOutput)result;
        TAtomicAdd<TOutput>::Add(writeDst, addVal);
    }
}
// Accumulates per-leaf, per-statistic sums for the partitions listed in
// partIds (indices into `parts`). Grid layout: x = cooperating blocks per
// partition, y = index into partIds, z = statistic index (gridDim.z ==
// statCount). Results are atomically added into
// statSums[leafId * gridDim.z + statId]; the caller zeroes those slots
// first (see ClearPartPropsImpl).
template <int BlockSize>
__launch_bounds__(BlockSize, 2)
__global__ void UpdatePartitionsPropsImpl(const ui32* partIds,
                                          const TDataPartition* parts,
                                          const float* source,
                                          ui64 statLineSize,
                                          double* statSums) {
    const ui32 leafId = partIds[blockIdx.y];
    TDataPartition part = parts[leafId];
    const ui32 statId = blockIdx.z;
    __shared__ volatile double localBuffer[BlockSize];
    source += statId * statLineSize;   // jump to this statistic's row
    // Use fewer blocks for small partitions so each block has enough work.
    const int minDocsPerBlock = BlockSize * 16;
    const int effectiveBlockCount = min(gridDim.x, (part.Size + minDocsPerBlock - 1) / minDocsPerBlock);
    // Uniform per-block exit (condition depends only on blockIdx), so the
    // __syncthreads() below is safe for the blocks that continue.
    if (blockIdx.x >= effectiveBlockCount) {
        return;
    }
    const int blockId = blockIdx.x % effectiveBlockCount;  // == blockIdx.x here; modulo is defensive
    localBuffer[threadIdx.x] = ComputeSum<BlockSize>(source, part.Offset, part.Size, blockId, effectiveBlockCount);
    __syncthreads();
    double result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize);
    // Lane 0 publishes; (near-)zero sums are skipped to cut atomic traffic.
    if (threadIdx.x == 0 && abs(result) > 1e-20) {
        TAtomicAdd<double>::Add(statSums + statId + leafId * gridDim.z, result);
    }
}
// Zeroes the statistic accumulators of every partition listed in partIds:
// statSums[leafId * statCount + s] = 0 for all s. Partitions are dealt out
// to warps round-robin; within a warp the lanes stride over the statistics.
__global__ void ClearPartPropsImpl(const ui32* partIds, ui32 partCount,
                                   ui32 statCount,
                                   double* statSums) {
    const ui32 myWarp = threadIdx.x / 32;
    const ui32 lane = threadIdx.x & 31;
    const ui32 warpsTotal = blockDim.x / 32;
    for (ui32 p = myWarp; p < partCount; p += warpsTotal) {
        double* dst = statSums + statCount * partIds[p];
        for (ui32 s = lane; s < statCount; s += 32) {
            dst[s] = 0.0;
        }
    }
}
// Host entry point: sums statCount statistics over the partitions listed in
// partIds and accumulates them into statSums[leafId * statCount + statId].
// The target slots are zeroed on-stream first (ClearPartPropsImpl), so the
// whole call is stream-ordered and asynchronous with respect to `stream`.
void UpdatePartitionsProps(const TDataPartition* parts,
                           const ui32* partIds,
                           ui32 partCount,
                           const float* source,
                           ui32 statCount,
                           ui64 statLineSize,
                           double* statSums,
                           TCudaStream stream
) {
    const ui32 blockSize = 512;
    dim3 numBlocks;
    numBlocks.y = partCount;
    numBlocks.z = statCount;
    // Spread ~2 blocks per SM across all statistics (matches the kernel's
    // __launch_bounds__(BlockSize, 2)).
    numBlocks.x = CeilDivide(2 * TArchProps::SMCount(), (int)statCount);
    {
        hipLaunchKernelGGL(( ClearPartPropsImpl), dim3(1), dim3(1024), 0, stream, partIds, partCount, statCount, statSums);
    }
    hipLaunchKernelGGL(( UpdatePartitionsPropsImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, partIds, parts, source, statLineSize, statSums);
}
// Convenience wrapper for an after-split update: accumulates statistics for
// the left children and then the right children with two back-to-back
// launches on the same stream (stream ordering keeps them sequential).
void UpdatePartitionsPropsForSplit(const TDataPartition* parts,
                                   const ui32* leftPartIds,
                                   const ui32* rightPartIds,
                                   ui32 partCount,
                                   const float* source,
                                   ui32 statCount,
                                   ui64 statLineSize,
                                   double* statSums,
                                   TCudaStream stream) {
    //TODO(noxoomo): if it'll be "slow", could be made in one kernel
    UpdatePartitionsProps(parts, leftPartIds, partCount, source, statCount, statLineSize, statSums, stream);
    UpdatePartitionsProps(parts, rightPartIds, partCount, source, statCount, statLineSize, statSums, stream);
}
// Host entry point for offset-described partitions: partition i spans rows
// [offsets[i], offsets[i+1]). statSums (count * statCount doubles) is
// zeroed on-stream via FillBuffer before the accumulation kernel runs, so
// the call is stream-ordered and asynchronous.
void UpdatePartitionsPropsForOffsets(const ui32* offsets, ui32 count,
                                     const float* source,
                                     ui32 statCount,
                                     ui64 statLineSize,
                                     double* statSums,
                                     TCudaStream stream
) {
    const ui32 blockSize = 512;
    dim3 numBlocks;
    numBlocks.y = count;
    numBlocks.z = statCount;
    // Spread ~2 blocks per SM across all statistics (matches the kernel's
    // __launch_bounds__(BlockSize, 2)).
    numBlocks.x = CeilDivide(2 * TArchProps::SMCount(), (int)statCount);
    {
        FillBuffer(statSums, 0.0, count * statCount, stream);
    }
    hipLaunchKernelGGL(( UpdatePartitionsPropsForOffsetsImpl<blockSize, double>), dim3(numBlocks), dim3(blockSize), 0, stream, offsets, source, statLineSize, statSums);
}
// Element-wise widening copy: dst[i] = (double)src[i], one thread per element.
__global__ void FloatToDoubleImpl(const float* src, ui32 size, double* dst) {
    const ui32 idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= size) {
        return;  // tail guard: grid may be larger than `size`
    }
    WriteThrough(dst + idx, (double)__ldg(src + idx));
}
// Asynchronously widens a float array into a double array on `stream`.
// No-op for size == 0 (no kernel is launched).
void CopyFloatToDouble(const float* src, ui32 size, double* dst, TCudaStream stream) {
    const ui32 blockSize = 128;
    const ui32 numBlocks = CeilDivide(size, blockSize);
    if (numBlocks) {
        hipLaunchKernelGGL(( FloatToDoubleImpl), dim3(numBlocks), dim3(blockSize), 0, stream, src, size, dst);
    }
}
}
| c5f7884cf22328ef6af1ceb9ce916b5681942515.cu | #include "update_part_props.cuh"
#include "fill.cuh"
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/gpu_data/partitions.h>
namespace NKernel {
template <int BlockSize>
__forceinline__ __device__ double ComputeSum(const float* __restrict__ stat, ui32 offset, ui32 size, int blockIdx, int blockCount) {
float4 sum;
sum.x = sum.y = sum.z = sum.w = 0;
stat += offset;
const int warpSize = 32;
const int alignSize = 4 * warpSize;
{
int lastId = min(size, alignSize - (offset % alignSize));
if (blockIdx == 0) {
if (threadIdx.x < lastId) {
sum.x += Ldg(stat + threadIdx.x);
}
}
size = max(size - lastId, 0);
stat += lastId;
}
//now lets align end
const int unalignedTail = (size % alignSize);
if (unalignedTail != 0) {
if (blockIdx == 0) {
const int tailOffset = size - unalignedTail;
if (threadIdx.x < unalignedTail) {
sum.y += Ldg(stat + tailOffset + threadIdx.x);
}
}
}
size -= unalignedTail;
const int entriesPerWarp = warpSize * 4;
const int warpsPerBlock = (BlockSize / 32);
const int globalWarpId = (blockIdx * warpsPerBlock) + (threadIdx.x / 32);
stat += globalWarpId * entriesPerWarp;
size = max(size - globalWarpId * entriesPerWarp, 0);
const int stripeSize = entriesPerWarp * warpsPerBlock * blockCount;
const int localIdx = (threadIdx.x & 31) * 4;
const int iterCount = (size - localIdx + stripeSize - 1) / stripeSize;
stat += localIdx;
if (size > 0) {
for (int i = 0; i < iterCount; ++i) {
const float4* stat4 = (const float4*) stat;
float4 val = Ldg(stat4);
sum.x += val.x;
sum.y += val.y;
sum.z += val.z;
sum.w += val.w;
stat += stripeSize;
}
}
return (double)sum.x + (double)sum.y + (double)sum.z + (double)sum.w;
};
template <int BlockSize, class TOutput>
__launch_bounds__(BlockSize, 2)
__global__ void UpdatePartitionsPropsForOffsetsImpl(const ui32* offsets,
const float* source,
ui64 statLineSize,
TOutput* statSums) {
const ui32 partOffset = __ldg(offsets + blockIdx.y);
const ui32 partSize = __ldg(offsets + blockIdx.y + 1) - partOffset;
const ui32 statId = blockIdx.z;
__shared__ volatile double localBuffer[BlockSize];
source += statId * statLineSize;
const int minDocsPerBlock = BlockSize * 16;
const int effectiveBlockCount = min(gridDim.x, (partSize + minDocsPerBlock - 1) / minDocsPerBlock);
if (blockIdx.x >= effectiveBlockCount) {
return;
}
const int blockId = blockIdx.x % effectiveBlockCount;
localBuffer[threadIdx.x] = ComputeSum<BlockSize>(source, partOffset, partSize, blockId, effectiveBlockCount);
__syncthreads();
double result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize);
if (threadIdx.x == 0 && abs(result) > 1e-20) {
TOutput* writeDst = statSums + statId + blockIdx.y * gridDim.z;
TOutput addVal = (TOutput)result;
TAtomicAdd<TOutput>::Add(writeDst, addVal);
}
}
template <int BlockSize>
__launch_bounds__(BlockSize, 2)
__global__ void UpdatePartitionsPropsImpl(const ui32* partIds,
const TDataPartition* parts,
const float* source,
ui64 statLineSize,
double* statSums) {
const ui32 leafId = partIds[blockIdx.y];
TDataPartition part = parts[leafId];
const ui32 statId = blockIdx.z;
__shared__ volatile double localBuffer[BlockSize];
source += statId * statLineSize;
const int minDocsPerBlock = BlockSize * 16;
const int effectiveBlockCount = min(gridDim.x, (part.Size + minDocsPerBlock - 1) / minDocsPerBlock);
if (blockIdx.x >= effectiveBlockCount) {
return;
}
const int blockId = blockIdx.x % effectiveBlockCount;
localBuffer[threadIdx.x] = ComputeSum<BlockSize>(source, part.Offset, part.Size, blockId, effectiveBlockCount);
__syncthreads();
double result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize);
if (threadIdx.x == 0 && abs(result) > 1e-20) {
TAtomicAdd<double>::Add(statSums + statId + leafId * gridDim.z, result);
}
}
__global__ void ClearPartPropsImpl(const ui32* partIds, ui32 partCount,
ui32 statCount,
double* statSums) {
const ui32 warpId = threadIdx.x / 32;
const ui32 warpCount = blockDim.x / 32;
for (ui32 partId = warpId; partId < partCount; partId += warpCount) {
ui32 leafId = partIds[partId];
for (ui32 statId = threadIdx.x & 31; statId < statCount; statId += 32) {
statSums[statCount * leafId + statId] = 0.0;
}
}
}
void UpdatePartitionsProps(const TDataPartition* parts,
const ui32* partIds,
ui32 partCount,
const float* source,
ui32 statCount,
ui64 statLineSize,
double* statSums,
TCudaStream stream
) {
const ui32 blockSize = 512;
dim3 numBlocks;
numBlocks.y = partCount;
numBlocks.z = statCount;
numBlocks.x = CeilDivide(2 * TArchProps::SMCount(), (int)statCount);
{
ClearPartPropsImpl<<<1, 1024, 0, stream>>>(partIds, partCount, statCount, statSums);
}
UpdatePartitionsPropsImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(partIds, parts, source, statLineSize, statSums);
}
void UpdatePartitionsPropsForSplit(const TDataPartition* parts,
const ui32* leftPartIds,
const ui32* rightPartIds,
ui32 partCount,
const float* source,
ui32 statCount,
ui64 statLineSize,
double* statSums,
TCudaStream stream) {
//TODO(noxoomo): if it'll be "slow", could be made in one kernel
UpdatePartitionsProps(parts, leftPartIds, partCount, source, statCount, statLineSize, statSums, stream);
UpdatePartitionsProps(parts, rightPartIds, partCount, source, statCount, statLineSize, statSums, stream);
}
void UpdatePartitionsPropsForOffsets(const ui32* offsets, ui32 count,
const float* source,
ui32 statCount,
ui64 statLineSize,
double* statSums,
TCudaStream stream
) {
const ui32 blockSize = 512;
dim3 numBlocks;
numBlocks.y = count;
numBlocks.z = statCount;
numBlocks.x = CeilDivide(2 * TArchProps::SMCount(), (int)statCount);
{
FillBuffer(statSums, 0.0, count * statCount, stream);
}
UpdatePartitionsPropsForOffsetsImpl<blockSize, double><<<numBlocks, blockSize, 0, stream>>>(offsets, source, statLineSize, statSums);
}
__global__ void FloatToDoubleImpl(const float* src, ui32 size, double* dst) {
const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
WriteThrough(dst + i, (double)__ldg(src + i));
}
}
void CopyFloatToDouble(const float* src, ui32 size, double* dst, TCudaStream stream) {
const ui32 blockSize = 128;
const ui32 numBlocks = CeilDivide(size, blockSize);
if (numBlocks) {
FloatToDoubleImpl<<<numBlocks, blockSize, 0, stream>>>(src, size, dst);
}
}
}
|
bfcc5581686a2f3b6f48470feb10b472ecae4142.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#include "gconv.h"
#include "common_hip.cuh"
// One block computes one output scalar res[batch][node][nout]: the
// filter-weighted sum over all input features of all neighbour nodes, where
// the kernel coefficient is selected by the edge type between `node_id` and
// the neighbour (edge_type < 0 means "no edge", contributing nothing).
//
// Launch layout: grid = (batch, nnodes, nout_features), 1D block,
// dynamic shared memory = blockDim.x * sizeof(scalar_t) (reduction buffer).
// `blockSize` must equal blockDim.x and be a power of two <= 512.
// NOTE(review): int indexing assumes nin_feature * nnodes fits in int.
template <int blockSize, typename scalar_t>
__global__ void gconv_cuda_forward_kernel(
    const scalar_t* __restrict__ feature,
    const int* __restrict__ edge_type,
    const scalar_t* __restrict__ filter,
    scalar_t* __restrict__ res,
    size_t nin_feature,
    size_t nedge_type
    )
{
    extern __shared__ int s [];
    int batch_sz = blockIdx.x;
    int node_id = blockIdx.y;
    int nout = blockIdx.z;
    int nnodes = gridDim.y;
    int tid = threadIdx.x;
    volatile scalar_t * partial_res = (scalar_t *) s;
    // Slices of the flat tensors for this (batch, node, output feature).
    const scalar_t* kernels = filter + nin_feature * nedge_type * nout;
    const scalar_t* cfeature = feature + batch_sz * (nnodes * nin_feature) ;
    const int * cedge_type = edge_type + batch_sz * nnodes * nnodes + node_id * nnodes;
    scalar_t* cres = res + batch_sz * nnodes * gridDim.z + node_id * gridDim.z + nout;
    partial_res[tid] = 0.0;
    // BUGFIX: the original early-returned here when tid >= nin_feature * nnodes,
    // which made those threads skip the __syncthreads() barriers below —
    // undefined behaviour whenever blockDim.x exceeds the element count. The
    // loop guard already keeps such threads idle (their partial stays 0), so
    // every thread simply proceeds to the barriers and the reduction.
    for(int i = tid; i < nin_feature * nnodes; i += blockDim.x){
        int cnode_id = i / nin_feature;
        int cfeature_id = i % nin_feature;
        int edge_type_ = cedge_type[cnode_id];
        if(edge_type_ >= 0){
            partial_res[tid] += kernels[cfeature_id * nedge_type + edge_type_] * cfeature[i];
        }
    }
    __syncthreads();
    // Power-of-two tree reduction in shared memory; warpReduceSum finishes
    // the last 32 entries, after which partial_res[0] holds the block sum.
    if (blockSize >= 512) {
        if (tid < 256) { partial_res[tid] += partial_res[tid + 256]; }
        __syncthreads(); }
    if (blockSize >= 256) {
        if (tid < 128) { partial_res[tid] += partial_res[tid + 128]; }
        __syncthreads(); }
    if (blockSize >= 128) {
        if (tid < 64) { partial_res[tid] += partial_res[tid + 64]; }
        __syncthreads(); }
    if (tid < 32) warpReduceSum<blockSize, scalar_t>(partial_res, tid);
    if (tid == 0){
        cres[0] = partial_res[0];
    }
}
#define run_kernel(bsize, type)hipLaunchKernelGGL(( gconv_cuda_forward_kernel<bsize, type>), dim3(blocks), dim3(threads), threads * sizeof(type), 0, feature, \
edge_type, \
filter, \
res, \
nin_features, \
nedge_type)
// Host launcher for the graph-convolution forward kernel.
// Picks the block size as the next power of two >= nin_features * nnodes
// (capped at 512) and dispatches through the switch so the kernel receives
// its block size as a compile-time template argument (needed by the
// shared-memory reduction). Grid: one block per (batch, node, out-feature).
// NOTE(review): the power of two is computed with floating log/ceil/pow —
// exact for realistic sizes, but nin_features * nnodes == 0 yields
// threads == 0 and no kernel launch (assumed non-empty input).
template <typename scalar_t>
void gconv_cuda_forward_runner(const scalar_t * feature,
                               const int * edge_type,
                               const scalar_t * filter,
                               scalar_t * res,
                               size_t batch_size,
                               size_t nnodes,
                               size_t nin_features,
                               size_t nout_features,
                               size_t nedge_type
                               )
{
    int best_num_threads = pow(2, ceil(log(nin_features * nnodes)/log(2)));
    int threads = best_num_threads > 512?512:best_num_threads ;
    const dim3 blocks(batch_size, nnodes, nout_features);
    switch(threads){
    case 512:
        run_kernel(512, scalar_t);
        break;
    case 256:
        run_kernel(256, scalar_t);
        break;
    case 128:
        run_kernel(128, scalar_t);
        break;
    case 64:
        run_kernel(64, scalar_t);
        break;
    case 32:
        run_kernel(32, scalar_t);
        break;
    case 16:
        run_kernel(16, scalar_t);
        break;
    case 8:
        run_kernel(8, scalar_t);
        break;
    case 4:
        run_kernel(4, scalar_t);
        break;
    case 2:
        run_kernel(2, scalar_t);
        break;
    case 1:
        run_kernel(1, scalar_t);
        break;
    }
}
#undef run_kernel
template
void gconv_cuda_forward_runner(const double * feature,
const int * edge_type,
const double * filter,
double * res,
size_t batch_size,
size_t nnodes,
size_t nin_features,
size_t nout_features,
size_t nedge_type
);
template
void gconv_cuda_forward_runner(const float * feature,
const int * edge_type,
const float * filter,
float * res,
size_t batch_size,
size_t nnodes,
size_t nin_features,
size_t nout_features,
size_t nedge_type
);
| bfcc5581686a2f3b6f48470feb10b472ecae4142.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include "gconv.h"
#include "common.cuh"
// One block computes one output scalar res[batch][node][nout]: the
// filter-weighted sum over all input features of all neighbour nodes, where
// the kernel coefficient is selected by the edge type between `node_id` and
// the neighbour (edge_type < 0 means "no edge", contributing nothing).
//
// Launch layout: grid = (batch, nnodes, nout_features), 1D block,
// dynamic shared memory = blockDim.x * sizeof(scalar_t) (reduction buffer).
// `blockSize` must equal blockDim.x and be a power of two <= 512.
// NOTE(review): int indexing assumes nin_feature * nnodes fits in int.
template <int blockSize, typename scalar_t>
__global__ void gconv_cuda_forward_kernel(
    const scalar_t* __restrict__ feature,
    const int* __restrict__ edge_type,
    const scalar_t* __restrict__ filter,
    scalar_t* __restrict__ res,
    size_t nin_feature,
    size_t nedge_type
    )
{
    extern __shared__ int s [];
    int batch_sz = blockIdx.x;
    int node_id = blockIdx.y;
    int nout = blockIdx.z;
    int nnodes = gridDim.y;
    int tid = threadIdx.x;
    volatile scalar_t * partial_res = (scalar_t *) s;
    // Slices of the flat tensors for this (batch, node, output feature).
    const scalar_t* kernels = filter + nin_feature * nedge_type * nout;
    const scalar_t* cfeature = feature + batch_sz * (nnodes * nin_feature) ;
    const int * cedge_type = edge_type + batch_sz * nnodes * nnodes + node_id * nnodes;
    scalar_t* cres = res + batch_sz * nnodes * gridDim.z + node_id * gridDim.z + nout;
    partial_res[tid] = 0.0;
    // BUGFIX: the original early-returned here when tid >= nin_feature * nnodes,
    // which made those threads skip the __syncthreads() barriers below —
    // undefined behaviour whenever blockDim.x exceeds the element count. The
    // loop guard already keeps such threads idle (their partial stays 0), so
    // every thread simply proceeds to the barriers and the reduction.
    for(int i = tid; i < nin_feature * nnodes; i += blockDim.x){
        int cnode_id = i / nin_feature;
        int cfeature_id = i % nin_feature;
        int edge_type_ = cedge_type[cnode_id];
        if(edge_type_ >= 0){
            partial_res[tid] += kernels[cfeature_id * nedge_type + edge_type_] * cfeature[i];
        }
    }
    __syncthreads();
    // Power-of-two tree reduction in shared memory; warpReduceSum finishes
    // the last 32 entries, after which partial_res[0] holds the block sum.
    if (blockSize >= 512) {
        if (tid < 256) { partial_res[tid] += partial_res[tid + 256]; }
        __syncthreads(); }
    if (blockSize >= 256) {
        if (tid < 128) { partial_res[tid] += partial_res[tid + 128]; }
        __syncthreads(); }
    if (blockSize >= 128) {
        if (tid < 64) { partial_res[tid] += partial_res[tid + 64]; }
        __syncthreads(); }
    if (tid < 32) warpReduceSum<blockSize, scalar_t>(partial_res, tid);
    if (tid == 0){
        cres[0] = partial_res[0];
    }
}
#define run_kernel(bsize, type) gconv_cuda_forward_kernel<bsize, type><<<blocks, threads, threads * sizeof(type)>>> (feature, \
edge_type, \
filter, \
res, \
nin_features, \
nedge_type)
template <typename scalar_t>
void gconv_cuda_forward_runner(const scalar_t * feature,
const int * edge_type,
const scalar_t * filter,
scalar_t * res,
size_t batch_size,
size_t nnodes,
size_t nin_features,
size_t nout_features,
size_t nedge_type
)
{
int best_num_threads = pow(2, ceil(log(nin_features * nnodes)/log(2)));
int threads = best_num_threads > 512?512:best_num_threads ;
const dim3 blocks(batch_size, nnodes, nout_features);
switch(threads){
case 512:
run_kernel(512, scalar_t);
break;
case 256:
run_kernel(256, scalar_t);
break;
case 128:
run_kernel(128, scalar_t);
break;
case 64:
run_kernel(64, scalar_t);
break;
case 32:
run_kernel(32, scalar_t);
break;
case 16:
run_kernel(16, scalar_t);
break;
case 8:
run_kernel(8, scalar_t);
break;
case 4:
run_kernel(4, scalar_t);
break;
case 2:
run_kernel(2, scalar_t);
break;
case 1:
run_kernel(1, scalar_t);
break;
}
}
#undef run_kernel
template
void gconv_cuda_forward_runner(const double * feature,
const int * edge_type,
const double * filter,
double * res,
size_t batch_size,
size_t nnodes,
size_t nin_features,
size_t nout_features,
size_t nedge_type
);
template
void gconv_cuda_forward_runner(const float * feature,
const int * edge_type,
const float * filter,
float * res,
size_t batch_size,
size_t nnodes,
size_t nin_features,
size_t nout_features,
size_t nedge_type
);
|
627e30546d46f7fa756975cc87c7858da46ef21d.hip | // !!! This is a file automatically generated by hipify!!!
#include "WTDenUpdateKernel.cuh"
// Launches the dense WT-table update for one chunk/stream: zeroes the
// per-stream progress counter on `stream`, then runs WTDen_Update_Kernel
// over that stream's token-list slice. All work is asynchronous on
// `stream`; the commented-out code documents earlier synchronous/debug
// variants and is kept for reference.
// NOTE(review): argChunkId is currently unused by the body.
void UpdateWTDenKernel(WTD &argWTDen, WTAll &argWT, Document &argDoc, int argChunkId, int argStreamId, hipStream_t& stream) {
    /*int numOfTokenD = argWTDen.numOfWordD;*/
    /*unsigned int* deviceCounter;
    hipMalloc(&deviceCounter, sizeof(unsigned int));*/
    hipMemsetAsync(argDoc.deviceCounterWTDenUpdateKernel[argStreamId], 0, sizeof(unsigned int), stream);
    /*hipMemcpyAsync(argDoc.deviceCounterWTDenUpdateKernel[argStreamId], &argDoc.counterWTDenUpdateKernel, sizeof(unsigned int), hipMemcpyHostToDevice, stream);*/
    WTDen_Update_Kernel << <GridDim, BlockDim, 0, stream >> >(argDoc.deviceTLTopic[argStreamId], argWTDen.deviceWTDense, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTOffset, argWTDen.numOfWordD, argDoc.deviceCounterWTDenUpdateKernel[argStreamId]);
    /*H_ERR(hipDeviceSynchronize());*/
}
// Recomputes the WT row sums from the dense WT table, asynchronously on
// `stream` (the commented-out sync is a debug leftover kept for reference).
void UpdateWTDenRowSumKernel(WTD &argWTDen, WTAll &argWT, hipStream_t& stream)
{
    WTDen_Sum_Update_Kernel << <GridDim, BlockDim, 0, stream >> >(argWTDen.deviceWTDense, argWT.deviceWTRowSum, argWT.deviceWTOffset, argWTDen.numOfWordD);
    /*H_ERR(hipDeviceSynchronize());*/
}
| 627e30546d46f7fa756975cc87c7858da46ef21d.cu | #include "WTDenUpdateKernel.cuh"
void UpdateWTDenKernel(WTD &argWTDen, WTAll &argWT, Document &argDoc, int argChunkId, int argStreamId, cudaStream_t& stream) {
/*int numOfTokenD = argWTDen.numOfWordD;*/
/*unsigned int* deviceCounter;
cudaMalloc(&deviceCounter, sizeof(unsigned int));*/
cudaMemsetAsync(argDoc.deviceCounterWTDenUpdateKernel[argStreamId], 0, sizeof(unsigned int), stream);
/*cudaMemcpyAsync(argDoc.deviceCounterWTDenUpdateKernel[argStreamId], &argDoc.counterWTDenUpdateKernel, sizeof(unsigned int), cudaMemcpyHostToDevice, stream);*/
WTDen_Update_Kernel << <GridDim, BlockDim, 0, stream >> >(argDoc.deviceTLTopic[argStreamId], argWTDen.deviceWTDense, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTOffset, argWTDen.numOfWordD, argDoc.deviceCounterWTDenUpdateKernel[argStreamId]);
/*H_ERR(cudaDeviceSynchronize());*/
}
void UpdateWTDenRowSumKernel(WTD &argWTDen, WTAll &argWT, cudaStream_t& stream)
{
WTDen_Sum_Update_Kernel << <GridDim, BlockDim, 0, stream >> >(argWTDen.deviceWTDense, argWT.deviceWTRowSum, argWT.deviceWTOffset, argWTDen.numOfWordD);
/*H_ERR(cudaDeviceSynchronize());*/
}
|
d89cf5fc6187fa02a5a506715b7996c1e6d39da4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "VeloDefinitions.cuh"
#include "math_constants.h"
#include "ApplyPermutation.cuh"
/**
 * @brief Reorders one event's hit arrays according to a precomputed
 *        permutation. Despite the name, phi itself is not computed here —
 *        hit_permutations is expected to already describe the phi-sorted
 *        order.
 *
 * Each permuted array is written one slot "to the left": Xs land in
 * hit_temp, Ys in the old Xs storage, Zs in the old Ys storage and IDs in
 * the old Zs storage, so no scratch beyond hit_temp is needed. The
 * __syncthreads() between steps keeps each source array intact until it
 * has been fully consumed by the previous apply_permutation.
 */
__device__ void sort_by_phi(
  const uint event_hit_start,
  const uint event_number_of_hits,
  float* hit_Xs,
  float* hit_Ys,
  float* hit_Zs,
  uint* hit_IDs,
  int32_t* hit_temp,
  uint* hit_permutations)
{
  // Let's work with new pointers
  // Note: It is important we populate later on in strictly
  // the same order, to not lose data
  float* new_hit_Xs = (float*) hit_temp;
  float* new_hit_Ys = hit_Xs;
  float* new_hit_Zs = hit_Ys;
  uint* new_hit_IDs = (uint*) hit_Zs;
  // Apply permutation across all arrays
  apply_permutation(hit_permutations, event_hit_start, event_number_of_hits, hit_Xs, new_hit_Xs);
  __syncthreads();
  apply_permutation(hit_permutations, event_hit_start, event_number_of_hits, hit_Ys, new_hit_Ys);
  __syncthreads();
  apply_permutation(hit_permutations, event_hit_start, event_number_of_hits, hit_Zs, new_hit_Zs);
  __syncthreads();
  apply_permutation(hit_permutations, event_hit_start, event_number_of_hits, hit_IDs, new_hit_IDs);
}
| d89cf5fc6187fa02a5a506715b7996c1e6d39da4.cu | #include "VeloDefinitions.cuh"
#include "math_constants.h"
#include "ApplyPermutation.cuh"
/**
* @brief Calculates phi for each hit
*/
__device__ void sort_by_phi(
const uint event_hit_start,
const uint event_number_of_hits,
float* hit_Xs,
float* hit_Ys,
float* hit_Zs,
uint* hit_IDs,
int32_t* hit_temp,
uint* hit_permutations)
{
// Let's work with new pointers
// Note: It is important we populate later on in strictly
// the same order, to not lose data
float* new_hit_Xs = (float*) hit_temp;
float* new_hit_Ys = hit_Xs;
float* new_hit_Zs = hit_Ys;
uint* new_hit_IDs = (uint*) hit_Zs;
// Apply permutation across all arrays
apply_permutation(hit_permutations, event_hit_start, event_number_of_hits, hit_Xs, new_hit_Xs);
__syncthreads();
apply_permutation(hit_permutations, event_hit_start, event_number_of_hits, hit_Ys, new_hit_Ys);
__syncthreads();
apply_permutation(hit_permutations, event_hit_start, event_number_of_hits, hit_Zs, new_hit_Zs);
__syncthreads();
apply_permutation(hit_permutations, event_hit_start, event_number_of_hits, hit_IDs, new_hit_IDs);
}
|
28f41fb5311b704fcb06fb3eb5ba79e3679c0b17.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
//__global__ void kernel( void ) {
// does nothing
//}
// Micro-benchmark of device-to-host transfers: performs `loopCount`
// (argv[1], default 1) 4-byte hipMemcpy copies of the same int.
// NOTE(review): host_a is left uninitialized — only the transfer itself is
// exercised; the copied value is never inspected.
// NOTE(review): no hip* return codes are checked, so failures are silent.
int main(int argc, char** argv) {
    // default the loop count to equal 1
    int loopCount = 1;
    // take in a command line arg to set the loop count
    if(argc > 1){
        loopCount = atoi(argv[1]);
    }
    // declare two variables
    int host_a;
    int *dev_a;
    // get the size of an int for the cuda malloc
    int size = sizeof(int);
    // malloc on the device
    hipMalloc((void **)&dev_a, size);
    // copy over to device
    hipMemcpy(dev_a, &host_a, size, hipMemcpyHostToDevice);
    // call an empty kernel
    //kernel<<<1,1>>>();
    // loop over the loop count and copy to host (the timed/benchmarked part)
    for(int i = 0; i < loopCount; i++){
        hipMemcpy(&host_a, dev_a, size, hipMemcpyDeviceToHost);
    }
    // free device memory
    hipFree(dev_a);
    // return with no errors
    return 0;
}
| 28f41fb5311b704fcb06fb3eb5ba79e3679c0b17.cu | #include <stdio.h>
//__global__ void kernel( void ) {
// does nothing
//}
int main(int argc, char** argv) {
// default the loop count to equal 1
int loopCount = 1;
// take in a command line arg to set the loop count
if(argc > 1){
loopCount = atoi(argv[1]);
}
// delcare two variables
int host_a;
int *dev_a;
// get the size of an int for the cuda malloc
int size = sizeof(int);
// malloc on the device
cudaMalloc((void **)&dev_a, size);
// copy over to device
cudaMemcpy(dev_a, &host_a, size, cudaMemcpyHostToDevice);
// call an empty kernel
//kernel<<<1,1>>>();
// loop over the loop count and copy to host
for(int i = 0; i < loopCount; i++){
cudaMemcpy(&host_a, dev_a, size, cudaMemcpyDeviceToHost);
}
// free device memory
cudaFree(dev_a);
// return with no errors
return 0;
}
|
c165728dda5097c4e0f882762915d1b178c8eca3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/************************************************************************************\
* *
* Copyright 2014 Advanced Micro Devices, Inc. *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
/**
* @brief Breadth-first traversal
* @param row CSR pointer array
* @param col CSR column array
* @param d Distance array
* @param rho Rho array
* @param p Dependency array
* @param cont Termination variable
* @param num_nodes Termination variable
* @param num_edges Termination variable
* @param dist Current traversal layer
*/
// One thread per vertex (flat 1D grid); only threads whose vertex is on the
// current BFS frontier (d[tid] == dist) do any work in this pass.
__global__ void
bfs_kernel(int *row, int *col, int *d, float *rho, int *cont,
const int num_nodes, const int num_edges, const int dist)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
//navigate the current layer
if (tid < num_nodes && d[tid] == dist) {
//get the starting and ending pointers
//of the neighbor list (CSR: row[tid]..row[tid+1])
int start = row[tid];
int end;
if (tid + 1 < num_nodes)
end = row[tid + 1];
else
// Last vertex: its neighbor list runs to the end of the edge array.
end = num_edges;
//navigate through the neighbor list
for (int edge = start; edge < end; edge++) {
int w = col[edge];
if (d[w] < 0) {
// Unvisited neighbor found: signal the host that another layer exists.
// NOTE(review): the read/write of d[w] is not atomic; several frontier
// threads may race here, but all write the same value (dist + 1), so
// the result stays correct.
*cont = 1;
//traverse another layer
d[w] = dist + 1;
}
//transfer the rho value to the neighbor
// (atomicAdd: multiple frontier vertices may share the neighbor w)
if (d[w] == (dist + 1)) {
atomicAdd(&rho[w], rho[tid]);
}
}
}
}
/**
* @brief Back traversal
* @param row CSR pointer array
* @param col CSR column array
* @param d Distance array
* @param rho Rho array
* @param sigma Sigma array
* @param p Dependency array
* @param cont Termination variable
* @param num_nodes Termination variable
* @param num_edges Termination variable
* @param dist Current traversal layer
* @param s Source vertex
* @param bc Betweeness Centrality array
*/
// One thread per vertex; only vertices in layer dist - 1 participate in
// this back-propagation step of the betweenness-centrality accumulation.
__global__ void
backtrack_kernel(int *row, int *col, int *d, float *rho, float *sigma,
const int num_nodes, const int num_edges, const int dist,
const int s, float* bc)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Navigate the current layer
if (tid < num_nodes && d[tid] == dist - 1) {
// CSR neighbor range for this vertex: row[tid]..row[tid+1].
int start = row[tid];
int end;
if (tid + 1 < num_nodes)
end = row[tid + 1];
else
// Last vertex: neighbor list runs to the end of the edge array.
end = num_edges;
// Get the starting and ending pointers
// of the neighbor list in the reverse graph
for (int edge = start; edge < end; edge++) {
int w = col[edge];
// Update the sigma value traversing back
// (atomicAdd: several layer-(dist-1) vertices may share predecessor w)
if (d[w] == dist - 2)
atomicAdd(&sigma[w], rho[w] / rho[tid] * (1 + sigma[tid]));
}
// Update the BC value
// (the source vertex s accumulates no dependency onto itself)
if (tid != s)
bc[tid] = bc[tid] + sigma[tid];
}
}
/**
* @brief back_sum_kernel (not used)
* @param s Source vertex
* @param dist Current traversal layer
* @param d Distance array
* @param sigma Sigma array
* @param bc Betweeness Centrality array
* @param num_nodes Termination variable
* @param num_edges Termination variable
*/
// Accumulates the per-vertex dependency (sigma) into the betweenness
// centrality array for every non-source vertex in layer dist - 1.
// One thread per vertex; out-of-range threads return immediately.
__global__ void
back_sum_kernel(const int s, const int dist, int *d, float *sigma, float *bc,
const int num_nodes)
{
int vertex = blockDim.x * blockIdx.x + threadIdx.x;
if (vertex >= num_nodes)
return;
// Skip the source vertex; only vertices on layer dist - 1 contribute.
if (vertex != s && d[vertex] == dist - 1)
bc[vertex] += sigma[vertex];
}
/**
* @brief array set 1D
* @param s Source vertex
* @param dist_array Distance array
* @param sigma Sigma array
* @param rho Rho array
* @param num_nodes Termination variable
*/
// Per-source (re)initialization of the traversal state arrays:
// sigma is zeroed everywhere; the source vertex gets rho = 1, dist = 0,
// every other vertex gets rho = 0, dist = -1 (unvisited).
__global__ void
clean_1d_array(const int source, int *dist_array, float *sigma, float *rho,
const int num_nodes)
{
int vertex = blockDim.x * blockIdx.x + threadIdx.x;
if (vertex >= num_nodes)
return;
sigma[vertex] = 0;
// Select per role: source is the BFS root, everything else is unvisited.
bool is_source = (vertex == source);
rho[vertex] = is_source ? 1 : 0;
dist_array[vertex] = is_source ? 0 : -1;
}
/**
* @brief array set 2D
* @param p Dependency array
* @param num_nodes Number of vertices
*/
// Zeroes the flattened num_nodes x num_nodes dependency matrix,
// one element per thread; the launch must cover num_nodes^2 threads.
__global__ void clean_2d_array(int *p, const int num_nodes)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int total = num_nodes * num_nodes;
if (idx < total)
p[idx] = 0;
}
/**
* @brief clean BC
* @param bc_d Betweeness Centrality array
* @param num_nodes Number of vertices
*/
// Resets the betweenness-centrality accumulator to zero, one entry per thread.
__global__ void clean_bc(float *bc_d, const int num_nodes)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= num_nodes)
return;
bc_d[idx] = 0;
}
| c165728dda5097c4e0f882762915d1b178c8eca3.cu | /************************************************************************************\
* *
* Copyright � 2014 Advanced Micro Devices, Inc. *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
/**
* @brief Breadth-first traversal
* @param row CSR pointer array
* @param col CSR column array
* @param d Distance array
* @param rho Rho array
* @param p Dependency array
* @param cont Termination variable
* @param num_nodes Termination variable
* @param num_edges Termination variable
* @param dist Current traversal layer
*/
// One thread per vertex (flat 1D grid); only threads whose vertex is on the
// current BFS frontier (d[tid] == dist) do any work in this pass.
__global__ void
bfs_kernel(int *row, int *col, int *d, float *rho, int *cont,
const int num_nodes, const int num_edges, const int dist)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
//navigate the current layer
if (tid < num_nodes && d[tid] == dist) {
//get the starting and ending pointers
//of the neighbor list (CSR: row[tid]..row[tid+1])
int start = row[tid];
int end;
if (tid + 1 < num_nodes)
end = row[tid + 1];
else
// Last vertex: its neighbor list runs to the end of the edge array.
end = num_edges;
//navigate through the neighbor list
for (int edge = start; edge < end; edge++) {
int w = col[edge];
if (d[w] < 0) {
// Unvisited neighbor found: signal the host that another layer exists.
// NOTE(review): the read/write of d[w] is not atomic; several frontier
// threads may race here, but all write the same value (dist + 1), so
// the result stays correct.
*cont = 1;
//traverse another layer
d[w] = dist + 1;
}
//transfer the rho value to the neighbor
// (atomicAdd: multiple frontier vertices may share the neighbor w)
if (d[w] == (dist + 1)) {
atomicAdd(&rho[w], rho[tid]);
}
}
}
}
/**
* @brief Back traversal
* @param row CSR pointer array
* @param col CSR column array
* @param d Distance array
* @param rho Rho array
* @param sigma Sigma array
* @param p Dependency array
* @param cont Termination variable
* @param num_nodes Termination variable
* @param num_edges Termination variable
* @param dist Current traversal layer
* @param s Source vertex
* @param bc Betweeness Centrality array
*/
// One thread per vertex; only vertices in layer dist - 1 participate in
// this back-propagation step of the betweenness-centrality accumulation.
__global__ void
backtrack_kernel(int *row, int *col, int *d, float *rho, float *sigma,
const int num_nodes, const int num_edges, const int dist,
const int s, float* bc)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Navigate the current layer
if (tid < num_nodes && d[tid] == dist - 1) {
// CSR neighbor range for this vertex: row[tid]..row[tid+1].
int start = row[tid];
int end;
if (tid + 1 < num_nodes)
end = row[tid + 1];
else
// Last vertex: neighbor list runs to the end of the edge array.
end = num_edges;
// Get the starting and ending pointers
// of the neighbor list in the reverse graph
for (int edge = start; edge < end; edge++) {
int w = col[edge];
// Update the sigma value traversing back
// (atomicAdd: several layer-(dist-1) vertices may share predecessor w)
if (d[w] == dist - 2)
atomicAdd(&sigma[w], rho[w] / rho[tid] * (1 + sigma[tid]));
}
// Update the BC value
// (the source vertex s accumulates no dependency onto itself)
if (tid != s)
bc[tid] = bc[tid] + sigma[tid];
}
}
/**
* @brief back_sum_kernel (not used)
* @param s Source vertex
* @param dist Current traversal layer
* @param d Distance array
* @param sigma Sigma array
* @param bc Betweeness Centrality array
* @param num_nodes Termination variable
* @param num_edges Termination variable
*/
// Accumulates the per-vertex dependency (sigma) into the betweenness
// centrality array for every non-source vertex in layer dist - 1.
// One thread per vertex; out-of-range threads return immediately.
__global__ void
back_sum_kernel(const int s, const int dist, int *d, float *sigma, float *bc,
const int num_nodes)
{
int vertex = blockDim.x * blockIdx.x + threadIdx.x;
if (vertex >= num_nodes)
return;
// Skip the source vertex; only vertices on layer dist - 1 contribute.
if (vertex != s && d[vertex] == dist - 1)
bc[vertex] += sigma[vertex];
}
/**
* @brief array set 1D
* @param s Source vertex
* @param dist_array Distance array
* @param sigma Sigma array
* @param rho Rho array
* @param num_nodes Termination variable
*/
// Per-source (re)initialization of the traversal state arrays:
// sigma is zeroed everywhere; the source vertex gets rho = 1, dist = 0,
// every other vertex gets rho = 0, dist = -1 (unvisited).
__global__ void
clean_1d_array(const int source, int *dist_array, float *sigma, float *rho,
const int num_nodes)
{
int vertex = blockDim.x * blockIdx.x + threadIdx.x;
if (vertex >= num_nodes)
return;
sigma[vertex] = 0;
// Select per role: source is the BFS root, everything else is unvisited.
bool is_source = (vertex == source);
rho[vertex] = is_source ? 1 : 0;
dist_array[vertex] = is_source ? 0 : -1;
}
/**
* @brief array set 2D
* @param p Dependency array
* @param num_nodes Number of vertices
*/
// Zeroes the num_nodes x num_nodes dependency matrix; one thread per entry
// of the flattened array (launch must cover num_nodes*num_nodes threads).
__global__ void clean_2d_array(int *p, const int num_nodes)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Guard: the grid may overshoot the flattened matrix size.
if (tid < num_nodes * num_nodes)
p[tid] = 0;
}
/**
* @brief clean BC
* @param bc_d Betweeness Centrality array
* @param num_nodes Number of vertices
*/
// Resets the betweenness-centrality accumulator to zero, one entry per thread.
__global__ void clean_bc(float *bc_d, const int num_nodes)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Guard: the grid may overshoot num_nodes.
if (tid < num_nodes)
bc_d[tid] = 0;
}
|
e4cc7f839387daaba13e426271e79086338b414c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "common_magma.h"
#define BLOCK_SIZE 256
#define PRECISION_z
// initialize arrays with zero
// Sets an n x k array (column-major, leading dimension n) to complex zero.
// One thread per row; each thread clears its row across all k columns.
__global__ void
magma_zgpumemzero(
magmaDoubleComplex * d,
int n,
int k )
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
if ( row >= n )
return;
for ( int col = 0; col < k; col++ )
d[ row + col * n ] = MAGMA_Z_MAKE( 0.0, 0.0 );
}
// dot product
// Partial dot product <v,r>: each 256-thread block multiplies its slice of
// v and r elementwise into shared memory, tree-reduces it, and writes one
// partial sum per block to vtmp[blockIdx.x]. A follow-up reduction kernel
// finishes the sum. Requires blockDim.x == 256 and blockDim.x complex
// elements of dynamic shared memory.
__global__ void
magma_zdot_kernel(
int Gs,
int n,
magmaDoubleComplex * v,
magmaDoubleComplex * r,
magmaDoubleComplex * vtmp)
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
// One elementwise product per thread, zero-padded past the vector end.
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_Z_MAKE( 0.0, 0.0);
__syncthreads();
// Shared-memory tree reduction: 256 -> 128 -> 64 partial sums.
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// Complex precisions: finish the reduction with explicit barriers.
// NOTE(review): these __syncthreads() calls sit inside a divergent branch
// (only Idx < 32 reaches them); a long-standing MAGMA idiom, but not
// guaranteed by the programming model -- confirm on new architectures.
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
// Real double precision: warp-synchronous finish via a volatile alias.
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
// Real single precision: same warp-synchronous pattern with float.
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
// Thread 0 publishes this block's partial sum.
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// dot product for multiple vectors
// Batched dot products <v_j,r> for j = 0..k-1: each block computes one
// partial sum per vector, stored column-wise in vtmp[blockIdx.x + j*n].
// Shared memory holds k interleaved reduction buffers of blockDim.x
// elements each; requires blockDim.x == 256.
__global__ void
magma_zblockdot_kernel(
int Gs,
int n,
int k,
magmaDoubleComplex * v,
magmaDoubleComplex * r,
magmaDoubleComplex * vtmp)
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// k vectors v(i)
if (i<n){
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = v[i+j*n] * r[i];
}
else {
// Tail threads pad every vector's buffer with zero.
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = MAGMA_Z_MAKE( 0.0, 0.0);
}
__syncthreads();
// Tree-reduce each of the k buffers: 256 -> 128 -> 64 partial sums.
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// Complex precisions: finish with explicit barriers.
// NOTE(review): __syncthreads() inside the divergent Idx < 32 branch is a
// long-standing MAGMA idiom but not guaranteed by the programming model.
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
// Real double: warp-synchronous finish through a volatile alias.
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
// Real single: same warp-synchronous pattern with float.
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
// Thread 0 writes one partial sum per vector (column j at offset j*n).
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for multiple vectors
// Second-stage reduction for k vectors of partial sums: reads column-wise
// partials from vtmp (length n per vector), tree-reduces each block's slice
// in shared memory, and writes new partials to vtmp2[blockIdx.x + j*n].
// Requires blockDim.x == 256 and k*blockDim.x complex elements of dynamic
// shared memory.
__global__ void
magma_zblockreduce_kernel(
int Gs,
int n,
int k,
magmaDoubleComplex * vtmp,
magmaDoubleComplex * vtmp2 )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// Load one partial per thread per vector, zero-padded past n.
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ]
: MAGMA_Z_MAKE( 0.0, 0.0);
}
__syncthreads();
// Tree-reduce each buffer: 256 -> 128 -> 64 partial sums.
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// Complex precisions: finish with explicit barriers.
// NOTE(review): __syncthreads() inside the divergent Idx < 32 branch is a
// long-standing MAGMA idiom but not guaranteed by the programming model.
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
// Real double: warp-synchronous finish through a volatile alias.
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
// Real single: same warp-synchronous pattern with float.
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
// Thread 0 writes one reduced partial per vector (column j at offset j*n).
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// accelerated reduction for one vector
// Accelerated single-vector reduction: each thread first serially
// accumulates a grid-strided range of vtmp[0..Gs) (two loads per
// iteration), then the block tree-reduces 128 partials in shared memory
// and writes its result to vtmp2[blockIdx.x].
// Launched with blockDim.x == 128 (see the halved Bs in magma_zmdotc).
__global__ void
magma_zreduce_kernel_fast( int Gs,
int n,
magmaDoubleComplex * vtmp,
magmaDoubleComplex * vtmp2 )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_Z_MAKE( 0.0, 0.0);
// Serial accumulation phase: stride the whole input across the grid,
// consuming two elements per thread per iteration.
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_Z_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
// Tree reduction of the 128 per-thread partials.
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// Complex precisions: finish with explicit barriers.
// NOTE(review): __syncthreads() inside the divergent Idx < 32 branch is a
// long-standing MAGMA idiom but not guaranteed by the programming model.
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
// Real double: warp-synchronous finish through a volatile alias.
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
// Real single: same warp-synchronous pattern with float.
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
// Thread 0 publishes this block's partial sum.
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// accelerated block reduction for multiple vectors
// Accelerated reduction for k vectors of partials: per vector, each thread
// serially accumulates a grid-strided range of vtmp's column (two loads
// per iteration), then the block tree-reduces 128 partials per vector in
// shared memory and writes results to vtmp2[blockIdx.x + j*n].
// Launched with blockDim.x == 128 (see the halved Bs in magma_zmdotc).
__global__ void
magma_zblockreduce_kernel_fast(
int Gs,
int n,
int k,
magmaDoubleComplex * vtmp,
magmaDoubleComplex * vtmp2 )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
// Serial accumulation phase, once per vector j.
for( j=0; j<k; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_Z_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_Z_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
// Tree reduction of the 128 per-thread partials of each vector.
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// Complex precisions: finish with explicit barriers.
// NOTE(review): __syncthreads() inside the divergent Idx < 32 branch is a
// long-standing MAGMA idiom but not guaranteed by the programming model.
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
// Real double: warp-synchronous finish through a volatile alias.
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
// Real single: same warp-synchronous pattern with float.
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
// Thread 0 writes one reduced partial per vector (column j at offset j*n).
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of vectors v_i such that
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaDoubleComplex_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaDoubleComplex_ptr
r
@param[in]
d1 magmaDoubleComplex_ptr
workspace
@param[in]
d2 magmaDoubleComplex_ptr
workspace
@param[out]
skp magmaDoubleComplex_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zmdotc(
int n,
int k,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
// set queue for old dense routines
magma_queue_t orig_queue;
magmablasGetKernelStream( &orig_queue );
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = (k)* (local_block_size) * sizeof( magmaDoubleComplex ); // k vecs
// Ping-pong workspace pointers: each reduction pass reads aux1, writes aux2.
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
// Stage 1: per-block partial dot products into d1
// (batched kernel for k > 1, single-vector kernel otherwise).
if (k>1) {
hipLaunchKernelGGL(( magma_zblockdot_kernel), dim3(Gs), dim3(Bs), Ms, queue, Gs.x, n, k, v, r, d1 );
}
else {
hipLaunchKernelGGL(( magma_zdot_kernel), dim3(Gs), dim3(Bs), Ms, queue, Gs.x, n, v, r, d1 );
}
/*
// not necessary to zero GPU mem
magma_zgpumemzero<<<Gs, Bs, 0>>>( d1, n*k,1 );
magma_zgpumemzero<<<Gs, Bs, 0>>>( d2, n*k,1 );
//magmablas_zlaset( MagmaUpperLower, n, k, d1, n );
//magmablas_zlaset( MagmaUpperLower, n, k, d2, n );
while( Gs.x > 1 ) {
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x;
magma_zblockreduce_kernel<<< Gs_next.x, Bs.x, Ms >>>
( Gs.x, n, k, aux1, aux2 );
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
for( int j=0; j<k; j++) {
magma_zcopyvector( 1, aux1+j*n, 1, skp+j, 1 );
}
*/
// Stage 2: repeatedly reduce the partial sums with the "fast" kernels
// (which run with half the threads and half the shared memory), swapping
// the d1/d2 ping-pong buffers after every pass until one value per
// vector remains in aux1.
if ( k>1) {
while( Gs.x > 1 ) {
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x;
// Keep at least two (pre-halving) blocks so Gs_next.x/2 >= 1 below.
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_zblockreduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue ,
Gs.x, n, k, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
else {
while( Gs.x > 1 ) {
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x;
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_zreduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue ,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
// Gather the k final scalars (one per column of the workspace) into skp.
for( int j=0; j<k; j++) {
magma_zcopyvector_async( 1, aux1+j*n, 1, skp+j, 1, queue );
}
// Restore the caller's stream before returning.
magmablasSetKernelStream( orig_queue );
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This is an extension of the merged dot product above by chunking
the set of vectors v_i such that the data always fits into cache.
It is equivalent to a matrix vecor product Vr where V
contains few rows and many columns. The computation is the same:
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaDoubleComplex_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaDoubleComplex_ptr
r
@param[in]
d1 magmaDoubleComplex_ptr
workspace
@param[in]
d2 magmaDoubleComplex_ptr
workspace
@param[out]
skp magmaDoubleComplex_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zgemvmdot(
int n,
int k,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
// Number of dot products still to compute and the column offset into v/skp.
int rows_left = k;
int offset = 0;
int chunk_size = 4;
// process in chunks of chunk_size (currently 4, not 10 as an earlier
// version stated) so the vector working set fits into cache
// - has to be adapted to hardware and precision
while( rows_left > (chunk_size) ) {
magma_zmdotc( n, chunk_size, v+offset*n, r, d1, d2, skp+offset, queue );
offset = offset + chunk_size;
rows_left = rows_left-chunk_size;
}
// process rest (the final call handles the remaining
// rows_left <= chunk_size vectors in one batch)
magma_zmdotc( n, rows_left, v+offset*n, r, d1, d2, skp+offset, queue );
return MAGMA_SUCCESS;
}
| e4cc7f839387daaba13e426271e79086338b414c.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "common_magma.h"
#define BLOCK_SIZE 256
#define PRECISION_z
// initialize arrays with zero
// Sets an n x k array (column-major, leading dimension n) to complex zero.
// One thread per row; each thread clears its row across all k columns.
__global__ void
magma_zgpumemzero(
magmaDoubleComplex * d,
int n,
int k )
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
if ( row >= n )
return;
for ( int col = 0; col < k; col++ )
d[ row + col * n ] = MAGMA_Z_MAKE( 0.0, 0.0 );
}
// dot product
// Partial dot product <v,r>: each 256-thread block multiplies its slice of
// v and r elementwise into shared memory, tree-reduces it, and writes one
// partial sum per block to vtmp[blockIdx.x]. A follow-up reduction kernel
// finishes the sum. Requires blockDim.x == 256 and blockDim.x complex
// elements of dynamic shared memory.
__global__ void
magma_zdot_kernel(
int Gs,
int n,
magmaDoubleComplex * v,
magmaDoubleComplex * r,
magmaDoubleComplex * vtmp)
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
// One elementwise product per thread, zero-padded past the vector end.
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_Z_MAKE( 0.0, 0.0);
__syncthreads();
// Shared-memory tree reduction: 256 -> 128 -> 64 partial sums.
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// Complex precisions: finish the reduction with explicit barriers.
// NOTE(review): these __syncthreads() calls sit inside a divergent branch
// (only Idx < 32 reaches them); a long-standing MAGMA idiom, but not
// guaranteed by the programming model -- confirm on new architectures.
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
// Real double precision: warp-synchronous finish via a volatile alias.
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
// Real single precision: same warp-synchronous pattern with float.
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
// Thread 0 publishes this block's partial sum.
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// dot product for multiple vectors
// Simultaneous partial dot products <v_j,r> for j = 0..k-1, one set per
// thread block.  Dynamic shared memory holds k stripes of blockDim.x
// entries (stripe j at offset j*blockDim.x); every stripe is tree-reduced
// exactly as in magma_zdot_kernel.  Thread 0 writes the k partial sums to
// vtmp[ blockIdx.x + j*n ] (leading dimension n).
__global__ void
magma_zblockdot_kernel(
int Gs,
int n,
int k,
magmaDoubleComplex * v,
magmaDoubleComplex * r,
magmaDoubleComplex * vtmp)
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// k vectors v(i)
// load v_j[i]*r[i] into stripe j; zero-pad out-of-range threads
if (i<n){
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = v[i+j*n] * r[i];
}
else {
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = MAGMA_Z_MAKE( 0.0, 0.0);
}
__syncthreads();
// tree reduction of every stripe: 256 -> 128 -> 64 live entries
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
// final 64 -> 1 stage per stripe (see magma_zdot_kernel for the
// precision-specific variants and the divergent-__syncthreads caveat)
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
// thread 0 publishes this block's k partial sums
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for multiple vectors
// Reduces the per-block partial sums produced by magma_zblockdot_kernel:
// sums the n entries of each of the k columns of vtmp (leading dimension n)
// down to one partial result per launched block, written to
// vtmp2[ blockIdx.x + j*n ].  Same k-striped shared-memory tree reduction
// as magma_zblockdot_kernel; assumes blockDim.x == 256.
__global__ void
magma_zblockreduce_kernel(
int Gs,
int n,
int k,
magmaDoubleComplex * vtmp,
magmaDoubleComplex * vtmp2 )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// load one partial per thread and stripe; zero-pad out-of-range threads
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ]
: MAGMA_Z_MAKE( 0.0, 0.0);
}
__syncthreads();
// tree reduction of every stripe: 256 -> 128 -> 64 live entries
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
// final 64 -> 1 stage per stripe (precision-specific; see the caveat on
// divergent __syncthreads in magma_zdot_kernel)
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
// thread 0 publishes this block's k partial sums
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// accelerated reduction for one vector
// Accelerated reduction of Gs partial sums (one vector).  Uses a hardcoded
// logical block size of 128 with "first add during load": each thread
// accumulates vtmp[i] and vtmp[i+128] in a grid-stride loop before the
// in-shared-memory tree reduction.  Must be launched with 128 threads per
// block and 128 shared-memory elements (the caller passes Bs.x/2, Ms/2).
// Thread 0 writes the block result to vtmp2[ blockIdx.x ].
__global__ void
magma_zreduce_kernel_fast( int Gs,
int n,
magmaDoubleComplex * vtmp,
magmaDoubleComplex * vtmp2 )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_Z_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
// grid-stride accumulation: two input elements per thread per iteration
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_Z_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
// tree reduction: 128 -> 64 live entries
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
// final 64 -> 1 stage (precision-specific; see the divergent-__syncthreads
// caveat noted in magma_zdot_kernel)
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
// thread 0 publishes this block's partial sum
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// accelerated block reduction for multiple vectors
// Accelerated reduction of Gs partial sums for k vectors simultaneously.
// Same "first add during load" scheme as magma_zreduce_kernel_fast
// (hardcoded logical blockSize = 128; launched with 128 threads and
// k*128 shared-memory elements).  Input/output vtmp/vtmp2 are stored with
// leading dimension n; thread 0 writes vtmp2[ blockIdx.x + j*n ].
__global__ void
magma_zblockreduce_kernel_fast(
int Gs,
int n,
int k,
magmaDoubleComplex * vtmp,
magmaDoubleComplex * vtmp2 )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
// grid-stride accumulation per stripe: two input elements per iteration
for( j=0; j<k; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_Z_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_Z_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
// tree reduction of every stripe: 128 -> 64 live entries
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
// final 64 -> 1 stage per stripe (precision-specific; see the
// divergent-__syncthreads caveat noted in magma_zdot_kernel)
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
// thread 0 publishes this block's k partial sums
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of vectors v_i such that
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaDoubleComplex_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaDoubleComplex_ptr
r
@param[in]
d1 magmaDoubleComplex_ptr
workspace
@param[in]
d2 magmaDoubleComplex_ptr
workspace
@param[out]
skp magmaDoubleComplex_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
// Host driver: computes skp[j] = <v_j, r> for j = 0..k-1.
// Phase 1 launches one dot-product kernel producing Gs partial sums per
// vector in d1; phase 2 repeatedly halves the number of partials with the
// "fast" reduction kernels, ping-ponging between workspaces d1 and d2,
// until one value per vector remains; that value is copied into skp.
extern "C" magma_int_t
magma_zmdotc(
int n,
int k,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
// set queue for old dense routines
magma_queue_t orig_queue;
magmablasGetKernelStream( &orig_queue );
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = (k)* (local_block_size) * sizeof( magmaDoubleComplex ); // k vecs
// aux1/aux2 ping-pong between the two workspaces; b tracks which holds
// the current partial sums
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
// phase 1: per-block partial dot products into d1
if (k>1) {
magma_zblockdot_kernel<<<Gs, Bs, Ms, queue>>>( Gs.x, n, k, v, r, d1 );
}
else {
magma_zdot_kernel<<<Gs, Bs, Ms, queue>>>( Gs.x, n, v, r, d1 );
}
/*
// not necessary to zero GPU mem
magma_zgpumemzero<<<Gs, Bs, 0>>>( d1, n*k,1 );
magma_zgpumemzero<<<Gs, Bs, 0>>>( d2, n*k,1 );
//magmablas_zlaset( MagmaUpperLower, n, k, d1, n );
//magmablas_zlaset( MagmaUpperLower, n, k, d2, n );
while( Gs.x > 1 ) {
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x;
magma_zblockreduce_kernel<<< Gs_next.x, Bs.x, Ms >>>
( Gs.x, n, k, aux1, aux2 );
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
for( int j=0; j<k; j++) {
magma_zcopyvector( 1, aux1+j*n, 1, skp+j, 1 );
}
*/
// phase 2: the fast kernels run with half the threads/shared memory
// (Bs.x/2 == 128, Ms/2) and each block consumes two input chunks, so the
// grid is halved as well (Gs_next is forced even first).
if ( k>1) {
while( Gs.x > 1 ) {
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x;
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_zblockreduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2, queue >>>
( Gs.x, n, k, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
// swap the roles of d1/d2 for the next pass
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
else {
while( Gs.x > 1 ) {
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x;
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_zreduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2, queue >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
// aux1 now holds the k final sums at stride n; gather them into skp
for( int j=0; j<k; j++) {
magma_zcopyvector_async( 1, aux1+j*n, 1, skp+j, 1, queue );
}
magmablasSetKernelStream( orig_queue );
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This is an extension of the merged dot product above by chunking
the set of vectors v_i such that the data always fits into cache.
It is equivalent to a matrix vector product Vr where V
contains few rows and many columns. The computation is the same:
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaDoubleComplex_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaDoubleComplex_ptr
r
@param[in]
d1 magmaDoubleComplex_ptr
workspace
@param[in]
d2 magmaDoubleComplex_ptr
workspace
@param[out]
skp magmaDoubleComplex_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
// Chunked driver around magma_zmdotc: processes the k vectors in groups of
// chunk_size so each magma_zmdotc call works on a cache-friendly subset,
// then handles the remaining (<= chunk_size) vectors in one final call.
extern "C" magma_int_t
magma_zgemvmdot(
int n,
int k,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
int rows_left = k;
int offset = 0;
int chunk_size = 4;
// process in chunks of chunk_size (currently 4) - has to be adapted to
// hardware and precision
while( rows_left > (chunk_size) ) {
magma_zmdotc( n, chunk_size, v+offset*n, r, d1, d2, skp+offset, queue );
offset = offset + chunk_size;
rows_left = rows_left-chunk_size;
}
// process rest (between 1 and chunk_size vectors)
magma_zmdotc( n, rows_left, v+offset*n, r, d1, d2, skp+offset, queue );
return MAGMA_SUCCESS;
}
|
4efc84412eee4e4a49b3326f8654160c62f29ff9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/selected_rows/adam_kernel.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/copy_kernel.h"
#include "paddle/phi/kernels/funcs/adam_functors.h"
#include "paddle/phi/kernels/funcs/for_range.h"
namespace phi {
namespace sr {
// Advance the Adam beta-power accumulators one step on the device:
// beta*_pow_out = beta* * beta*_pow_in.  Launched with a single small
// block; every launched thread performs the same scalar writes, so the
// redundant stores are benign.
template <typename T>
__global__ void UpdateBetaPow(T beta1,
                              T beta2,
                              const T* beta1_pow_,
                              const T* beta2_pow_,
                              T* beta1_pow_out,
                              T* beta2_pow_out) {
  beta1_pow_out[0] = beta1 * (*beta1_pow_);
  beta2_pow_out[0] = beta2 * (*beta2_pow_);
}
// Adam step for a dense parameter with a SelectedRows (sparse-row)
// gradient.  "REG" variant: beta1_pow/beta2_pow are passed by value (they
// live on the CPU), avoiding device reads for them.  Grid-stride loop over
// all `ndim` parameter elements; rows_ is the sorted list of rows that have
// gradient entries, searched per element.
template <typename T, typename MT>
__global__ void SparseAdamCUDAKernelREG(MT beta1,
MT beta2,
MT epsilon,
const MT beta1_pow,
const MT beta2_pow,
const MT* mom1_,
MT* mom1_out_,
const MT* mom2_,
MT* mom2_out_,
const MT* lr_,
const T* grad_,
const T* param_,
T* param_out_,
const MT* master_param,
MT* master_param_out,
const int64_t* rows_,
int64_t row_numel,
int64_t row_count,
bool lazy_mode,
int ndim) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
MT lr = *lr_;
// grid-stride loop over parameter elements
for (; id < ndim; id += blockDim.x * gridDim.x) {
// locate this element's row in the sparse-row list (negative if absent)
auto row_idx =
phi::funcs::BinarySearch<int64_t>(rows_, row_count, id / row_numel);
if (lazy_mode && row_idx < 0) {
// NOTE(review): `return` ends the whole grid-stride loop for this
// thread, not just the current element - confirm this is intended
// when the grid is smaller than ndim.
return;
} else {
MT mom1 = mom1_[id];
MT mom2 = mom2_[id];
// read the parameter in math precision (master copy when available)
MT p = master_param ? master_param[id] : static_cast<MT>(param_[id]);
// gradient is zero for elements whose row has no sparse entry
MT g = row_idx >= 0
? static_cast<MT>(grad_[row_idx * row_numel + id % row_numel])
: static_cast<MT>(0);
// standard Adam moment updates with bias correction folded into the
// denominator and learning rate
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
MT denom =
(sqrt(mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon;
p += (mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow)));
// Write back to global memory
mom1_out_[id] = mom1;
mom2_out_[id] = mom2;
param_out_[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
}
// Host-side driver for the Adam update with a dense parameter and a
// SelectedRows (sparse-row) gradient.
// Steps: honor skip_update (pure copy-through), read beta/epsilon scalars,
// resolve optional multi-precision master weights, merge duplicate gradient
// rows when the row list is not strictly sorted, then dispatch one of two
// paths depending on where the beta-power tensors live:
//   - on CPU: launch SparseAdamCUDAKernelREG with the pow values passed by
//     register and update the pows on the host;
//   - on GPU: run SparseAdamFunctor via ForRange and bump the pows with the
//     tiny UpdateBetaPow kernel.
// When use_global_beta_pow is set, the pow outputs are not written here.
template <typename T, typename Context>
void AdamDenseParamSparseGradKernel(
const Context& dev_ctx,
const DenseTensor& param,
const SelectedRows& grad,
const DenseTensor& learning_rate,
const DenseTensor& moment1,
const DenseTensor& moment2,
const DenseTensor& beta1_pow,
const DenseTensor& beta2_pow,
const paddle::optional<DenseTensor>& master_param,
const paddle::optional<DenseTensor>& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
bool lazy_mode,
int64_t min_row_size_to_use_multithread,
bool multi_precision,
bool use_global_beta_pow,
DenseTensor* param_out,
DenseTensor* moment1_out,
DenseTensor* moment2_out,
DenseTensor* beta1_pow_out,
DenseTensor* beta2_pow_out,
DenseTensor* master_param_outs) {
using MPDType = typename phi::dtype::MPTypeTrait<T>::Type;
VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow;
// skip_update is an optional 1-element tensor; fetch its value to host
bool skip_update_ = false;
if (skip_update.is_initialized()) {
PADDLE_ENFORCE_EQ(
skip_update->numel(),
1,
errors::InvalidArgument("Input(SkipUpdate) size must be 1, but get %d",
skip_update->numel()));
std::vector<bool> skip_update_vec;
paddle::framework::TensorToVector(*skip_update, dev_ctx, &skip_update_vec);
skip_update_ = skip_update_vec[0];
}
// skip_update=true, just copy input to output, and TensorCopy will call
// mutable_data
if (skip_update_) {
VLOG(4) << "Adam skip update";
phi::Copy(dev_ctx, param, dev_ctx.GetPlace(), false, param_out);
phi::Copy(dev_ctx, moment1, dev_ctx.GetPlace(), false, moment1_out);
phi::Copy(dev_ctx, moment2, dev_ctx.GetPlace(), false, moment2_out);
phi::Copy(dev_ctx, beta1_pow, beta1_pow.place(), false, beta1_pow_out);
phi::Copy(dev_ctx, beta2_pow, beta2_pow.place(), false, beta2_pow_out);
return;
}
// hyper-parameters arrive as Scalars; convert to the math precision type
MPDType beta1_ = beta1.to<MPDType>();
MPDType beta2_ = beta2.to<MPDType>();
MPDType epsilon_ = epsilon.to<MPDType>();
VLOG(3) << "beta1_pow.numel() : " << beta1_pow.numel()
<< "beta2_pow.numel() : " << beta2_pow.numel();
VLOG(3) << "param.numel(): " << param.numel();
PADDLE_ENFORCE_EQ(
beta1_pow_out->numel(),
1,
errors::InvalidArgument("beta1 pow output size should be 1, but received "
"value is:%d.",
beta1_pow_out->numel()));
PADDLE_ENFORCE_EQ(
beta2_pow_out->numel(),
1,
errors::InvalidArgument("beta2 pow output size should be 1, but received "
"value is:%d.",
beta2_pow_out->numel()));
// optional higher-precision master weights for mixed-precision training
const MPDType* master_in_data =
multi_precision ? master_param->data<MPDType>() : nullptr;
MPDType* master_out_data =
multi_precision ? dev_ctx.template Alloc<MPDType>(master_param_outs)
: nullptr;
// nothing to do when the sparse gradient contains no rows
if (grad.rows().size() == 0) {
VLOG(3) << "grad row size is 0!!";
return;
}
// the kernels binary-search the row list, so it must be strictly sorted
std::vector<int64_t> cpu_rows(grad.rows().begin(), grad.rows().end());
bool is_strict_sorted = true;
for (size_t i = 1; i < cpu_rows.size(); ++i) {
if (cpu_rows[i - 1] >= cpu_rows[i]) {
is_strict_sorted = false;
break;
}
}
phi::SelectedRows tmp_grad_merge;
const phi::SelectedRows* grad_merge_ptr;
if (is_strict_sorted) {
grad_merge_ptr = &grad;
} else {
// merge duplicated rows if any.
// The rows of grad_merge have been sorted inside MergeAdd functor
paddle::operators::math::scatter::MergeAdd<Context, T> merge_func;
merge_func(dev_ctx, grad, &tmp_grad_merge, true);
grad_merge_ptr = &tmp_grad_merge;
}
auto& grad_merge = *grad_merge_ptr;
auto& grad_tensor = grad_merge.value();
const T* grad_data = grad_tensor.template data<T>();
auto* grad_merge_rows = &grad_merge.rows();
paddle::framework::MixVector<int64_t> mixv_grad_merge_rows(grad_merge_rows);
const int64_t* rows = mixv_grad_merge_rows.Data(dev_ctx.GetPlace());
auto row_numel = grad_tensor.numel() / grad_merge.rows().size();
if (beta1_pow.place() == CPUPlace() && beta2_pow.place() == CPUPlace()) {
// beta pows are host-resident: pass their scalar values by register
int threads = 512;
int ndim = param.numel();
int blocks = (ndim + threads - 1) / threads;
hipLaunchKernelGGL(( SparseAdamCUDAKernelREG<T, MPDType>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
beta1_,
beta2_,
epsilon_,
*beta1_pow.data<MPDType>(),
*beta2_pow.data<MPDType>(),
moment1.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment1_out),
moment2.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment2_out),
learning_rate.data<MPDType>(),
grad_data,
param.data<T>(),
dev_ctx.template Alloc<T>(param_out),
master_in_data,
master_out_data,
rows,
row_numel,
grad_merge.rows().size(),
lazy_mode,
ndim);
if (!use_global_beta_pow) {
// Update with cpu
dev_ctx.template HostAlloc<MPDType>(beta1_pow_out)[0] =
beta1_ * beta1_pow.data<MPDType>()[0];
dev_ctx.template HostAlloc<MPDType>(beta2_pow_out)[0] =
beta2_ * beta2_pow.data<MPDType>()[0];
}
} else {
// beta pows are device-resident: the functor dereferences them on device
funcs::SparseAdamFunctor<T, funcs::GPUAdam, MPDType> functor(
beta1_,
beta2_,
epsilon_,
beta1_pow.data<MPDType>(),
beta2_pow.data<MPDType>(),
moment1.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment1_out),
moment2.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment2_out),
learning_rate.data<MPDType>(),
grad_data,
param.data<T>(),
dev_ctx.template Alloc<T>(param_out),
master_in_data,
master_out_data,
rows,
row_numel,
grad_merge.rows().size(),
lazy_mode);
// FIXME(minqiyang): remove BinarySearch in GPU later
funcs::ForRange<Context> for_range(dev_ctx, param.numel());
for_range(functor);
if (!use_global_beta_pow) {
// update beta1 and beta2
hipLaunchKernelGGL(( UpdateBetaPow<MPDType>), dim3(1), dim3(32), 0, dev_ctx.stream(),
beta1_,
beta2_,
beta1_pow.data<MPDType>(),
beta2_pow.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(beta1_pow_out),
dev_ctx.template Alloc<MPDType>(beta2_pow_out));
}
}
}
} // namespace sr
} // namespace phi
// Register the sparse-gradient Adam kernel for GPU in float, double, and
// float16.  The trailing body marks inputs 5/6/8 (beta1_pow, beta2_pow,
// skip_update) as backend-agnostic so no device transfer is forced on them.
PD_REGISTER_KERNEL(adam_dense_param_sparse_grad,
GPU,
ALL_LAYOUT,
phi::sr::AdamDenseParamSparseGradKernel,
float,
double,
phi::dtype::float16) {
// Skip beta1_pow, beta2_pow, skip_update data transform
kernel->InputAt(5).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(6).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(8).SetBackend(phi::Backend::ALL_BACKEND);
}
| 4efc84412eee4e4a49b3326f8654160c62f29ff9.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/selected_rows/adam_kernel.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/copy_kernel.h"
#include "paddle/phi/kernels/funcs/adam_functors.h"
#include "paddle/phi/kernels/funcs/for_range.h"
namespace phi {
namespace sr {
// Advance the Adam beta-power accumulators one step on the device:
// beta*_pow_out = beta* * beta*_pow_in.  Launched with a single small
// block; every launched thread performs the same scalar writes, so the
// redundant stores are benign.
template <typename T>
__global__ void UpdateBetaPow(T beta1,
                              T beta2,
                              const T* beta1_pow_,
                              const T* beta2_pow_,
                              T* beta1_pow_out,
                              T* beta2_pow_out) {
  beta1_pow_out[0] = beta1 * (*beta1_pow_);
  beta2_pow_out[0] = beta2 * (*beta2_pow_);
}
// Adam step for a dense parameter with a SelectedRows (sparse-row)
// gradient.  "REG" variant: beta1_pow/beta2_pow are passed by value (they
// live on the CPU), avoiding device reads for them.  Grid-stride loop over
// all `ndim` parameter elements; rows_ is the sorted list of rows that have
// gradient entries, searched per element.
template <typename T, typename MT>
__global__ void SparseAdamCUDAKernelREG(MT beta1,
MT beta2,
MT epsilon,
const MT beta1_pow,
const MT beta2_pow,
const MT* mom1_,
MT* mom1_out_,
const MT* mom2_,
MT* mom2_out_,
const MT* lr_,
const T* grad_,
const T* param_,
T* param_out_,
const MT* master_param,
MT* master_param_out,
const int64_t* rows_,
int64_t row_numel,
int64_t row_count,
bool lazy_mode,
int ndim) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
MT lr = *lr_;
// grid-stride loop over parameter elements
for (; id < ndim; id += blockDim.x * gridDim.x) {
// locate this element's row in the sparse-row list (negative if absent)
auto row_idx =
phi::funcs::BinarySearch<int64_t>(rows_, row_count, id / row_numel);
if (lazy_mode && row_idx < 0) {
// NOTE(review): `return` ends the whole grid-stride loop for this
// thread, not just the current element - confirm this is intended
// when the grid is smaller than ndim.
return;
} else {
MT mom1 = mom1_[id];
MT mom2 = mom2_[id];
// read the parameter in math precision (master copy when available)
MT p = master_param ? master_param[id] : static_cast<MT>(param_[id]);
// gradient is zero for elements whose row has no sparse entry
MT g = row_idx >= 0
? static_cast<MT>(grad_[row_idx * row_numel + id % row_numel])
: static_cast<MT>(0);
// standard Adam moment updates with bias correction folded into the
// denominator and learning rate
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
MT denom =
(sqrt(mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon;
p += (mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow)));
// Write back to global memory
mom1_out_[id] = mom1;
mom2_out_[id] = mom2;
param_out_[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
}
// Host-side driver for the Adam update with a dense parameter and a
// SelectedRows (sparse-row) gradient.
// Steps: honor skip_update (pure copy-through), read beta/epsilon scalars,
// resolve optional multi-precision master weights, merge duplicate gradient
// rows when the row list is not strictly sorted, then dispatch one of two
// paths depending on where the beta-power tensors live:
//   - on CPU: launch SparseAdamCUDAKernelREG with the pow values passed by
//     register and update the pows on the host;
//   - on GPU: run SparseAdamFunctor via ForRange and bump the pows with the
//     tiny UpdateBetaPow kernel.
// When use_global_beta_pow is set, the pow outputs are not written here.
template <typename T, typename Context>
void AdamDenseParamSparseGradKernel(
const Context& dev_ctx,
const DenseTensor& param,
const SelectedRows& grad,
const DenseTensor& learning_rate,
const DenseTensor& moment1,
const DenseTensor& moment2,
const DenseTensor& beta1_pow,
const DenseTensor& beta2_pow,
const paddle::optional<DenseTensor>& master_param,
const paddle::optional<DenseTensor>& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
bool lazy_mode,
int64_t min_row_size_to_use_multithread,
bool multi_precision,
bool use_global_beta_pow,
DenseTensor* param_out,
DenseTensor* moment1_out,
DenseTensor* moment2_out,
DenseTensor* beta1_pow_out,
DenseTensor* beta2_pow_out,
DenseTensor* master_param_outs) {
using MPDType = typename phi::dtype::MPTypeTrait<T>::Type;
VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow;
// skip_update is an optional 1-element tensor; fetch its value to host
bool skip_update_ = false;
if (skip_update.is_initialized()) {
PADDLE_ENFORCE_EQ(
skip_update->numel(),
1,
errors::InvalidArgument("Input(SkipUpdate) size must be 1, but get %d",
skip_update->numel()));
std::vector<bool> skip_update_vec;
paddle::framework::TensorToVector(*skip_update, dev_ctx, &skip_update_vec);
skip_update_ = skip_update_vec[0];
}
// skip_update=true, just copy input to output, and TensorCopy will call
// mutable_data
if (skip_update_) {
VLOG(4) << "Adam skip update";
phi::Copy(dev_ctx, param, dev_ctx.GetPlace(), false, param_out);
phi::Copy(dev_ctx, moment1, dev_ctx.GetPlace(), false, moment1_out);
phi::Copy(dev_ctx, moment2, dev_ctx.GetPlace(), false, moment2_out);
phi::Copy(dev_ctx, beta1_pow, beta1_pow.place(), false, beta1_pow_out);
phi::Copy(dev_ctx, beta2_pow, beta2_pow.place(), false, beta2_pow_out);
return;
}
// hyper-parameters arrive as Scalars; convert to the math precision type
MPDType beta1_ = beta1.to<MPDType>();
MPDType beta2_ = beta2.to<MPDType>();
MPDType epsilon_ = epsilon.to<MPDType>();
VLOG(3) << "beta1_pow.numel() : " << beta1_pow.numel()
<< "beta2_pow.numel() : " << beta2_pow.numel();
VLOG(3) << "param.numel(): " << param.numel();
PADDLE_ENFORCE_EQ(
beta1_pow_out->numel(),
1,
errors::InvalidArgument("beta1 pow output size should be 1, but received "
"value is:%d.",
beta1_pow_out->numel()));
PADDLE_ENFORCE_EQ(
beta2_pow_out->numel(),
1,
errors::InvalidArgument("beta2 pow output size should be 1, but received "
"value is:%d.",
beta2_pow_out->numel()));
// optional higher-precision master weights for mixed-precision training
const MPDType* master_in_data =
multi_precision ? master_param->data<MPDType>() : nullptr;
MPDType* master_out_data =
multi_precision ? dev_ctx.template Alloc<MPDType>(master_param_outs)
: nullptr;
// nothing to do when the sparse gradient contains no rows
if (grad.rows().size() == 0) {
VLOG(3) << "grad row size is 0!!";
return;
}
// the kernels binary-search the row list, so it must be strictly sorted
std::vector<int64_t> cpu_rows(grad.rows().begin(), grad.rows().end());
bool is_strict_sorted = true;
for (size_t i = 1; i < cpu_rows.size(); ++i) {
if (cpu_rows[i - 1] >= cpu_rows[i]) {
is_strict_sorted = false;
break;
}
}
phi::SelectedRows tmp_grad_merge;
const phi::SelectedRows* grad_merge_ptr;
if (is_strict_sorted) {
grad_merge_ptr = &grad;
} else {
// merge duplicated rows if any.
// The rows of grad_merge have been sorted inside MergeAdd functor
paddle::operators::math::scatter::MergeAdd<Context, T> merge_func;
merge_func(dev_ctx, grad, &tmp_grad_merge, true);
grad_merge_ptr = &tmp_grad_merge;
}
auto& grad_merge = *grad_merge_ptr;
auto& grad_tensor = grad_merge.value();
const T* grad_data = grad_tensor.template data<T>();
auto* grad_merge_rows = &grad_merge.rows();
paddle::framework::MixVector<int64_t> mixv_grad_merge_rows(grad_merge_rows);
const int64_t* rows = mixv_grad_merge_rows.Data(dev_ctx.GetPlace());
auto row_numel = grad_tensor.numel() / grad_merge.rows().size();
if (beta1_pow.place() == CPUPlace() && beta2_pow.place() == CPUPlace()) {
// beta pows are host-resident: pass their scalar values by register
int threads = 512;
int ndim = param.numel();
int blocks = (ndim + threads - 1) / threads;
SparseAdamCUDAKernelREG<T, MPDType>
<<<blocks, threads, 0, dev_ctx.stream()>>>(
beta1_,
beta2_,
epsilon_,
*beta1_pow.data<MPDType>(),
*beta2_pow.data<MPDType>(),
moment1.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment1_out),
moment2.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment2_out),
learning_rate.data<MPDType>(),
grad_data,
param.data<T>(),
dev_ctx.template Alloc<T>(param_out),
master_in_data,
master_out_data,
rows,
row_numel,
grad_merge.rows().size(),
lazy_mode,
ndim);
if (!use_global_beta_pow) {
// Update with cpu
dev_ctx.template HostAlloc<MPDType>(beta1_pow_out)[0] =
beta1_ * beta1_pow.data<MPDType>()[0];
dev_ctx.template HostAlloc<MPDType>(beta2_pow_out)[0] =
beta2_ * beta2_pow.data<MPDType>()[0];
}
} else {
// beta pows are device-resident: the functor dereferences them on device
funcs::SparseAdamFunctor<T, funcs::GPUAdam, MPDType> functor(
beta1_,
beta2_,
epsilon_,
beta1_pow.data<MPDType>(),
beta2_pow.data<MPDType>(),
moment1.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment1_out),
moment2.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment2_out),
learning_rate.data<MPDType>(),
grad_data,
param.data<T>(),
dev_ctx.template Alloc<T>(param_out),
master_in_data,
master_out_data,
rows,
row_numel,
grad_merge.rows().size(),
lazy_mode);
// FIXME(minqiyang): remove BinarySearch in GPU later
funcs::ForRange<Context> for_range(dev_ctx, param.numel());
for_range(functor);
if (!use_global_beta_pow) {
// update beta1 and beta2
UpdateBetaPow<MPDType><<<1, 32, 0, dev_ctx.stream()>>>(
beta1_,
beta2_,
beta1_pow.data<MPDType>(),
beta2_pow.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(beta1_pow_out),
dev_ctx.template Alloc<MPDType>(beta2_pow_out));
}
}
}
} // namespace sr
} // namespace phi
// Register the sparse-gradient Adam kernel for GPU in float, double, and
// float16.  The trailing body marks inputs 5/6/8 (beta1_pow, beta2_pow,
// skip_update) as backend-agnostic so no device transfer is forced on them.
PD_REGISTER_KERNEL(adam_dense_param_sparse_grad,
GPU,
ALL_LAYOUT,
phi::sr::AdamDenseParamSparseGradKernel,
float,
double,
phi::dtype::float16) {
// Skip beta1_pow, beta2_pow, skip_update data transform
kernel->InputAt(5).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(6).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(8).SetBackend(phi::Backend::ALL_BACKEND);
}
|
6a0390bc58a0d6504d06ba34f8e5d84430dd9388.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Element-wise mix of the input signal with reference sine/cosine
// waveforms: op_cosine[i] = input[i]*cosine[i], op_sine[i] = input[i]*sine[i].
// NOTE(review): no bounds guard - the launch configuration must cover
// exactly the array length; verify at the call site.
__global__ void multi_sine_cosine(float *dev_sine, float *dev_cosine, float *dev_op_sine, float *dev_op_cosine, float *dev_input_host){
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const float sample = dev_input_host[i];
    dev_op_cosine[i] = sample * dev_cosine[i];
    dev_op_sine[i] = sample * dev_sine[i];
}
// Pack the filtered cosine/sine channels into a complex array:
// comp[i].x = cosine_conv[i], comp[i].y = -sine_conv[i].
// Grid-stride loop over c elements, so any launch configuration is valid.
__global__ void comp(hipfftComplex *dev_comp, float *dev_op_sine_conv, float *dev_op_cosine_conv, int c){
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < c; i += stride) {
        dev_comp[i].x = dev_op_cosine_conv[i];
        dev_comp[i].y = -dev_op_sine_conv[i];
    }
}
| 6a0390bc58a0d6504d06ba34f8e5d84430dd9388.cu | // GPU kernel function for multiplication of input signal with cosine and sine function ....
// Element-wise mix of the input signal with reference sine/cosine
// waveforms: op_cosine[i] = input[i]*cosine[i], op_sine[i] = input[i]*sine[i].
// NOTE(review): no bounds guard - the launch configuration must cover
// exactly the array length; verify at the call site.
__global__ void multi_sine_cosine(float *dev_sine, float *dev_cosine, float *dev_op_sine, float *dev_op_cosine, float *dev_input_host){
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const float sample = dev_input_host[i];
    dev_op_cosine[i] = sample * dev_cosine[i];
    dev_op_sine[i] = sample * dev_sine[i];
}
__global__ void comp(cufftComplex *dev_comp, float *dev_op_sine_conv, float *dev_op_cosine_conv, int c){
int i;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(i = idx; i < c; i+=stride){
dev_comp[i].x = dev_op_cosine_conv[i];
dev_comp[i].y = -1*dev_op_sine_conv[i];
}
}
|
4f0136022e6fac8e4c6d089c89244119218269ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
/*
* data from one GPU to another, within the same node.
* By enabling peer-to-peer transfers, copies betwwen GPUs go directly over the PCIs bus.
* If P2P is not enabled, host memory must be used as a staging area of GPU-to-GPU cudaMemcpys.
* SingGPU is not run.
*/
inline bool isCapableP2P(int ngpus)
{
hipDeviceProp_t prop[ngpus];
int iCount = 0;
for (int i = 0; i < ngpus; i++)
{
CHECK(hipGetDeviceProperties(&prop[i], i));
if (prop[i].major >= 2) iCount++;
printf("> GPU%d: %s %s capable of Peer-to-Peer access\n",i,
prop[i].name, (prop[i].major >= 2 ? "is" : "not"));
}
if (iCount != ngpus)
{
printf("> no enough device to run this application\n");
}
return (iCount == ngpus);
}
/*
* enable P2P memcopies between GPUs (all GPU must be compute capability 2.0 or later)
*/
inline void enableP2P(int ngpus)
{
for (int i = 0; i < ngpus; i++)
{
CHECK(hipSetDevice(i));
for (int j = 0; j < ngpus; j++)
{
if (i == j) continue;
int peer_access_available = 0;
CHECK(hipDeviceCanAccessPeer(&peer_access_available, i, j));
if (peer_access_available)
{
CHECK(hipDeviceEnablePeerAccess(j, 0));
printf("> GPU%d enabled direct access to GPU%d\n", i, j);
}
else
{
printf("(%d, %d)\n", i, j);
}
}
}
}
void initialData(float *ip, int size)
{
for (int i = 0; i < size; i++)
{
ip[i] = (float)rand() / (float)RAND_MAX;
}
}
inline void disableP2P(int ngpus)
{
for (int i = 0; i < ngpus; i++)
{
CHECK(hipSetDevice(i));
for (int j = 0; j < ngpus; j++)
{
if (i == j) continue;
int peer_access_available = 0;
CHECK(hipDeviceCanAccessPeer(&peer_access_available, i, j));
if (peer_access_available)
{
CHECK(hipDeviceDisablePeerAccess(j));
printf("> GPU%d disabled direct access to GPU%d\n", i, j);
}
}
}
}
int main(int argc, char **argv)
{
int ngpus;
// check device count
CHECK(hipGetDeviceCount(&ngpus));
printf("> CUDA-capable device count: %i\n", ngpus);
// check p2p capability
isCapableP2P(ngpus);
// get ngpus from command line
if (argc > 1)
{
if (atoi(argv[1]) > ngpus)
{
fprintf(stderr, "Invalid number of GPUs specified: %d is greater"
"than the total number of GPUs in this platform(%d)\n",
atoi(argv[1]), ngpus);
return 1;
}
ngpus = atoi(argv[1]);
}
if (ngpus > 2)
{
fprintf(stderr, "No more than 2 GPUs supported\n");
return 1;
}
if (ngpus > 1) enableP2P(ngpus);
// Allocate buffers
int iSize = 1024 * 1024 * 16;
const size_t iBytes = iSize * sizeof(float);
printf("\nAllocating buffers (%iMB on each GPU and CPU Host)...\n",
int(iBytes / 1024 / 1024));
float **d_src = (float **)malloc(sizeof(float) * ngpus);
float **d_rcv = (float **)malloc(sizeof(float) * ngpus);
float **h_src = (float **)malloc(sizeof(float) * ngpus);
hipStream_t *stream = (hipStream_t *)malloc(sizeof(hipStream_t) * ngpus);
// create CUDA evnet handles
hipEvent_t start, stop;
CHECK(hipSetDevice(0));
CHECK(hipEventCreate(&start));
CHECK(hipEventCreate(&stop));
for (int i = 0; i < ngpus; i++)
{
CHECK(hipSetDevice(i));
CHECK(hipMalloc(&d_src[i], iBytes));
CHECK(hipMalloc(&d_rcv[i], iBytes));
CHECK(hipHostMalloc((void **)&h_src[i], iBytes));
CHECK(hipStreamCreate(&stream[i]));
}
for (int i = 0; i < ngpus; i++)
{
initialData(h_src[i], iSize);
}
// unidirectional gmem copy
CHECK(hipSetDevice(0));
CHECK(hipEventRecord(start, 0));
for (int i = 0; i < 100; i++)
{
if (i % 2 == 0)
{
CHECK(hipMemcpy(d_src[1], d_src[0], iBytes, hipMemcpyDeviceToDevice));
}
else
{
CHECK(hipMemcpy(d_src[0], d_src[1], iBytes, hipMemcpyDeviceToDevice));
}
}
CHECK(hipSetDevice(0));
CHECK(hipEventRecord(stop, 0));
CHECK(hipEventSynchronize(stop));
float elapsed_time_ms;
CHECK(hipEventElapsedTime(&elapsed_time_ms, start, stop));
elapsed_time_ms /= 100.0f;
printf("Ping-pong unidirectional hipMemcpy:\t\t %8.2f ms", elapsed_time_ms);
printf("performance: %8.2f GB/s\n", (float)iBytes / (elapsed_time_ms * 1e6f));
// bidirectional asynchronous gmem copy
CHECK(hipEventRecord(start, 0));
for (int i = 0; i < 100; i++)
{
CHECK(hipMemcpyAsync(d_src[1], d_src[0], iBytes, hipMemcpyDeviceToDevice, stream[0]));
CHECK(hipMemcpyAsync(d_rcv[0], d_rcv[1], iBytes, hipMemcpyDeviceToDevice, stream[1]));
}
CHECK(hipSetDevice(0));
CHECK(hipEventRecord(stop, 0));
CHECK(hipEventSynchronize(stop));
elapsed_time_ms = 0.0f;
CHECK(hipEventElapsedTime(&elapsed_time_ms, start, stop));
elapsed_time_ms /= 100.0f;
printf("Ping-pong bidirectional hipMemcpyAsync:\t %8.2fms", elapsed_time_ms);
printf("performance: %8.2f GB/s\n", (float)2.0f * iBytes / (elapsed_time_ms * 1e6f));
disableP2P(ngpus);
// free memory
CHECK(hipSetDevice(0));
CHECK(hipEventDestroy(start));
CHECK(hipEventDestroy(stop));
for (int i = 0; i < ngpus; i++)
{
CHECK(hipSetDevice(i));
CHECK(hipFree(d_src[i]));
CHECK(hipFree(d_rcv[i]));
CHECK(hipStreamDestroy(stream[i]));
CHECK(hipDeviceReset());
}
exit(EXIT_SUCCESS);
} | 4f0136022e6fac8e4c6d089c89244119218269ba.cu | #include "../common/common.h"
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
/*
* data from one GPU to another, within the same node.
* By enabling peer-to-peer transfers, copies betwwen GPUs go directly over the PCIs bus.
* If P2P is not enabled, host memory must be used as a staging area of GPU-to-GPU cudaMemcpys.
* SingGPU is not run.
*/
inline bool isCapableP2P(int ngpus)
{
cudaDeviceProp prop[ngpus];
int iCount = 0;
for (int i = 0; i < ngpus; i++)
{
CHECK(cudaGetDeviceProperties(&prop[i], i));
if (prop[i].major >= 2) iCount++;
printf("> GPU%d: %s %s capable of Peer-to-Peer access\n",i,
prop[i].name, (prop[i].major >= 2 ? "is" : "not"));
}
if (iCount != ngpus)
{
printf("> no enough device to run this application\n");
}
return (iCount == ngpus);
}
/*
* enable P2P memcopies between GPUs (all GPU must be compute capability 2.0 or later)
*/
inline void enableP2P(int ngpus)
{
for (int i = 0; i < ngpus; i++)
{
CHECK(cudaSetDevice(i));
for (int j = 0; j < ngpus; j++)
{
if (i == j) continue;
int peer_access_available = 0;
CHECK(cudaDeviceCanAccessPeer(&peer_access_available, i, j));
if (peer_access_available)
{
CHECK(cudaDeviceEnablePeerAccess(j, 0));
printf("> GPU%d enabled direct access to GPU%d\n", i, j);
}
else
{
printf("(%d, %d)\n", i, j);
}
}
}
}
void initialData(float *ip, int size)
{
for (int i = 0; i < size; i++)
{
ip[i] = (float)rand() / (float)RAND_MAX;
}
}
inline void disableP2P(int ngpus)
{
for (int i = 0; i < ngpus; i++)
{
CHECK(cudaSetDevice(i));
for (int j = 0; j < ngpus; j++)
{
if (i == j) continue;
int peer_access_available = 0;
CHECK(cudaDeviceCanAccessPeer(&peer_access_available, i, j));
if (peer_access_available)
{
CHECK(cudaDeviceDisablePeerAccess(j));
printf("> GPU%d disabled direct access to GPU%d\n", i, j);
}
}
}
}
int main(int argc, char **argv)
{
int ngpus;
// check device count
CHECK(cudaGetDeviceCount(&ngpus));
printf("> CUDA-capable device count: %i\n", ngpus);
// check p2p capability
isCapableP2P(ngpus);
// get ngpus from command line
if (argc > 1)
{
if (atoi(argv[1]) > ngpus)
{
fprintf(stderr, "Invalid number of GPUs specified: %d is greater"
"than the total number of GPUs in this platform(%d)\n",
atoi(argv[1]), ngpus);
return 1;
}
ngpus = atoi(argv[1]);
}
if (ngpus > 2)
{
fprintf(stderr, "No more than 2 GPUs supported\n");
return 1;
}
if (ngpus > 1) enableP2P(ngpus);
// Allocate buffers
int iSize = 1024 * 1024 * 16;
const size_t iBytes = iSize * sizeof(float);
printf("\nAllocating buffers (%iMB on each GPU and CPU Host)...\n",
int(iBytes / 1024 / 1024));
float **d_src = (float **)malloc(sizeof(float) * ngpus);
float **d_rcv = (float **)malloc(sizeof(float) * ngpus);
float **h_src = (float **)malloc(sizeof(float) * ngpus);
cudaStream_t *stream = (cudaStream_t *)malloc(sizeof(cudaStream_t) * ngpus);
// create CUDA evnet handles
cudaEvent_t start, stop;
CHECK(cudaSetDevice(0));
CHECK(cudaEventCreate(&start));
CHECK(cudaEventCreate(&stop));
for (int i = 0; i < ngpus; i++)
{
CHECK(cudaSetDevice(i));
CHECK(cudaMalloc(&d_src[i], iBytes));
CHECK(cudaMalloc(&d_rcv[i], iBytes));
CHECK(cudaMallocHost((void **)&h_src[i], iBytes));
CHECK(cudaStreamCreate(&stream[i]));
}
for (int i = 0; i < ngpus; i++)
{
initialData(h_src[i], iSize);
}
// unidirectional gmem copy
CHECK(cudaSetDevice(0));
CHECK(cudaEventRecord(start, 0));
for (int i = 0; i < 100; i++)
{
if (i % 2 == 0)
{
CHECK(cudaMemcpy(d_src[1], d_src[0], iBytes, cudaMemcpyDeviceToDevice));
}
else
{
CHECK(cudaMemcpy(d_src[0], d_src[1], iBytes, cudaMemcpyDeviceToDevice));
}
}
CHECK(cudaSetDevice(0));
CHECK(cudaEventRecord(stop, 0));
CHECK(cudaEventSynchronize(stop));
float elapsed_time_ms;
CHECK(cudaEventElapsedTime(&elapsed_time_ms, start, stop));
elapsed_time_ms /= 100.0f;
printf("Ping-pong unidirectional cudaMemcpy:\t\t %8.2f ms", elapsed_time_ms);
printf("performance: %8.2f GB/s\n", (float)iBytes / (elapsed_time_ms * 1e6f));
// bidirectional asynchronous gmem copy
CHECK(cudaEventRecord(start, 0));
for (int i = 0; i < 100; i++)
{
CHECK(cudaMemcpyAsync(d_src[1], d_src[0], iBytes, cudaMemcpyDeviceToDevice, stream[0]));
CHECK(cudaMemcpyAsync(d_rcv[0], d_rcv[1], iBytes, cudaMemcpyDeviceToDevice, stream[1]));
}
CHECK(cudaSetDevice(0));
CHECK(cudaEventRecord(stop, 0));
CHECK(cudaEventSynchronize(stop));
elapsed_time_ms = 0.0f;
CHECK(cudaEventElapsedTime(&elapsed_time_ms, start, stop));
elapsed_time_ms /= 100.0f;
printf("Ping-pong bidirectional cudaMemcpyAsync:\t %8.2fms", elapsed_time_ms);
printf("performance: %8.2f GB/s\n", (float)2.0f * iBytes / (elapsed_time_ms * 1e6f));
disableP2P(ngpus);
// free memory
CHECK(cudaSetDevice(0));
CHECK(cudaEventDestroy(start));
CHECK(cudaEventDestroy(stop));
for (int i = 0; i < ngpus; i++)
{
CHECK(cudaSetDevice(i));
CHECK(cudaFree(d_src[i]));
CHECK(cudaFree(d_rcv[i]));
CHECK(cudaStreamDestroy(stream[i]));
CHECK(cudaDeviceReset());
}
exit(EXIT_SUCCESS);
} |
3abaaa0fcf7f9c5fd88e767668bf69a4343a4df0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ __device__ void render(float4* d_debug_float4, uint* d_debug_uint, uint * result, Node * dnode, uint imageW, uint imageH, float pas, float df)
{
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
uint tid(__umul24(threadIdx.y, blockDim.x) + threadIdx.x);
uint id=(x + y * imageW);
float4 pile[5];
uint Obj, nRec(5), n(0);
float prof, tmp;
for( int i(0); i < nRec; ++i )
pile[i] = make_float4(0.0f,0.0f,0.0f,1.0f);
if( x < imageW && y < imageH )
{
prof = 10000.0f;
result[id] = 0;
float tPixel(2.0f/float(min(imageW,imageH)));
float4 f(make_float4(0.0f,0.0f,0.0f,1.0f));
matrice3x4 M(MView);
Rayon R;
R.A = make_float3(M.m[0].w,M.m[1].w,M.m[2].w);
R.u = make_float3(M.m[0])*df
+ make_float3(M.m[2])*(float(x)-float(imageW)*0.5f)*tPixel
+ make_float3(M.m[1])*(float(y)-float(imageH)*0.5f)*tPixel;
R.u = normalize(R.u);
__syncthreads();
for( int i(0); i < nRec && n == i; i++ ) {
for( int j(0); j < numObj; j++ ) {
Node nod(cnode[j]);
Sphere s(nod.s);
float t;
s.C.x += pas;
if( nod.fg )
t = intersectionPlan(R,s.C,s.C);
else
t = intersectionSphere(R,s.C,s.r);
if( t > 0.0f && t < prof ) {
prof = t;
Obj = j;
}
}
float t = prof;
if( t > 0.0f && t < 10000.0f ) {
n++;
Node nod(cnode[Obj]);
Sphere s(nod.s);
s.C.x += pas;
float4 color(make_float4(s.R,s.V,s.B,s.A));
float3 P(R.A+R.u*t), L(normalize(make_float3(10.0f,10.0f,10.0f)-P)), V(normalize(R.A-P));
float3 N(nod.fg?getNormaleP(P):getNormale(P,s.C));
float3 Np(dot(V,N)<0.0f?(-1*N):N);
pile[i] = 0.05f * color;
if( dot(Np,L) > 0.0f && notShadowRay(cnode,P,L,pas) ) {
float3 Ri(normalize(L+V));
//Ri = (L+V)/normalize(L+V);
pile[i] += 0.3f * color* (min(1.0f,dot(Np,L)));
#ifdef FIXED_CONST_PARSE
tmp = 0.8f * pow(max(0.0f,min(1.0f,dot(Np,Ri))),50.0f);
#else
tmp = 0.8f * float2int_pow50(max(0.0f,min(1.0f,dot(Np,Ri))));
#endif
pile[i].x += tmp;
pile[i].y += tmp;
pile[i].z += tmp;
}
R.u = 2.0f*N*dot(N,V) - V;
R.u = normalize(R.u);
R.A = P+R.u*0.0001f;
}
prof = 10000.0f;
}
for( int i(n-1); i > 0; i-- )
pile[i-1] = pile[i-1] + 0.8f*pile[i];
result[id] += rgbaFloatToInt(pile[0]);
}
}
| 3abaaa0fcf7f9c5fd88e767668bf69a4343a4df0.cu | __global__ __device__ void render(float4* d_debug_float4, uint* d_debug_uint, uint * result, Node * dnode, uint imageW, uint imageH, float pas, float df)
{
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
uint tid(__umul24(threadIdx.y, blockDim.x) + threadIdx.x);
uint id=(x + y * imageW);
float4 pile[5];
uint Obj, nRec(5), n(0);
float prof, tmp;
for( int i(0); i < nRec; ++i )
pile[i] = make_float4(0.0f,0.0f,0.0f,1.0f);
if( x < imageW && y < imageH )
{
prof = 10000.0f;
result[id] = 0;
float tPixel(2.0f/float(min(imageW,imageH)));
float4 f(make_float4(0.0f,0.0f,0.0f,1.0f));
matrice3x4 M(MView);
Rayon R;
R.A = make_float3(M.m[0].w,M.m[1].w,M.m[2].w);
R.u = make_float3(M.m[0])*df
+ make_float3(M.m[2])*(float(x)-float(imageW)*0.5f)*tPixel
+ make_float3(M.m[1])*(float(y)-float(imageH)*0.5f)*tPixel;
R.u = normalize(R.u);
__syncthreads();
for( int i(0); i < nRec && n == i; i++ ) {
for( int j(0); j < numObj; j++ ) {
Node nod(cnode[j]);
Sphere s(nod.s);
float t;
s.C.x += pas;
if( nod.fg )
t = intersectionPlan(R,s.C,s.C);
else
t = intersectionSphere(R,s.C,s.r);
if( t > 0.0f && t < prof ) {
prof = t;
Obj = j;
}
}
float t = prof;
if( t > 0.0f && t < 10000.0f ) {
n++;
Node nod(cnode[Obj]);
Sphere s(nod.s);
s.C.x += pas;
float4 color(make_float4(s.R,s.V,s.B,s.A));
float3 P(R.A+R.u*t), L(normalize(make_float3(10.0f,10.0f,10.0f)-P)), V(normalize(R.A-P));
float3 N(nod.fg?getNormaleP(P):getNormale(P,s.C));
float3 Np(dot(V,N)<0.0f?(-1*N):N);
pile[i] = 0.05f * color;
if( dot(Np,L) > 0.0f && notShadowRay(cnode,P,L,pas) ) {
float3 Ri(normalize(L+V));
//Ri = (L+V)/normalize(L+V);
pile[i] += 0.3f * color* (min(1.0f,dot(Np,L)));
#ifdef FIXED_CONST_PARSE
tmp = 0.8f * pow(max(0.0f,min(1.0f,dot(Np,Ri))),50.0f);
#else
tmp = 0.8f * float2int_pow50(max(0.0f,min(1.0f,dot(Np,Ri))));
#endif
pile[i].x += tmp;
pile[i].y += tmp;
pile[i].z += tmp;
}
R.u = 2.0f*N*dot(N,V) - V;
R.u = normalize(R.u);
R.A = P+R.u*0.0001f;
}
prof = 10000.0f;
}
for( int i(n-1); i > 0; i-- )
pile[i-1] = pile[i-1] + 0.8f*pile[i];
result[id] += rgbaFloatToInt(pile[0]);
}
}
|
61faf87c2dbeb86c2a8a93d8e2c9f4dfad70c6b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of BlockLoad and BlockStore utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <iterator>
#include <stdio.h>
#include <hipcub/hipcub.hpp>
#include <cub/block/block_store.cuh>
#include <cub/iterator/cache_modified_input_iterator.cuh>
#include <cub/iterator/cache_modified_output_iterator.cuh>
#include <cub/iterator/discard_output_iterator.cuh>
#include <hipcub/hipcub.hpp>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
CachingDeviceAllocator g_allocator(true);
//---------------------------------------------------------------------
// Test kernels
//---------------------------------------------------------------------
/**
* Test load/store kernel.
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM,
typename InputIteratorT,
typename OutputIteratorT>
__launch_bounds__ (BLOCK_THREADS, 1)
__global__ void Kernel(
InputIteratorT d_in,
OutputIteratorT d_out_unguarded,
OutputIteratorT d_out_guarded,
int num_items)
{
enum
{
TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD
};
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
// Threadblock load/store abstraction types
typedef BlockLoad<InputT, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM> BlockLoad;
typedef BlockStore<OutputT, BLOCK_THREADS, ITEMS_PER_THREAD, STORE_ALGORITHM> BlockStore;
// Shared memory type for this thread block
union TempStorage
{
typename BlockLoad::TempStorage load;
typename BlockStore::TempStorage store;
};
// Allocate temp storage in shared memory
__shared__ TempStorage temp_storage;
// Threadblock work bounds
int block_offset = blockIdx.x * TILE_SIZE;
int guarded_elements = num_items - block_offset;
// Tile of items
OutputT data[ITEMS_PER_THREAD];
// Load data
BlockLoad(temp_storage.load).Load(d_in + block_offset, data);
__syncthreads();
// Store data
BlockStore(temp_storage.store).Store(d_out_unguarded + block_offset, data);
__syncthreads();
// reset data
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
data[ITEM] = OutputT();
__syncthreads();
// Load data
BlockLoad(temp_storage.load).Load(d_in + block_offset, data, guarded_elements);
__syncthreads();
// Store data
BlockStore(temp_storage.store).Store(d_out_guarded + block_offset, data, guarded_elements);
}
//---------------------------------------------------------------------
// Host testing subroutines
//---------------------------------------------------------------------
/**
* Test load/store variants
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM,
typename InputIteratorT,
typename OutputIteratorT>
void TestKernel(
T *h_in,
InputIteratorT d_in,
OutputIteratorT d_out_unguarded_itr,
OutputIteratorT d_out_guarded_itr,
T *d_out_unguarded_ptr,
T *d_out_guarded_ptr,
int grid_size,
int guarded_elements)
{
int compare;
int unguarded_elements = grid_size * BLOCK_THREADS * ITEMS_PER_THREAD;
// Test with discard output iterator
typedef typename std::iterator_traits<InputIteratorT>::difference_type OffsetT;
DiscardOutputIterator<OffsetT> discard_itr;
hipLaunchKernelGGL(( Kernel<BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>)
, dim3(grid_size), dim3(BLOCK_THREADS), 0, 0,
d_in,
discard_itr,
discard_itr,
guarded_elements);
// Test with regular output iterator
hipLaunchKernelGGL(( Kernel<BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>)
, dim3(grid_size), dim3(BLOCK_THREADS), 0, 0,
d_in,
d_out_unguarded_itr,
d_out_guarded_itr,
guarded_elements);
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
// Check results
compare = CompareDeviceResults(h_in, d_out_guarded_ptr, guarded_elements, g_verbose, g_verbose);
printf("\tGuarded: %s\n", (compare) ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Check results
compare = CompareDeviceResults(h_in, d_out_unguarded_ptr, unguarded_elements, g_verbose, g_verbose);
printf("\tUnguarded: %s\n", (compare) ? "FAIL" : "PASS");
AssertEquals(0, compare);
}
/**
* Test native pointer. Specialized for sufficient resources
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM>
void TestNative(
int grid_size,
float fraction_valid,
Int2Type<true> sufficient_resources)
{
int unguarded_elements = grid_size * BLOCK_THREADS * ITEMS_PER_THREAD;
int guarded_elements = int(fraction_valid * float(unguarded_elements));
// Allocate host arrays
T *h_in = (T*) malloc(unguarded_elements * sizeof(T));
// Allocate device arrays
T *d_in = NULL;
T *d_out_unguarded = NULL;
T *d_out_guarded = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * unguarded_elements));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out_unguarded, sizeof(T) * unguarded_elements));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out_guarded, sizeof(T) * guarded_elements));
CubDebugExit(hipMemset(d_out_unguarded, 0, sizeof(T) * unguarded_elements));
CubDebugExit(hipMemset(d_out_guarded, 0, sizeof(T) * guarded_elements));
// Initialize problem on host and device
for (int i = 0; i < unguarded_elements; ++i)
{
InitValue(INTEGER_SEED, h_in[i], i);
}
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(T) * unguarded_elements, hipMemcpyHostToDevice));
printf("TestNative "
"grid_size(%d) "
"guarded_elements(%d) "
"unguarded_elements(%d) "
"BLOCK_THREADS(%d) "
"ITEMS_PER_THREAD(%d) "
"LOAD_ALGORITHM(%d) "
"STORE_ALGORITHM(%d) "
"sizeof(T)(%d)\n",
grid_size, guarded_elements, unguarded_elements, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM, (int) sizeof(T));
TestKernel<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>(
h_in,
(T const *) d_in, // Test const
d_out_unguarded,
d_out_guarded,
d_out_unguarded,
d_out_guarded,
grid_size,
guarded_elements);
// Cleanup
if (h_in) free(h_in);
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out_unguarded) CubDebugExit(g_allocator.DeviceFree(d_out_unguarded));
if (d_out_guarded) CubDebugExit(g_allocator.DeviceFree(d_out_guarded));
}
/**
* Test native pointer. Specialized for insufficient resources
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM>
void TestNative(
int grid_size,
float fraction_valid,
Int2Type<false> sufficient_resources)
{}
/**
* Test iterator. Specialized for sufficient resources.
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM,
CacheLoadModifier LOAD_MODIFIER,
CacheStoreModifier STORE_MODIFIER>
void TestIterator(
int grid_size,
float fraction_valid,
Int2Type<true> sufficient_resources)
{
int unguarded_elements = grid_size * BLOCK_THREADS * ITEMS_PER_THREAD;
int guarded_elements = int(fraction_valid * float(unguarded_elements));
// Allocate host arrays
T *h_in = (T*) malloc(unguarded_elements * sizeof(T));
// Allocate device arrays
T *d_in = NULL;
T *d_out_unguarded = NULL;
T *d_out_guarded = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * unguarded_elements));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out_unguarded, sizeof(T) * unguarded_elements));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out_guarded, sizeof(T) * guarded_elements));
CubDebugExit(hipMemset(d_out_unguarded, 0, sizeof(T) * unguarded_elements));
CubDebugExit(hipMemset(d_out_guarded, 0, sizeof(T) * guarded_elements));
// Initialize problem on host and device
for (int i = 0; i < unguarded_elements; ++i)
{
InitValue(INTEGER_SEED, h_in[i], i);
}
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(T) * unguarded_elements, hipMemcpyHostToDevice));
printf("TestIterator "
"grid_size(%d) "
"guarded_elements(%d) "
"unguarded_elements(%d) "
"BLOCK_THREADS(%d) "
"ITEMS_PER_THREAD(%d) "
"LOAD_ALGORITHM(%d) "
"STORE_ALGORITHM(%d) "
"LOAD_MODIFIER(%d) "
"STORE_MODIFIER(%d) "
"sizeof(T)(%d)\n",
grid_size, guarded_elements, unguarded_elements, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM, LOAD_MODIFIER, STORE_MODIFIER, (int) sizeof(T));
TestKernel<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>(
h_in,
CacheModifiedInputIterator<LOAD_MODIFIER, T>(d_in),
CacheModifiedOutputIterator<STORE_MODIFIER, T>(d_out_unguarded),
CacheModifiedOutputIterator<STORE_MODIFIER, T>(d_out_guarded),
d_out_unguarded,
d_out_guarded,
grid_size,
guarded_elements);
// Cleanup
if (h_in) free(h_in);
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out_unguarded) CubDebugExit(g_allocator.DeviceFree(d_out_unguarded));
if (d_out_guarded) CubDebugExit(g_allocator.DeviceFree(d_out_guarded));
}
/**
* Test iterator. Specialized for insufficient resources.
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM,
CacheLoadModifier LOAD_MODIFIER,
CacheStoreModifier STORE_MODIFIER>
void TestIterator(
int grid_size,
float fraction_valid,
Int2Type<false> sufficient_resources)
{}
/**
* Evaluate different pointer access types
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM>
void TestPointerType(
int grid_size,
float fraction_valid)
{
// Threadblock load/store abstraction types
typedef BlockLoad<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM> BlockLoad;
typedef BlockStore<T, BLOCK_THREADS, ITEMS_PER_THREAD, STORE_ALGORITHM> BlockStore;
#if defined(SM100) || defined(SM110) || defined(SM130)
static const bool sufficient_load_smem = sizeof(typename BlockLoad::TempStorage) <= 1024 * 16;
static const bool sufficient_store_smem = sizeof(typename BlockStore::TempStorage) <= 1024 * 16;
static const bool sufficient_threads = BLOCK_THREADS <= 512;
#else
static const bool sufficient_load_smem = sizeof(typename BlockLoad::TempStorage) <= 1024 * 48;
static const bool sufficient_store_smem = sizeof(typename BlockStore::TempStorage) <= 1024 * 48;
static const bool sufficient_threads = BLOCK_THREADS <= 1024;
#endif
static const bool sufficient_resources = sufficient_load_smem && sufficient_store_smem && sufficient_threads;
TestNative<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>(grid_size, fraction_valid, Int2Type<sufficient_resources>());
TestIterator<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM, LOAD_DEFAULT, STORE_DEFAULT>(grid_size, fraction_valid, Int2Type<sufficient_resources>());
}
/**
* Evaluate different time-slicing strategies
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM>
void TestSlicedStrategy(
int grid_size,
float fraction_valid)
{
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM, true>(grid_size, fraction_valid);
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM, false>(grid_size, fraction_valid);
}
/**
* Evaluate different load/store strategies (specialized for block sizes that are not a multiple of 32)
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD>
void TestStrategy(
int grid_size,
float fraction_valid,
Int2Type<false> is_warp_multiple)
{
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_DIRECT, BLOCK_STORE_DIRECT>(grid_size, fraction_valid);
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_TRANSPOSE, BLOCK_STORE_TRANSPOSE>(grid_size, fraction_valid);
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_VECTORIZE, BLOCK_STORE_VECTORIZE>(grid_size, fraction_valid);
}
/**
* Evaluate different load/store strategies (specialized for block sizes that are a multiple of 32)
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD>
void TestStrategy(
int grid_size,
float fraction_valid,
Int2Type<true> is_warp_multiple)
{
TestStrategy<T, BLOCK_THREADS, ITEMS_PER_THREAD>(grid_size, fraction_valid, Int2Type<false>());
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE, BLOCK_STORE_WARP_TRANSPOSE>(grid_size, fraction_valid);
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED, BLOCK_STORE_WARP_TRANSPOSE_TIMESLICED>(grid_size, fraction_valid);
}
/**
* Evaluate different register blocking
*/
template <
typename T,
int BLOCK_THREADS>
void TestItemsPerThread(
int grid_size,
float fraction_valid)
{
Int2Type<BLOCK_THREADS % 32 == 0> is_warp_multiple;
TestStrategy<T, BLOCK_THREADS, 1>(grid_size, fraction_valid, is_warp_multiple);
TestStrategy<T, BLOCK_THREADS, 3>(grid_size, fraction_valid, is_warp_multiple);
TestStrategy<T, BLOCK_THREADS, 4>(grid_size, fraction_valid, is_warp_multiple);
TestStrategy<T, BLOCK_THREADS, 11>(grid_size, fraction_valid, is_warp_multiple);
}
/**
* Evaluate different thread block sizes
*/
template <typename T>
void TestThreads(
int grid_size,
float fraction_valid)
{
TestItemsPerThread<T, 15>(grid_size, fraction_valid);
TestItemsPerThread<T, 32>(grid_size, fraction_valid);
TestItemsPerThread<T, 72>(grid_size, fraction_valid);
TestItemsPerThread<T, 96>(grid_size, fraction_valid);
TestItemsPerThread<T, 128>(grid_size, fraction_valid);
}
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Get ptx version
int ptx_version;
CubDebugExit(PtxVersion(ptx_version));
#ifdef QUICK_TEST
// Compile/run quick tests
TestNative< int, 64, 2, BLOCK_LOAD_WARP_TRANSPOSE, BLOCK_STORE_WARP_TRANSPOSE>(1, 0.8f, Int2Type<true>());
TestIterator< int, 64, 2, BLOCK_LOAD_WARP_TRANSPOSE, BLOCK_STORE_WARP_TRANSPOSE, LOAD_DEFAULT, STORE_DEFAULT>(1, 0.8f, Int2Type<true>());
#else
// Compile/run thorough tests
TestThreads<char>(2, 0.8f);
TestThreads<int>(2, 0.8f);
TestThreads<long>(2, 0.8f);
TestThreads<long2>(2, 0.8f);
if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted
TestThreads<double2>(2, 0.8f);
TestThreads<TestFoo>(2, 0.8f);
TestThreads<TestBar>(2, 0.8f);
#endif
return 0;
}
| 61faf87c2dbeb86c2a8a93d8e2c9f4dfad70c6b2.cu | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of BlockLoad and BlockStore utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <iterator>
#include <stdio.h>
#include <cub/block/block_load.cuh>
#include <cub/block/block_store.cuh>
#include <cub/iterator/cache_modified_input_iterator.cuh>
#include <cub/iterator/cache_modified_output_iterator.cuh>
#include <cub/iterator/discard_output_iterator.cuh>
#include <cub/util_allocator.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
CachingDeviceAllocator g_allocator(true);
//---------------------------------------------------------------------
// Test kernels
//---------------------------------------------------------------------
/**
* Test load/store kernel.
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM,
typename InputIteratorT,
typename OutputIteratorT>
__launch_bounds__ (BLOCK_THREADS, 1)
__global__ void Kernel(
InputIteratorT d_in,
OutputIteratorT d_out_unguarded,
OutputIteratorT d_out_guarded,
int num_items)
{
// Each block processes one full tile of BLOCK_THREADS * ITEMS_PER_THREAD items.
enum
{
TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD
};
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
// Threadblock load/store abstraction types
typedef BlockLoad<InputT, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM> BlockLoad;
typedef BlockStore<OutputT, BLOCK_THREADS, ITEMS_PER_THREAD, STORE_ALGORITHM> BlockStore;
// Shared memory type for this thread block.
// A union is used because load and store never overlap: every use below is
// separated by a __syncthreads() barrier, so the storage can be shared.
union TempStorage
{
typename BlockLoad::TempStorage load;
typename BlockStore::TempStorage store;
};
// Allocate temp storage in shared memory
__shared__ TempStorage temp_storage;
// Threadblock work bounds.
// guarded_elements is how many valid items remain from this block's offset;
// it is only meaningful for the guarded (bounds-checked) pass below.
int block_offset = blockIdx.x * TILE_SIZE;
int guarded_elements = num_items - block_offset;
// Tile of items
OutputT data[ITEMS_PER_THREAD];
// Pass 1: unguarded (full-tile) load ...
BlockLoad(temp_storage.load).Load(d_in + block_offset, data);
__syncthreads();
// ... and unguarded store
BlockStore(temp_storage.store).Store(d_out_unguarded + block_offset, data);
__syncthreads();
// Reset data so a correct guarded result cannot come from leftover values
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
data[ITEM] = OutputT();
__syncthreads();
// Pass 2: guarded load, bounded by guarded_elements ...
BlockLoad(temp_storage.load).Load(d_in + block_offset, data, guarded_elements);
__syncthreads();
// ... and guarded store
BlockStore(temp_storage.store).Store(d_out_guarded + block_offset, data, guarded_elements);
}
//---------------------------------------------------------------------
// Host testing subroutines
//---------------------------------------------------------------------
/**
* Test load/store variants
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM,
typename InputIteratorT,
typename OutputIteratorT>
void TestKernel(
T *h_in,
InputIteratorT d_in,
OutputIteratorT d_out_unguarded_itr,
OutputIteratorT d_out_guarded_itr,
T *d_out_unguarded_ptr,
T *d_out_guarded_ptr,
int grid_size,
int guarded_elements)
{
int compare;
int unguarded_elements = grid_size * BLOCK_THREADS * ITEMS_PER_THREAD;
// First launch: discard-output iterator. Nothing is written, so this only
// exercises the load path and iterator plumbing; its results are not checked.
typedef typename std::iterator_traits<InputIteratorT>::difference_type OffsetT;
DiscardOutputIterator<OffsetT> discard_itr;
Kernel<BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>
<<<grid_size, BLOCK_THREADS>>>(
d_in,
discard_itr,
discard_itr,
guarded_elements);
// Second launch: real output iterators; these results are validated below.
Kernel<BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>
<<<grid_size, BLOCK_THREADS>>>(
d_in,
d_out_unguarded_itr,
d_out_guarded_itr,
guarded_elements);
// Peek (rather than get) so the sticky error, if any, is preserved.
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
// Check guarded (bounds-checked) results against the host input
compare = CompareDeviceResults(h_in, d_out_guarded_ptr, guarded_elements, g_verbose, g_verbose);
printf("\tGuarded: %s\n", (compare) ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Check unguarded (full-tile) results against the host input
compare = CompareDeviceResults(h_in, d_out_unguarded_ptr, unguarded_elements, g_verbose, g_verbose);
printf("\tUnguarded: %s\n", (compare) ? "FAIL" : "PASS");
AssertEquals(0, compare);
}
/**
* Test native pointer. Specialized for sufficient resources
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM>
void TestNative(
int grid_size,
float fraction_valid,
Int2Type<true> sufficient_resources)
{
// guarded_elements trims the problem so the last tile is partial,
// exercising the bounds-checked load/store path.
int unguarded_elements = grid_size * BLOCK_THREADS * ITEMS_PER_THREAD;
int guarded_elements = int(fraction_valid * float(unguarded_elements));
// Allocate host arrays
T *h_in = (T*) malloc(unguarded_elements * sizeof(T));
// Allocate device arrays (guarded output is deliberately sized to only
// guarded_elements so any out-of-bounds store would corrupt/fault)
T *d_in = NULL;
T *d_out_unguarded = NULL;
T *d_out_guarded = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * unguarded_elements));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out_unguarded, sizeof(T) * unguarded_elements));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out_guarded, sizeof(T) * guarded_elements));
CubDebugExit(cudaMemset(d_out_unguarded, 0, sizeof(T) * unguarded_elements))
;
CubDebugExit(cudaMemset(d_out_guarded, 0, sizeof(T) * guarded_elements));
// Initialize problem on host and device
for (int i = 0; i < unguarded_elements; ++i)
{
InitValue(INTEGER_SEED, h_in[i], i);
}
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * unguarded_elements, cudaMemcpyHostToDevice));
printf("TestNative "
"grid_size(%d) "
"guarded_elements(%d) "
"unguarded_elements(%d) "
"BLOCK_THREADS(%d) "
"ITEMS_PER_THREAD(%d) "
"LOAD_ALGORITHM(%d) "
"STORE_ALGORITHM(%d) "
"sizeof(T)(%d)\n",
grid_size, guarded_elements, unguarded_elements, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM, (int) sizeof(T));
TestKernel<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>(
h_in,
(T const *) d_in, // Test const
d_out_unguarded,
d_out_guarded,
d_out_unguarded,
d_out_guarded,
grid_size,
guarded_elements);
// Cleanup
if (h_in) free(h_in);
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out_unguarded) CubDebugExit(g_allocator.DeviceFree(d_out_unguarded));
if (d_out_guarded) CubDebugExit(g_allocator.DeviceFree(d_out_guarded));
}
/**
* Test native pointer. Specialized for insufficient resources
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM>
// No-op specialization selected (via Int2Type tag dispatch from
// TestPointerType) when the configuration would exceed the target's
// shared-memory or thread-count limits, so it is skipped at compile time.
void TestNative(
int grid_size,
float fraction_valid,
Int2Type<false> sufficient_resources)
{}
/**
* Test iterator. Specialized for sufficient resources.
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM,
CacheLoadModifier LOAD_MODIFIER,
CacheStoreModifier STORE_MODIFIER>
void TestIterator(
int grid_size,
float fraction_valid,
Int2Type<true> sufficient_resources)
{
// Same setup as TestNative, but the kernel is driven through
// cache-modified iterators instead of raw pointers.
int unguarded_elements = grid_size * BLOCK_THREADS * ITEMS_PER_THREAD;
int guarded_elements = int(fraction_valid * float(unguarded_elements));
// Allocate host arrays
T *h_in = (T*) malloc(unguarded_elements * sizeof(T));
// Allocate device arrays
T *d_in = NULL;
T *d_out_unguarded = NULL;
T *d_out_guarded = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * unguarded_elements));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out_unguarded, sizeof(T) * unguarded_elements));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out_guarded, sizeof(T) * guarded_elements));
CubDebugExit(cudaMemset(d_out_unguarded, 0, sizeof(T) * unguarded_elements));
CubDebugExit(cudaMemset(d_out_guarded, 0, sizeof(T) * guarded_elements));
// Initialize problem on host and device
for (int i = 0; i < unguarded_elements; ++i)
{
InitValue(INTEGER_SEED, h_in[i], i);
}
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * unguarded_elements, cudaMemcpyHostToDevice));
printf("TestIterator "
"grid_size(%d) "
"guarded_elements(%d) "
"unguarded_elements(%d) "
"BLOCK_THREADS(%d) "
"ITEMS_PER_THREAD(%d) "
"LOAD_ALGORITHM(%d) "
"STORE_ALGORITHM(%d) "
"LOAD_MODIFIER(%d) "
"STORE_MODIFIER(%d) "
"sizeof(T)(%d)\n",
grid_size, guarded_elements, unguarded_elements, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM, LOAD_MODIFIER, STORE_MODIFIER, (int) sizeof(T));
// Iterators wrap the device buffers; the raw pointers are still passed
// separately so TestKernel can validate results.
TestKernel<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>(
h_in,
CacheModifiedInputIterator<LOAD_MODIFIER, T>(d_in),
CacheModifiedOutputIterator<STORE_MODIFIER, T>(d_out_unguarded),
CacheModifiedOutputIterator<STORE_MODIFIER, T>(d_out_guarded),
d_out_unguarded,
d_out_guarded,
grid_size,
guarded_elements);
// Cleanup
if (h_in) free(h_in);
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out_unguarded) CubDebugExit(g_allocator.DeviceFree(d_out_unguarded));
if (d_out_guarded) CubDebugExit(g_allocator.DeviceFree(d_out_guarded));
}
/**
* Test iterator. Specialized for insufficient resources.
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM,
CacheLoadModifier LOAD_MODIFIER,
CacheStoreModifier STORE_MODIFIER>
// No-op specialization selected (via Int2Type tag dispatch from
// TestPointerType) when the configuration would exceed the target's
// shared-memory or thread-count limits, so it is skipped at compile time.
void TestIterator(
int grid_size,
float fraction_valid,
Int2Type<false> sufficient_resources)
{}
/**
* Evaluate different pointer access types
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM>
void TestPointerType(
int grid_size,
float fraction_valid)
{
// Threadblock load/store abstraction types, instantiated only to
// measure their shared-memory (TempStorage) footprint below.
typedef BlockLoad<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM> BlockLoad;
typedef BlockStore<T, BLOCK_THREADS, ITEMS_PER_THREAD, STORE_ALGORITHM> BlockStore;
// Per-architecture resource limits: sm_1x targets get 16KB shared memory
// and 512 threads/block; everything else is assumed to allow 48KB / 1024.
#if defined(SM100) || defined(SM110) || defined(SM130)
static const bool sufficient_load_smem = sizeof(typename BlockLoad::TempStorage) <= 1024 * 16;
static const bool sufficient_store_smem = sizeof(typename BlockStore::TempStorage) <= 1024 * 16;
static const bool sufficient_threads = BLOCK_THREADS <= 512;
#else
static const bool sufficient_load_smem = sizeof(typename BlockLoad::TempStorage) <= 1024 * 48;
static const bool sufficient_store_smem = sizeof(typename BlockStore::TempStorage) <= 1024 * 48;
static const bool sufficient_threads = BLOCK_THREADS <= 1024;
#endif
static const bool sufficient_resources = sufficient_load_smem && sufficient_store_smem && sufficient_threads;
// Int2Type tag dispatch routes configurations that would not fit to the
// empty specializations, so they compile but do nothing at runtime.
TestNative<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>(grid_size, fraction_valid, Int2Type<sufficient_resources>());
TestIterator<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM, LOAD_DEFAULT, STORE_DEFAULT>(grid_size, fraction_valid, Int2Type<sufficient_resources>());
}
/**
* Evaluate different time-slicing strategies
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM>
// Evaluate pointer access types for the given load/store configuration.
//
// NOTE(review): the original body passed a sixth template argument
// (true / false, a leftover time-slicing flag) to TestPointerType, which
// declares only five template parameters. Because this function template
// is never instantiated anywhere in the file, the mismatch compiled
// silently; any future call would have failed to compile. The stray
// argument is removed, leaving a single forwarding call.
void TestSlicedStrategy(
    int grid_size,
    float fraction_valid)
{
    TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>(grid_size, fraction_valid);
}
/**
* Evaluate different load/store strategies (specialized for block sizes that are not a multiple of 32)
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD>
// Strategies valid for ANY block size (no warp-multiple requirement):
// direct, transpose, and vectorized load/store.
void TestStrategy(
int grid_size,
float fraction_valid,
Int2Type<false> is_warp_multiple)
{
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_DIRECT, BLOCK_STORE_DIRECT>(grid_size, fraction_valid);
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_TRANSPOSE, BLOCK_STORE_TRANSPOSE>(grid_size, fraction_valid);
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_VECTORIZE, BLOCK_STORE_VECTORIZE>(grid_size, fraction_valid);
}
/**
* Evaluate different load/store strategies (specialized for block sizes that are a multiple of 32)
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD>
// For block sizes that ARE a multiple of the warp size: run all of the
// generic strategies, plus the warp-transpose variants that require it.
void TestStrategy(
int grid_size,
float fraction_valid,
Int2Type<true> is_warp_multiple)
{
// Reuse the generic (non-warp-multiple) strategy set first
TestStrategy<T, BLOCK_THREADS, ITEMS_PER_THREAD>(grid_size, fraction_valid, Int2Type<false>());
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE, BLOCK_STORE_WARP_TRANSPOSE>(grid_size, fraction_valid);
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED, BLOCK_STORE_WARP_TRANSPOSE_TIMESLICED>(grid_size, fraction_valid);
}
/**
* Evaluate different register blocking
*/
template <
typename T,
int BLOCK_THREADS>
// Sweep register-blocking granularities, including non-power-of-two
// grains (3, 11) that stress partial-vector code paths.
void TestItemsPerThread(
int grid_size,
float fraction_valid)
{
// Compile-time flag: does this block size allow warp-transpose strategies?
Int2Type<BLOCK_THREADS % 32 == 0> is_warp_multiple;
TestStrategy<T, BLOCK_THREADS, 1>(grid_size, fraction_valid, is_warp_multiple);
TestStrategy<T, BLOCK_THREADS, 3>(grid_size, fraction_valid, is_warp_multiple);
TestStrategy<T, BLOCK_THREADS, 4>(grid_size, fraction_valid, is_warp_multiple);
TestStrategy<T, BLOCK_THREADS, 11>(grid_size, fraction_valid, is_warp_multiple);
}
/**
* Evaluate different thread block sizes
*/
template <typename T>
// Sweep thread-block sizes, deliberately mixing warp multiples (32, 96*,
// 128) with odd sizes (15, 72) that exercise the non-warp-multiple paths.
void TestThreads(
int grid_size,
float fraction_valid)
{
TestItemsPerThread<T, 15>(grid_size, fraction_valid);
TestItemsPerThread<T, 32>(grid_size, fraction_valid);
TestItemsPerThread<T, 72>(grid_size, fraction_valid);
TestItemsPerThread<T, 96>(grid_size, fraction_valid);
TestItemsPerThread<T, 128>(grid_size, fraction_valid);
}
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Get ptx version (used below to gate double-precision coverage)
int ptx_version;
CubDebugExit(PtxVersion(ptx_version));
#ifdef QUICK_TEST
// Compile/run quick tests: one native and one iterator configuration
TestNative< int, 64, 2, BLOCK_LOAD_WARP_TRANSPOSE, BLOCK_STORE_WARP_TRANSPOSE>(1, 0.8f, Int2Type<true>());
TestIterator< int, 64, 2, BLOCK_LOAD_WARP_TRANSPOSE, BLOCK_STORE_WARP_TRANSPOSE, LOAD_DEFAULT, STORE_DEFAULT>(1, 0.8f, Int2Type<true>());
#else
// Compile/run thorough tests over primitive, vector, and struct types
TestThreads<char>(2, 0.8f);
TestThreads<int>(2, 0.8f);
TestThreads<long>(2, 0.8f);
TestThreads<long2>(2, 0.8f);
if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted
TestThreads<double2>(2, 0.8f);
TestThreads<TestFoo>(2, 0.8f);
TestThreads<TestBar>(2, 0.8f);
#endif
return 0;
}
|
0ec7186184137c1b0e5deadcdcae6fc43a1da989.hip | // !!! This is a file automatically generated by hipify!!!
///////////////////////////////////////////////////////////////////////////////
// cuda_kernel.cu
//
// Contains definitions for CUDA kernels manager
///////////////////////////////////////////////////////////////////////////////
#include "cuda_kernel.cuh"
#include <cstdio>
#include <vector>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include "log.hpp"
// Allocates a device-resident ksize x ksize element buffer and returns it
// wrapped in a CudaKernel handle. The caller owns the allocation and must
// release it with cuda_free_kernel. Allocation failure aborts via
// checkCudaErrors.
CudaKernel cuda_create_kernel(size_t ksize)
{
LOG_INFO("Creating CUDA kernel of size %lux%lu\n", ksize, ksize);
// Get size of the buffer for kernel
const auto buffer_size = (ksize * ksize * sizeof(CudaKernel::Type));
// Allocate kernel on the device
void* data;
checkCudaErrors(hipMalloc(&data, buffer_size));
// Return created kernel
return CudaKernel { (CudaKernel::Type*)data, ksize };
}
// Releases the device buffer owned by a CudaKernel created with
// cuda_create_kernel. The handle's data pointer is left dangling, so the
// handle must not be used afterwards.
void cuda_free_kernel(CudaKernel& kernel)
{
const auto data = kernel.data;
LOG_INFO("Releasing CUDA kernel 0x%p\n", data);
checkCudaErrors(hipFree(data));
}
// Page-locks (pins) the host-side kernel buffer for faster device
// transfers. Only kernel.cols is consulted for both dimensions, so the
// kernel is treated as square. Must be paired with
// cuda_host_kernel_unregister before the buffer is freed.
void cuda_host_kernel_register(const Kernel& kernel)
{
const auto ksize = kernel.cols;
const auto data = kernel.data;
LOG_INFO("Registering host kernel %lux%lu at 0x%p\n", ksize, ksize, data);
const auto buffer_size = (ksize * ksize * sizeof(float));
const auto flags = hipHostRegisterDefault;
checkCudaErrors(hipHostRegister(data, buffer_size, flags));
}
// Unpins a host kernel buffer previously registered with
// cuda_host_kernel_register.
void cuda_host_kernel_unregister(const Kernel& kernel)
{
const auto data = kernel.data;
LOG_INFO("Unregistering host kernel at 0x%p\n", data);
checkCudaErrors(hipHostUnregister(data));
}
| 0ec7186184137c1b0e5deadcdcae6fc43a1da989.cu | ///////////////////////////////////////////////////////////////////////////////
// cuda_kernel.cu
//
// Contains definitions for CUDA kernels manager
///////////////////////////////////////////////////////////////////////////////
#include "cuda_kernel.cuh"
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include "log.hpp"
// Allocates a device-resident ksize x ksize element buffer and hands it
// back wrapped in a CudaKernel. Ownership passes to the caller, who must
// release it with cuda_free_kernel; allocation failure aborts via
// checkCudaErrors.
CudaKernel cuda_create_kernel(size_t ksize)
{
    LOG_INFO("Creating CUDA kernel of size %lux%lu\n", ksize, ksize);

    // Byte count for a square grid of ksize x ksize elements.
    const size_t nbytes = ksize * ksize * sizeof(CudaKernel::Type);

    // Reserve device memory for the kernel coefficients.
    void* device_mem = nullptr;
    checkCudaErrors(cudaMalloc(&device_mem, nbytes));

    CudaKernel result { static_cast<CudaKernel::Type*>(device_mem), ksize };
    return result;
}
// Releases the device allocation owned by a CudaKernel produced by
// cuda_create_kernel; the handle's pointer dangles afterwards.
void cuda_free_kernel(CudaKernel& kernel)
{
    auto* device_mem = kernel.data;
    LOG_INFO("Releasing CUDA kernel 0x%p\n", device_mem);
    checkCudaErrors(cudaFree(device_mem));
}
// Page-locks (pins) the host-side kernel buffer so transfers to the GPU
// can run faster. Only kernel.cols is used for both dimensions, i.e. the
// kernel is treated as square. Pair with cuda_host_kernel_unregister.
void cuda_host_kernel_register(const Kernel& kernel)
{
    const auto side = kernel.cols;
    const auto host_mem = kernel.data;
    LOG_INFO("Registering host kernel %lux%lu at 0x%p\n", side, side, host_mem);

    // Pin side*side floats with the default registration flags.
    checkCudaErrors(cudaHostRegister(host_mem,
                                     side * side * sizeof(float),
                                     cudaHostRegisterDefault));
}
// Unpins a host kernel buffer previously pinned by
// cuda_host_kernel_register.
void cuda_host_kernel_unregister(const Kernel& kernel)
{
    auto* host_mem = kernel.data;
    LOG_INFO("Unregistering host kernel at 0x%p\n", host_mem);
    checkCudaErrors(cudaHostUnregister(host_mem));
}
|
967a4471fe929c455aa14d32eb5d3a51e94e4373.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
// __constant__: another address space.
// CUDA memory:
// * constant memory
// * global memory
// When there are data independent from threadID,
// one can utilize constant memory
// Device-wide increment amount, written from the host via hipMemcpyToSymbol.
__constant__ int inc;
// Device-wide running total, initialized and read back via the symbol-copy APIs.
__device__ int sum;
// NOTE(review): naming this kernel "atomicAdd" overloads the built-in
// device function of the same name that it calls; it resolves only
// because the argument lists differ. A distinct name would be safer.
__global__ void atomicAdd()
{
// the two "atomicAdd"?
// This is the overload of above function.
// better to change a name
int s = atomicAdd(&sum, inc);
// s is the value of sum BEFORE this thread's addition. sum starts at 1
// and every update adds inc, so each observed s satisfies the check.
// as long as there is one thread that does not pass assertion,
// the whole grid will terminate.
assert((s - 1) % inc == 0);
if (threadIdx.x == 0)
{
// will buffer and copy back to CPU later
printf("blockIdx.x = %d, sum = %d\n", blockIdx.x, s);
}
}
int main(int argc, char *argv[])
{
// Initialize inc and sum.
int h_inc = 3;
int h_sum = 1;
// Copy inc and sum from host memory to device memory synchronously.
hipMemcpyToSymbol(inc, &h_inc, sizeof(int));
hipMemcpyToSymbol(sum, &h_sum, sizeof(int));
// Invoke the kernel on device asynchronously.
// 3 blocks x 2 threads = 6 atomic additions of 3: final sum = 1 + 6*3 = 19.
hipLaunchKernelGGL(( atomicAdd), dim3(3), dim3(2), 0, 0, );
// Copy sum from device memory to host memory synchronously.
// The blocking copy also orders after the kernel on the default stream.
hipMemcpyFromSymbol(&h_sum, sum, sizeof(int));
// Print the result.
printf("sum = %d\n", h_sum);
// Cleanup.
// NOTE(review): none of the API calls are error-checked; a failed launch
// would silently print the stale initial value.
hipDeviceReset();
}
| 967a4471fe929c455aa14d32eb5d3a51e94e4373.cu | #include <stdio.h>
#include <assert.h>
// __constant__: another address space.
// CUDA memory:
// * constant memory
// * global memory
// When there are data independent from threadID,
// one can utilize constant memory
// Device-wide increment amount, written from the host via cudaMemcpyToSymbol.
__constant__ int inc;
// Device-wide running total, initialized and read back via the symbol-copy APIs.
__device__ int sum;
// NOTE(review): naming this kernel "atomicAdd" overloads the built-in
// device function of the same name that it calls; it resolves only
// because the argument lists differ. A distinct name would be safer.
__global__ void atomicAdd()
{
// the two "atomicAdd"?
// This is the overload of above function.
// better to change a name
int s = atomicAdd(&sum, inc);
// s is the value of sum BEFORE this thread's addition. sum starts at 1
// and every update adds inc, so each observed s satisfies the check.
// as long as there is one thread that does not pass assertion,
// the whole grid will terminate.
assert((s - 1) % inc == 0);
if (threadIdx.x == 0)
{
// will buffer and copy back to CPU later
printf("blockIdx.x = %d, sum = %d\n", blockIdx.x, s);
}
}
int main(int argc, char *argv[])
{
// Initialize inc and sum.
int h_inc = 3;
int h_sum = 1;
// Copy inc and sum from host memory to device memory synchronously.
cudaMemcpyToSymbol(inc, &h_inc, sizeof(int));
cudaMemcpyToSymbol(sum, &h_sum, sizeof(int));
// Invoke the kernel on device asynchronously.
// 3 blocks x 2 threads = 6 atomic additions of 3: final sum = 1 + 6*3 = 19.
atomicAdd<<<3, 2>>>();
// Copy sum from device memory to host memory synchronously.
// The blocking copy also orders after the kernel on the default stream.
cudaMemcpyFromSymbol(&h_sum, sum, sizeof(int));
// Print the result.
printf("sum = %d\n", h_sum);
// Cleanup.
// NOTE(review): none of the API calls are error-checked; a failed launch
// would silently print the stale initial value.
cudaDeviceReset();
}
|
6e409d54d9f58620afa5cb531f041dbe4172b521.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
//This is the working matrix multiplication code - very basic
/****
Done:
- printing of matrix in a more pleasant manner using printMatrix function
- command line arguments
- opens matrix files and reads the matrix successfully
- working array passing from main to auxiliary (loadMatrixFile) function :)
- fixed printing of matrix
- fixed erroneous matrix values by moving loading into host matrix multiplication function!
- basic move towards SN P simulation: multiplication of s0 and Msnp
- moving from multiplication to finally simulating an SN P (sort of) in a very basic manner
- MatrixAddKernel now works :)
- Can now do Ck+1 = Ck + sk * M :)
- outputs Ck+1 to a file whose filename is entered by the user
Problems:
- (fixed) MatA and MatB values are overlapping and erroneous
TODOS:
- write Ck+1 to an output file ( done )
- error checking of switch case input ( scanf of int and char )
- use multiple files + make file
- see code comments
****/
/***
**** START of AUXILIARY functions
***/
/*
START of KERNEL functions
*/
//START vector addition kernel function
// Element-wise matrix addition: Pd = Md + Nd for a Width x Width
// row-major matrix, launched as a single Width x Width thread block.
// Only threadIdx.y is used, so all Width threads sharing the same ty
// recompute and rewrite the same row with identical values; the result
// is correct but redundantly written Width times.
__global__ void MatrixAddKernel ( int *Md, int *Nd, int *Pd, int Width ){
// MatrixAddKernel<<< dimGrid, dimBlock >>>( Md, Nd, Pd, Width );
//dim3 dimBlock( Width, Width ); dim3 dimGrid( 1, 1 );
//int tx = threadIdx.x;
int ty = threadIdx.y;
//due to row-major ordering of matrix elements
//int Pvalue = 0;
// Thread row ty adds row ty of Md and Nd into row ty of Pd.
for ( int k = 0; k < Width; ++k ){
int Mdelement = Md[ ty * Width + k ];
int Ndelement = Nd[ ty * Width + k ];
Pd[ ty * Width + k ] = Mdelement + Ndelement;
}
//Pd[ ty * Width + tx ] = Pvalue;
}
//END of kernel addition
//START of kernel multiplication
// Naive matrix multiply: Pd = Md * Nd for square Width x Width row-major
// matrices. One thread per output element: thread (tx, ty) of the single
// block computes Pd[ty][tx] as the dot product of row ty of Md with
// column tx of Nd. No shared-memory tiling; every operand is re-read
// from global memory.
__global__ void MatrixMulKernel ( int *Md, int *Nd, int *Pd, int Width ){
int tx = threadIdx.x;
int ty = threadIdx.y;
//due to row-major ordering of matrix elements
int Pvalue = 0;
for ( int k = 0; k < Width; ++k ){
int Mdelement = Md[ ty * Width + k ];
int Ndelement = Nd[ k * Width + tx ];
Pvalue += Mdelement * Ndelement;
}
Pd[ ty * Width + tx ] = Pvalue;
}
//END of kernel multiplication
/*
END of KERNEL functions
*/
//Start of function to write Matrix to a text file
// Writes a square matrix to a text file in the simulator's format: two
// placeholder header values ("0 0") followed by the Width*Width elements,
// all space-separated on a single line.
//
// filename - path of the output file (overwritten if it exists)
// matrix   - row-major Width*Width int array
// Width    - side length of the square matrix
void writeMatFile( char *filename, int *matrix, int Width ) {
    FILE *fp = fopen( filename, "w" );
    if ( fp == NULL ) {
        // Previously fprintf was called on a NULL stream, crashing on
        // an unwritable path; report and bail out instead.
        printf( "\ncould not open output file: %s\n", filename );
        return;
    }
    //print dummy file data headers for now
    fprintf( fp, "0 0");
    for ( int x = 0; x < Width * Width; x++ ) {
        fprintf( fp, " %d", matrix[ x ] );
    }
    fclose( fp );
    printf( "\nCk was successfully written to filename: %s\n", filename );
}
//Start of function to print matrix
// Pretty-prints a row-major rows x columns int matrix to stdout, two
// digits per element, with blank lines framing each row.
void printMatrix ( int *M, int rows, int columns ){
    printf ( "\n \n " );
    for ( int r = 0; r < rows; r++ ){
        // Pointer to the start of row r in the flat row-major buffer.
        int *row = M + r * columns;
        for ( int c = 0; c < columns; c++ ) {
            printf ( " %02d", row[ c ] );
        }
        printf ( " \n\n " );
    }
}
//START of loadMatrixFile
// Loads a matrix from a text file laid out as two header values followed
// by matWidth*matHeight space-separated ints (the format writeMatFile
// emits). The two header values are skipped; the payload is read into z
// in row-major order, stopping early if the file runs short.
//
// filename  - path of the input file
// z         - caller-allocated buffer of at least matWidth*matHeight ints
// matWidth  - matrix width in elements
// matHeight - matrix height in elements
void loadMatrixFile( char *filename, int *z, int matWidth, int matHeight ){
    FILE *fp = fopen( filename, "r" );
    if ( fp == NULL ) {
        // Previously fscanf was called on a NULL stream, crashing on a
        // missing file; report and bail out instead.
        printf( "\ncould not open input file: %s\n", filename );
        return;
    }
    // Skip the two leading header values ("0 0" in the current format).
    int header;
    fscanf( fp, " %d", &header );
    fscanf( fp, " %d", &header );
    // Read the matrix payload.
    int count = matWidth * matHeight;
    for ( int i = 0; i < count && !feof( fp ); i++ ) {
        fscanf( fp, " %d", &z[ i ] );
    }
    fclose( fp );
}
//Start of matrix multiplication host function MatrixMul
//prototype: MatrixMul( confVec, spikVec, spikTransMat, width );
// Performs one transition step of the SN P system simulation on the GPU:
//   Ck+1 = Ck + Sk * M
// where Ck (configuration vector) is loaded from filename0, Sk (spiking
// vector) from filename1 and M (spiking transition matrix) from
// filename2, all as Width x Width row-major int matrices. The result is
// written to the file named by Cnext.
//
// file-variable mapping:
//   confVec      => matD, Od
//   spikVec      => matA, Md
//   spikTransMat => matB, Nd
//   Sk * M       => Pd
//   Ck+1 = Od + Pd => Qd, matE
//
// Changes from the original: the unused locals `outFile` and `matC`
// (allocated and freed but never read or written) were removed.
void MatrixMul( char *filename0, char *filename1, char *filename2, int Width, char *Cnext ){
    int size = Width * Width * sizeof( int );
    int *Md, *Nd, *Od, *Pd, *Qd;
    // Single block of Width x Width threads: one thread per matrix
    // element, which limits Width to what one block supports.
    dim3 dimBlock( Width, Width );
    dim3 dimGrid( 1, 1 );
    // Host-side copies of the three inputs and the result.
    int *matA = ( int * )malloc( size );//spikVec
    loadMatrixFile( filename1, matA, Width, Width );
    int *matB = ( int * )malloc( size );//spikTransMat
    loadMatrixFile( filename2, matB, Width, Width );
    int *matD = ( int * )malloc( size );//confVec
    loadMatrixFile( filename0, matD, Width, Width );
    int *matE = ( int * )malloc( size );//result Ck+1
    // Upload the inputs and allocate the device temporaries.
    hipMalloc( ( void** ) &Md, size );//spikVec
    hipMemcpy( Md, matA, size, hipMemcpyHostToDevice );
    hipMalloc( ( void** ) &Nd, size );//spikTransMat
    hipMemcpy( Nd, matB, size, hipMemcpyHostToDevice );
    hipMalloc( ( void** ) &Pd, size );//Sk * M
    hipMalloc( ( void** ) &Od, size );//confVec
    hipMemcpy( Od, matD, size, hipMemcpyHostToDevice );
    hipMalloc( ( void** ) &Qd, size );//Ck+1
    // Pd = spikVec * spikTransMat => Pd = Md * Nd
    hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, Md, Nd, Pd, Width );
    // Qd = confVec + Sk * M => Qd = Od + Pd
    hipLaunchKernelGGL(( MatrixAddKernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, Od, Pd, Qd, Width );
    // The blocking copy waits for both kernels on the default stream.
    hipMemcpy( matE, Qd, size, hipMemcpyDeviceToHost );
    writeMatFile( Cnext, matE, Width );
    free( matA ); free( matB ); free( matD ); free( matE );
    hipFree( Md ); hipFree( Nd ); hipFree ( Pd ); hipFree( Od ); hipFree( Qd );
}
//End of Matrix multiplication function MatrixMul
/***
****END of AUXILIARY functions
****/
/***
****START of MAIN function
****/
// Entry point. Usage:
//   prog configurationVector spikingVector spikingTransitionMatrix width outputfilename
// Validates the arguments, verifies the three input files are readable,
// and runs one simulation step via MatrixMul.
int main ( int argc, char *argv[ ] ) {
    if ( argc < 6 ){
        printf( "\n Format: %s configurationVector spikingVector spikingTransitionMatrix squareMatrixWidth outputfilename\n", argv[ 0 ] );
        exit( 1 );
    }
    char *confVec = argv[ 1 ];
    char *spikVec = argv[ 2 ];
    char *spikTransMat = argv[ 3 ];
    int width = atoi( argv[ 4 ] );
    char *Cnext = argv[ 5 ];
    // Reject any over-long filename. The original joined the tests with
    // && (all three had to be too long), so the guard almost never fired
    // and over-long names fell through to a segfault.
    if ( strlen( confVec ) > 20 || strlen( spikVec ) > 20 || strlen( spikTransMat ) > 20 ) {
        printf( " Filename/s was/were too long ( > 20 char ) " );
        return 1;
    }
    // Verify all three input files can be opened before doing any work.
    FILE *ptr1 = fopen( confVec, "r" );
    FILE *ptr2 = fopen( spikVec, "r" );
    FILE *ptr3 = fopen( spikTransMat, "r" );
    if ( ptr1 == 0 || ptr2 == 0 || ptr3 == 0 ) {
        // The original tested with &&, so a single missing file slipped
        // through to MatrixMul; || reports as soon as any open fails.
        printf( "\n could not open one of the following files: %s %s %s \n", spikVec, confVec, spikTransMat );
    }
    else {
        MatrixMul( confVec, spikVec, spikTransMat, width, Cnext );
    }
    // Close only the streams that actually opened (fclose(NULL) crashes).
    if ( ptr1 ) fclose( ptr1 );
    if ( ptr2 ) fclose( ptr2 );
    if ( ptr3 ) fclose( ptr3 );
    return 0;
}
/***
****END of MAIN function
***/
| 6e409d54d9f58620afa5cb531f041dbe4172b521.cu | #include <stdio.h>
//This is the working matrix multiplication code - very basic
/****
Done:
- printing of matrix in a more pleasant manner using printMatrix function
- command line arguments
- opens matrix files and reads the matrix successfully
- working array passing from main to auxiliary (loadMatrixFile) function :)
- fixed printing of matrix
- fixed erroneous matrix values by moving loading into host matrix multiplication function!
- basic move towards SN P simulation: multiplication of s0 and Msnp
- moving from multiplication to finally simulating an SN P (sort of) in a very basic manner
- MatrixAddKernel now works :)
- Can now do Ck+1 = Ck + sk * M :)
- outputs Ck+1 to a file whose filename is entered by the user
Problems:
- (fixed) MatA and MatB values are overlapping and erroneous
TODOS:
- write Ck+1 to an output file ( done )
- error checking of switch case input ( scanf of int and char )
- use multiple files + make file
- see code comments
****/
/***
**** START of AUXILIARY functions
***/
/*
START of KERNEL functions
*/
//START vector addition kernel function
// Element-wise matrix addition: Pd = Md + Nd for a Width x Width
// row-major matrix, launched as a single Width x Width thread block.
// Only threadIdx.y is used, so all Width threads sharing the same ty
// recompute and rewrite the same row with identical values; the result
// is correct but redundantly written Width times.
__global__ void MatrixAddKernel ( int *Md, int *Nd, int *Pd, int Width ){
// MatrixAddKernel<<< dimGrid, dimBlock >>>( Md, Nd, Pd, Width );
//dim3 dimBlock( Width, Width ); dim3 dimGrid( 1, 1 );
//int tx = threadIdx.x;
int ty = threadIdx.y;
//due to row-major ordering of matrix elements
//int Pvalue = 0;
// Thread row ty adds row ty of Md and Nd into row ty of Pd.
for ( int k = 0; k < Width; ++k ){
int Mdelement = Md[ ty * Width + k ];
int Ndelement = Nd[ ty * Width + k ];
Pd[ ty * Width + k ] = Mdelement + Ndelement;
}
//Pd[ ty * Width + tx ] = Pvalue;
}
//END of kernel addition
//START of kernel multiplication
// Naive matrix multiply: Pd = Md * Nd for square Width x Width row-major
// matrices. One thread per output element: thread (x, y) of the single
// block produces Pd[y][x] as the dot product of row y of Md with column
// x of Nd. No shared-memory tiling.
__global__ void MatrixMulKernel ( int *Md, int *Nd, int *Pd, int Width ){
    const int col = threadIdx.x;
    const int row = threadIdx.y;

    // Accumulate the dot product of Md's row with Nd's column.
    int acc = 0;
    for ( int k = 0; k < Width; ++k )
        acc += Md[ row * Width + k ] * Nd[ k * Width + col ];

    Pd[ row * Width + col ] = acc;
}
//END of kernel multiplication
/*
END of KERNEL functions
*/
//Start of function to write Matrix to a text file
// Writes a square matrix to a text file in the simulator's format: two
// placeholder header values ("0 0") followed by the Width*Width elements,
// all space-separated on a single line.
//
// filename - path of the output file (overwritten if it exists)
// matrix   - row-major Width*Width int array
// Width    - side length of the square matrix
void writeMatFile( char *filename, int *matrix, int Width ) {
    FILE *fp = fopen( filename, "w" );
    if ( fp == NULL ) {
        // Previously fprintf was called on a NULL stream, crashing on
        // an unwritable path; report and bail out instead.
        printf( "\ncould not open output file: %s\n", filename );
        return;
    }
    //print dummy file data headers for now
    fprintf( fp, "0 0");
    for ( int x = 0; x < Width * Width; x++ ) {
        fprintf( fp, " %d", matrix[ x ] );
    }
    fclose( fp );
    printf( "\nCk was successfully written to filename: %s\n", filename );
}
//Start of function to print matrix
// Pretty-prints a row-major rows x columns int matrix to stdout, two
// digits per element, with blank lines framing each row.
void printMatrix ( int *M, int rows, int columns ){
//assumes matrix is in row-major format
int index;
printf ( "\n \n " );
for ( int v = 0; v < rows; v++ ){
//assumes a square matrix
for ( int w = 0; w < columns; w++ ) {
// Flatten (v, w) into the row-major offset.
index = v * columns + w;
printf ( " %02d", M[ index ] );
}
printf ( " \n\n " );
}
}
//START of loadMatrixFile
// Loads a matrix from a text file laid out as two header values followed
// by matWidth*matHeight space-separated ints (the format writeMatFile
// emits). The two header values are skipped; the payload is read into z
// in row-major order, stopping early if the file runs short.
//
// filename  - path of the input file
// z         - caller-allocated buffer of at least matWidth*matHeight ints
// matWidth  - matrix width in elements
// matHeight - matrix height in elements
void loadMatrixFile( char *filename, int *z, int matWidth, int matHeight ){
    FILE *fp = fopen( filename, "r" );
    if ( fp == NULL ) {
        // Previously fscanf was called on a NULL stream, crashing on a
        // missing file; report and bail out instead.
        printf( "\ncould not open input file: %s\n", filename );
        return;
    }
    // Skip the two leading header values ("0 0" in the current format).
    int header;
    fscanf( fp, " %d", &header );
    fscanf( fp, " %d", &header );
    // Read the matrix payload.
    int count = matWidth * matHeight;
    for ( int i = 0; i < count && !feof( fp ); i++ ) {
        fscanf( fp, " %d", &z[ i ] );
    }
    fclose( fp );
}
//Start of matrix multiplication host function MatrixMul
//prototype: MatrixMul( confVec, spikVec, spikTransMat, width );
// Performs one transition step of the SN P system simulation on the GPU:
//   Ck+1 = Ck + Sk * M
// where Ck (configuration vector) is loaded from filename0, Sk (spiking
// vector) from filename1 and M (spiking transition matrix) from
// filename2, all as Width x Width row-major int matrices. The result is
// written to the file named by Cnext.
//
// file-variable mapping:
//   confVec      => matD, Od
//   spikVec      => matA, Md
//   spikTransMat => matB, Nd
//   Sk * M       => Pd
//   Ck+1 = Od + Pd => Qd, matE
//
// Changes from the original: the unused locals `outFile` and `matC`
// (allocated and freed but never read or written) were removed.
void MatrixMul( char *filename0, char *filename1, char *filename2, int Width, char *Cnext ){
    int size = Width * Width * sizeof( int );
    int *Md, *Nd, *Od, *Pd, *Qd;
    // Single block of Width x Width threads: one thread per matrix
    // element, which limits Width to what one block supports.
    dim3 dimBlock( Width, Width );
    dim3 dimGrid( 1, 1 );
    // Host-side copies of the three inputs and the result.
    int *matA = ( int * )malloc( size );//spikVec
    loadMatrixFile( filename1, matA, Width, Width );
    int *matB = ( int * )malloc( size );//spikTransMat
    loadMatrixFile( filename2, matB, Width, Width );
    int *matD = ( int * )malloc( size );//confVec
    loadMatrixFile( filename0, matD, Width, Width );
    int *matE = ( int * )malloc( size );//result Ck+1
    // Upload the inputs and allocate the device temporaries.
    cudaMalloc( ( void** ) &Md, size );//spikVec
    cudaMemcpy( Md, matA, size, cudaMemcpyHostToDevice );
    cudaMalloc( ( void** ) &Nd, size );//spikTransMat
    cudaMemcpy( Nd, matB, size, cudaMemcpyHostToDevice );
    cudaMalloc( ( void** ) &Pd, size );//Sk * M
    cudaMalloc( ( void** ) &Od, size );//confVec
    cudaMemcpy( Od, matD, size, cudaMemcpyHostToDevice );
    cudaMalloc( ( void** ) &Qd, size );//Ck+1
    // Pd = spikVec * spikTransMat => Pd = Md * Nd
    MatrixMulKernel<<< dimGrid, dimBlock >>>( Md, Nd, Pd, Width );
    // Qd = confVec + Sk * M => Qd = Od + Pd
    MatrixAddKernel<<< dimGrid, dimBlock >>>( Od, Pd, Qd, Width );
    // The blocking copy waits for both kernels on the default stream.
    cudaMemcpy( matE, Qd, size, cudaMemcpyDeviceToHost );
    writeMatFile( Cnext, matE, Width );
    free( matA ); free( matB ); free( matD ); free( matE );
    cudaFree( Md ); cudaFree( Nd ); cudaFree ( Pd ); cudaFree( Od ); cudaFree( Qd );
}
//End of Matrix multiplication function MatrixMul
/***
****END of AUXILIARY functions
****/
/***
****START of MAIN function
****/
/* Entry point: validates argv, opens the three input matrix files, and runs
 * MatrixMul( confVec, spikVec, spikTransMat, width, Cnext ).
 * Fixes over the original: both guard conditions used '&&' where '||' was
 * intended (the length check only fired when ALL names were over-long, and the
 * fopen check only fired when ALL three opens failed), and fclose() was called
 * on possibly-NULL FILE pointers (undefined behavior).
 */
int main ( int argc, char *argv[ ] ) {
    if ( argc < 6 ){
        printf( "\n Format: %s configurationVector spikingVector spikingTransitionMatrix squareMatrixWidth outputfilename\n", argv[ 0 ] );
        exit( 1 );
    }
    char *confVec = argv[ 1 ];
    char *spikVec = argv[ 2 ];
    char *spikTransMat = argv[ 3 ];
    int width = atoi( argv[ 4 ] );
    char *Cnext = argv[ 5 ];
    /* reject if ANY filename is over-long (was '&&': required all three) */
    if ( strlen( confVec ) > 20 || strlen( spikVec ) > 20 || strlen( spikTransMat ) > 20 ) {
        printf( " Filename/s was/were too long ( > 20 char ) " );
    }
    else {
        /* probe all three input files before doing any GPU work */
        FILE *ptr1 = fopen( confVec, "r" );
        FILE *ptr2 = fopen( spikVec, "r" );
        FILE *ptr3 = fopen( spikTransMat, "r" );
        /* fail if ANY file failed to open (was '&&': required all three to fail) */
        if ( ptr1 == NULL || ptr2 == NULL || ptr3 == NULL ) {
            printf( "\n could not open one of the following files: %s %s %s \n", spikVec, confVec, spikTransMat );
        }
        else {
            MatrixMul( confVec, spikVec, spikTransMat, width, Cnext );
        }
        /* close only the streams that actually opened */
        if ( ptr1 ) fclose( ptr1 );
        if ( ptr2 ) fclose( ptr2 );
        if ( ptr3 ) fclose( ptr3 );
    }
    return 0;
}
/***
****END of MAIN function
***/
|
ffd518043e2a3b079dc694a10c0739b20b42ea35.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "fill.cuh"
#include "kernel_helpers_hip.cuh"
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/gpu_data/gpu_structures.h>
namespace NKernel
{
// Kernel: fills one buffer column per blockIdx.y with `value`; columns are
// alignSize elements apart. Grid-stride loop over x covers `size` elements.
template <typename T>
__global__ void FillBufferImpl(T* buffer, T value, ui64 size, ui64 alignSize)
{
buffer += blockIdx.y * alignSize;
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
// WriteThrough is a project helper -- presumably a cache-bypassing store; confirm.
WriteThrough(buffer + i, value);
i += gridDim.x * blockDim.x;
}
}
// Host launcher: one grid row per column, x-dimension capped at the
// architecture's maximum block count; no-op when size == 0.
template <typename T>
void FillBuffer(T* buffer, T value, ui64 size, ui32 columnCount, ui64 alignSize, TCudaStream stream) {
if (size > 0) {
dim3 numBlocks;
const ui32 blockSize = 128;
numBlocks.x = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount());
numBlocks.y = columnCount;
numBlocks.z = 1;
FillBufferImpl<T> << < numBlocks, blockSize, 0, stream>> > (buffer, value, size, alignSize);
}
}
// Kernel: buffer[i] = offset + i (iota sequence), grid-stride loop.
template <typename T>
__global__ void MakeSequenceImpl(T offset, T* buffer, ui64 size)
{
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
WriteThrough(buffer + i, (T)(offset + i));
i += gridDim.x * blockDim.x;
}
}
// Host launcher for MakeSequenceImpl; no-op when size == 0.
template <typename T>
void MakeSequence(T offset, T* buffer, ui64 size, TCudaStream stream)
{
if (size > 0)
{
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
MakeSequenceImpl<T> << < numBlocks, blockSize, 0, stream >> > (offset, buffer, size);
}
}
// Kernel: dst[indices[i]] = i, i.e. inverts a permutation. Assumes `indices`
// is a valid permutation of [0, size) -- otherwise stores scatter out of range.
// Note the ui64 loop index is narrowed to T on the store.
template <typename T>
__global__ void InversePermutationImpl(const T* indices, T* dst, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
dst[indices[i]] = i;
i += gridDim.x * blockDim.x;
}
}
// Host launcher for InversePermutationImpl; no-op when size == 0.
template <typename T>
void InversePermutation(const T* order, T* inverseOrder, ui64 size, TCudaStream stream)
{
if (size > 0)
{
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
InversePermutationImpl<T> << < numBlocks, blockSize, 0, stream >> > (order, inverseOrder, size);
}
}
// Explicit instantiations of FillBuffer for every supported element type.
#define FILL_BUFFER(Type)\
template void FillBuffer<Type>(Type* buffer, Type value, ui64 size, ui32 columnCount, ui64 alignSize, TCudaStream stream);
FILL_BUFFER(char) // i8 and char are distinct types
FILL_BUFFER(i8)
FILL_BUFFER(ui8)
FILL_BUFFER(i16)
FILL_BUFFER(ui16)
FILL_BUFFER(i32)
FILL_BUFFER(ui32)
FILL_BUFFER(i64)
FILL_BUFFER(ui64)
FILL_BUFFER(float)
FILL_BUFFER(double)
FILL_BUFFER(bool)
FILL_BUFFER(TCBinFeature)
#undef FILL_BUFFER
template void MakeSequence<int>(int offset, int* buffer, ui64 size, TCudaStream stream);
template void MakeSequence<ui32>(ui32 offset, ui32* buffer, ui64 size, TCudaStream stream);
template void MakeSequence<ui64>(ui64 offset, ui64* buffer, ui64 size, TCudaStream stream);
template void InversePermutation<ui32>(const ui32* order, ui32* inverseOrder, ui64 size, TCudaStream stream);
template void InversePermutation<int>(const int* order, int* inverseOrder, ui64 size, TCudaStream stream);
}
| ffd518043e2a3b079dc694a10c0739b20b42ea35.cu | #include "fill.cuh"
#include "kernel_helpers.cuh"
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/gpu_data/gpu_structures.h>
namespace NKernel
{
// Kernel: fills one buffer column per blockIdx.y with `value`; columns are
// alignSize elements apart. Grid-stride loop over x covers `size` elements.
template <typename T>
__global__ void FillBufferImpl(T* buffer, T value, ui64 size, ui64 alignSize)
{
buffer += blockIdx.y * alignSize;
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
// WriteThrough is a project helper -- presumably a cache-bypassing store; confirm.
WriteThrough(buffer + i, value);
i += gridDim.x * blockDim.x;
}
}
// Host launcher: one grid row per column, x-dimension capped at the
// architecture's maximum block count; no-op when size == 0.
template <typename T>
void FillBuffer(T* buffer, T value, ui64 size, ui32 columnCount, ui64 alignSize, TCudaStream stream) {
if (size > 0) {
dim3 numBlocks;
const ui32 blockSize = 128;
numBlocks.x = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount());
numBlocks.y = columnCount;
numBlocks.z = 1;
FillBufferImpl<T> << < numBlocks, blockSize, 0, stream>> > (buffer, value, size, alignSize);
}
}
// Kernel: buffer[i] = offset + i (iota sequence), grid-stride loop.
template <typename T>
__global__ void MakeSequenceImpl(T offset, T* buffer, ui64 size)
{
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
WriteThrough(buffer + i, (T)(offset + i));
i += gridDim.x * blockDim.x;
}
}
// Host launcher for MakeSequenceImpl; no-op when size == 0.
template <typename T>
void MakeSequence(T offset, T* buffer, ui64 size, TCudaStream stream)
{
if (size > 0)
{
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
MakeSequenceImpl<T> << < numBlocks, blockSize, 0, stream >> > (offset, buffer, size);
}
}
// Kernel: dst[indices[i]] = i, i.e. inverts a permutation. Assumes `indices`
// is a valid permutation of [0, size) -- otherwise stores scatter out of range.
// Note the ui64 loop index is narrowed to T on the store.
template <typename T>
__global__ void InversePermutationImpl(const T* indices, T* dst, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
dst[indices[i]] = i;
i += gridDim.x * blockDim.x;
}
}
// Host launcher for InversePermutationImpl; no-op when size == 0.
template <typename T>
void InversePermutation(const T* order, T* inverseOrder, ui64 size, TCudaStream stream)
{
if (size > 0)
{
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
InversePermutationImpl<T> << < numBlocks, blockSize, 0, stream >> > (order, inverseOrder, size);
}
}
// Explicit instantiations of FillBuffer for every supported element type.
#define FILL_BUFFER(Type)\
template void FillBuffer<Type>(Type* buffer, Type value, ui64 size, ui32 columnCount, ui64 alignSize, TCudaStream stream);
FILL_BUFFER(char) // i8 and char are distinct types
FILL_BUFFER(i8)
FILL_BUFFER(ui8)
FILL_BUFFER(i16)
FILL_BUFFER(ui16)
FILL_BUFFER(i32)
FILL_BUFFER(ui32)
FILL_BUFFER(i64)
FILL_BUFFER(ui64)
FILL_BUFFER(float)
FILL_BUFFER(double)
FILL_BUFFER(bool)
FILL_BUFFER(TCBinFeature)
#undef FILL_BUFFER
template void MakeSequence<int>(int offset, int* buffer, ui64 size, TCudaStream stream);
template void MakeSequence<ui32>(ui32 offset, ui32* buffer, ui64 size, TCudaStream stream);
template void MakeSequence<ui64>(ui64 offset, ui64* buffer, ui64 size, TCudaStream stream);
template void InversePermutation<ui32>(const ui32* order, ui32* inverseOrder, ui64 size, TCudaStream stream);
template void InversePermutation<int>(const int* order, int* inverseOrder, ui64 size, TCudaStream stream);
}
|
d0f91e1242e3b153d3aeb7bdf9c31f00ca6ec19f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Parallel Graph Preprocessing of cuckoo filter
* This preprocesses a batch insertion into a cuckoo filter by creating a directed graph (V,E) where:
* V is a set of vertices that represent each bucket of the cuckoo filter
* E is a set of edges (u,v) with weight w where:
* w is the fingerprint of a specific entry
* u is the bucket number given by hash(entry)
* v is the bucket number given by hash(entry) xor hash(fingerprint)
* dir indicates the vertex pointed to by the edge. Also indicates
* which bucket number the fingerprint should be placed in.
*/
#include <cstring>
#include <stdexcept>
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <climits>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <sys/time.h>
#define LARGE_THRESHOLD_VAL 10000
double preprocessTime = 0;
double insertTime = 0;
struct timeval StartingTime;
// Records the current wall-clock time in the global StartingTime marker.
void setTime(){
gettimeofday( &StartingTime, NULL );
}
// Returns milliseconds elapsed since the last setTime() call.
double getTime(){
struct timeval PausingTime, ElapsedTime;
gettimeofday( &PausingTime, NULL );
timersub(&PausingTime, &StartingTime, &ElapsedTime);
return ElapsedTime.tv_sec*1000.0+ElapsedTime.tv_usec/1000.0; // Returning in milliseconds.
}
// Device helper: writes a pseudo-random int in [0, max) to *result.
// NOTE(review): the RNG state is re-initialised on every call, so identical
// seeds yield identical values -- confirm this is intentional (debug-only use?).
__device__ void random(unsigned int seed, int* result, int max) {
/* CUDA's random number library uses hiprandState_t to keep track of the seed value
we will store a random state for every thread */
hiprandState_t state;
/* we have to initialize the state */
hiprand_init(seed, /* the seed controls the sequence of random values that are produced */
0, /* the sequence number is only important with multiple cores */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&state);
/* hiprand works like rand - except that it takes a state as a parameter */
*result = hiprand(&state) % max;
}
// Opens `file_name` on the caller-supplied stream object; throws
// std::runtime_error when the stream is left in a failed state.
template <typename T_file>
void openFileToAccess( T_file& input_file, std::string file_name ) {
    input_file.open( file_name.c_str() );
    bool opened = static_cast<bool>( input_file );
    if ( !opened ) {
        throw std::runtime_error( "Failed to open specified file: " + file_name + "\n" );
    }
}
// Allocates `size` bytes of device memory and, when hostMemory is non-NULL,
// copies it up. Returns the device pointer.
// NOTE(review): hipMalloc/hipMemcpy results are unchecked -- on allocation
// failure callers receive an uninitialised pointer.
void * cudaMallocAndCpy(int size, void * hostMemory) {
void * gpuMem;
hipMalloc((void**) &gpuMem, size);
if (hostMemory != NULL) {
hipMemcpy(gpuMem, hostMemory, size, hipMemcpyHostToDevice);
}
return gpuMem;
}
// Blocking device-to-host copy of `size` bytes.
void cudaGetFromGPU(void * destination, void * gpuMemory, int size) {
hipMemcpy(destination, gpuMemory, size, hipMemcpyDeviceToHost);
}
// Blocking host-to-device copy of `size` bytes.
void cudaSendToGPU(void * destination, void * hostMemory, int size) {
hipMemcpy(destination, hostMemory, size, hipMemcpyHostToDevice);
}
// One cuckoo insertion candidate: fingerprint fp can live in bucket src
// (dir == 0) or in its alternate bucket dst (dir == 1).
class Edge {
public:
unsigned int src; //hash(x) location
unsigned int dst; //hash(x) xor hash(fp) location
unsigned char fp; //fingerprint
int dir; //0 to be src, 1 to be dst
__device__ __host__ Edge(){}
};
// Eviction graph over cuckoo-filter buckets: one Edge per pending entry plus a
// per-bucket occupancy counter. Usable from both host and device.
class Graph {
public:
    int *buckets; //value at index i is the number of indegrees to a bucket i
    Edge *edges;
    unsigned int num_edges;
    unsigned int num_buckets;
    unsigned int max_bucket_size;
    // Constructs zeroed bucket counters; the edge array is allocated by the caller.
    __device__ __host__ Graph(unsigned int edges, unsigned int nb, unsigned int bucket_size) {
        num_edges = edges;
        num_buckets = nb;
        max_bucket_size = bucket_size;
        buckets = new int[num_buckets]();
        // FIX: the original wrote 'edges = NULL;', which only cleared the
        // shadowing constructor parameter and left the member pointer
        // uninitialized. Clear the member explicitly.
        this->edges = NULL;
    }
    // Debug dump of every edge followed by the over-full buckets (thread 0 only).
    __device__ void printGraph() {
        int thread_id = blockDim.x * blockIdx.x + threadIdx.x; //real thread number
        if(thread_id == 0) {
            for(int i=0; i<num_edges; i++) {
                printf("Edge %d: %u \t src: %u \t dst: %u\n",i, edges[i].fp, edges[i].src, edges[i].dst);
            }
            printCollisions();
        }
    }
    // Debug dump of buckets whose occupancy exceeds max_bucket_size (thread 0 only).
    __device__ void printCollisions() {
        int thread_id = blockDim.x * blockIdx.x + threadIdx.x; //real thread number
        if(thread_id == 0) {
            printf("\n\nBuckets\n");
            for(int i=0; i< num_buckets ; i++) {
                if(buckets[i] > max_bucket_size) {
                    printf("Collisions for bucket %d: %d\n", i, buckets[i]);
                }
            }
        }
    }
};
// __global__ void setup_kernel (hiprandState_t * state, Graph *g)
// {
// int thread_id = blockDim.x * blockIdx.x + threadIdx.x; //real thread number
// // change sequence number to currIdx if values are too correlated
// // hiprand_init(1234, 0, 0, &state[currIdx]);
// hiprand_init(1234, 0, 0, &state[currIdx]);
// }
/**
* Parallel graph building
* @param entries is a list of entries to enter
* @param entryListSize is the size of the @param entries list
* @param g is an address in the GPU to pla\ce result. Assumes g->edges has been given enough space for @param entryListSize items
*/
// Kernel: builds one Edge per entry (fingerprint + primary/alternate bucket)
// and counts primary-bucket occupancy with atomicAdd.
// NOTE(review): hash_item/TwoIndependentMultiplyShift are project helpers --
// assumed to produce a bucket index < num_buckets; the counter uses raw
// bucket1 while src stores bucket1 % num_buckets, so confirm these agree.
__global__ void findAllCollisions(int* entries, int entryListSize, Graph * g) {
int total_threads = blockDim.x * gridDim.x; //total threads
int thread_id = blockDim.x * blockIdx.x + threadIdx.x; //real thread number
int thread_id_block = threadIdx.x; //thread number in block
// CHANGE BELOW LINE TO BE MORE EFFICIENT
// ceil(entryListSize / total_threads) passes so every entry is covered
int rounds = entryListSize % total_threads == 0 ? (entryListSize/total_threads):((entryListSize/total_threads)+1);
g->num_edges = entryListSize;
for (size_t i = 0; i <rounds; i++) {
int currIdx = i*total_threads + thread_id;
if(currIdx < entryListSize) {
int * entry = &entries[currIdx];
//printf("KERNEL SPACE current Index %d, Thread id %d: %x\n", currIdx, thread_id, entry);
unsigned int bucket1;
hash_item((unsigned char*) entry,
4,
g->num_buckets,
HASHFUN_NORM,
&bucket1);
// fingerprint = low byte of a second, independent hash of the entry
const uint64_t hash = TwoIndependentMultiplyShift(*entry);
unsigned char fp = (unsigned char) hash;
unsigned int fpHash;
hash_item((unsigned char*) &fp,
1,
g->num_buckets,
HASHFUN_NORM,
&fpHash);
// alternate bucket: xor with the fingerprint hash, keep the low 8 bits
unsigned int bucket2 = ((bucket1 ^ fpHash) & 0b11111111) % g->num_buckets;
//build edge
g->edges[currIdx].fp = fp;
g->edges[currIdx].src = bucket1 % g->num_buckets;
g->edges[currIdx].dst = bucket2 % g->num_buckets;
// Copy state to local memory for efficiency */
// hiprandState_t local_state = global_state[thread_id];
// /* Generate pseudo - random unsigned ints
// g->edges[i].dir = hiprand_uniform(&local_state);
//update bucket
atomicAdd(&(g->buckets[bucket1]), 1);
}
}
}
// Kernel: zeroes every bucket counter, then recounts occupancy from the
// current direction bit of every edge.
// NOTE(review): there is no grid-wide barrier between the zeroing and
// recounting phases, so with more than one block a bucket may be recounted
// before another block has zeroed it -- confirm launch config makes this safe.
__global__ void resetCollisions(Graph * g) {
int total_threads = blockDim.x * gridDim.x; //total threads
int thread_id = blockDim.x * blockIdx.x + threadIdx.x; //real thread number
int thread_id_block = threadIdx.x; //thread number in block
int rounds = (g->num_buckets % total_threads == 0) ? (g->num_buckets/total_threads):(g->num_buckets/total_threads + 1);
// phase 1: clear all bucket counters
for (size_t iter = 0; iter < rounds; iter++) {
int currIdx = iter*total_threads + thread_id;
if(currIdx < g->num_buckets) {
int * currBucket = &(g->buckets[currIdx]);
*currBucket = 0;
}
}
rounds = (g->num_edges % total_threads == 0) ? (g->num_edges/total_threads):(g->num_edges/total_threads + 1);
// phase 2: recount using each edge's currently-selected bucket
for (size_t iter = 0; iter < rounds; iter++) {
int currIdx = iter*total_threads + thread_id;
if(currIdx < g->num_edges) {
int b = (g->edges[currIdx].dir == 0) ? (g->edges[currIdx].src):(g->edges[currIdx].dst);
atomicAdd(&(g->buckets[b]),1);
}
}
//g->printCollisions();
}
/**
* Edge Processing Kernel
* Finds random edges to evict until capacity for each bucket is equal to 0
*
*/
// Kernel: for every edge whose current bucket is over capacity, randomly evict
// some of its occupants by flipping their direction bit. atomicDec hands each
// contending edge a distinct rank `old`; a rotation by randNum selects which
// ranks fall outside the bucket's capacity and must move. Sets *anyChange to 1
// whenever at least one edge flipped, so the host can iterate to a fixed point.
__global__ void processEdges(Graph * g, int* anyChange, unsigned int randNum) {
int total_threads = blockDim.x * gridDim.x; //total threads
int thread_id = blockDim.x * blockIdx.x + threadIdx.x; //real thread number
int thread_id_block = threadIdx.x; //thread number in block
int num_edges = g->num_edges;
int rounds = num_edges % total_threads == 0 ? (num_edges/total_threads):(num_edges/total_threads+1);
for(int i=0; i<rounds; i++) {
int currIdx = total_threads*i + thread_id; //current edge to process
if(currIdx < g->num_edges) {
Edge *e = &g->edges[currIdx];
//determine the bucket it's in
int curr_bucket = e->dir == 0 ? e->src:e->dst;
//check the bucket
int * bucketCount = &(g->buckets[curr_bucket]);
// snapshot of the occupancy; may be stale relative to the atomicDec below
int tmp = *bucketCount;
//decrement the bucket count if > 0
//int rand;
//random((unsigned int)clock() + thread_id, &rand, 50);
if(*bucketCount > g->max_bucket_size) {
// claim a unique rank among this bucket's contenders
int old = atomicDec((unsigned int *)bucketCount, INT_MAX);
old--;
// rotate ranks by a host-supplied random shift (mod occupancy)
int shift = randNum % tmp;
int shiftedValue = old - shift;
int bucketOffset = (shiftedValue < 0) ? shiftedValue + tmp : shiftedValue;
//if (e->dir) {
// } else {
// printf("tmp %d, old %d, shift %d, shiftedValue %d, bucketOffset %d \t Evicting %d from %d to %d\n", tmp, old, shift, shiftedValue, bucketOffset, e->fp, e->src, e->dst);
// }
//printf("tmp %d, old %d, shift %d, shiftedValue %d, bucketOffset %d\n", tmp, old, shift, shiftedValue, bucketOffset);
// ranks beyond capacity get evicted to their alternate bucket
if (bucketOffset >= g->max_bucket_size && old < LARGE_THRESHOLD_VAL){
e->dir = !e->dir; // flip the bit
// if (e->dir)
// printf("Evicting %d from %d to %d\n", e->fp, e->src, e->dst);
// else
// printf("Evicting %d from %d to %d\n", e->fp, e->dst, e->src);
*anyChange = 1;
}
}
}
}
//g->printCollisions();
}
// NOTE(review): allocates a device Graph header and an Edge array but discards
// both pointers on return -- this leaks device memory and appears to be dead
// code; confirm whether it can be removed.
void initGraphCPU(int entry_size) {
Graph * graph;
hipMalloc(&graph, sizeof(Graph));
Edge * e;
hipMalloc(&e, sizeof(Edge)*entry_size);
}
// Kernel: materialises the resolved graph into the cuckoo filter. Each edge's
// fingerprint is inserted into its chosen bucket; atomicAdd on
// globalByteMask[bucket] hands out the next free slot index in that bucket.
// Assumes preprocessing already bounded every bucket's occupancy -- there is
// no capacity check here.
__global__ void makeGraphCuckoo(Graph * g, CuckooFilter * c, int * globalByteMask) {
int total_threads = blockDim.x * gridDim.x; //total threads
int thread_id = blockDim.x * blockIdx.x + threadIdx.x; //real thread number
int thread_id_block = threadIdx.x; //thread number in block
// if (thread_id==0) {
// c->printFilter();
// printf("\n");
// }
int rounds = (g->num_edges % total_threads == 0) ? (g->num_edges/total_threads):((g->num_edges/total_threads)+1);
for (size_t i = 0; i < rounds; i++) {
int currIdx = total_threads*i + thread_id;
if(currIdx < g->num_edges) {
Edge * e = &(g->edges[currIdx]);
int currBucket = e->dir == 0 ? e->src:e->dst;
// claim the next free slot in this bucket
int index = atomicAdd(&(globalByteMask[currBucket]), 1);
c->insert(e->fp,currBucket,index);
}
}
__syncthreads();
// if (thread_id==0) {
// c->printFilter();
// }
}
// Copies the Graph header back to the host, zero-fills per-bucket write
// cursors on the device, launches makeGraphCuckoo to place every fingerprint
// into the filter, and returns the kernel's elapsed time in milliseconds.
double transferToCuckooFilter(Graph * g, CuckooFilter * c) {
    Graph * h_graph = (Graph*)malloc(sizeof(Graph));
    cudaGetFromGPU(h_graph, g, sizeof(Graph));
    // one write cursor per bucket, value-initialized to zero
    int * byteMask = new int[h_graph->num_buckets]();
    int * g_byteMask = (int*)cudaMallocAndCpy(sizeof(int)*h_graph->num_buckets,(void*) byteMask);
    setTime();
    hipLaunchKernelGGL(( makeGraphCuckoo), dim3(ceil((double)h_graph->num_buckets/1024)), dim3(1024), 0, 0, g, c, g_byteMask);
    hipDeviceSynchronize();
    double insertTime = getTime();
    delete[] byteMask;    // FIX: was 'delete' on a new[] array (undefined behavior)
    hipFree(g_byteMask);  // FIX: device cursor buffer was leaked
    free(h_graph);        // FIX: host graph header was leaked
    return insertTime;
}
// Host driver for a batched cuckoo-filter insert:
//   1. build the collision graph on the GPU (findAllCollisions);
//   2. repeat randomized evictions (processEdges + resetCollisions) until no
//      edge changes direction, or a sqrt(N)*log2(N) iteration cap is hit;
//   3. materialise the resolved graph into the filter (transferToCuckooFilter).
// Returns 0 on success, or the iteration count on failure to converge.
// NOTE(review): d_change, d_entries, d_graph, h_graph and its device arrays
// are never freed -- confirm whether leaking per call is acceptable.
int insert(int* entries, unsigned int num_entries, unsigned int num_buckets, unsigned int bucket_size, CuckooFilter * cf){
std::cout << "Inserting " << num_entries << " entries"<< std::endl;
const int fail_threshold = (int)(sqrt(num_buckets*bucket_size)*log2((float)(num_buckets*bucket_size)));
int anychange = 1;
int * d_change = (int *) cudaMallocAndCpy(sizeof(int), &anychange);
Graph *h_graph = new Graph(num_entries, num_buckets, bucket_size);
//set up pointer
// device arrays are attached to the host struct before it is copied up
hipMalloc((void**)&(h_graph->edges), sizeof(Edge)*num_entries);
hipMalloc((void**)&(h_graph->buckets), sizeof(int)*num_buckets);
Graph *d_graph = (Graph *) cudaMallocAndCpy(sizeof(Graph), h_graph);
int * d_entries = (int *) cudaMallocAndCpy(sizeof(int)*num_entries, entries);
std::cout << "Calling kernel" << std::endl;
setTime();
hipLaunchKernelGGL(( findAllCollisions), dim3(2), dim3(512), 0, 0, d_entries, num_entries, d_graph);
hipDeviceSynchronize();
preprocessTime = getTime();
int count = 0;
// iterate eviction rounds until a fixed point (no direction flips)
while (anychange != 0){
anychange = 0;
cudaSendToGPU(d_change, &anychange, sizeof(int));
// generate random number
setTime();
unsigned int randNum = rand() % (num_buckets * 8);
//std::cout << "Found all collisions, rand num: "<< randNum << std::endl;
hipLaunchKernelGGL(( processEdges), dim3(ceil((double)num_entries/1024)), dim3(1024), 0, 0, d_graph, d_change, randNum);
hipDeviceSynchronize();
preprocessTime += getTime();
//std::cout << "Proccessed edge using " << ceil((double)num_entries/1024) << "threads " << std::endl;
cudaGetFromGPU(&anychange, d_change, sizeof(int));
//std::cout << "Got value of anychange: " << anychange << std::endl;
if(anychange == 1){
setTime();
hipLaunchKernelGGL(( resetCollisions), dim3(ceil((double)num_entries/1024)), dim3(1024), 0, 0, d_graph);
hipDeviceSynchronize();
preprocessTime += getTime();
}
count++;
// give up when convergence is taking too long
if (count >= fail_threshold)
return count;
}
CuckooFilter * g_cf = (CuckooFilter *)cudaMallocAndCpy(sizeof(CuckooFilter), cf);
setTime();
insertTime = transferToCuckooFilter(d_graph, g_cf);
cudaGetFromGPU(cf,g_cf, sizeof(CuckooFilter));
printf("Preprocessing time %f\n", preprocessTime);
printf("Insertion time: %f\n", insertTime);
printf("Completed insertion with %d iterations\n",count);
return 0;
}
| d0f91e1242e3b153d3aeb7bdf9c31f00ca6ec19f.cu | /*
* Parallel Graph Preprocessing of cuckoo filter
* This preprocesses a batch insertion into a cuckoo filter by creating a directed graph (V,E) where:
* V is a set of vertices that represent each bucket of the cuckoo filter
* E is a set of edges (u,v) with weight w where:
* w is the fingerprint of a specific entry
* u is the bucket number given by hash(entry)
* v is the bucket number given by hash(entry) xor hash(fingerprint)
* dir indicates the vertex pointed to by the edge. Also indicates
* which bucket number the fingerprint should be placed in.
*/
#include <cstring>
#include <stdexcept>
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <climits>
#include <curand.h>
#include <curand_kernel.h>
#include <sys/time.h>
#define LARGE_THRESHOLD_VAL 10000
double preprocessTime = 0;
double insertTime = 0;
struct timeval StartingTime;
// Records the current wall-clock time in the global StartingTime marker.
void setTime(){
gettimeofday( &StartingTime, NULL );
}
// Returns milliseconds elapsed since the last setTime() call.
double getTime(){
struct timeval PausingTime, ElapsedTime;
gettimeofday( &PausingTime, NULL );
timersub(&PausingTime, &StartingTime, &ElapsedTime);
return ElapsedTime.tv_sec*1000.0+ElapsedTime.tv_usec/1000.0; // Returning in milliseconds.
}
// Device helper: writes a pseudo-random int in [0, max) to *result.
// NOTE(review): the RNG state is re-initialised on every call, so identical
// seeds yield identical values -- confirm this is intentional (debug-only use?).
__device__ void random(unsigned int seed, int* result, int max) {
/* CUDA's random number library uses curandState_t to keep track of the seed value
we will store a random state for every thread */
curandState_t state;
/* we have to initialize the state */
curand_init(seed, /* the seed controls the sequence of random values that are produced */
0, /* the sequence number is only important with multiple cores */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&state);
/* curand works like rand - except that it takes a state as a parameter */
*result = curand(&state) % max;
}
// Opens `file_name` on the caller-supplied stream object; throws
// std::runtime_error when the stream is left in a failed state.
template <typename T_file>
void openFileToAccess( T_file& input_file, std::string file_name ) {
    input_file.open( file_name.c_str() );
    bool opened = static_cast<bool>( input_file );
    if ( !opened ) {
        throw std::runtime_error( "Failed to open specified file: " + file_name + "\n" );
    }
}
// Allocates `size` bytes of device memory and, when hostMemory is non-NULL,
// copies it up. Returns the device pointer.
// NOTE(review): cudaMalloc/cudaMemcpy results are unchecked -- on allocation
// failure callers receive an uninitialised pointer.
void * cudaMallocAndCpy(int size, void * hostMemory) {
void * gpuMem;
cudaMalloc((void**) &gpuMem, size);
if (hostMemory != NULL) {
cudaMemcpy(gpuMem, hostMemory, size, cudaMemcpyHostToDevice);
}
return gpuMem;
}
// Blocking device-to-host copy of `size` bytes.
void cudaGetFromGPU(void * destination, void * gpuMemory, int size) {
cudaMemcpy(destination, gpuMemory, size, cudaMemcpyDeviceToHost);
}
// Blocking host-to-device copy of `size` bytes.
void cudaSendToGPU(void * destination, void * hostMemory, int size) {
cudaMemcpy(destination, hostMemory, size, cudaMemcpyHostToDevice);
}
// One cuckoo insertion candidate: fingerprint fp can live in bucket src
// (dir == 0) or in its alternate bucket dst (dir == 1).
class Edge {
public:
unsigned int src; //hash(x) location
unsigned int dst; //hash(x) xor hash(fp) location
unsigned char fp; //fingerprint
int dir; //0 to be src, 1 to be dst
__device__ __host__ Edge(){}
};
// Eviction graph over cuckoo-filter buckets: one Edge per pending entry plus a
// per-bucket occupancy counter. Usable from both host and device.
class Graph {
public:
    int *buckets; //value at index i is the number of indegrees to a bucket i
    Edge *edges;
    unsigned int num_edges;
    unsigned int num_buckets;
    unsigned int max_bucket_size;
    // Constructs zeroed bucket counters; the edge array is allocated by the caller.
    __device__ __host__ Graph(unsigned int edges, unsigned int nb, unsigned int bucket_size) {
        num_edges = edges;
        num_buckets = nb;
        max_bucket_size = bucket_size;
        buckets = new int[num_buckets]();
        // FIX: the original wrote 'edges = NULL;', which only cleared the
        // shadowing constructor parameter and left the member pointer
        // uninitialized. Clear the member explicitly.
        this->edges = NULL;
    }
    // Debug dump of every edge followed by the over-full buckets (thread 0 only).
    __device__ void printGraph() {
        int thread_id = blockDim.x * blockIdx.x + threadIdx.x; //real thread number
        if(thread_id == 0) {
            for(int i=0; i<num_edges; i++) {
                printf("Edge %d: %u \t src: %u \t dst: %u\n",i, edges[i].fp, edges[i].src, edges[i].dst);
            }
            printCollisions();
        }
    }
    // Debug dump of buckets whose occupancy exceeds max_bucket_size (thread 0 only).
    __device__ void printCollisions() {
        int thread_id = blockDim.x * blockIdx.x + threadIdx.x; //real thread number
        if(thread_id == 0) {
            printf("\n\nBuckets\n");
            for(int i=0; i< num_buckets ; i++) {
                if(buckets[i] > max_bucket_size) {
                    printf("Collisions for bucket %d: %d\n", i, buckets[i]);
                }
            }
        }
    }
};
// __global__ void setup_kernel (curandState * state, Graph *g)
// {
// int thread_id = blockDim.x * blockIdx.x + threadIdx.x; //real thread number
// // change sequence number to currIdx if values are too correlated
// // curand_init(1234, 0, 0, &state[currIdx]);
// curand_init(1234, 0, 0, &state[currIdx]);
// }
/**
* Parallel graph building
* @param entries is a list of entries to enter
* @param entryListSize is the size of the @param entries list
* @param g is an address in the GPU to pla\ce result. Assumes g->edges has been given enough space for @param entryListSize items
*/
// Kernel: builds one Edge per entry (fingerprint + primary/alternate bucket)
// and counts primary-bucket occupancy with atomicAdd.
// NOTE(review): hash_item/TwoIndependentMultiplyShift are project helpers --
// assumed to produce a bucket index < num_buckets; the counter uses raw
// bucket1 while src stores bucket1 % num_buckets, so confirm these agree.
__global__ void findAllCollisions(int* entries, int entryListSize, Graph * g) {
int total_threads = blockDim.x * gridDim.x; //total threads
int thread_id = blockDim.x * blockIdx.x + threadIdx.x; //real thread number
int thread_id_block = threadIdx.x; //thread number in block
// CHANGE BELOW LINE TO BE MORE EFFICIENT
// ceil(entryListSize / total_threads) passes so every entry is covered
int rounds = entryListSize % total_threads == 0 ? (entryListSize/total_threads):((entryListSize/total_threads)+1);
g->num_edges = entryListSize;
for (size_t i = 0; i <rounds; i++) {
int currIdx = i*total_threads + thread_id;
if(currIdx < entryListSize) {
int * entry = &entries[currIdx];
//printf("KERNEL SPACE current Index %d, Thread id %d: %x\n", currIdx, thread_id, entry);
unsigned int bucket1;
hash_item((unsigned char*) entry,
4,
g->num_buckets,
HASHFUN_NORM,
&bucket1);
// fingerprint = low byte of a second, independent hash of the entry
const uint64_t hash = TwoIndependentMultiplyShift(*entry);
unsigned char fp = (unsigned char) hash;
unsigned int fpHash;
hash_item((unsigned char*) &fp,
1,
g->num_buckets,
HASHFUN_NORM,
&fpHash);
// alternate bucket: xor with the fingerprint hash, keep the low 8 bits
unsigned int bucket2 = ((bucket1 ^ fpHash) & 0b11111111) % g->num_buckets;
//build edge
g->edges[currIdx].fp = fp;
g->edges[currIdx].src = bucket1 % g->num_buckets;
g->edges[currIdx].dst = bucket2 % g->num_buckets;
// Copy state to local memory for efficiency */
// curandState local_state = global_state[thread_id];
// /* Generate pseudo - random unsigned ints
// g->edges[i].dir = curand_uniform(&local_state);
//update bucket
atomicAdd(&(g->buckets[bucket1]), 1);
}
}
}
// Kernel: zeroes every bucket counter, then recounts occupancy from the
// current direction bit of every edge.
// NOTE(review): there is no grid-wide barrier between the zeroing and
// recounting phases, so with more than one block a bucket may be recounted
// before another block has zeroed it -- confirm launch config makes this safe.
__global__ void resetCollisions(Graph * g) {
int total_threads = blockDim.x * gridDim.x; //total threads
int thread_id = blockDim.x * blockIdx.x + threadIdx.x; //real thread number
int thread_id_block = threadIdx.x; //thread number in block
int rounds = (g->num_buckets % total_threads == 0) ? (g->num_buckets/total_threads):(g->num_buckets/total_threads + 1);
// phase 1: clear all bucket counters
for (size_t iter = 0; iter < rounds; iter++) {
int currIdx = iter*total_threads + thread_id;
if(currIdx < g->num_buckets) {
int * currBucket = &(g->buckets[currIdx]);
*currBucket = 0;
}
}
rounds = (g->num_edges % total_threads == 0) ? (g->num_edges/total_threads):(g->num_edges/total_threads + 1);
// phase 2: recount using each edge's currently-selected bucket
for (size_t iter = 0; iter < rounds; iter++) {
int currIdx = iter*total_threads + thread_id;
if(currIdx < g->num_edges) {
int b = (g->edges[currIdx].dir == 0) ? (g->edges[currIdx].src):(g->edges[currIdx].dst);
atomicAdd(&(g->buckets[b]),1);
}
}
//g->printCollisions();
}
/**
* Edge Processing Kernel
* Finds random edges to evict until capacity for each bucket is equal to 0
*
*/
// Kernel: for every edge whose current bucket is over capacity, randomly evict
// some of its occupants by flipping their direction bit. atomicDec hands each
// contending edge a distinct rank `old`; a rotation by randNum selects which
// ranks fall outside the bucket's capacity and must move. Sets *anyChange to 1
// whenever at least one edge flipped, so the host can iterate to a fixed point.
__global__ void processEdges(Graph * g, int* anyChange, unsigned int randNum) {
int total_threads = blockDim.x * gridDim.x; //total threads
int thread_id = blockDim.x * blockIdx.x + threadIdx.x; //real thread number
int thread_id_block = threadIdx.x; //thread number in block
int num_edges = g->num_edges;
int rounds = num_edges % total_threads == 0 ? (num_edges/total_threads):(num_edges/total_threads+1);
for(int i=0; i<rounds; i++) {
int currIdx = total_threads*i + thread_id; //current edge to process
if(currIdx < g->num_edges) {
Edge *e = &g->edges[currIdx];
//determine the bucket it's in
int curr_bucket = e->dir == 0 ? e->src:e->dst;
//check the bucket
int * bucketCount = &(g->buckets[curr_bucket]);
// snapshot of the occupancy; may be stale relative to the atomicDec below
int tmp = *bucketCount;
//decrement the bucket count if > 0
//int rand;
//random((unsigned int)clock() + thread_id, &rand, 50);
if(*bucketCount > g->max_bucket_size) {
// claim a unique rank among this bucket's contenders
int old = atomicDec((unsigned int *)bucketCount, INT_MAX);
old--;
// rotate ranks by a host-supplied random shift (mod occupancy)
int shift = randNum % tmp;
int shiftedValue = old - shift;
int bucketOffset = (shiftedValue < 0) ? shiftedValue + tmp : shiftedValue;
//if (e->dir) {
// } else {
// printf("tmp %d, old %d, shift %d, shiftedValue %d, bucketOffset %d \t Evicting %d from %d to %d\n", tmp, old, shift, shiftedValue, bucketOffset, e->fp, e->src, e->dst);
// }
//printf("tmp %d, old %d, shift %d, shiftedValue %d, bucketOffset %d\n", tmp, old, shift, shiftedValue, bucketOffset);
// ranks beyond capacity get evicted to their alternate bucket
if (bucketOffset >= g->max_bucket_size && old < LARGE_THRESHOLD_VAL){
e->dir = !e->dir; // flip the bit
// if (e->dir)
// printf("Evicting %d from %d to %d\n", e->fp, e->src, e->dst);
// else
// printf("Evicting %d from %d to %d\n", e->fp, e->dst, e->src);
*anyChange = 1;
}
}
}
}
//g->printCollisions();
}
// NOTE(review): allocates a device Graph header and an Edge array but discards
// both pointers on return -- this leaks device memory and appears to be dead
// code; confirm whether it can be removed.
void initGraphCPU(int entry_size) {
Graph * graph;
cudaMalloc(&graph, sizeof(Graph));
Edge * e;
cudaMalloc(&e, sizeof(Edge)*entry_size);
}
// Kernel: materialises the resolved graph into the cuckoo filter. Each edge's
// fingerprint is inserted into its chosen bucket; atomicAdd on
// globalByteMask[bucket] hands out the next free slot index in that bucket.
// Assumes preprocessing already bounded every bucket's occupancy -- there is
// no capacity check here.
__global__ void makeGraphCuckoo(Graph * g, CuckooFilter * c, int * globalByteMask) {
int total_threads = blockDim.x * gridDim.x; //total threads
int thread_id = blockDim.x * blockIdx.x + threadIdx.x; //real thread number
int thread_id_block = threadIdx.x; //thread number in block
// if (thread_id==0) {
// c->printFilter();
// printf("\n");
// }
int rounds = (g->num_edges % total_threads == 0) ? (g->num_edges/total_threads):((g->num_edges/total_threads)+1);
for (size_t i = 0; i < rounds; i++) {
int currIdx = total_threads*i + thread_id;
if(currIdx < g->num_edges) {
Edge * e = &(g->edges[currIdx]);
int currBucket = e->dir == 0 ? e->src:e->dst;
// claim the next free slot in this bucket
int index = atomicAdd(&(globalByteMask[currBucket]), 1);
c->insert(e->fp,currBucket,index);
}
}
__syncthreads();
// if (thread_id==0) {
// c->printFilter();
// }
}
// Copies the Graph header back to the host, zero-fills per-bucket write
// cursors on the device, launches makeGraphCuckoo to place every fingerprint
// into the filter, and returns the kernel's elapsed time in milliseconds.
double transferToCuckooFilter(Graph * g, CuckooFilter * c) {
    Graph * h_graph = (Graph*)malloc(sizeof(Graph));
    cudaGetFromGPU(h_graph, g, sizeof(Graph));
    // one write cursor per bucket, value-initialized to zero
    int * byteMask = new int[h_graph->num_buckets]();
    int * g_byteMask = (int*)cudaMallocAndCpy(sizeof(int)*h_graph->num_buckets,(void*) byteMask);
    setTime();
    makeGraphCuckoo<<<ceil((double)h_graph->num_buckets/1024), 1024>>>(g, c, g_byteMask);
    cudaDeviceSynchronize();
    double insertTime = getTime();
    delete[] byteMask;     // FIX: was 'delete' on a new[] array (undefined behavior)
    cudaFree(g_byteMask);  // FIX: device cursor buffer was leaked
    free(h_graph);         // FIX: host graph header was leaked
    return insertTime;
}
// Host driver for a batched cuckoo-filter insert:
//   1. build the collision graph on the GPU (findAllCollisions);
//   2. repeat randomized evictions (processEdges + resetCollisions) until no
//      edge changes direction, or a sqrt(N)*log2(N) iteration cap is hit;
//   3. materialise the resolved graph into the filter (transferToCuckooFilter).
// Returns 0 on success, or the iteration count on failure to converge.
// NOTE(review): d_change, d_entries, d_graph, h_graph and its device arrays
// are never freed -- confirm whether leaking per call is acceptable.
int insert(int* entries, unsigned int num_entries, unsigned int num_buckets, unsigned int bucket_size, CuckooFilter * cf){
std::cout << "Inserting " << num_entries << " entries"<< std::endl;
const int fail_threshold = (int)(sqrt(num_buckets*bucket_size)*log2((float)(num_buckets*bucket_size)));
int anychange = 1;
int * d_change = (int *) cudaMallocAndCpy(sizeof(int), &anychange);
Graph *h_graph = new Graph(num_entries, num_buckets, bucket_size);
//set up pointer
// device arrays are attached to the host struct before it is copied up
cudaMalloc((void**)&(h_graph->edges), sizeof(Edge)*num_entries);
cudaMalloc((void**)&(h_graph->buckets), sizeof(int)*num_buckets);
Graph *d_graph = (Graph *) cudaMallocAndCpy(sizeof(Graph), h_graph);
int * d_entries = (int *) cudaMallocAndCpy(sizeof(int)*num_entries, entries);
std::cout << "Calling kernel" << std::endl;
setTime();
findAllCollisions<<<2, 512>>>(d_entries, num_entries, d_graph);
cudaDeviceSynchronize();
preprocessTime = getTime();
int count = 0;
// iterate eviction rounds until a fixed point (no direction flips)
while (anychange != 0){
anychange = 0;
cudaSendToGPU(d_change, &anychange, sizeof(int));
// generate random number
setTime();
unsigned int randNum = rand() % (num_buckets * 8);
//std::cout << "Found all collisions, rand num: "<< randNum << std::endl;
processEdges<<<ceil((double)num_entries/1024), 1024>>>(d_graph, d_change, randNum);
cudaDeviceSynchronize();
preprocessTime += getTime();
//std::cout << "Proccessed edge using " << ceil((double)num_entries/1024) << "threads " << std::endl;
cudaGetFromGPU(&anychange, d_change, sizeof(int));
//std::cout << "Got value of anychange: " << anychange << std::endl;
if(anychange == 1){
setTime();
resetCollisions<<<ceil((double)num_entries/1024), 1024>>>(d_graph);
cudaDeviceSynchronize();
preprocessTime += getTime();
}
count++;
// give up when convergence is taking too long
if (count >= fail_threshold)
return count;
}
CuckooFilter * g_cf = (CuckooFilter *)cudaMallocAndCpy(sizeof(CuckooFilter), cf);
setTime();
insertTime = transferToCuckooFilter(d_graph, g_cf);
cudaGetFromGPU(cf,g_cf, sizeof(CuckooFilter));
printf("Preprocessing time %f\n", preprocessTime);
printf("Insertion time: %f\n", insertTime);
printf("Completed insertion with %d iterations\n",count);
return 0;
}
|
180b9b5144b652f912d7e52f7e22769bf18c94ba.hip | // !!! This is a file automatically generated by hipify!!!
#define CPLUSPLUS
#include <stdio.h>
#include "MDSystem_interface.h"
#include "common.h"
#include "BoxGeometry.h"
#include "MDSystem.h"
#include "RandomGenerator.h"
#include "Auxiliary.h"
#include "NeighborList_interface.h"
#include"Statistic.h"
#include "Integrator_interface.h"
#include "InteractionEngine_interface.h"
#include "tmp.h"
#include "Reshuffle_interface.h"
#include "Displacement_interface.h"
#include "Topology.h"
#include "SystemBondedInteraction.h"
#include "BondInteraction.h"
#include "NonBondedInteraction.h"
#include "xdrfile/xdrfile.h"
#include "xdrfile/xdrfile_xtc.h"
// #include "GroFileManager.h"
#include "ErrorProfile.h"
#include "AdaptRCut.h"
#include "AssignRCut.h"
#define NThreadsPerBlockCell 128
#define NThreadsPerBlockAtom 96
/* Computes per-atom Lennard-Jones forces with a spatially adaptive cutoff
 * loaded from `rcut.save`, deposits the resulting coordinates/forces into a
 * piecewise-constant error profile, and writes the x-averaged profile to
 * "a.real.x.out".
 * Usage: prog conf.gro rcut.save device refh rcut2
 */
int main(int argc, char * argv[])
{
  char * filename;
  char rcutsavename [1024];
  if (argc != 6){
    printf ("Usage:\n%s conf.gro rcut.save device refh rcut2\n", argv[0]);
    return 1;
  }
  filename = argv[1];
  // Fix: strncpy does not NUL-terminate when the source fills the buffer;
  // copy at most 1023 chars and terminate explicitly.
  strncpy (rcutsavename, argv[2], sizeof(rcutsavename) - 1);
  rcutsavename[sizeof(rcutsavename) - 1] = '\0';
  printf ("# setting device to %d\n", atoi(argv[3]));
  hipSetDevice (atoi(argv[3]));
  checkCUDAError ("set device");
  double refh = atof (argv[4]);
  MDSystem sys;
  sys.initConfig(filename);
  // Single-species topology: one atom type with mass 1 and charge 0.
  Topology::System sysTop;
  Topology::Molecule mol;
  mol.pushAtom (Topology::Atom (1.0, 0.0, 0));
  LennardJones6_12Parameter ljparam;
  ScalorType rcut2 = atof(argv[5]);
  printf ("# rcut2 is %f\n", rcut2);
  // Number of periodic images needed to cover rcut2 along y (informational).
  int nimage = (rcut2 - 0.00001) / sys.box.size.y;
  nimage ++;
  printf ("#@ nimage is %d\n", nimage);
  ljparam.reinit (1.f, 1.f, 0.f, 0.f, rcut2);
  sysTop.addNonBondedInteraction (Topology::NonBondedInteraction(0, 0, ljparam));
  sysTop.addMolecules (mol, sys.hdata.numAtom);
  sys.initTopology (sysTop);
  sys.initDeviceData ();
  SystemNonBondedInteraction sysNbInter;
  sysNbInter.reinit (sysTop);
  InteractionEngine inter (sys, NThreadsPerBlockAtom);
  inter.registNonBondedInteraction (sysNbInter);
  inter.clearInteraction (sys);
  inter.applyNonBondedInteraction (sys, rcut2);
  std::vector<double > boxsize (3);
  boxsize[0] = sys.box.size.x;
  boxsize[1] = sys.box.size.y;
  boxsize[2] = sys.box.size.z;
  ErrorProfile_PiecewiseConst ep (boxsize, refh);
  // Load the adaptive cutoff field and assign it onto the device atoms.
  AdaptRCut arc;
  arc.load_rc (std::string(rcutsavename));
  AssignRCut assign;
  assign.reinit (sys, arc, NThreadsPerBlockAtom);
  assign.getRCut (arc);
  assign.assign (sys);
  // Recompute interactions with the per-atom cutoffs in effect.
  inter.clearInteraction (sys);
  inter.applyNonBondedInteraction (sys, NULL, rcut2);
  cpyDeviceMDDataToHost (&sys.ddata, &sys.hdata);
  // Gather host-side coordinates and forces for the error profile.
  std::vector<std::vector<double > > coord, force;
  for (unsigned i = 0; i < sys.hdata.numAtom; ++i){
    std::vector<double > tmp(3);
    tmp[0] = sys.hdata.coord[i].x;
    tmp[1] = sys.hdata.coord[i].y;
    tmp[2] = sys.hdata.coord[i].z;
    coord.push_back (tmp);
    tmp[0] = sys.hdata.forcx[i];
    tmp[1] = sys.hdata.forcy[i];
    tmp[2] = sys.hdata.forcz[i];
    force.push_back (tmp);
  }
  ep.deposit (coord, force);
  ep.calculate();
  // ep.print_x (("real.x.out"));
  ep.print_x_avg (("a.real.x.out"));
  // ep.print_xy (("real.xy.out"));
  return 0;
}
| 180b9b5144b652f912d7e52f7e22769bf18c94ba.cu | #define CPLUSPLUS
#include <stdio.h>
#include "MDSystem_interface.h"
#include "common.h"
#include "BoxGeometry.h"
#include "MDSystem.h"
#include "RandomGenerator.h"
#include "Auxiliary.h"
#include "NeighborList_interface.h"
#include"Statistic.h"
#include "Integrator_interface.h"
#include "InteractionEngine_interface.h"
#include "tmp.h"
#include "Reshuffle_interface.h"
#include "Displacement_interface.h"
#include "Topology.h"
#include "SystemBondedInteraction.h"
#include "BondInteraction.h"
#include "NonBondedInteraction.h"
#include "xdrfile/xdrfile.h"
#include "xdrfile/xdrfile_xtc.h"
// #include "GroFileManager.h"
#include "ErrorProfile.h"
#include "AdaptRCut.h"
#include "AssignRCut.h"
#define NThreadsPerBlockCell 128
#define NThreadsPerBlockAtom 96
/* Computes per-atom Lennard-Jones forces with a spatially adaptive cutoff
 * loaded from `rcut.save`, deposits the resulting coordinates/forces into a
 * piecewise-constant error profile, and writes the x-averaged profile to
 * "a.real.x.out".
 * Usage: prog conf.gro rcut.save device refh rcut2
 */
int main(int argc, char * argv[])
{
  char * filename;
  char rcutsavename [1024];
  if (argc != 6){
    printf ("Usage:\n%s conf.gro rcut.save device refh rcut2\n", argv[0]);
    return 1;
  }
  filename = argv[1];
  // Fix: strncpy does not NUL-terminate when the source fills the buffer;
  // copy at most 1023 chars and terminate explicitly.
  strncpy (rcutsavename, argv[2], sizeof(rcutsavename) - 1);
  rcutsavename[sizeof(rcutsavename) - 1] = '\0';
  printf ("# setting device to %d\n", atoi(argv[3]));
  cudaSetDevice (atoi(argv[3]));
  checkCUDAError ("set device");
  double refh = atof (argv[4]);
  MDSystem sys;
  sys.initConfig(filename);
  // Single-species topology: one atom type with mass 1 and charge 0.
  Topology::System sysTop;
  Topology::Molecule mol;
  mol.pushAtom (Topology::Atom (1.0, 0.0, 0));
  LennardJones6_12Parameter ljparam;
  ScalorType rcut2 = atof(argv[5]);
  printf ("# rcut2 is %f\n", rcut2);
  // Number of periodic images needed to cover rcut2 along y (informational).
  int nimage = (rcut2 - 0.00001) / sys.box.size.y;
  nimage ++;
  printf ("#@ nimage is %d\n", nimage);
  ljparam.reinit (1.f, 1.f, 0.f, 0.f, rcut2);
  sysTop.addNonBondedInteraction (Topology::NonBondedInteraction(0, 0, ljparam));
  sysTop.addMolecules (mol, sys.hdata.numAtom);
  sys.initTopology (sysTop);
  sys.initDeviceData ();
  SystemNonBondedInteraction sysNbInter;
  sysNbInter.reinit (sysTop);
  InteractionEngine inter (sys, NThreadsPerBlockAtom);
  inter.registNonBondedInteraction (sysNbInter);
  inter.clearInteraction (sys);
  inter.applyNonBondedInteraction (sys, rcut2);
  std::vector<double > boxsize (3);
  boxsize[0] = sys.box.size.x;
  boxsize[1] = sys.box.size.y;
  boxsize[2] = sys.box.size.z;
  ErrorProfile_PiecewiseConst ep (boxsize, refh);
  // Load the adaptive cutoff field and assign it onto the device atoms.
  AdaptRCut arc;
  arc.load_rc (std::string(rcutsavename));
  AssignRCut assign;
  assign.reinit (sys, arc, NThreadsPerBlockAtom);
  assign.getRCut (arc);
  assign.assign (sys);
  // Recompute interactions with the per-atom cutoffs in effect.
  inter.clearInteraction (sys);
  inter.applyNonBondedInteraction (sys, NULL, rcut2);
  cpyDeviceMDDataToHost (&sys.ddata, &sys.hdata);
  // Gather host-side coordinates and forces for the error profile.
  std::vector<std::vector<double > > coord, force;
  for (unsigned i = 0; i < sys.hdata.numAtom; ++i){
    std::vector<double > tmp(3);
    tmp[0] = sys.hdata.coord[i].x;
    tmp[1] = sys.hdata.coord[i].y;
    tmp[2] = sys.hdata.coord[i].z;
    coord.push_back (tmp);
    tmp[0] = sys.hdata.forcx[i];
    tmp[1] = sys.hdata.forcy[i];
    tmp[2] = sys.hdata.forcz[i];
    force.push_back (tmp);
  }
  ep.deposit (coord, force);
  ep.calculate();
  // ep.print_x (("real.x.out"));
  ep.print_x_avg (("a.real.x.out"));
  // ep.print_xy (("real.xy.out"));
  return 0;
}
|
157128fd58f06075978b194c1529860dd2be4ba2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************
Problem Statement
*****************
Program to calculate square of first 500 natural numbers on GPU.
****************************************************************************************/
#include<stdio.h>
#include<conio.h>
// Kernel: squares each element of `ad` in place, one thread per element.
// The bounds check guards the partially-filled last block.
__global__ void square_array(float *ad, int N)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N)
ad[index] *= ad[index]; // square the value in GPU memory (not an addition)
}
// Host driver: fills an array with 0..N-1, squares it on the GPU, times the
// kernel with events, and prints the squared values plus the elapsed time.
int main()
{
  float *a;
  float *ad;
  int N = 500; // 500 natural numbers
  unsigned int i, No_of_blocks, No_of_threads;
  size_t size = sizeof(float) * N;
  // allocate memory on host
  a=(float*)malloc(size);
  // allocate memory on device
  hipMalloc(&ad,size);
  //printf("\nAfter hipMalloc for ad\n%s\n",hipGetErrorString(hipGetLastError()));
  // initialize host memory with its own indices
  for(i=0; i<N; i++)
  {
    a[i]=(float)i;
  }
  // copy data from host memory to device memory
  hipMemcpy(ad,a,size,hipMemcpyHostToDevice);
  //printf("\nAfter HostToDevice Memcpy for ad\n%s\n",hipGetErrorString(hipGetLastError()));
  // calculate execution configuration: at most 512 threads per block
  if (N > 512)
  {
    No_of_threads = 512;
    No_of_blocks = (N / 512) + (((N % 512) == 0) ? 0 : 1);
  }
  else
  {
    No_of_threads = N;
    No_of_blocks = 1;
  }
  dim3 block (No_of_threads, 1, 1);
  dim3 grid (No_of_blocks, 1, 1);
  //GPU timer code
  float time;
  hipEvent_t start,stop;
  hipEventCreate(&start);
  hipEventCreate(&stop);
  hipEventRecord(start,0);
  //launch kernel with optimum threads
  hipLaunchKernelGGL(( square_array), dim3(grid), dim3(block) , 0, 0, ad,N);
  hipEventRecord(stop,0);
  hipEventSynchronize(stop);
  hipEventElapsedTime(&time,start,stop); //time taken in kernel call calculated
  hipEventDestroy(start);
  hipEventDestroy(stop);
  //copy back the results
  hipMemcpy(a,ad,size,hipMemcpyDeviceToHost);
  //printf("\nAfter DeviceToHost Memcpy for a\n%s\n",hipGetErrorString(hipGetLastError()));
  //print the results
  // Fix: the old banner and comment were copy-pasted from a vector-addition
  // sample ("Addition of above two VECTORS", "all values will be 0"); this
  // program squares each element, so a[i] should equal i*i.
  printf("\nSquares of the first %d natural numbers computed on GPU = \n", N);
  for (i = 0; i < N; i++)
    printf("%f\n", a[i]); //if correctly evaluated, a[i] == i*i
  printf("\n\nTime taken is %f (ms)\n",time);
  //deallocate host and device memories
  hipFree(ad);
  free(a);
  _getch();
  // Fix: return 0 on success (the old `return 1` signaled failure to the shell).
  return 0;
} | 157128fd58f06075978b194c1529860dd2be4ba2.cu | /*****************
Problem Statement
*****************
Program to calculate square of first 500 natural numbers on GPU.
****************************************************************************************/
#include<stdio.h>
#include<conio.h>
// Kernel: squares each element of `ad` in place, one thread per element.
// The bounds check guards the partially-filled last block.
__global__ void square_array(float *ad, int N)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N)
ad[index] *= ad[index]; // square the value in GPU memory (not an addition)
}
int main()
{
float *a;
float *ad;
int N = 500; //500 natural numbers
unsigned int i, No_of_blocks, No_of_threads;
size_t size = sizeof(float) * N;
//allocate memory on host
a=(float*)malloc(size);
//allocate memory on device
cudaMalloc(&ad,size);
//printf("\nAfter cudaMalloc for ad\n%s\n",cudaGetErrorString(cudaGetLastError()));
//initialize host memory with its own indices
for(i=0; i<N; i++)
{
a[i]=(float)i;
}
//copy data from host memory to device memory
cudaMemcpy(ad,a,size,cudaMemcpyHostToDevice);
//printf("\nAfter HostToDevice Memcpy for ad\n%s\n",cudaGetErrorString(cudaGetLastError()));
//calculate execution configuration
if (N > 512)
{
No_of_threads = 512;
No_of_blocks = (N / 512) + (((N % 512) == 0) ? 0 : 1);
}
else
{
No_of_threads = N;
No_of_blocks = 1;
}
dim3 block (No_of_threads, 1, 1);
dim3 grid (No_of_blocks, 1, 1);
//GPU timer code
float time;
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
//launch kernel with optimum threads
square_array<<< grid, block >>>(ad,N);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time,start,stop); //time taken in kernel call calculated
cudaEventDestroy(start);
cudaEventDestroy(stop);
//copy back the results
cudaMemcpy(a,ad,size,cudaMemcpyDeviceToHost);
//printf("\nAfter DeviceToHost Memcpy for a\n%s\n",cudaGetErrorString(cudaGetLastError()));
//print the results
printf("\nAddition of above two VECTORS on GPU evaluates to = \n");
for (i = 0; i < N; i++)
printf("%f\n", a[i]); //if correctly evaluated, all values will be 0
printf("\n\nTime taken is %f (ms)\n",time);
//deallocate host and device memories
cudaFree(ad);
free(a);
_getch();
return 1;
} |
dbf1c2992a2f0a2baa038c2afdb18262cbaa165b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 Microsoft
* Licensed under The Apache-2.0 License [see LICENSE for details]
* \file deformable_psroi_pooling.cu
* \brief
* \author Yi Li, Guodong Zhang, Jifeng Dai
*/
#include "./deformable_psroi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
#include "../../common/cuda/utils.h"
#include "../mxnet_op.h"
#define DeformablePSROIPOOLING_CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
// Bilinearly interpolates row-major `data` (height x width) at the
// fractional position (x, y): the four texels bracketing the point are
// blended by its fractional distances from the floor corner. `height` is
// kept in the signature for symmetry with callers; bounds are the caller's
// responsibility.
template <typename DType>
__device__ DType bilinear_interp(const DType* data,
                                 const DType x, const DType y,
                                 const index_t width, const index_t height) {
  // Integer corners bracketing the sample point.
  index_t left = floor(x);
  index_t right = ceil(x);
  index_t top = floor(y);
  index_t bottom = ceil(y);
  // Fractional offsets from the top-left corner.
  DType fx = static_cast<DType>(x - left);
  DType fy = static_cast<DType>(y - top);
  // The four surrounding samples.
  DType tl = data[top * width + left];
  DType bl = data[bottom * width + left];
  DType tr = data[top * width + right];
  DType br = data[bottom * width + right];
  // Weighted blend; the weights sum to 1. Term order matches the original
  // four-term expression so results stay bit-identical.
  return (1 - fx) * (1 - fy) * tl + (1 - fx) * fy * bl +
         fx * (1 - fy) * tr + fx * fy * br;
}
// Forward kernel for deformable position-sensitive ROI pooling.
// One thread per output element (n, ctop, ph, pw): the ROI bin (ph, pw) is
// shifted by a learned, per-part offset read from bottom_trans (unless
// no_trans), then sample_per_part x sample_per_part points inside the bin
// are bilinearly sampled from the (ctop, gh, gw) position-sensitive channel
// and averaged. top_count records how many samples fell inside the image,
// which the backward pass uses to normalize gradients.
template <typename DType>
__global__ void DeformablePSROIPoolForwardKernel(const index_t count,
const DType* bottom_data,
const DType spatial_scale,
const index_t channels,
const index_t height, const index_t width,
const index_t pooled_height,
const index_t pooled_width,
const DType* bottom_rois,
const DType* bottom_trans,
const bool no_trans, const DType trans_std,
const index_t sample_per_part,
const index_t output_dim,
const index_t group_size,
const index_t part_size,
const index_t num_classes,
const index_t channels_each_class,
DType* top_data, DType* top_count) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
index_t pw = index % pooled_width;
index_t ph = (index / pooled_width) % pooled_height;
index_t ctop = (index / pooled_width / pooled_height) % output_dim;
index_t n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
// ROI rows are [batch_ind, x1, y1, x2, y2]; the -0.5 shift centers pixels.
const DType* offset_bottom_rois = bottom_rois + n * 5;
index_t roi_batch_ind = offset_bottom_rois[0];
DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
// Force too small ROIs to be 1x1
DType roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
DType roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
DType sub_bin_size_h = bin_size_h / static_cast<DType>(sample_per_part);
DType sub_bin_size_w = bin_size_w / static_cast<DType>(sample_per_part);
// (part_h, part_w): which cell of the part_size x part_size offset grid
// this bin falls into; the learned offset is scaled by trans_std and the
// ROI extent.
index_t part_h = floor(static_cast<DType>(ph) / pooled_height * part_size);
index_t part_w = floor(static_cast<DType>(pw) / pooled_width * part_size);
index_t class_id = ctop / channels_each_class;
DType trans_x = no_trans ? static_cast<DType>(0) :
bottom_trans[(((n * num_classes + class_id) * 2)
* part_size + part_h)
* part_size + part_w] * trans_std;
DType trans_y = no_trans ? static_cast<DType>(0) :
bottom_trans[(((n * num_classes + class_id) * 2 + 1)
* part_size + part_h)
* part_size + part_w] * trans_std;
DType wstart = static_cast<DType>(pw) * bin_size_w + roi_start_w;
wstart += trans_x * roi_width;
DType hstart = static_cast<DType>(ph) * bin_size_h + roi_start_h;
hstart += trans_y * roi_height;
DType sum = 0;
// NOTE(review): this local `count` shadows the kernel parameter `count`
// used by CUDA_KERNEL_LOOP above; the loop itself is unaffected, but the
// parameter is unreachable past this point.
index_t count = 0;
index_t gw = floor(static_cast<DType>(pw) * group_size / pooled_width);
index_t gh = floor(static_cast<DType>(ph) * group_size / pooled_height);
gw = min(max(gw, static_cast<index_t>(0)), group_size - 1);
gh = min(max(gh, static_cast<index_t>(0)), group_size - 1);
const DType* offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width;
for (index_t ih = 0; ih < sample_per_part; ih++) {
for (index_t iw = 0; iw < sample_per_part; iw++) {
DType w = wstart + iw * sub_bin_size_w;
DType h = hstart + ih * sub_bin_size_h;
// bilinear interpolation
// Samples falling outside the image are skipped (not zero-padded).
if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) {
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
index_t c = (ctop * group_size + gh) * group_size + gw;
DType val = bilinear_interp(offset_bottom_data + c * height * width,
w, h, width, height);
sum += val;
count++;
}
}
// Average over in-bounds samples; empty bins produce 0.
top_data[index] = count == 0 ? static_cast<DType>(0) : sum / count;
top_count[index] = count;
}
}
// Host-side launcher for the forward kernel. Unpacks raw device pointers and
// shape info from the mshadow tensors, launches one thread per output
// element on the tensor's stream, and checks for launch errors.
// `trans` is ignored when no_trans is set (offsets treated as zero).
template<typename DType>
inline void DeformablePSROIPoolForward(const Tensor<gpu, 4, DType> &out,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &trans,
const Tensor<gpu, 4, DType> &top_count,
const bool no_trans, const float spatial_scale,
const index_t output_dim, const index_t group_size,
const index_t pooled_size, const index_t part_size,
const index_t sample_per_part, const float trans_std) {
const DType *bottom_data = data.dptr_;
const DType *bottom_rois = bbox.dptr_;
const DType *bottom_trans = no_trans ? nullptr : trans.dptr_;
DType *top_data = out.dptr_;
DType *top_count_data = top_count.dptr_;
// One kernel thread per output element.
const index_t count = out.shape_.Size();
const index_t channels = data.size(1);
const index_t height = data.size(2);
const index_t width = data.size(3);
const index_t pooled_height = pooled_size;
const index_t pooled_width = pooled_size;
// trans holds (dx, dy) pairs per class, hence the /2.
const index_t num_classes = no_trans ? 1 : trans.size(1) / 2;
const index_t channels_each_class = no_trans ? output_dim : output_dim / num_classes;
hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
hipLaunchKernelGGL(( DeformablePSROIPoolForwardKernel<DType>),
dim3(mxnet::op::mxnet_op::cuda_get_num_blocks(count)), dim3(kBaseThreadNum),
0, stream, count, bottom_data, spatial_scale, channels, height, width,
pooled_height, pooled_width, bottom_rois, bottom_trans,
no_trans, trans_std, sample_per_part, output_dim,
group_size, part_size, num_classes,
channels_each_class, top_data, top_count_data);
DeformablePSROIPOOLING_CUDA_CHECK(hipGetLastError());
}
template <typename DType>
__global__ void DeformablePSROIPoolBackwardAccKernel(const index_t count,
const DType* top_diff,
const DType* top_count,
const index_t num_rois,
const DType spatial_scale,
const index_t channels,
const index_t height,
const index_t width,
const index_t pooled_height,
const index_t pooled_width,
const index_t output_dim,
DType* bottom_data_diff,
DType* bottom_trans_diff,
const DType* bottom_data,
const DType* bottom_rois,
const DType* bottom_trans,
const bool no_trans,
const DType trans_std,
const index_t sample_per_part,
const index_t group_size,
const index_t part_size,
const index_t num_classes,
const index_t channels_each_class) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
index_t pw = index % pooled_width;
index_t ph = (index / pooled_width) % pooled_height;
index_t ctop = (index / pooled_width / pooled_height) % output_dim;
index_t n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const DType* offset_bottom_rois = bottom_rois + n * 5;
index_t roi_batch_ind = offset_bottom_rois[0];
DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
// Force too small ROIs to be 1x1
DType roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
DType roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
DType sub_bin_size_h = bin_size_h / static_cast<DType>(sample_per_part);
DType sub_bin_size_w = bin_size_w / static_cast<DType>(sample_per_part);
index_t part_h = floor(static_cast<DType>(ph) / pooled_height * part_size);
index_t part_w = floor(static_cast<DType>(pw) / pooled_width * part_size);
index_t class_id = ctop / channels_each_class;
DType trans_x = no_trans ? static_cast<DType>(0) :
bottom_trans[(((n * num_classes + class_id) * 2)
* part_size + part_h)
* part_size + part_w] * trans_std;
DType trans_y = no_trans ? static_cast<DType>(0) :
bottom_trans[(((n * num_classes + class_id) * 2 + 1)
* part_size + part_h)
* part_size + part_w] * trans_std;
DType wstart = static_cast<DType>(pw) * bin_size_w + roi_start_w;
wstart += trans_x * roi_width;
DType hstart = static_cast<DType>(ph) * bin_size_h + roi_start_h;
hstart += trans_y * roi_height;
if (top_count[index] <= 0) {
continue;
}
DType diff_val = top_diff[index] / top_count[index];
const DType* offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width;
DType* offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width;
index_t gw = floor(static_cast<DType>(pw) * group_size / pooled_width);
index_t gh = floor(static_cast<DType>(ph) * group_size / pooled_height);
gw = min(max(gw, static_cast<index_t>(0)), group_size - 1);
gh = min(max(gh, static_cast<index_t>(0)), group_size - 1);
for (index_t ih = 0; ih < sample_per_part; ih++) {
for (index_t iw = 0; iw < sample_per_part; iw++) {
DType w = wstart + iw * sub_bin_size_w;
DType h = hstart + ih * sub_bin_size_h;
// bilinear interpolation
if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) {
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
index_t c = (ctop * group_size + gh) * group_size + gw;
// backward on feature
index_t x0 = floor(w);
index_t x1 = ceil(w);
index_t y0 = floor(h);
index_t y1 = ceil(h);
DType dist_x = w - x0, dist_y = h - y0;
DType q00 = (1 - dist_x) * (1 - dist_y);
DType q01 = (1 - dist_x) * dist_y;
DType q10 = dist_x * (1 - dist_y);
DType q11 = dist_x * dist_y;
index_t bottom_index_base = c * height * width;
atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val);
if (no_trans) {
continue;
}
DType U00 = offset_bottom_data[bottom_index_base + y0 * width + x0];
DType U01 = offset_bottom_data[bottom_index_base + y1 * width + x0];
DType U10 = offset_bottom_data[bottom_index_base + y0 * width + x1];
DType U11 = offset_bottom_data[bottom_index_base + y1 * width + x1];
DType diff_x = U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y);
diff_x *= trans_std * diff_val * roi_width;
DType diff_y = U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x);
diff_y *= trans_std * diff_val * roi_height;
atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2)
* part_size + part_h)
* part_size + part_w, diff_x);
atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1)
* part_size + part_h)
* part_size + part_w, diff_y);
}
}
}
}
template<typename DType>
inline void DeformablePSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
const Tensor<gpu, 4, DType> &trans_grad,
const Tensor<gpu, 4, DType> &out_grad,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &trans,
const Tensor<gpu, 4, DType> &top_count,
const bool no_trans, const float spatial_scale,
const index_t output_dim, const index_t group_size,
const index_t pooled_size, const index_t part_size,
const index_t sample_per_part, const float trans_std) {
const DType *top_diff = out_grad.dptr_;
const DType *bottom_data = data.dptr_;
const DType *bottom_rois = bbox.dptr_;
const DType *bottom_trans = no_trans ? nullptr : trans.dptr_;
DType *bottom_data_diff = in_grad.dptr_;
DType *bottom_trans_diff = no_trans ? nullptr : trans_grad.dptr_;
const DType *top_count_data = top_count.dptr_;
const index_t count = out_grad.shape_.Size();
const index_t num_rois = bbox.size(0);
const index_t channels = in_grad.size(1);
const index_t height = in_grad.size(2);
const index_t width = in_grad.size(3);
const index_t pooled_height = pooled_size;
const index_t pooled_width = pooled_size;
const index_t num_classes = no_trans ? 1 : trans_grad.size(1) / 2;
const index_t channels_each_class = no_trans ? output_dim : output_dim / num_classes;
hipStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
hipLaunchKernelGGL(( DeformablePSROIPoolBackwardAccKernel<DType>),
dim3(mxnet::op::mxnet_op::cuda_get_num_blocks(count)), dim3(kBaseThreadNum),
0, stream , count, top_diff, top_count_data, num_rois, spatial_scale,
channels, height, width, pooled_height, pooled_width,
output_dim, bottom_data_diff, bottom_trans_diff,
bottom_data, bottom_rois, bottom_trans,
no_trans, trans_std, sample_per_part, group_size,
part_size, num_classes, channels_each_class);
DeformablePSROIPOOLING_CUDA_CHECK(hipGetLastError());
}
} // namespace cuda
// mshadow-level entry point: forwards directly to the cuda:: implementation
// above so callers can stay namespace-agnostic across CPU/GPU builds.
template<typename DType>
inline void DeformablePSROIPoolForward(const Tensor<gpu, 4, DType> &out,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &trans,
const Tensor<gpu, 4, DType> &top_count,
const bool no_trans, const float spatial_scale,
const index_t output_dim, const index_t group_size,
const index_t pooled_size, const index_t part_size,
const index_t sample_per_part, const float trans_std) {
cuda::DeformablePSROIPoolForward(out, data, bbox, trans, top_count,
no_trans, spatial_scale, output_dim,
group_size, pooled_size, part_size,
sample_per_part, trans_std);
}
// mshadow-level entry point for the backward pass: forwards to the cuda::
// implementation, which *accumulates* gradients into in_grad/trans_grad.
template<typename DType>
inline void DeformablePSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
const Tensor<gpu, 4, DType> &trans_grad,
const Tensor<gpu, 4, DType> &out_grad,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &trans,
const Tensor<gpu, 4, DType> &top_count,
const bool no_trans, const float spatial_scale,
const index_t output_dim, const index_t group_size,
const index_t pooled_size, const index_t part_size,
const index_t sample_per_part, const float trans_std) {
cuda::DeformablePSROIPoolBackwardAcc(in_grad, trans_grad, out_grad, data, bbox,
trans, top_count, no_trans, spatial_scale,
output_dim, group_size, pooled_size,
part_size, sample_per_part, trans_std);
}
} // namespace mshadow
namespace mxnet {
namespace op {
// GPU operator factory: instantiates DeformablePSROIPoolingOp for the real
// dtype selected at runtime (MSHADOW_REAL_TYPE_SWITCH dispatches on dtype).
// Caller takes ownership of the returned operator.
template<>
Operator* CreateOp<gpu>(DeformablePSROIPoolingParam param, int dtype) {
Operator* op = nullptr;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new DeformablePSROIPoolingOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
| dbf1c2992a2f0a2baa038c2afdb18262cbaa165b.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 Microsoft
* Licensed under The Apache-2.0 License [see LICENSE for details]
* \file deformable_psroi_pooling.cu
* \brief
* \author Yi Li, Guodong Zhang, Jifeng Dai
*/
#include "./deformable_psroi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
#include "../../common/cuda/utils.h"
#include "../mxnet_op.h"
#define DeformablePSROIPOOLING_CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
// Bilinearly interpolates row-major `data` (height x width) at the
// fractional position (x, y): the four texels bracketing the point are
// blended by its fractional distances from the floor corner. Callers must
// keep (x, y) within bounds; `height` is unused here.
template <typename DType>
__device__ DType bilinear_interp(const DType* data,
const DType x, const DType y,
const index_t width, const index_t height) {
index_t x1 = floor(x);
index_t x2 = ceil(x);
index_t y1 = floor(y);
index_t y2 = ceil(y);
DType dist_x = static_cast<DType>(x - x1);
DType dist_y = static_cast<DType>(y - y1);
// Four neighbors: value{row}{col} with 1 = floor side, 2 = ceil side.
DType value11 = data[y1 * width + x1];
DType value12 = data[y2 * width + x1];
DType value21 = data[y1 * width + x2];
DType value22 = data[y2 * width + x2];
// Weights sum to 1, so the result stays within the neighbor range.
DType value = (1 - dist_x) * (1 - dist_y) * value11 + (1 - dist_x) * dist_y * value12 +
dist_x * (1 - dist_y) * value21 + dist_x * dist_y * value22;
return value;
}
// Deformable position-sensitive ROI pooling, forward pass.
// One thread per output element (n, ctop, ph, pw): averages
// sample_per_part x sample_per_part bilinear samples taken inside the
// (ph, pw) bin of ROI n, with the bin optionally shifted by a learned
// offset read from bottom_trans.  top_count records how many samples fell
// inside the image so the backward pass can renormalize; bins with no
// in-bounds samples emit 0.
template <typename DType>
__global__ void DeformablePSROIPoolForwardKernel(const index_t count,
                                                 const DType* bottom_data,
                                                 const DType spatial_scale,
                                                 const index_t channels,
                                                 const index_t height, const index_t width,
                                                 const index_t pooled_height,
                                                 const index_t pooled_width,
                                                 const DType* bottom_rois,
                                                 const DType* bottom_trans,
                                                 const bool no_trans, const DType trans_std,
                                                 const index_t sample_per_part,
                                                 const index_t output_dim,
                                                 const index_t group_size,
                                                 const index_t part_size,
                                                 const index_t num_classes,
                                                 const index_t channels_each_class,
                                                 DType* top_data, DType* top_count) {
  CUDA_KERNEL_LOOP(index, count) {
    // The output is in order (n, ctop, ph, pw)
    index_t pw = index % pooled_width;
    index_t ph = (index / pooled_width) % pooled_height;
    index_t ctop = (index / pooled_width / pooled_height) % output_dim;
    index_t n = index / pooled_width / pooled_height / output_dim;
    // [start, end) interval for spatial sampling; each ROI row is
    // (batch_index, x1, y1, x2, y2).
    const DType* offset_bottom_rois = bottom_rois + n * 5;
    index_t roi_batch_ind = offset_bottom_rois[0];
    DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
    DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
    DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
    DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
    // Force too small ROIs to be 1x1
    DType roi_width = max(roi_end_w - roi_start_w, 0.1);  // avoid 0
    DType roi_height = max(roi_end_h - roi_start_h, 0.1);
    // Compute w and h at bottom
    DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
    DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
    DType sub_bin_size_h = bin_size_h / static_cast<DType>(sample_per_part);
    DType sub_bin_size_w = bin_size_w / static_cast<DType>(sample_per_part);
    // Cell of the (part_size x part_size) offset grid this bin reads from.
    index_t part_h = floor(static_cast<DType>(ph) / pooled_height * part_size);
    index_t part_w = floor(static_cast<DType>(pw) / pooled_width * part_size);
    index_t class_id = ctop / channels_each_class;
    // Learned normalized offsets, scaled by trans_std (zero when no_trans).
    DType trans_x = no_trans ? static_cast<DType>(0) :
      bottom_trans[(((n * num_classes + class_id) * 2)
                    * part_size + part_h)
                    * part_size + part_w] * trans_std;
    DType trans_y = no_trans ? static_cast<DType>(0) :
      bottom_trans[(((n * num_classes + class_id) * 2 + 1)
                    * part_size + part_h)
                    * part_size + part_w] * trans_std;
    DType wstart = static_cast<DType>(pw) * bin_size_w + roi_start_w;
    wstart += trans_x * roi_width;
    DType hstart = static_cast<DType>(ph) * bin_size_h + roi_start_h;
    hstart += trans_y * roi_height;
    DType sum = 0;
    // Renamed from `count`: the original name shadowed the kernel argument
    // of the same name that CUDA_KERNEL_LOOP's condition relies on.
    index_t sample_count = 0;
    // Position-sensitive group cell feeding this output channel.
    index_t gw = floor(static_cast<DType>(pw) * group_size / pooled_width);
    index_t gh = floor(static_cast<DType>(ph) * group_size / pooled_height);
    gw = min(max(gw, static_cast<index_t>(0)), group_size - 1);
    gh = min(max(gh, static_cast<index_t>(0)), group_size - 1);
    const DType* offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width;
    for (index_t ih = 0; ih < sample_per_part; ih++) {
      for (index_t iw = 0; iw < sample_per_part; iw++) {
        DType w = wstart + iw * sub_bin_size_w;
        DType h = hstart + ih * sub_bin_size_h;
        // bilinear interpolation; skip samples falling outside the image
        if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) {
          continue;
        }
        w = min(max(w, 0.), width - 1.);
        h = min(max(h, 0.), height - 1.);
        index_t c = (ctop * group_size + gh) * group_size + gw;
        DType val = bilinear_interp(offset_bottom_data + c * height * width,
                                    w, h, width, height);
        sum += val;
        sample_count++;
      }
    }
    top_data[index] = sample_count == 0 ? static_cast<DType>(0) : sum / sample_count;
    top_count[index] = sample_count;
  }
}
// Host-side launcher for the forward kernel: unpacks tensor geometry and
// raw device pointers, then launches one thread per output element on the
// stream owned by `out`.  Launch errors are surfaced immediately via
// cudaGetLastError().
template<typename DType>
inline void DeformablePSROIPoolForward(const Tensor<gpu, 4, DType> &out,
                                       const Tensor<gpu, 4, DType> &data,
                                       const Tensor<gpu, 2, DType> &bbox,
                                       const Tensor<gpu, 4, DType> &trans,
                                       const Tensor<gpu, 4, DType> &top_count,
                                       const bool no_trans, const float spatial_scale,
                                       const index_t output_dim, const index_t group_size,
                                       const index_t pooled_size, const index_t part_size,
                                       const index_t sample_per_part, const float trans_std) {
  // Geometry of the input feature map and of the pooled output.
  const index_t channels = data.size(1);
  const index_t height = data.size(2);
  const index_t width = data.size(3);
  const index_t pooled_height = pooled_size;
  const index_t pooled_width = pooled_size;
  // The offset tensor stores (y, x) pairs per class, hence the division by 2.
  const index_t num_classes = no_trans ? 1 : trans.size(1) / 2;
  const index_t channels_each_class = no_trans ? output_dim : output_dim / num_classes;
  // Raw device pointers handed to the kernel.
  const DType *data_ptr = data.dptr_;
  const DType *rois_ptr = bbox.dptr_;
  const DType *trans_ptr = no_trans ? nullptr : trans.dptr_;
  DType *out_ptr = out.dptr_;
  DType *count_ptr = top_count.dptr_;
  // One kernel-loop iteration per output element.
  const index_t num_kernels = out.shape_.Size();
  cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
  DeformablePSROIPoolForwardKernel<DType>
      <<<mxnet::op::mxnet_op::cuda_get_num_blocks(num_kernels), kBaseThreadNum,
         0, stream>>>(num_kernels, data_ptr, spatial_scale, channels, height, width,
                      pooled_height, pooled_width, rois_ptr, trans_ptr,
                      no_trans, trans_std, sample_per_part, output_dim,
                      group_size, part_size, num_classes,
                      channels_each_class, out_ptr, count_ptr);
  DeformablePSROIPOOLING_CUDA_CHECK(cudaGetLastError());
}
// Deformable position-sensitive ROI pooling, backward pass (accumulating).
// For each output-gradient element (n, ctop, ph, pw) it revisits the same
// bilinear sample positions as the forward kernel and atomically scatters
// top_diff / top_count into bottom_data_diff; when offsets are learned
// (!no_trans) it also accumulates the offset gradients into
// bottom_trans_diff.
// NOTE(review): atomicAdd on DType requires SM60+ when DType is double --
// confirm the supported build targets.
template <typename DType>
__global__ void DeformablePSROIPoolBackwardAccKernel(const index_t count,
                                                     const DType* top_diff,
                                                     const DType* top_count,
                                                     const index_t num_rois,  // unused in this kernel
                                                     const DType spatial_scale,
                                                     const index_t channels,
                                                     const index_t height,
                                                     const index_t width,
                                                     const index_t pooled_height,
                                                     const index_t pooled_width,
                                                     const index_t output_dim,
                                                     DType* bottom_data_diff,
                                                     DType* bottom_trans_diff,
                                                     const DType* bottom_data,
                                                     const DType* bottom_rois,
                                                     const DType* bottom_trans,
                                                     const bool no_trans,
                                                     const DType trans_std,
                                                     const index_t sample_per_part,
                                                     const index_t group_size,
                                                     const index_t part_size,
                                                     const index_t num_classes,
                                                     const index_t channels_each_class) {
  CUDA_KERNEL_LOOP(index, count) {
    // The output is in order (n, ctop, ph, pw)
    index_t pw = index % pooled_width;
    index_t ph = (index / pooled_width) % pooled_height;
    index_t ctop = (index / pooled_width / pooled_height) % output_dim;
    index_t n = index / pooled_width / pooled_height / output_dim;
    // [start, end) interval for spatial sampling.
    // Recompute the ROI geometry exactly as in the forward pass.
    const DType* offset_bottom_rois = bottom_rois + n * 5;
    index_t roi_batch_ind = offset_bottom_rois[0];
    DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
    DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
    DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
    DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
    // Force too small ROIs to be 1x1
    DType roi_width = max(roi_end_w - roi_start_w, 0.1);  // avoid 0
    DType roi_height = max(roi_end_h - roi_start_h, 0.1);
    // Compute w and h at bottom
    DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
    DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
    DType sub_bin_size_h = bin_size_h / static_cast<DType>(sample_per_part);
    DType sub_bin_size_w = bin_size_w / static_cast<DType>(sample_per_part);
    // Cell of the (part_size x part_size) offset grid this bin reads from.
    index_t part_h = floor(static_cast<DType>(ph) / pooled_height * part_size);
    index_t part_w = floor(static_cast<DType>(pw) / pooled_width * part_size);
    index_t class_id = ctop / channels_each_class;
    // Learned normalized offsets, scaled by trans_std (zero when no_trans).
    DType trans_x = no_trans ? static_cast<DType>(0) :
      bottom_trans[(((n * num_classes + class_id) * 2)
                    * part_size + part_h)
                    * part_size + part_w] * trans_std;
    DType trans_y = no_trans ? static_cast<DType>(0) :
      bottom_trans[(((n * num_classes + class_id) * 2 + 1)
                    * part_size + part_h)
                    * part_size + part_w] * trans_std;
    DType wstart = static_cast<DType>(pw) * bin_size_w + roi_start_w;
    wstart += trans_x * roi_width;
    DType hstart = static_cast<DType>(ph) * bin_size_h + roi_start_h;
    hstart += trans_y * roi_height;
    // Skip bins that received no in-bounds samples in the forward pass.
    if (top_count[index] <= 0) {
      continue;
    }
    // Each sample receives an equal share of the output gradient.
    DType diff_val = top_diff[index] / top_count[index];
    const DType* offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width;
    DType* offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width;
    index_t gw = floor(static_cast<DType>(pw) * group_size / pooled_width);
    index_t gh = floor(static_cast<DType>(ph) * group_size / pooled_height);
    gw = min(max(gw, static_cast<index_t>(0)), group_size - 1);
    gh = min(max(gh, static_cast<index_t>(0)), group_size - 1);
    for (index_t ih = 0; ih < sample_per_part; ih++) {
      for (index_t iw = 0; iw < sample_per_part; iw++) {
        DType w = wstart + iw * sub_bin_size_w;
        DType h = hstart + ih * sub_bin_size_h;
        // bilinear interpolation
        if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) {
          continue;
        }
        w = min(max(w, 0.), width - 1.);
        h = min(max(h, 0.), height - 1.);
        index_t c = (ctop * group_size + gh) * group_size + gw;
        // backward on feature: scatter the bilinear weights to the four
        // neighbouring pixels.
        index_t x0 = floor(w);
        index_t x1 = ceil(w);
        index_t y0 = floor(h);
        index_t y1 = ceil(h);
        DType dist_x = w - x0, dist_y = h - y0;
        DType q00 = (1 - dist_x) * (1 - dist_y);
        DType q01 = (1 - dist_x) * dist_y;
        DType q10 = dist_x * (1 - dist_y);
        DType q11 = dist_x * dist_y;
        index_t bottom_index_base = c * height * width;
        atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val);
        atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val);
        atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val);
        atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val);
        if (no_trans) {
          continue;
        }
        // Gradient w.r.t. the learned (x, y) offsets: derivative of the
        // bilinear interpolation w.r.t. the sample position, scaled back
        // into normalized-offset space via trans_std and the ROI extent.
        DType U00 = offset_bottom_data[bottom_index_base + y0 * width + x0];
        DType U01 = offset_bottom_data[bottom_index_base + y1 * width + x0];
        DType U10 = offset_bottom_data[bottom_index_base + y0 * width + x1];
        DType U11 = offset_bottom_data[bottom_index_base + y1 * width + x1];
        DType diff_x = U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y);
        diff_x *= trans_std * diff_val * roi_width;
        DType diff_y = U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x);
        diff_y *= trans_std * diff_val * roi_height;
        atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2)
                                       * part_size + part_h)
                                       * part_size + part_w, diff_x);
        atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1)
                                       * part_size + part_h)
                                       * part_size + part_w, diff_y);
      }
    }
  }
}
// Host-side launcher for the backward kernel: unpacks tensor geometry and
// raw device pointers, then launches one thread per output-gradient
// element on the stream owned by `in_grad`.  Gradients are accumulated
// (not overwritten) into in_grad / trans_grad by the kernel's atomics.
template<typename DType>
inline void DeformablePSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
                                           const Tensor<gpu, 4, DType> &trans_grad,
                                           const Tensor<gpu, 4, DType> &out_grad,
                                           const Tensor<gpu, 4, DType> &data,
                                           const Tensor<gpu, 2, DType> &bbox,
                                           const Tensor<gpu, 4, DType> &trans,
                                           const Tensor<gpu, 4, DType> &top_count,
                                           const bool no_trans, const float spatial_scale,
                                           const index_t output_dim, const index_t group_size,
                                           const index_t pooled_size, const index_t part_size,
                                           const index_t sample_per_part, const float trans_std) {
  // Geometry of the input-gradient map and of the pooled output.
  const index_t num_rois = bbox.size(0);
  const index_t channels = in_grad.size(1);
  const index_t height = in_grad.size(2);
  const index_t width = in_grad.size(3);
  const index_t pooled_height = pooled_size;
  const index_t pooled_width = pooled_size;
  // The offset-gradient tensor stores (y, x) pairs per class.
  const index_t num_classes = no_trans ? 1 : trans_grad.size(1) / 2;
  const index_t channels_each_class = no_trans ? output_dim : output_dim / num_classes;
  // Raw device pointers handed to the kernel.
  const DType *grad_out_ptr = out_grad.dptr_;
  const DType *data_ptr = data.dptr_;
  const DType *rois_ptr = bbox.dptr_;
  const DType *trans_ptr = no_trans ? nullptr : trans.dptr_;
  const DType *count_ptr = top_count.dptr_;
  DType *grad_data_ptr = in_grad.dptr_;
  DType *grad_trans_ptr = no_trans ? nullptr : trans_grad.dptr_;
  // One kernel-loop iteration per output-gradient element.
  const index_t num_kernels = out_grad.shape_.Size();
  cudaStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
  DeformablePSROIPoolBackwardAccKernel<DType>
      <<<mxnet::op::mxnet_op::cuda_get_num_blocks(num_kernels), kBaseThreadNum,
         0, stream>>>(num_kernels, grad_out_ptr, count_ptr, num_rois, spatial_scale,
                      channels, height, width, pooled_height, pooled_width,
                      output_dim, grad_data_ptr, grad_trans_ptr,
                      data_ptr, rois_ptr, trans_ptr,
                      no_trans, trans_std, sample_per_part, group_size,
                      part_size, num_classes, channels_each_class);
  DeformablePSROIPOOLING_CUDA_CHECK(cudaGetLastError());
}
} // namespace cuda
// mshadow-level entry point: thin pass-through to the cuda:: implementation.
template<typename DType>
inline void DeformablePSROIPoolForward(const Tensor<gpu, 4, DType> &out,
                                       const Tensor<gpu, 4, DType> &data,
                                       const Tensor<gpu, 2, DType> &bbox,
                                       const Tensor<gpu, 4, DType> &trans,
                                       const Tensor<gpu, 4, DType> &top_count,
                                       const bool no_trans, const float spatial_scale,
                                       const index_t output_dim, const index_t group_size,
                                       const index_t pooled_size, const index_t part_size,
                                       const index_t sample_per_part, const float trans_std) {
  cuda::DeformablePSROIPoolForward(out, data, bbox, trans, top_count,
                                   no_trans, spatial_scale, output_dim,
                                   group_size, pooled_size, part_size,
                                   sample_per_part, trans_std);
}
// mshadow-level entry point: thin pass-through to the cuda:: implementation.
template<typename DType>
inline void DeformablePSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
                                           const Tensor<gpu, 4, DType> &trans_grad,
                                           const Tensor<gpu, 4, DType> &out_grad,
                                           const Tensor<gpu, 4, DType> &data,
                                           const Tensor<gpu, 2, DType> &bbox,
                                           const Tensor<gpu, 4, DType> &trans,
                                           const Tensor<gpu, 4, DType> &top_count,
                                           const bool no_trans, const float spatial_scale,
                                           const index_t output_dim, const index_t group_size,
                                           const index_t pooled_size, const index_t part_size,
                                           const index_t sample_per_part, const float trans_std) {
  cuda::DeformablePSROIPoolBackwardAcc(in_grad, trans_grad, out_grad, data, bbox,
                                       trans, top_count, no_trans, spatial_scale,
                                       output_dim, group_size, pooled_size,
                                       part_size, sample_per_part, trans_std);
}
} // namespace mshadow
namespace mxnet {
namespace op {
// GPU operator factory: instantiates DeformablePSROIPoolingOp for the
// requested real dtype (dispatched by MSHADOW_REAL_TYPE_SWITCH).
template<>
Operator* CreateOp<gpu>(DeformablePSROIPoolingParam param, int dtype) {
  Operator* op = nullptr;
  MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
    op = new DeformablePSROIPoolingOp<gpu, DType>(param);
  });
  return op;
}
} // namespace op
} // namespace mxnet
|
8f6985a56d31b1a1aacc64a91e058126dc5d8613.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <string>
#include <thread>
#include <chrono>
#include <memory>
#include <hip/hip_runtime.h>
#include <cudnn.h>
//#include <boost/program_options.hpp>
#include "spdlog/spdlog.h"
using namespace std;
//using namespace boost::program_options;
namespace spd = spdlog;
//////////////////////
// kernel functions //
//////////////////////
///////////
// Class //
///////////
static int number = 0;
// Probes how much device global memory can actually be reserved by
// allocating progressively larger buffers of element type T.
template <typename T>
class GPUTest
{
public:
    GPUTest(const int id);   // selects GPU `id` and sets up logging
    void allocate();         // declared but not defined in this file
    void graduallyAllocate();  // main stress loop: grow allocations until failure
    void showDeviceInfo();   // verbose property dump (always device 0)
    void getDeviceInfo();    // caches/prints total global memory of deviceID
    virtual ~GPUTest();      // resets the device
    void _setLog(void);      // creates this instance's spdlog console logger
    std::shared_ptr<spd::logger> console;  // per-instance logger
    hipError_t err;          // last HIP status observed by the test
private:
    long long int size;      // unused in this translation unit
    int deviceID;            // device chosen in the constructor
    unsigned long long int totalGlobalMem;  // filled by getDeviceInfo()
    // for device prof;
};
// Creates this instance's stdout color logger.  A file-scope counter gives
// each instance a unique registry name ("GPUTest1", "GPUTest2", ...);
// registration failures are reported on stdout and otherwise ignored.
template <typename T>
void GPUTest<T>::_setLog(void)
{
    try {
        number++;
        // console->info(function_name, message);
        string s = "GPUTest" + to_string(number);
        console = spd::stdout_color_mt(s);
        spd::set_level(spd::level::debug);  // global level, affects all loggers
        console->info("SET: console log.");
    }catch (const spd::spdlog_ex& ex){
        cout << "Log init failed: " << ex.what() << endl;
    }
}
// Binds this tester to GPU `id` and wires up its console logger.
template <typename T>
GPUTest<T>::GPUTest(const int id)
{
    this->_setLog();
    console->info("SET: GPU Device ID [{}].", id);
    hipSetDevice(id);  // NOTE(review): return code ignored -- an invalid id goes unnoticed
    deviceID = id;
}
// Resets the whole device on destruction.  NOTE(review): hipDeviceReset()
// tears down the context for every other GPUTest instance sharing the
// same GPU, not just this one.
template <typename T>
GPUTest<T>::~GPUTest()
{
    hipDeviceReset();
}
// Repeatedly hipMalloc/hipFree blocks of growing size (increase_factor
// units of sizeof(T) MB per step) until the maxsize GB budget is reached
// or an allocation fails.  On failure it re-allocates the last size that
// fit, sleeps so the operator can inspect the device with other tools,
// then stops.  Only capacity is probed; no data is written.
template <typename T>
void GPUTest<T>::graduallyAllocate(void)
{
    console->info("TEST: Gradual increasment of Memory Allocation on gpu device.");
    int increase_factor = 1 << 5;  // step granularity: 32 units per iteration
    T *fd = nullptr;               // device probe pointer (unused fh/mb_size removed)
    int maxsize = 8;               // allocation budget in GB
    int i = 1;
    // Was `int`: the accumulated byte count overflows 32 bits past ~2 GB.
    unsigned long long total = 0;
    int sleep_seconds = 60;
    console->info("TEST: MAX Allocation Size {} GB on gpu device.", maxsize);
    console->info("SET: Increase factor {} MB on gpu device.", increase_factor);
    console->info("TEST: unit = [{}bytes].", sizeof(T));
    console->info("Increasement scedule is [mb_unit [{}*1024*1024] * Increase factor[{}] * time step]."
                  , sizeof(T), increase_factor);
    while(maxsize * 1024 > sizeof(T) * increase_factor * i){
        console->info("Allocation: {} MB on GPU. ", sizeof(T) * increase_factor * i);
        total += (unsigned long long)sizeof(T) * increase_factor * 1024 * 1024 * i;
        err = hipMalloc((void **)&fd, sizeof(T) * increase_factor * 1024 * 1024 * i);
        if(hipSuccess != err){
            console->info("Error: can't allocate {} MB on GPU.", sizeof(T) * increase_factor * i);
            // Fall back to the last size that fit; guard so a first-step
            // failure does not degenerate into a zero-byte allocation.
            if (i > 1) --i;
            err = hipMalloc((void **)&fd, sizeof(T) * increase_factor * 1024 * 1024 * i);
            console->info("Reallocation: {} MB on GPU. ", sizeof(T) * increase_factor * i);
            console->info("SLEEP: {} seconds.", sleep_seconds);
            // Fix: this message is about the device but was passing
            // sleep_seconds as the placeholder value.
            console->info("CHECK: stress on your [{}] device via another application.", deviceID);
            std::this_thread::sleep_for(std::chrono::seconds(sleep_seconds));
            hipFree(fd);
            break;
        }
        hipFree(fd);
        ++i;
    }
    hipDeviceSynchronize();
}
// Caches the device's total global memory in totalGlobalMem and prints it.
template <typename T>
void GPUTest<T>::getDeviceInfo(void)
{
    hipDeviceProp_t deviceProp;
    hipGetDeviceProperties(&deviceProp, deviceID);
    totalGlobalMem = deviceProp.totalGlobalMem;
    printf(" Total amount of global memory: %.2f GBytes (%llu "
           "bytes)\n", (float)totalGlobalMem / pow(1024.0, 3),
           (unsigned long long)totalGlobalMem);
}
// Verbose property dump in the style of the CUDA deviceQuery sample.
// NOTE(review): always queries device 0 (and calls hipSetDevice(0)),
// ignoring this->deviceID -- confirm whether that is intentional.
template <typename T>
void GPUTest<T>::showDeviceInfo(void)
{
    int dev = 0, driverVersion = 0, runtimeVersion = 0;
    hipSetDevice(dev);
    hipDeviceProp_t deviceProp;
    hipGetDeviceProperties(&deviceProp, dev);
    printf("Device %d: \"%s\"\n", dev, deviceProp.name);
    hipDriverGetVersion(&driverVersion);
    hipRuntimeGetVersion(&runtimeVersion);
    printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n",
           driverVersion / 1000, (driverVersion % 100) / 10,
           runtimeVersion / 1000, (runtimeVersion % 100) / 10);
    printf(" CUDA Capability Major/Minor version number: %d.%d\n",
           deviceProp.major, deviceProp.minor);
    printf(" Total amount of global memory: %.2f GBytes (%llu "
           "bytes)\n", (float)deviceProp.totalGlobalMem / pow(1024.0, 3),
           (unsigned long long)deviceProp.totalGlobalMem);
    printf(" GPU Clock rate: %.0f MHz (%0.2f "
           "GHz)\n", deviceProp.clockRate * 1e-3f,
           deviceProp.clockRate * 1e-6f);
    printf(" Memory Clock rate: %.0f Mhz\n",
           deviceProp.memoryClockRate * 1e-3f);
    printf(" Memory Bus Width: %d-bit\n",
           deviceProp.memoryBusWidth);
    if (deviceProp.l2CacheSize)
    {
        printf(" L2 Cache Size: %d bytes\n",
               deviceProp.l2CacheSize);
    }
    printf(" Max Texture Dimension Size (x,y,z) 1D=(%d), "
           "2D=(%d,%d), 3D=(%d,%d,%d)\n", deviceProp.maxTexture1D,
           deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
           deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1],
           deviceProp.maxTexture3D[2]);
    printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, "
           "2D=(%d,%d) x %d\n", deviceProp.maxTexture1DLayered[0],
           deviceProp.maxTexture1DLayered[1], deviceProp.maxTexture2DLayered[0],
           deviceProp.maxTexture2DLayered[1],
           deviceProp.maxTexture2DLayered[2]);
    printf(" Total amount of constant memory: %lu bytes\n",
           deviceProp.totalConstMem);
    printf(" Total amount of shared memory per block: %lu bytes\n",
           deviceProp.sharedMemPerBlock);
    printf(" Total number of registers available per block: %d\n",
           deviceProp.regsPerBlock);
    printf(" Warp size: %d\n",
           deviceProp.warpSize);
    printf(" Maximum number of threads per multiprocessor: %d\n",
           deviceProp.maxThreadsPerMultiProcessor);
    printf(" Maximum number of threads per block: %d\n",
           deviceProp.maxThreadsPerBlock);
    printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
           deviceProp.maxThreadsDim[0],
           deviceProp.maxThreadsDim[1],
           deviceProp.maxThreadsDim[2]);
    printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
           deviceProp.maxGridSize[0],
           deviceProp.maxGridSize[1],
           deviceProp.maxGridSize[2]);
    printf(" Maximum memory pitch: %lu bytes\n",
           deviceProp.memPitch);
}
///////////////////
// parse options //
///////////////////
///////////////////
// main function //
///////////////////
int main(int argc, char *argv[])
{
int deviceID=1;
// TODO:
// To change argments parser from boost to Taywee/args?
/*
options_description options1("This programm does GPU stress test.");
options1.add_options()
("help,h", "help mesage.")
("deviceid,d", value<int>(), "set DeviceId of GPU.");
//("memory_allocation_size,s", "set Memory allocation size (Mb).");
variables_map values;
try{
store(parse_command_line(argc, argv, options1), values);
notify(values);
if (values.count("help")) {
cout << options1 << endl;
exit(EXIT_FAILURE);
}
if (!values.count("deviceid")) {
// options_description
cout << options1 << endl;
exit(EXIT_FAILURE);
}
if (values.count("deviceid"))
cout << "set DeviceId: " << endl;
//cout << "set DeviceId: " << values["deviceid"].as<string>() << endl;
}catch(std::exception &e){
std::cout << e.what() << std::endl;
exit(EXIT_FAILURE);
}
*/
console->info("CHECK: data type [int].");
GPUTest<int> *g = new GPUTest<int>(deviceID);
g->getDeviceInfo();
g->graduallyAllocate();
console->info("CHECK: data type [float].");
GPUTest<float> *fg = new GPUTest<float>(deviceID);
fg->getDeviceInfo();
fg->graduallyAllocate();
console->info("CHECK: data type [double].");
GPUTest<double> *dg = new GPUTest<double>(deviceID);
dg->getDeviceInfo();
dg->graduallyAllocate();
console->info("CHECK: data type [doube].");
GPUTest<char> *cg = new GPUTest<char>(deviceID);
cg->getDeviceInfo();
cg->graduallyAllocate();
hipDeviceReset();
return 0;
}
#include <iostream>
#include <string>
#include <thread>
#include <chrono>
#include <memory>
#include <hip/hip_runtime.h>
#include <cudnn.h>
//#include <boost/program_options.hpp>
#include "spdlog/spdlog.h"
using namespace std;
//using namespace boost::program_options;
namespace spd = spdlog;
//////////////////////
// kernel functions //
//////////////////////
///////////
// Class //
///////////
static int number = 0;
// Probes how much device global memory can actually be reserved by
// allocating progressively larger buffers of element type T.
// (Duplicate copy of the class defined earlier in this concatenated file.)
template <typename T>
class GPUTest
{
public:
    GPUTest(const int id);   // selects GPU `id` and sets up logging
    void allocate();         // declared but not defined in this file
    void graduallyAllocate();  // main stress loop: grow allocations until failure
    void showDeviceInfo();   // verbose property dump (always device 0)
    void getDeviceInfo();    // caches/prints total global memory of deviceID
    virtual ~GPUTest();      // resets the device
    void _setLog(void);      // creates this instance's spdlog console logger
    std::shared_ptr<spd::logger> console;  // per-instance logger
    hipError_t err;          // last HIP status observed by the test
private:
    long long int size;      // unused in this translation unit
    int deviceID;            // device chosen in the constructor
    unsigned long long int totalGlobalMem;  // filled by getDeviceInfo()
    // for device prof;
};
// Creates this instance's stdout color logger; a file-scope counter gives
// each instance a unique registry name.  Failures are logged to stdout.
template <typename T>
void GPUTest<T>::_setLog(void)
{
    try {
        number++;
        // console->info(function_name, message);
        string s = "GPUTest" + to_string(number);
        console = spd::stdout_color_mt(s);
        spd::set_level(spd::level::debug);  // global level, affects all loggers
        console->info("SET: console log.");
    }catch (const spd::spdlog_ex& ex){
        cout << "Log init failed: " << ex.what() << endl;
    }
}
// Binds this tester to GPU `id` and wires up its console logger.
template <typename T>
GPUTest<T>::GPUTest(const int id)
{
    this->_setLog();
    console->info("SET: GPU Device ID [{}].", id);
    hipSetDevice(id);  // NOTE(review): return code ignored
    deviceID = id;
}
// Resets the whole device on destruction.  NOTE(review): this tears down
// the context for every other GPUTest instance on the same GPU as well.
template <typename T>
GPUTest<T>::~GPUTest()
{
    hipDeviceReset();
}
// Repeatedly hipMalloc/hipFree blocks of growing size (increase_factor
// units of sizeof(T) MB per step) until the maxsize GB budget is reached
// or an allocation fails.  On failure it re-allocates the last size that
// fit, sleeps so the operator can inspect the device, then stops.
template <typename T>
void GPUTest<T>::graduallyAllocate(void)
{
    console->info("TEST: Gradual increasment of Memory Allocation on gpu device.");
    int increase_factor = 1 << 5;  // step granularity: 32 units per iteration
    T *fd = nullptr;               // device probe pointer (unused fh/mb_size removed)
    int maxsize = 8;               // allocation budget in GB
    int i = 1;
    // Was `int`: the accumulated byte count overflows 32 bits past ~2 GB.
    unsigned long long total = 0;
    int sleep_seconds = 60;
    console->info("TEST: MAX Allocation Size {} GB on gpu device.", maxsize);
    console->info("SET: Increase factor {} MB on gpu device.", increase_factor);
    console->info("TEST: unit = [{}bytes].", sizeof(T));
    console->info("Increasement scedule is [mb_unit [{}*1024*1024] * Increase factor[{}] * time step]."
                  , sizeof(T), increase_factor);
    while(maxsize * 1024 > sizeof(T) * increase_factor * i){
        console->info("Allocation: {} MB on GPU. ", sizeof(T) * increase_factor * i);
        total += (unsigned long long)sizeof(T) * increase_factor * 1024 * 1024 * i;
        err = hipMalloc((void **)&fd, sizeof(T) * increase_factor * 1024 * 1024 * i);
        if(hipSuccess != err){
            console->info("Error: can't allocate {} MB on GPU.", sizeof(T) * increase_factor * i);
            // Fall back to the last size that fit; guard so a first-step
            // failure does not degenerate into a zero-byte allocation.
            if (i > 1) --i;
            err = hipMalloc((void **)&fd, sizeof(T) * increase_factor * 1024 * 1024 * i);
            console->info("Reallocation: {} MB on GPU. ", sizeof(T) * increase_factor * i);
            console->info("SLEEP: {} seconds.", sleep_seconds);
            // Fix: this message is about the device but was passing
            // sleep_seconds as the placeholder value.
            console->info("CHECK: stress on your [{}] device via another application.", deviceID);
            std::this_thread::sleep_for(std::chrono::seconds(sleep_seconds));
            hipFree(fd);
            break;
        }
        hipFree(fd);
        ++i;
    }
    hipDeviceSynchronize();
}
// Caches the device's total global memory in totalGlobalMem and prints it.
template <typename T>
void GPUTest<T>::getDeviceInfo(void)
{
    hipDeviceProp_t deviceProp;
    hipGetDeviceProperties(&deviceProp, deviceID);
    totalGlobalMem = deviceProp.totalGlobalMem;
    printf(" Total amount of global memory: %.2f GBytes (%llu "
           "bytes)\n", (float)totalGlobalMem / pow(1024.0, 3),
           (unsigned long long)totalGlobalMem);
}
// Verbose property dump in the style of the CUDA deviceQuery sample.
// NOTE(review): always queries device 0 (and calls hipSetDevice(0)),
// ignoring this->deviceID -- confirm whether that is intentional.
template <typename T>
void GPUTest<T>::showDeviceInfo(void)
{
    int dev = 0, driverVersion = 0, runtimeVersion = 0;
    hipSetDevice(dev);
    hipDeviceProp_t deviceProp;
    hipGetDeviceProperties(&deviceProp, dev);
    printf("Device %d: \"%s\"\n", dev, deviceProp.name);
    hipDriverGetVersion(&driverVersion);
    hipRuntimeGetVersion(&runtimeVersion);
    printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n",
           driverVersion / 1000, (driverVersion % 100) / 10,
           runtimeVersion / 1000, (runtimeVersion % 100) / 10);
    printf(" CUDA Capability Major/Minor version number: %d.%d\n",
           deviceProp.major, deviceProp.minor);
    printf(" Total amount of global memory: %.2f GBytes (%llu "
           "bytes)\n", (float)deviceProp.totalGlobalMem / pow(1024.0, 3),
           (unsigned long long)deviceProp.totalGlobalMem);
    printf(" GPU Clock rate: %.0f MHz (%0.2f "
           "GHz)\n", deviceProp.clockRate * 1e-3f,
           deviceProp.clockRate * 1e-6f);
    printf(" Memory Clock rate: %.0f Mhz\n",
           deviceProp.memoryClockRate * 1e-3f);
    printf(" Memory Bus Width: %d-bit\n",
           deviceProp.memoryBusWidth);
    if (deviceProp.l2CacheSize)
    {
        printf(" L2 Cache Size: %d bytes\n",
               deviceProp.l2CacheSize);
    }
    printf(" Max Texture Dimension Size (x,y,z) 1D=(%d), "
           "2D=(%d,%d), 3D=(%d,%d,%d)\n", deviceProp.maxTexture1D,
           deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
           deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1],
           deviceProp.maxTexture3D[2]);
    printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, "
           "2D=(%d,%d) x %d\n", deviceProp.maxTexture1DLayered[0],
           deviceProp.maxTexture1DLayered[1], deviceProp.maxTexture2DLayered[0],
           deviceProp.maxTexture2DLayered[1],
           deviceProp.maxTexture2DLayered[2]);
    printf(" Total amount of constant memory: %lu bytes\n",
           deviceProp.totalConstMem);
    printf(" Total amount of shared memory per block: %lu bytes\n",
           deviceProp.sharedMemPerBlock);
    printf(" Total number of registers available per block: %d\n",
           deviceProp.regsPerBlock);
    printf(" Warp size: %d\n",
           deviceProp.warpSize);
    printf(" Maximum number of threads per multiprocessor: %d\n",
           deviceProp.maxThreadsPerMultiProcessor);
    printf(" Maximum number of threads per block: %d\n",
           deviceProp.maxThreadsPerBlock);
    printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
           deviceProp.maxThreadsDim[0],
           deviceProp.maxThreadsDim[1],
           deviceProp.maxThreadsDim[2]);
    printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
           deviceProp.maxGridSize[0],
           deviceProp.maxGridSize[1],
           deviceProp.maxGridSize[2]);
    printf(" Maximum memory pitch: %lu bytes\n",
           deviceProp.memPitch);
}
///////////////////
// parse options //
///////////////////
///////////////////
// main function //
///////////////////
int main(int argc, char *argv[])
{
int deviceID=1;
// TODO:
// To change argments parser from boost to Taywee/args?
/*
options_description options1("This programm does GPU stress test.");
options1.add_options()
("help,h", "help mesage.")
("deviceid,d", value<int>(), "set DeviceId of GPU.");
//("memory_allocation_size,s", "set Memory allocation size (Mb).");
variables_map values;
try{
store(parse_command_line(argc, argv, options1), values);
notify(values);
if (values.count("help")) {
cout << options1 << endl;
exit(EXIT_FAILURE);
}
if (!values.count("deviceid")) {
// options_description
cout << options1 << endl;
exit(EXIT_FAILURE);
}
if (values.count("deviceid"))
cout << "set DeviceId: " << endl;
//cout << "set DeviceId: " << values["deviceid"].as<string>() << endl;
}catch(std::exception &e){
std::cout << e.what() << std::endl;
exit(EXIT_FAILURE);
}
*/
console->info("CHECK: data type [int].");
GPUTest<int> *g = new GPUTest<int>(deviceID);
g->getDeviceInfo();
g->graduallyAllocate();
console->info("CHECK: data type [float].");
GPUTest<float> *fg = new GPUTest<float>(deviceID);
fg->getDeviceInfo();
fg->graduallyAllocate();
console->info("CHECK: data type [double].");
GPUTest<double> *dg = new GPUTest<double>(deviceID);
dg->getDeviceInfo();
dg->graduallyAllocate();
console->info("CHECK: data type [doube].");
GPUTest<char> *cg = new GPUTest<char>(deviceID);
cg->getDeviceInfo();
cg->graduallyAllocate();
hipDeviceReset();
return 0;
}
| 8f6985a56d31b1a1aacc64a91e058126dc5d8613.cu | #include <iostream>
#include <string>
#include <thread>
#include <chrono>
#include <memory>
#include <cuda_runtime.h>
#include <cudnn.h>
//#include <boost/program_options.hpp>
#include "spdlog/spdlog.h"
using namespace std;
//using namespace boost::program_options;
namespace spd = spdlog;
//////////////////////
// kernel functions //
//////////////////////
///////////
// Class //
///////////
static int number = 0;
// Probes how much device global memory can actually be reserved by
// allocating progressively larger buffers of element type T.
// (CUDA counterpart of the HIP class earlier in this concatenated file.)
template <typename T>
class GPUTest
{
public:
    GPUTest(const int id);   // selects GPU `id` and sets up logging
    void allocate();         // declared but not defined in this file
    void graduallyAllocate();  // main stress loop: grow allocations until failure
    void showDeviceInfo();   // verbose property dump (always device 0)
    void getDeviceInfo();    // caches/prints total global memory of deviceID
    virtual ~GPUTest();      // resets the device
    void _setLog(void);      // creates this instance's spdlog console logger
    std::shared_ptr<spd::logger> console;  // per-instance logger
    cudaError_t err;         // last CUDA status observed by the test
private:
    long long int size;      // unused in this translation unit
    int deviceID;            // device chosen in the constructor
    unsigned long long int totalGlobalMem;  // filled by getDeviceInfo()
    // for device prof;
};
// Creates this instance's stdout color logger; a file-scope counter gives
// each instance a unique registry name.  Failures are logged to stdout.
template <typename T>
void GPUTest<T>::_setLog(void)
{
    try {
        number++;
        // console->info(function_name, message);
        string s = "GPUTest" + to_string(number);
        console = spd::stdout_color_mt(s);
        spd::set_level(spd::level::debug);  // global level, affects all loggers
        console->info("SET: console log.");
    }catch (const spd::spdlog_ex& ex){
        cout << "Log init failed: " << ex.what() << endl;
    }
}
// Binds this tester to GPU `id` and wires up its console logger.
template <typename T>
GPUTest<T>::GPUTest(const int id)
{
    this->_setLog();
    console->info("SET: GPU Device ID [{}].", id);
    cudaSetDevice(id);  // NOTE(review): return code ignored
    deviceID = id;
}
// Resets the whole device on destruction.  NOTE(review): this tears down
// the context for every other GPUTest instance on the same GPU as well.
template <typename T>
GPUTest<T>::~GPUTest()
{
    cudaDeviceReset();
}
// Repeatedly cudaMalloc/cudaFree blocks of growing size (increase_factor
// units of sizeof(T) MB per step) until the maxsize GB budget is reached
// or an allocation fails.  On failure it re-allocates the last size that
// fit, sleeps so the operator can inspect the device, then stops.
template <typename T>
void GPUTest<T>::graduallyAllocate(void)
{
    console->info("TEST: Gradual increasment of Memory Allocation on gpu device.");
    int increase_factor = 1 << 5;  // step granularity: 32 units per iteration
    T *fd = nullptr;               // device probe pointer (unused fh/mb_size removed)
    int maxsize = 8;               // allocation budget in GB
    int i = 1;
    // Was `int`: the accumulated byte count overflows 32 bits past ~2 GB.
    unsigned long long total = 0;
    int sleep_seconds = 60;
    console->info("TEST: MAX Allocation Size {} GB on gpu device.", maxsize);
    console->info("SET: Increase factor {} MB on gpu device.", increase_factor);
    console->info("TEST: unit = [{}bytes].", sizeof(T));
    console->info("Increasement scedule is [mb_unit [{}*1024*1024] * Increase factor[{}] * time step]."
                  , sizeof(T), increase_factor);
    while(maxsize * 1024 > sizeof(T) * increase_factor * i){
        console->info("Allocation: {} MB on GPU. ", sizeof(T) * increase_factor * i);
        total += (unsigned long long)sizeof(T) * increase_factor * 1024 * 1024 * i;
        err = cudaMalloc((void **)&fd, sizeof(T) * increase_factor * 1024 * 1024 * i);
        if(cudaSuccess != err){
            console->info("Error: can't allocate {} MB on GPU.", sizeof(T) * increase_factor * i);
            // Fall back to the last size that fit; guard so a first-step
            // failure does not degenerate into a zero-byte allocation.
            if (i > 1) --i;
            err = cudaMalloc((void **)&fd, sizeof(T) * increase_factor * 1024 * 1024 * i);
            console->info("Reallocation: {} MB on GPU. ", sizeof(T) * increase_factor * i);
            console->info("SLEEP: {} seconds.", sleep_seconds);
            // Fix: this message is about the device but was passing
            // sleep_seconds as the placeholder value.
            console->info("CHECK: stress on your [{}] device via another application.", deviceID);
            std::this_thread::sleep_for(std::chrono::seconds(sleep_seconds));
            cudaFree(fd);
            break;
        }
        cudaFree(fd);
        ++i;
    }
    cudaDeviceSynchronize();
}
// Caches the device's total global memory in totalGlobalMem and prints it.
template <typename T>
void GPUTest<T>::getDeviceInfo(void)
{
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, deviceID);
    totalGlobalMem = deviceProp.totalGlobalMem;
    printf(" Total amount of global memory: %.2f GBytes (%llu "
           "bytes)\n", (float)totalGlobalMem / pow(1024.0, 3),
           (unsigned long long)totalGlobalMem);
}
// Verbose property dump in the style of the CUDA deviceQuery sample.
// NOTE(review): always queries device 0 (and calls cudaSetDevice(0)),
// ignoring this->deviceID -- confirm whether that is intentional.
template <typename T>
void GPUTest<T>::showDeviceInfo(void)
{
    int dev = 0, driverVersion = 0, runtimeVersion = 0;
    cudaSetDevice(dev);
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    printf("Device %d: \"%s\"\n", dev, deviceProp.name);
    cudaDriverGetVersion(&driverVersion);
    cudaRuntimeGetVersion(&runtimeVersion);
    printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n",
           driverVersion / 1000, (driverVersion % 100) / 10,
           runtimeVersion / 1000, (runtimeVersion % 100) / 10);
    printf(" CUDA Capability Major/Minor version number: %d.%d\n",
           deviceProp.major, deviceProp.minor);
    printf(" Total amount of global memory: %.2f GBytes (%llu "
           "bytes)\n", (float)deviceProp.totalGlobalMem / pow(1024.0, 3),
           (unsigned long long)deviceProp.totalGlobalMem);
    printf(" GPU Clock rate: %.0f MHz (%0.2f "
           "GHz)\n", deviceProp.clockRate * 1e-3f,
           deviceProp.clockRate * 1e-6f);
    printf(" Memory Clock rate: %.0f Mhz\n",
           deviceProp.memoryClockRate * 1e-3f);
    printf(" Memory Bus Width: %d-bit\n",
           deviceProp.memoryBusWidth);
    if (deviceProp.l2CacheSize)
    {
        printf(" L2 Cache Size: %d bytes\n",
               deviceProp.l2CacheSize);
    }
    printf(" Max Texture Dimension Size (x,y,z) 1D=(%d), "
           "2D=(%d,%d), 3D=(%d,%d,%d)\n", deviceProp.maxTexture1D,
           deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
           deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1],
           deviceProp.maxTexture3D[2]);
    printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, "
           "2D=(%d,%d) x %d\n", deviceProp.maxTexture1DLayered[0],
           deviceProp.maxTexture1DLayered[1], deviceProp.maxTexture2DLayered[0],
           deviceProp.maxTexture2DLayered[1],
           deviceProp.maxTexture2DLayered[2]);
    printf(" Total amount of constant memory: %lu bytes\n",
           deviceProp.totalConstMem);
    printf(" Total amount of shared memory per block: %lu bytes\n",
           deviceProp.sharedMemPerBlock);
    printf(" Total number of registers available per block: %d\n",
           deviceProp.regsPerBlock);
    printf(" Warp size: %d\n",
           deviceProp.warpSize);
    printf(" Maximum number of threads per multiprocessor: %d\n",
           deviceProp.maxThreadsPerMultiProcessor);
    printf(" Maximum number of threads per block: %d\n",
           deviceProp.maxThreadsPerBlock);
    printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
           deviceProp.maxThreadsDim[0],
           deviceProp.maxThreadsDim[1],
           deviceProp.maxThreadsDim[2]);
    printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
           deviceProp.maxGridSize[0],
           deviceProp.maxGridSize[1],
           deviceProp.maxGridSize[2]);
    printf(" Maximum memory pitch: %lu bytes\n",
           deviceProp.memPitch);
}
///////////////////
// parse options //
///////////////////
///////////////////
// main function //
///////////////////
int main(int argc, char *argv[])
{
int deviceID=1;
// TODO:
// To change argments parser from boost to Taywee/args?
/*
options_description options1("This programm does GPU stress test.");
options1.add_options()
("help,h", "help mesage.")
("deviceid,d", value<int>(), "set DeviceId of GPU.");
//("memory_allocation_size,s", "set Memory allocation size (Mb).");
variables_map values;
try{
store(parse_command_line(argc, argv, options1), values);
notify(values);
if (values.count("help")) {
cout << options1 << endl;
exit(EXIT_FAILURE);
}
if (!values.count("deviceid")) {
// options_description は標準出力に投げることが出来る
cout << options1 << endl;
exit(EXIT_FAILURE);
}
if (values.count("deviceid"))
cout << "set DeviceId: " << endl;
//cout << "set DeviceId: " << values["deviceid"].as<string>() << endl;
}catch(std::exception &e){
std::cout << e.what() << std::endl;
exit(EXIT_FAILURE);
}
*/
console->info("CHECK: data type [int].");
GPUTest<int> *g = new GPUTest<int>(deviceID);
g->getDeviceInfo();
g->graduallyAllocate();
console->info("CHECK: data type [float].");
GPUTest<float> *fg = new GPUTest<float>(deviceID);
fg->getDeviceInfo();
fg->graduallyAllocate();
console->info("CHECK: data type [double].");
GPUTest<double> *dg = new GPUTest<double>(deviceID);
dg->getDeviceInfo();
dg->graduallyAllocate();
console->info("CHECK: data type [doube].");
GPUTest<char> *cg = new GPUTest<char>(deviceID);
cg->getDeviceInfo();
cg->graduallyAllocate();
cudaDeviceReset();
return 0;
}
#include <iostream>
#include <string>
#include <thread>
#include <chrono>
#include <memory>
#include <cuda_runtime.h>
#include <cudnn.h>
//#include <boost/program_options.hpp>
#include "spdlog/spdlog.h"
using namespace std;
//using namespace boost::program_options;
namespace spd = spdlog;
//////////////////////
// kernel functions //
//////////////////////
///////////
// Class //
///////////
static int number = 0;
template <typename T>
class GPUTest
{
public:
GPUTest(const int id);
void allocate();
void graduallyAllocate();
void showDeviceInfo();
void getDeviceInfo();
virtual ~GPUTest();
void _setLog(void);
std::shared_ptr<spd::logger> console;
cudaError_t err;
private:
long long int size;
int deviceID;
unsigned long long int totalGlobalMem;
// for device prof;
};
template <typename T>
void GPUTest<T>::_setLog(void)
{
try {
number++;
// console->info(function_name, message);
string s = "GPUTest" + to_string(number);
console = spd::stdout_color_mt(s);
spd::set_level(spd::level::debug);
console->info("SET: console log.");
}catch (const spd::spdlog_ex& ex){
cout << "Log init failed: " << ex.what() << endl;
}
}
template <typename T>
GPUTest<T>::GPUTest(const int id)
{
this->_setLog();
console->info("SET: GPU Device ID [{}].", id);
cudaSetDevice(id);
deviceID = id;
}
template <typename T>
GPUTest<T>::~GPUTest()
{
cudaDeviceReset();
}
template <typename T>
void GPUTest<T>::graduallyAllocate(void)
{
console->info("TEST: Gradual increasment of Memory Allocation on gpu device.");
size_t mb_size = 1 << 10 << 10;
int increase_factor = 1 << 5;
T *fd, *fh;
int maxsize = 8;
int i = 1;
int total = 0;
int sleep_seconds = 60;
console->info("TEST: MAX Allocation Size {} GB on gpu device.", maxsize);
console->info("SET: Increase factor {} MB on gpu device.", increase_factor);
console->info("TEST: unit = [{}bytes].", sizeof(T));
console->info("Increasement scedule is [mb_unit [{}*1024*1024] * Increase factor[{}] * time step]."
, sizeof(T), increase_factor);
while(maxsize * 1024 > sizeof(T) * increase_factor * i){
console->info("Allocation: {} MB on GPU. ", sizeof(T) * increase_factor * i);
total += sizeof(T) * increase_factor * 1024 * 1024 * i;
err = cudaMalloc((void **)&fd, sizeof(T) * increase_factor * 1024 * 1024 * i);
if(cudaSuccess != err){
console->info("Error: can't allocate {} MB on GPU.", sizeof(T) * increase_factor * i);
err = cudaMalloc((void **)&fd, sizeof(T) * increase_factor * 1024 * 1024 * --i);
console->info("Reallocation: {} MB on GPU. ", sizeof(T) * increase_factor * i);
console->info("SLEEP: {} seconds.", sleep_seconds);
console->info("CHECK: stress on your [{}] device via another application.", sleep_seconds);
std::this_thread::sleep_for(std::chrono::seconds(sleep_seconds));
cudaFree(fd);
break;
}
cudaFree(fd);
++i;
}
cudaDeviceSynchronize();
}
template <typename T>
void GPUTest<T>::getDeviceInfo(void)
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, deviceID);
totalGlobalMem = deviceProp.totalGlobalMem;
printf(" Total amount of global memory: %.2f GBytes (%llu "
"bytes)\n", (float)totalGlobalMem / pow(1024.0, 3),
(unsigned long long)totalGlobalMem);
}
template <typename T>
void GPUTest<T>::showDeviceInfo(void)
{
int dev = 0, driverVersion = 0, runtimeVersion = 0;
cudaSetDevice(dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf("Device %d: \"%s\"\n", dev, deviceProp.name);
cudaDriverGetVersion(&driverVersion);
cudaRuntimeGetVersion(&runtimeVersion);
printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n",
driverVersion / 1000, (driverVersion % 100) / 10,
runtimeVersion / 1000, (runtimeVersion % 100) / 10);
printf(" CUDA Capability Major/Minor version number: %d.%d\n",
deviceProp.major, deviceProp.minor);
printf(" Total amount of global memory: %.2f GBytes (%llu "
"bytes)\n", (float)deviceProp.totalGlobalMem / pow(1024.0, 3),
(unsigned long long)deviceProp.totalGlobalMem);
printf(" GPU Clock rate: %.0f MHz (%0.2f "
"GHz)\n", deviceProp.clockRate * 1e-3f,
deviceProp.clockRate * 1e-6f);
printf(" Memory Clock rate: %.0f Mhz\n",
deviceProp.memoryClockRate * 1e-3f);
printf(" Memory Bus Width: %d-bit\n",
deviceProp.memoryBusWidth);
if (deviceProp.l2CacheSize)
{
printf(" L2 Cache Size: %d bytes\n",
deviceProp.l2CacheSize);
}
printf(" Max Texture Dimension Size (x,y,z) 1D=(%d), "
"2D=(%d,%d), 3D=(%d,%d,%d)\n", deviceProp.maxTexture1D,
deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1],
deviceProp.maxTexture3D[2]);
printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, "
"2D=(%d,%d) x %d\n", deviceProp.maxTexture1DLayered[0],
deviceProp.maxTexture1DLayered[1], deviceProp.maxTexture2DLayered[0],
deviceProp.maxTexture2DLayered[1],
deviceProp.maxTexture2DLayered[2]);
printf(" Total amount of constant memory: %lu bytes\n",
deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %lu bytes\n",
deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n",
deviceProp.regsPerBlock);
printf(" Warp size: %d\n",
deviceProp.warpSize);
printf(" Maximum number of threads per multiprocessor: %d\n",
deviceProp.maxThreadsPerMultiProcessor);
printf(" Maximum number of threads per block: %d\n",
deviceProp.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %lu bytes\n",
deviceProp.memPitch);
}
///////////////////
// parse options //
///////////////////
///////////////////
// main function //
///////////////////
int main(int argc, char *argv[])
{
int deviceID=1;
// TODO:
// To change argments parser from boost to Taywee/args?
/*
options_description options1("This programm does GPU stress test.");
options1.add_options()
("help,h", "help mesage.")
("deviceid,d", value<int>(), "set DeviceId of GPU.");
//("memory_allocation_size,s", "set Memory allocation size (Mb).");
variables_map values;
try{
store(parse_command_line(argc, argv, options1), values);
notify(values);
if (values.count("help")) {
cout << options1 << endl;
exit(EXIT_FAILURE);
}
if (!values.count("deviceid")) {
// options_description は標準出力に投げることが出来る
cout << options1 << endl;
exit(EXIT_FAILURE);
}
if (values.count("deviceid"))
cout << "set DeviceId: " << endl;
//cout << "set DeviceId: " << values["deviceid"].as<string>() << endl;
}catch(std::exception &e){
std::cout << e.what() << std::endl;
exit(EXIT_FAILURE);
}
*/
console->info("CHECK: data type [int].");
GPUTest<int> *g = new GPUTest<int>(deviceID);
g->getDeviceInfo();
g->graduallyAllocate();
console->info("CHECK: data type [float].");
GPUTest<float> *fg = new GPUTest<float>(deviceID);
fg->getDeviceInfo();
fg->graduallyAllocate();
console->info("CHECK: data type [double].");
GPUTest<double> *dg = new GPUTest<double>(deviceID);
dg->getDeviceInfo();
dg->graduallyAllocate();
console->info("CHECK: data type [doube].");
GPUTest<char> *cg = new GPUTest<char>(deviceID);
cg->getDeviceInfo();
cg->graduallyAllocate();
cudaDeviceReset();
return 0;
}
|
0c82b09fd41d87ff70a49759d3ab5a053fe3d199.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2017-2018 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thrust/transform.h>
#include <algorithm>
#include <exception>
#include "query/algorithm.hpp"
#include "query/iterator.hpp"
CGoCallResHandle InitIndexVector(uint32_t *indexVector, uint32_t start,
int indexVectorLength, void *cudaStream, int device) {
CGoCallResHandle resHandle = {nullptr, nullptr};
try {
#ifdef RUN_ON_DEVICE
hipSetDevice(device);
thrust::sequence(
thrust::hip::par.on(reinterpret_cast<hipStream_t>(cudaStream)),
indexVector, indexVector + indexVectorLength, start);
#else
thrust::sequence(thrust::host,
indexVector,
indexVector + indexVectorLength,
start);
#endif
CheckCUDAError("InitIndexVector");
}
catch (std::exception &e) {
std::cerr << "Exception happend when doing InitIndexVector:" << e.what()
<< std::endl;
resHandle.pStrErr = strdup(e.what());
}
return resHandle;
}
| 0c82b09fd41d87ff70a49759d3ab5a053fe3d199.cu | // Copyright (c) 2017-2018 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thrust/transform.h>
#include <algorithm>
#include <exception>
#include "query/algorithm.hpp"
#include "query/iterator.hpp"
CGoCallResHandle InitIndexVector(uint32_t *indexVector, uint32_t start,
int indexVectorLength, void *cudaStream, int device) {
CGoCallResHandle resHandle = {nullptr, nullptr};
try {
#ifdef RUN_ON_DEVICE
cudaSetDevice(device);
thrust::sequence(
thrust::cuda::par.on(reinterpret_cast<cudaStream_t>(cudaStream)),
indexVector, indexVector + indexVectorLength, start);
#else
thrust::sequence(thrust::host,
indexVector,
indexVector + indexVectorLength,
start);
#endif
CheckCUDAError("InitIndexVector");
}
catch (std::exception &e) {
std::cerr << "Exception happend when doing InitIndexVector:" << e.what()
<< std::endl;
resHandle.pStrErr = strdup(e.what());
}
return resHandle;
}
|
3e7d93b413383c130164c81bb013e154b01b3968.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __HIPCC__
#define __HIPCC__
#endif
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// This example demonstrates a parallel sum reduction
// using two kernel launches
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <numeric>
#include <iostream>
typedef unsigned int uint;
template <typename T>
__global__ void reduce1(T *g_idata, T *g_odata)
{
extern __shared__ T sdata[];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
__syncthreads();
// do reduction in shared mem
for (unsigned int s = 1; s < blockDim.x; s *= 2)
{
if (tid % (2 * s) == 0) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
template <typename T>
__global__ void reduce2(T *g_idata, T *g_odata)
{
extern __shared__ T sdata[];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
__syncthreads();
// do reduction in shared mem
for (unsigned int s = 1; s < blockDim.x; s *= 2)
{
unsigned int index = 2 * s * tid;
if (index < blockDim.x) {
sdata[index] += sdata[index + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
template <typename T>
__global__ void reduce3(T *g_idata, T *g_odata)
{
extern __shared__ T sdata[];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
__syncthreads();
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s>0; s >>= 1)
{
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
template <typename T>
void test( void reduce(T *, T *), int elements, int threadsperblock)
{
hipError_t error;
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = hipEventRecord(start, NULL);
// create array of 256k elements
const uint num_elements = (1 << elements);
// generate random input on the host
std::vector<T> h_input(num_elements);
for (unsigned int i = 0; i < h_input.size(); ++i)
{
h_input[i] = (T)1;
}
const T host_result = std::accumulate(h_input.begin(), h_input.end(), (T)0);
//std::cerr << "Host sum: " << host_result << std::endl;
// move input to device memory
T *d_input = (T*)0;
hipMalloc((void**)&d_input, sizeof(T)* num_elements);
hipMemcpy(d_input, &h_input[0], sizeof(T)* num_elements, hipMemcpyHostToDevice);
const size_t block_size = 1 << threadsperblock;
const size_t num_blocks = (num_elements / block_size);
// allocate space to hold one partial sum per block, plus one additional
// slot to store the total sum
T *d_partial_sums_and_total = 0;
hipMalloc((void**)&d_partial_sums_and_total, sizeof(T)* (num_blocks + 1));
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Record the start event
error = hipEventRecord(start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
reduce << <num_blocks, block_size, block_size * sizeof(T) >> >(d_input, d_partial_sums_and_total);
reduce << <1, num_blocks, num_blocks * sizeof(T) >> >(d_partial_sums_and_total, d_partial_sums_and_total + num_blocks);
// Record the stop event
error = hipEventRecord(stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
int nIter = 1;
float msecPerVector = msecTotal / nIter;
double gigaFlops = ((num_elements - 1) * 1.0e-9f) / (msecPerVector / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec\n",
gigaFlops,
msecPerVector);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// copy the result back to the host
T device_result = 0;
hipMemcpy(&device_result, d_partial_sums_and_total + num_blocks, sizeof(T), hipMemcpyDeviceToHost);
//std::cout << "Device sum: " << device_result << std::endl;
// deallocate device memory
hipFree(d_input);
hipFree(d_partial_sums_and_total);
std::cout << std::endl;
}
int main(int argc, char **argv)
{
hipError_t error = hipSetDevice(0);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to set CUDA device (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
if (argc < 3)
{
fprintf(stderr, "usage: %s number_of_elements threads_per_block\n");
hipDeviceReset();
return 1;
}
int elements = atoi(argv[1]);
int tpb = atoi(argv[2]);
test<double>(reduce1<double>, elements, tpb);
test<double>(reduce2<double>, elements, tpb);
test<double>(reduce3<double>, elements, tpb);
hipDeviceReset();
return 0;
} | 3e7d93b413383c130164c81bb013e154b01b3968.cu | #ifndef __CUDACC__
#define __CUDACC__
#endif
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// This example demonstrates a parallel sum reduction
// using two kernel launches
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <numeric>
#include <iostream>
typedef unsigned int uint;
template <typename T>
__global__ void reduce1(T *g_idata, T *g_odata)
{
extern __shared__ T sdata[];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
__syncthreads();
// do reduction in shared mem
for (unsigned int s = 1; s < blockDim.x; s *= 2)
{
if (tid % (2 * s) == 0) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
template <typename T>
__global__ void reduce2(T *g_idata, T *g_odata)
{
extern __shared__ T sdata[];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
__syncthreads();
// do reduction in shared mem
for (unsigned int s = 1; s < blockDim.x; s *= 2)
{
unsigned int index = 2 * s * tid;
if (index < blockDim.x) {
sdata[index] += sdata[index + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
template <typename T>
__global__ void reduce3(T *g_idata, T *g_odata)
{
extern __shared__ T sdata[];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
__syncthreads();
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s>0; s >>= 1)
{
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
template <typename T>
void test( void reduce(T *, T *), int elements, int threadsperblock)
{
cudaError_t error;
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = cudaEventRecord(start, NULL);
// create array of 256k elements
const uint num_elements = (1 << elements);
// generate random input on the host
std::vector<T> h_input(num_elements);
for (unsigned int i = 0; i < h_input.size(); ++i)
{
h_input[i] = (T)1;
}
const T host_result = std::accumulate(h_input.begin(), h_input.end(), (T)0);
//std::cerr << "Host sum: " << host_result << std::endl;
// move input to device memory
T *d_input = (T*)0;
cudaMalloc((void**)&d_input, sizeof(T)* num_elements);
cudaMemcpy(d_input, &h_input[0], sizeof(T)* num_elements, cudaMemcpyHostToDevice);
const size_t block_size = 1 << threadsperblock;
const size_t num_blocks = (num_elements / block_size);
// allocate space to hold one partial sum per block, plus one additional
// slot to store the total sum
T *d_partial_sums_and_total = 0;
cudaMalloc((void**)&d_partial_sums_and_total, sizeof(T)* (num_blocks + 1));
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Record the start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
reduce << <num_blocks, block_size, block_size * sizeof(T) >> >(d_input, d_partial_sums_and_total);
reduce << <1, num_blocks, num_blocks * sizeof(T) >> >(d_partial_sums_and_total, d_partial_sums_and_total + num_blocks);
// Record the stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
int nIter = 1;
float msecPerVector = msecTotal / nIter;
double gigaFlops = ((num_elements - 1) * 1.0e-9f) / (msecPerVector / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec\n",
gigaFlops,
msecPerVector);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// copy the result back to the host
T device_result = 0;
cudaMemcpy(&device_result, d_partial_sums_and_total + num_blocks, sizeof(T), cudaMemcpyDeviceToHost);
//std::cout << "Device sum: " << device_result << std::endl;
// deallocate device memory
cudaFree(d_input);
cudaFree(d_partial_sums_and_total);
std::cout << std::endl;
}
int main(int argc, char **argv)
{
cudaError_t error = cudaSetDevice(0);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to set CUDA device (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
if (argc < 3)
{
fprintf(stderr, "usage: %s number_of_elements threads_per_block\n");
cudaDeviceReset();
return 1;
}
int elements = atoi(argv[1]);
int tpb = atoi(argv[2]);
test<double>(reduce1<double>, elements, tpb);
test<double>(reduce2<double>, elements, tpb);
test<double>(reduce3<double>, elements, tpb);
cudaDeviceReset();
return 0;
} |
f17c0a70e112c190a0345a06cc176a3864c01f20.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/PinnedMemoryAllocator.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/hip/MiscUtils.h>
#include <ATen/native/Resize.h>
#include <ATen/native/BatchLinearAlgebra.h>
#include <ATen/native/hip/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THH/THH.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma_types.h>
#include <magma_v2.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template <class scalar_t>
void magmaTriangularSolve(
magma_uplo_t uplo,
magma_trans_t trans,
magma_diag_t diag,
magma_int_t m,
magma_int_t n,
scalar_t* dA,
magma_int_t ldda,
scalar_t* dB,
magma_int_t lddb,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSymeig(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaEig(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, scalar_t *A, magma_int_t lda,
scalar_t *w, scalar_t *VL, magma_int_t ldvl,
scalar_t *VR, magma_int_t ldvr, scalar_t *work, magma_int_t lwork,
value_t *rwork,
magma_int_t *info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesv_gpu(n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesv_gpu(n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
// magmaSolveBatched: batched LU solve (A_i * X_i = B_i) via MAGMA's
// ?gesv_batched, one specialization per scalar type. Unlike the non-batched
// wrappers these take an explicit MAGMAQueue instead of MagmaStreamSyncGuard,
// and per-matrix statuses are written to dinfo_array. Complex pointer arrays
// are reinterpret_cast to the MAGMA complex types (assumed layout-compatible).
template<>
void magmaSolveBatched<double>(
    magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<float>(
    magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<double>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  magma_zgesv_batched(n, nrhs,
    reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array,
    reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  magma_cgesv_batched(n, nrhs,
    reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
    reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaLu: in-place LU factorization with partial pivoting via MAGMA's
// ?getrf_gpu (pivots in ipiv, status in *info), one specialization per
// scalar type. Complex data is reinterpret_cast to MAGMA complex types.
template<>
void magmaLu<double>(
    magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
    magma_int_t* ipiv, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<float>(
    magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
    magma_int_t* ipiv, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<double>>(
    magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
    magma_int_t* ipiv, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<float>>(
    magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
    magma_int_t* ipiv, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaLuBatched: batched in-place LU with pivoting via MAGMA's
// ?getrf_batched on the supplied MAGMAQueue; per-matrix statuses land in
// info_array. Complex pointer arrays are reinterpret_cast to MAGMA types.
template<>
void magmaLuBatched<double>(
    magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<float>(
    magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<double>>(
    magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<float>>(
    magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaLuNoPiv: in-place LU factorization WITHOUT pivoting via MAGMA's
// ?getrf_nopiv_gpu (no ipiv argument; status in *info).
template<>
void magmaLuNoPiv<double>(
    magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<float>(
    magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<double>>(
    magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<float>>(
    magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaLuNoPivBatched: batched LU without pivoting via MAGMA's
// ?getrf_nopiv_batched on the supplied queue; statuses in info_array.
template<>
void magmaLuNoPivBatched<double>(
    magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
    magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<double>>(
    magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<float>>(
    magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaGetriOptimalBlocksize: returns MAGMA's recommended blocking factor
// (nb) for the ?getri inversion routines, per scalar type.
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
  return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
  return magma_get_sgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>(
    magma_int_t n) {
  return magma_get_zgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>(
    magma_int_t n) {
  return magma_get_cgetri_nb(n);
}
// magmaGetri: inverts an LU-factored matrix in place via MAGMA's
// ?getri_gpu, using the caller-provided device workspace (dwork/lwork) and
// the pivots from the preceding factorization. Status is written to *info.
// Complex arguments are reinterpret_cast to MAGMA's complex types.
template<>
void magmaGetri<double>(
    magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
    magma_int_t lwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetri<float>(
    magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
    magma_int_t lwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetri<c10::complex<double>>(
    magma_int_t n,
    c10::complex<double>* dA,
    magma_int_t ldda,
    magma_int_t* ipiv,
    c10::complex<double>* dwork,
    magma_int_t lwork,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  // Name the casts once instead of inlining them into the call.
  auto* a = reinterpret_cast<magmaDoubleComplex*>(dA);
  auto* work = reinterpret_cast<magmaDoubleComplex*>(dwork);
  magma_zgetri_gpu(n, a, ldda, ipiv, work, lwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetri<c10::complex<float>>(
    magma_int_t n,
    c10::complex<float>* dA,
    magma_int_t ldda,
    magma_int_t* ipiv,
    c10::complex<float>* dwork,
    magma_int_t lwork,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  auto* a = reinterpret_cast<magmaFloatComplex*>(dA);
  auto* work = reinterpret_cast<magmaFloatComplex*>(dwork);
  magma_cgetri_gpu(n, a, ldda, ipiv, work, lwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaGetriBatched: batched out-of-place matrix inversion via MAGMA's
// ?getri_outofplace_batched -- inputs stay in dA_array, inverses are written
// to dinvA_array; per-matrix statuses go to info_array. Runs on the supplied
// MAGMAQueue. Complex pointer arrays are reinterpret_cast to MAGMA types.
template<>
void magmaGetriBatched<double>(
    magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<float>(
    magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<double>>(
    magma_int_t n,
    c10::complex<double>** dA_array,
    magma_int_t ldda,
    magma_int_t** ipiv_array,
    c10::complex<double>** dinvA_array,
    magma_int_t lddia,
    magma_int_t* info_array,
    magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  auto** a = reinterpret_cast<magmaDoubleComplex**>(dA_array);
  auto** inv = reinterpret_cast<magmaDoubleComplex**>(dinvA_array);
  magma_zgetri_outofplace_batched(
      n, a, ldda, ipiv_array, inv, lddia, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<float>>(
    magma_int_t n,
    c10::complex<float>** dA_array,
    magma_int_t ldda,
    magma_int_t** ipiv_array,
    c10::complex<float>** dinvA_array,
    magma_int_t lddia,
    magma_int_t* info_array,
    magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  auto** a = reinterpret_cast<magmaFloatComplex**>(dA_array);
  auto** inv = reinterpret_cast<magmaFloatComplex**>(dinvA_array);
  magma_cgetri_outofplace_batched(
      n, a, ldda, ipiv_array, inv, lddia, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaCholeskySolve: solves A*X = B given a Cholesky-factored A via MAGMA's
// ?potrs_gpu (uplo selects which triangle holds the factor; B is overwritten
// with X, status in *info).
template<>
void magmaCholeskySolve<double>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
    double* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<float>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
    float* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
    c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zpotrs_gpu(uplo, n, nrhs,
    reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
    reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
    c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cpotrs_gpu(uplo, n, nrhs,
    reinterpret_cast<magmaFloatComplex*>(dA), ldda,
    reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaCholeskySolveBatched: batched Cholesky solve via MAGMA's
// ?potrs_batched. Note: unlike the other batched wrappers here, the routine's
// RETURN VALUE (a single aggregated status) is stored into the by-reference
// `info` -- there is no per-matrix info array for potrs_batched.
template<>
void magmaCholeskySolveBatched<double>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
    double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
    float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
    c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_zpotrs_batched(uplo, n, nrhs,
    reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
    reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
    c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_cpotrs_batched(uplo, n, nrhs,
    reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
    reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaCholesky: in-place Cholesky factorization via MAGMA's ?potrf_gpu
// (uplo selects the triangle to factor; status in *info).
template<>
void magmaCholesky<double>(
    magma_uplo_t uplo, magma_int_t n, double* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dpotrf_gpu(uplo, n, dA, ldda, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<float>(
    magma_uplo_t uplo, magma_int_t n, float* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_spotrf_gpu(uplo, n, dA, ldda, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaCholeskyBatched: batched in-place Cholesky factorization via MAGMA's
// ?potrf_batched on the supplied queue; per-matrix statuses in info_array.
template<>
void magmaCholeskyBatched<double>(
    magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
    magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaTriangularSolve: triangular solve op(A)*X = alpha*B via MAGMA's
// ?trsm with side fixed to MagmaLeft and alpha fixed to 1; B is overwritten
// with the solution X. Runs on the supplied MAGMAQueue. Complex arguments
// are reinterpret_cast to MAGMA's complex types.
template <>
void magmaTriangularSolve<double>(
    magma_uplo_t uplo,
    magma_trans_t trans,
    magma_diag_t diag,
    magma_int_t m,
    magma_int_t n,
    double* dA,
    magma_int_t ldda,
    double* dB,
    magma_int_t lddb,
    const MAGMAQueue& magma_queue) {
  magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1,
              dA, ldda, dB, lddb, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaTriangularSolve<float>(
    magma_uplo_t uplo,
    magma_trans_t trans,
    magma_diag_t diag,
    magma_int_t m,
    magma_int_t n,
    float* dA,
    magma_int_t ldda,
    float* dB,
    magma_int_t lddb,
    const MAGMAQueue& magma_queue) {
  magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1,
              dA, ldda, dB, lddb, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaTriangularSolve<c10::complex<double>>(
    magma_uplo_t uplo,
    magma_trans_t trans,
    magma_diag_t diag,
    magma_int_t m,
    magma_int_t n,
    c10::complex<double>* dA,
    magma_int_t ldda,
    c10::complex<double>* dB,
    magma_int_t lddb,
    const MAGMAQueue& magma_queue) {
  // alpha = 1 + 0i
  const magmaDoubleComplex one{1, 0};
  magma_ztrsm(MagmaLeft, uplo, trans, diag, m, n, one,
              reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
              reinterpret_cast<magmaDoubleComplex*>(dB), lddb,
              magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaTriangularSolve<c10::complex<float>>(
    magma_uplo_t uplo,
    magma_trans_t trans,
    magma_diag_t diag,
    magma_int_t m,
    magma_int_t n,
    c10::complex<float>* dA,
    magma_int_t ldda,
    c10::complex<float>* dB,
    magma_int_t lddb,
    const MAGMAQueue& magma_queue) {
  const magmaFloatComplex one{1, 0};
  magma_ctrsm(MagmaLeft, uplo, trans, diag, m, n, one,
              reinterpret_cast<magmaFloatComplex*>(dA), ldda,
              reinterpret_cast<magmaFloatComplex*>(dB), lddb,
              magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaTriangularSolveBatched: batched left-sided triangular solve via
// magmablas_?trsm_batched with alpha fixed to 1; each B_i is overwritten
// with its solution X_i. Runs on the supplied MAGMAQueue.
template<>
void magmaTriangularSolveBatched<double>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<double>>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  // alpha = 1 + 0i
  magmaDoubleComplex alpha({1, 0});
  magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
    reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
    reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<float>>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  // alpha = 1 + 0i
  magmaFloatComplex alpha({1, 0});
  magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
    reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
    reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaGeqrfOptimalBlocksize: returns MAGMA's recommended blocking factor
// (nb) for the ?geqrf QR-factorization routines, per scalar type.
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
  return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
  return magma_get_sgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
    magma_int_t m,
    magma_int_t n) {
  return magma_get_zgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
    magma_int_t m,
    magma_int_t n) {
  return magma_get_cgeqrf_nb(m, n);
}
// magmaGeqrf: in-place QR factorization via MAGMA. When is_v2 is false the
// "v1" ?geqrf_gpu is used (it also fills the dT workspace, needed later by
// or/ungqr); when is_v2 is true the ?geqrf2_gpu variant is called and dT is
// ignored. Scalar reflector coefficients go to tau; status to *info.
template<>
void magmaGeqrf<double>(
    magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
    double* tau, double* dT, magma_int_t* info, bool is_v2) {
  MagmaStreamSyncGuard guard;
  if (is_v2) {
    magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
  } else {
    magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
  }
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGeqrf<float>(
    magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
    float* tau, float* dT, magma_int_t* info, bool is_v2) {
  MagmaStreamSyncGuard guard;
  if (is_v2) {
    magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
  } else {
    magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
  }
  AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGeqrf<c10::complex<double>>(
    magma_int_t m,
    magma_int_t n,
    c10::complex<double>* dA,
    magma_int_t ldda,
    c10::complex<double>* tau,
    c10::complex<double>* dT,
    magma_int_t* info,
    bool is_v2) {
  MagmaStreamSyncGuard guard;
  auto* a = reinterpret_cast<magmaDoubleComplex*>(dA);
  auto* t = reinterpret_cast<magmaDoubleComplex*>(tau);
  if (is_v2) {
    magma_zgeqrf2_gpu(m, n, a, ldda, t, info);
  } else {
    magma_zgeqrf_gpu(m, n, a, ldda, t,
                     reinterpret_cast<magmaDoubleComplex*>(dT), info);
  }
  AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGeqrf<c10::complex<float>>(
    magma_int_t m,
    magma_int_t n,
    c10::complex<float>* dA,
    magma_int_t ldda,
    c10::complex<float>* tau,
    c10::complex<float>* dT,
    magma_int_t* info,
    bool is_v2) {
  MagmaStreamSyncGuard guard;
  auto* a = reinterpret_cast<magmaFloatComplex*>(dA);
  auto* t = reinterpret_cast<magmaFloatComplex*>(tau);
  if (is_v2) {
    magma_cgeqrf2_gpu(m, n, a, ldda, t, info);
  } else {
    magma_cgeqrf_gpu(m, n, a, ldda, t,
                     reinterpret_cast<magmaFloatComplex*>(dT), info);
  }
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaOrgqr: builds the explicit orthogonal/unitary Q from a geqrf
// factorization via MAGMA's or/ungqr_gpu (real types use orgqr, complex use
// ungqr). dT/nb are the workspace and blocking factor produced by the
// matching geqrf call; status is written to *info.
template<>
void magmaOrgqr<double>(
    magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
    double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<float>(
    magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
    float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaOrgqr<c10::complex<double>>(
    magma_int_t m,
    magma_int_t n,
    magma_int_t k,
    c10::complex<double>* dA,
    magma_int_t ldda,
    c10::complex<double>* tau,
    c10::complex<double>* dT,
    magma_int_t nb,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  auto* a = reinterpret_cast<magmaDoubleComplex*>(dA);
  auto* t = reinterpret_cast<magmaDoubleComplex*>(tau);
  auto* work = reinterpret_cast<magmaDoubleComplex*>(dT);
  magma_zungqr_gpu(m, n, k, a, ldda, t, work, nb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaOrgqr<c10::complex<float>>(
    magma_int_t m,
    magma_int_t n,
    magma_int_t k,
    c10::complex<float>* dA,
    magma_int_t ldda,
    c10::complex<float>* tau,
    c10::complex<float>* dT,
    magma_int_t nb,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  auto* a = reinterpret_cast<magmaFloatComplex*>(dA);
  auto* t = reinterpret_cast<magmaFloatComplex*>(tau);
  auto* work = reinterpret_cast<magmaFloatComplex*>(dT);
  magma_cungqr_gpu(m, n, k, a, ldda, t, work, nb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaSymeig: symmetric/Hermitian eigendecomposition via MAGMA's
// ?syevd_gpu (real) / ?heevd_gpu (complex). Eigenvalues go to w; if jobz
// requests vectors, dA is overwritten with them. wA/ldwa is a host-side
// workspace copy of A. The real specializations take rwork/lrwork only for
// signature uniformity with the complex ones and ignore them.
template<>
void magmaSymeig<double>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
    double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  (void)rwork;  // unused
  (void)lrwork;  // unused
  MagmaStreamSyncGuard guard;
  magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<float>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
    float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  (void)rwork;  // unused
  (void)lrwork;  // unused
  MagmaStreamSyncGuard guard;
  magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<c10::complex<double>, double>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
    double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zheevd_gpu(
      jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
      ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<c10::complex<float>, float>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
    float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cheevd_gpu(
      jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
      ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaEig: general (non-symmetric) eigendecomposition via MAGMA's ?geev.
// Real specializations split the caller's w buffer into wr (first n entries)
// and wi (next n entries) because [sd]geev returns real and imaginary parts
// separately -- so w must have room for 2*n values. Complex specializations
// pass w directly and forward the real-valued rwork that ?geev requires.
template<>
void magmaEig<double>(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
    double *A, magma_int_t lda,
    double *w,
    double *VL, magma_int_t ldvl,
    double *VR, magma_int_t ldvr,
    double *work, magma_int_t lwork,
    double *rwork,
    magma_int_t *info) {
  MagmaStreamSyncGuard guard;
  // magma [sd]geev wants to separate output arrays: wr and wi for the real
  // and imaginary parts
  double *wr = w;
  double *wi = w + n;
  (void)rwork; // unused
  magma_dgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<float>(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
    float *A, magma_int_t lda,
    float *w,
    float *VL, magma_int_t ldvl,
    float *VR, magma_int_t ldvr,
    float *work, magma_int_t lwork,
    float *rwork,
    magma_int_t *info) {
  MagmaStreamSyncGuard guard;
  float *wr = w;
  float *wi = w + n;
  (void)rwork; // unused
  magma_sgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<c10::complex<double>, double>(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
    c10::complex<double> *A, magma_int_t lda,
    c10::complex<double> *w,
    c10::complex<double> *VL, magma_int_t ldvl,
    c10::complex<double> *VR, magma_int_t ldvr,
    c10::complex<double> *work, magma_int_t lwork,
    double *rwork,
    magma_int_t *info) {
  MagmaStreamSyncGuard guard;
  magma_zgeev(jobvl, jobvr, n,
         reinterpret_cast<magmaDoubleComplex*>(A), lda,
         reinterpret_cast<magmaDoubleComplex*>(w),
         reinterpret_cast<magmaDoubleComplex*>(VL), ldvl,
         reinterpret_cast<magmaDoubleComplex*>(VR), ldvr,
         reinterpret_cast<magmaDoubleComplex*>(work), lwork,
         rwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<c10::complex<float>, float>(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
    c10::complex<float> *A, magma_int_t lda,
    c10::complex<float> *w,
    c10::complex<float> *VL, magma_int_t ldvl,
    c10::complex<float> *VR, magma_int_t ldvr,
    c10::complex<float> *work, magma_int_t lwork,
    float *rwork,
    magma_int_t *info) {
  MagmaStreamSyncGuard guard;
  magma_cgeev(jobvl, jobvr, n,
         reinterpret_cast<magmaFloatComplex*>(A), lda,
         reinterpret_cast<magmaFloatComplex*>(w),
         reinterpret_cast<magmaFloatComplex*>(VL), ldvl,
         reinterpret_cast<magmaFloatComplex*>(VR), ldvr,
         reinterpret_cast<magmaFloatComplex*>(work), lwork,
         rwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaSvd: singular value decomposition via MAGMA's ?gesdd
// (divide-and-conquer). Singular values (always real) go to s; U/VT receive
// the singular vectors per jobz. The real specializations ignore rwork,
// which exists only to match the complex signatures; complex ?gesdd needs it.
template<>
void magmaSvd<double>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
    magma_int_t lda, double* s, double* U, magma_int_t ldu,
    double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
    double *rwork, magma_int_t* iwork, magma_int_t* info) {
  (void)rwork; // unused
  MagmaStreamSyncGuard guard;
  magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<float>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
    magma_int_t lda, float* s, float* U, magma_int_t ldu,
    float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
    float* rwork, magma_int_t* iwork, magma_int_t* info) {
  (void)rwork; // unused
  MagmaStreamSyncGuard guard;
  magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<c10::complex<float>, float>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
    magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
    c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
    float *rwork, magma_int_t* iwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
        reinterpret_cast<magmaFloatComplex*>(U), ldu,
        reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
        reinterpret_cast<magmaFloatComplex*>(work), lwork,
        rwork, iwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<c10::complex<double>, double>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
    magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
    c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
    double *rwork, magma_int_t* iwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
        reinterpret_cast<magmaDoubleComplex*>(U), ldu,
        reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
        reinterpret_cast<magmaDoubleComplex*>(work), lwork,
        rwork, iwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaLuSolve: solves A*X = B given an LU factorization (from magmaLu) via
// MAGMA's ?getrs_gpu with trans fixed to MagmaNoTrans; B is overwritten
// with X, status in *info.
template<>
void magmaLuSolve<double>(
    magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
    double* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<float>(
    magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
    float* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<c10::complex<double>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv,
    c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv,
    c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
// Batched LU-solve (complex double): forwards to MAGMA's zgetrs_batched.
// c10::complex<double> is reinterpreted as magmaDoubleComplex (same layout).
template<>
void magmaLuSolveBatched<c10::complex<double>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_zgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
// Batched LU-solve (complex float): forwards to MAGMA's cgetrs_batched.
// c10::complex<float> is reinterpreted as magmaFloatComplex (same layout).
template<>
void magmaLuSolveBatched<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_cgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
#endif
// Allocates 'size' elements of 'type' in pinned (page-locked) host memory and
// points 'name' at the buffer. The owning Storage ('storage_<name>') is a local
// created by the macro, so the buffer lives until the end of the enclosing scope.
#define ALLOCATE_ARRAY(name, type, size) \
  auto storage_##name = pin_memory<type>(size); \
  name = static_cast<type*>(storage_##name.data());
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
  Solves A X = B via MAGMA's LU-based solvers. 'b' holds B on entry and the
  solution X on exit; 'A' is overwritten by the factorization. 'infos' collects
  MAGMA status codes (0 on success). Tensors are expected in column-major
  (MAGMA/Fortran) layout; callers pass clones made by cloneBatchedColumnMajor.
*/
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, Tensor& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
  auto A_data = A.data_ptr<scalar_t>();
  auto b_data = b.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
  // NOTE(review): '::max' (not 'std::max') appears to be a hipify artifact;
  // the rest of the file uses std::max — confirm HIP's global-namespace max is intended.
  magma_int_t lda = ::max(magma_int_t{1}, n);
  if (b.dim() == 2) {
    // Single-matrix path: pivots are kept in a (default-device, i.e. CPU)
    // int tensor for the hybrid CPU-GPU MAGMA driver.
    auto ipiv = at::empty({n}, at::kInt);
    infos = infos.to(at::kCPU); // magmaSolve requires infos tensor to live on CPU
    magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
                         b_data, lda, infos.data_ptr<magma_int_t>());
  } else {
    // Batched path: build pinned-host arrays of per-matrix pointers
    // (ALLOCATE_ARRAY uses pin_memory) for the batched MAGMA interface.
    auto infos_data = infos.data_ptr<magma_int_t>();
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);
    magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
    magma_int_t* ipiv_data;
    magma_int_t** ipiv_array;
    scalar_t** A_array;
    scalar_t** b_array;
    ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
    ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
      ipiv_array[i] = &ipiv_data[i * n];
    }
    MAGMAQueue magma_queue(b.get_device());
    constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** A_array_cur = &A_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];
      magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
      magma_int_t* info_array_cur = &infos_data[mini_idx];
      magmaSolveBatched<scalar_t>(
          n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
          info_array_cur, batch_limit, magma_queue);
    }
    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    // (mini_idx now points at the first unprocessed matrix)
    if (batch_size % batch_limit != 0) {
      magmaSolveBatched<scalar_t>(
          n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
          &infos_data[mini_idx], batch_size % batch_limit, magma_queue);
    }
  }
#endif
}
// Solves A X = B on CUDA via MAGMA. Returns (solution, LU factorization of A);
// both are fresh column-major copies so the inputs stay untouched.
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
  const auto n_infos = std::max<int64_t>(1, batchCount(self));
  auto infos = at::empty({n_infos}, self.options().dtype(kInt));
  auto rhs_copy = cloneBatchedColumnMajor(self);
  auto lhs_copy = cloneBatchedColumnMajor(A);
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{
    apply_solve<scalar_t>(rhs_copy, lhs_copy, infos);
  });
  // Non-batched inputs get a single status check; batched ones are checked per matrix.
  if (self.dim() <= 2) {
    singleCheckErrors(infos.item().toInt(), "solve_cuda");
  } else {
    batchCheckErrors(infos, "solve_cuda");
  }
  return std::make_tuple(rhs_copy, lhs_copy);
}
// This is a type dispatching helper function for 'apply_solve'
Tensor& _linalg_solve_out_helper_cuda(Tensor& result, Tensor& input, Tensor& infos) {
  // 'result' and 'input' should be in column major order (it should be checked before calling this function)
  // the content of 'result', 'input' and 'infos' is overwritten by 'apply_solve'
  // 'result' should contain data of 'other' tensor (right-hand-side of the linear system of equations)
  // 'input' should contain data of original 'input' tensor (left-hand-side of the linear system)
  // Fix: the dispatch label previously read "linalg_solve_out_cpu" — this is the CUDA path,
  // and the label appears in the error raised for unsupported dtypes.
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_solve_out_cuda", [&]{
    apply_solve<scalar_t>(result, input, infos);
  });
  return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of n-by-n matrix 'self', it is saved to 'self_inv'.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
'infos_lu' is for holding magmaLU errors, and 'infos_getri' is for holding magmaGetri errors
For more information see MAGMA's documentation for GETRI and GETRF routines.
*/
/*
  Batched matrix inverse: LU-factorizes every matrix of 'self' in one batched
  call, then runs batched getri (in mini-batches of at most 65535) writing the
  inverses into 'self_inv'. 'infos_lu' / 'infos_getri' collect the per-matrix
  status codes of the two phases.
*/
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data_ptr<scalar_t>();
  auto self_mat_stride = matrixStride(self);
  auto self_inv_data = self_inv.data_ptr<scalar_t>();
  auto self_inv_mat_stride = matrixStride(self_inv);
  auto infos_lu_data = infos_lu.data_ptr<magma_int_t>();
  auto infos_getri_data = infos_getri.data_ptr<magma_int_t>();
  magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
  // MAGMA does not work with batch_size == 0, let's return early in this case
  if (batch_size == 0) {
    return;
  }
  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
  magma_int_t lda = std::max<magma_int_t>(1, n);
  // Pinned-host scratch: pivot storage plus per-matrix pointer arrays
  // (ALLOCATE_ARRAY uses pin_memory).
  magma_int_t* ipiv_data;
  magma_int_t** ipiv_array;
  scalar_t** self_array;
  scalar_t** self_inv_array;
  ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * lda);
  ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
  ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
  ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
  // Set up the created arrays
  for (int64_t i = 0; i < batch_size; i++) {
    self_array[i] = &self_data[i * self_mat_stride];
    self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
    ipiv_array[i] = &ipiv_data[i * n];
  }
  MAGMAQueue magma_queue(self.get_device());
  // Phase 1: batched LU factorization of all matrices at once.
  magmaLuBatched<scalar_t>(
      n, n, self_array, lda, ipiv_array, infos_lu_data,
      batch_size, magma_queue);
  constexpr int64_t batch_limit = 65535;
  // Phase 2: batched getri in chunks of at most 65535.
  // Compute as many batches of 65535 as possible
  // The number of "mini"-batches are floor(batch_size / batch_limit)
  // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
  int64_t mini_batches = batch_size / batch_limit, mini_idx;
  for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
    scalar_t** self_array_cur = &self_array[mini_idx];
    scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
    magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
    magma_int_t* info_array_cur_getri = &infos_getri_data[mini_idx];
    magmaGetriBatched<scalar_t>(
        n, self_array_cur, lda, ipiv_array_cur, self_inv_array_cur,
        lda, info_array_cur_getri, batch_limit, magma_queue);
  }
  // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
  // which concisely is equal to batch_size % batch_limit
  if (batch_size % batch_limit != 0) {
    magmaGetriBatched<scalar_t>(
        n, &self_array[mini_idx], lda, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
        lda, &infos_getri_data[mini_idx], batch_size % batch_limit, magma_queue);
  }
#endif
}
/*
  In-place inverse of a single n-by-n matrix: LU factorization (magmaLu)
  followed by getri with an explicitly sized device workspace.
  Status codes are written into 'infos_lu' / 'infos_getri', which are moved
  to CPU first because the hybrid MAGMA drivers require CPU-resident infos.
*/
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
  magma_int_t lda = std::max<magma_int_t>(1, n);
  // Workspace size recommended by MAGMA: n times the optimal block size.
  magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
  // magmaLu and magmaGetri requires infos tensor to live on CPU
  infos_lu = infos_lu.to(at::kCPU);
  infos_getri = infos_getri.to(at::kCPU);
  // Pivots on CPU (default device for at::kInt options); workspace on the
  // same device/dtype as 'self'.
  Tensor ipiv = at::empty({lda}, at::kInt);
  Tensor dwork = at::empty({lwork}, self.options());
  magmaLu<scalar_t>(n, n, self_data, lda, ipiv.data_ptr<magma_int_t>(), infos_lu.data_ptr<magma_int_t>());
  magmaGetri<scalar_t>(
      n, self_data, lda, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, infos_getri.data_ptr<magma_int_t>());
#endif
}
// MAGMA-backed inverse helper. Batched inputs use apply_batched_inverse with
// device-side infos tensors; 2-D inputs use apply_single_inverse, whose MAGMA
// drivers need the infos tensors allocated on CPU. Returns a fresh
// column-major tensor holding the inverse(s).
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
  auto self_inv_working_copy = cloneBatchedColumnMajor(self);
  if (self.dim() > 2) {
    auto infos_lu = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
    auto infos_getri = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
    // The batched routine factorizes this copy and writes the inverse into
    // self_inv_working_copy.
    auto self_working_copy = cloneBatchedColumnMajor(self);
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_batched_inverse<scalar_t>(
          self_working_copy, self_inv_working_copy, infos_lu, infos_getri);
    });
    batchCheckErrors(infos_lu, "inverse_cuda");
    batchCheckErrors(infos_getri, "inverse_cuda");
  } else {
    // magmaLu and magmaGetri requires infos tensor to live on CPU
    auto infos_lu = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
    auto infos_getri = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_single_inverse<scalar_t>(self_inv_working_copy, infos_lu, infos_getri);
    });
    singleCheckErrors(infos_lu.item().toInt(), "inverse_cuda");
    singleCheckErrors(infos_getri.item().toInt(), "inverse_cuda");
  }
  return self_inv_working_copy;
}
// Backend dispatcher for the inverse: cuSOLVER/cuBLAS for the single-matrix
// case and tiny batches (or when MAGMA is unavailable), MAGMA otherwise.
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
  const bool prefer_cusolver =
      self.dim() == 2 || batchCount(self) <= 2 || !use_magma_;
  if (prefer_cusolver) {
    return _inverse_helper_cuda_lib(self);     // cusolver or cublas
  }
  return _inverse_helper_cuda_legacy(self);    // magma-cuda
#else
  return _inverse_helper_cuda_legacy(self);    // magma-cuda
#endif
}
// This is a type dispatching helper function for 'apply_batched_inverse' and 'singleCheckErrors'
Tensor& _linalg_inv_out_helper_cuda_legacy(Tensor& result, Tensor& infos_lu, Tensor& infos_getri) {
  // 'result' is assumed to be in column-major order and holds the matrices to invert;
  // the inverse is computed in-place.
  const bool is_batched = result.dim() > 2;
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
    if (is_batched) {
      // The batched MAGMA routine factorizes a copy and writes into 'result'.
      auto input_working_copy = cloneBatchedColumnMajor(result);
      apply_batched_inverse<scalar_t>(input_working_copy, result, infos_lu, infos_getri);
    } else {
      apply_single_inverse<scalar_t>(result, infos_lu, infos_getri);
    }
  });
  return result;
}
// This is a MAGMA/cuSOLVER dispatching helper function
Tensor& _linalg_inv_out_helper_cuda(Tensor &result, Tensor& infos_lu, Tensor& infos_getri) {
  // This function calculates the inverse matrix in-place
  // result should be in column major order and contain matrices to invert
#ifdef USE_CUSOLVER
  if ((result.dim() == 2) || (/* result.dim() > 2 && */ batchCount(result) <= 2) || !use_magma_) {
    return _linalg_inv_out_helper_cuda_lib(result, infos_lu, infos_getri);  // cusolver or cublas
  } else {
    return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri);  // magma-cuda
  }
#else
  return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri);  // magma-cuda
#endif
  // Fix: a trailing 'return result;' used to follow the #endif; it was
  // unreachable because every preprocessor branch above already returns.
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
  Solves A X = B given the Cholesky factor of A (potrs). 'b' holds B on entry
  and X on exit. 'info' receives 0 on success, or MAGMA's negative code for an
  invalid argument. Tensors must be column-major.
*/
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
  auto A_data = A.data_ptr<scalar_t>();
  auto b_data = b.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t lda = std::max<magma_int_t>(1, n);
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
  int info_tmp = 0;
  if (b.dim() == 2) {
    // Single-matrix path.
    magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, lda,
                                 b_data, lda, &info_tmp);
    info = info_tmp;
  } else {
    // Batched path: pinned-host arrays of per-matrix pointers.
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);
    magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
    scalar_t** A_array;
    scalar_t** b_array;
    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
    }
    MAGMAQueue magma_queue(b.get_device());
    constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** A_array_cur = &A_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];
      magmaCholeskySolveBatched<scalar_t>(
          uplo, n, nrhs, A_array_cur, lda, b_array_cur, lda,
          info_tmp, batch_limit, magma_queue);
      // Stop at the first failing mini-batch; the error is reported below.
      if (info_tmp != 0) {
        break;
      }
    }
    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    // (skipped if an earlier mini-batch already failed)
    if (batch_size % batch_limit != 0 && info_tmp == 0) {
      magmaCholeskySolveBatched<scalar_t>(
          uplo, n, nrhs, &A_array[mini_idx], lda, &b_array[mini_idx], lda,
          info_tmp, batch_size % batch_limit, magma_queue);
    }
    info = info_tmp;
  }
#endif
}
// Dispatches cholesky_solve to MAGMA on column-major copies of the inputs and
// validates the returned status. Returns the solution tensor.
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
  auto rhs_working = cloneBatchedColumnMajor(self);
  auto factor_working = cloneBatchedColumnMajor(A);
  int64_t info = 0;
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
    apply_cholesky_solve<scalar_t>(rhs_working, factor_working, upper, info);
  });
  TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
  return rhs_working;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
  In-place Cholesky factorization (potrf) of 'self'. One MAGMA status code per
  matrix is written into 'infos'. Column-major layout is assumed.
*/
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
  auto self_data = self.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
  auto lda = std::max<magma_int_t>(1, n);
  if (self.dim() == 2) {
    // Single matrix: plain (non-batched) potrf.
    magma_int_t info = 0;
    magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info);
    infos[0] = info;
  } else {
    auto self_mat_stride = matrixStride(self);
    magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
    // Pinned-host scratch for statuses and per-matrix pointers.
    magma_int_t* info_array;
    scalar_t** self_array;
    ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
    ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      self_array[i] = &self_data[i * self_mat_stride];
    }
    MAGMAQueue magma_queue(self.get_device());
    int64_t batch_limit = self.is_complex() ? 65535 : 262140;
    // Compute as many batches of 262140 as possible
    // 262140 is the size of the largest batch of matrices that can be run
    // without violating the maximum kernel configuration
    // For complex input the batch limit is 65535 (determined experimentally, see https://github.com/pytorch/pytorch/pull/47047#discussion_r516086923 for more information)
    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** self_array_cur = &self_array[mini_idx];
      magma_int_t* info_array_cur = &info_array[mini_idx];
      magmaCholeskyBatched<scalar_t>(
          uplo, n, self_array_cur, lda, info_array_cur, batch_limit, magma_queue);
    }
    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    if (batch_size % batch_limit != 0) {
      magmaCholeskyBatched<scalar_t>(
          uplo, n, &self_array[mini_idx], lda, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
    }
    // Copy the per-matrix statuses out of the pinned buffer.
    for (int64_t i = 0; i < batch_size; i++) {
      infos[i] = info_array[i];
    }
  }
#endif
}
// Cholesky entry point. The input is arranged (via transposes) so that
// apply_cholesky can always run with upper=false; for upper=true the result
// is transposed back at the end.
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
  std::vector<int64_t> infos(batchCount(self), 0);
  Tensor result;
  if (self.dim() > 2) {
    // MAGMA's batched cholesky operator has an off-by-one error causing IMA
    // (see https://github.com/pytorch/pytorch/issues/42666). This code is based
    // on the #cloneBatchedColumnMajor function however it pads the input with
    // one extra element utilizing the fact that the resize_as_ method preserves
    // the storage even if it's larger than the new sizes. This way if MAGMA
    // reads off bounds it will still be valid user memory.
    const Tensor input = upper ? self : self.transpose(-1, -2);
    result = at::empty(input.numel() + 1, input.options());
    result.resize_as_(input).copy_(input).transpose_(-1, -2);
  } else {
    result = cloneBatchedColumnMajor(upper ? self.transpose(-1, -2) : self);
  }
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
    self.scalar_type(), "cholesky_cuda", [&] {
      // upper=false always: the transposes above/below account for 'upper'.
      apply_cholesky<scalar_t>(result, false, infos);
    });
  if (self.dim() > 2) {
    batchCheckErrors(infos, "cholesky_cuda");
  } else {
    singleCheckErrors(infos[0], "cholesky_cuda");
  }
  return upper ? result.transpose_(-1, -2) : result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of a symmetric (Hermitian) positive-definite matrix n-by-n matrix 'input' using the Cholesky solver
This is an in-place routine, content of 'input' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
MAGMA requires 'infos' to reside in CPU memory.
For more information see MAGMA's documentation for POTRS routine.
*/
template <typename scalar_t>
static void apply_cholesky_inverse(Tensor& input, Tensor& infos, bool upper) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "cholesky_inverse: MAGMA library not found in compilation. Please rebuild with MAGMA.");
#else
  // magmaCholeskyInverse (magma_dpotri_gpu) is slow because internally
  // it transfers data several times between GPU and CPU and calls lapack routine on CPU
  // using magmaCholeskySolveBatched is a lot faster
  // note that magmaCholeskySolve is also slow
  // 'input' is modified in-place we need to clone it and replace with a diagonal matrix
  // for apply_cholesky_solve
  // NOTE: the clone must happen BEFORE 'input' is overwritten below.
  auto input_working_copy = cloneBatchedColumnMajor(input);
  // 'input' tensor has to be a batch of diagonal matrix
  // (solving L L^H X = I yields X = A^{-1})
  input.fill_(0);
  input.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
  Tensor result_u, input_u;
  if (input.dim() == 2) {
    // unsqueezing here so that the batched version is used
    result_u = input.unsqueeze(0);
    input_u = input_working_copy.unsqueeze(0);
  } else {
    result_u = input;
    input_u = input_working_copy;
  }
  // magma's potrs_batched doesn't take matrix-wise array of ints as an 'info' argument
  // it returns a single 'magma_int_t'
  // if info = 0 the operation is successful, if info = -i, the i-th parameter had an illegal value.
  int64_t info_tmp = 0;
  apply_cholesky_solve<scalar_t>(result_u, input_u, upper, info_tmp);
  // Broadcast the single status to every entry of 'infos'.
  infos.fill_(info_tmp);
#endif
}
// This is a type dispatching helper function for 'apply_cholesky_inverse'
Tensor& cholesky_inverse_kernel_impl(Tensor &result, Tensor& infos, bool upper) {
  // In-place inversion: 'result' holds column-major matrices to invert;
  // its contents are overwritten by 'apply_cholesky_inverse'.
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_inverse_out_cuda", [&]{
    apply_cholesky_inverse<scalar_t>(result, infos, upper);
  });
  return result;
}

REGISTER_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
  In-place LU factorization of 'self'. If 'get_pivots' is true, partial
  pivoting is used and the pivot indices are written into 'pivots'; otherwise
  the no-pivot variant runs. Status codes land in 'infos'.
*/
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data_ptr<scalar_t>();
  magma_int_t m = magma_int_cast(self.size(-2), "m");
  magma_int_t n = magma_int_cast(self.size(-1), "n");
  // NOTE(review): '::min' (not 'std::min') appears to be a hipify artifact — confirm intended.
  magma_int_t k = ::min(m, n);
  if (self.dim() == 2) {
    // If `pivots` is defined, then we have to compute them.
    // magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
    // the partially-pivoted LU decomposition with / without pivots.
    // The driver routines magma_(d/s)getrf_(nopiv_)gpu accepts a tensor on the CPU for pivots.
    // The data is later copied back to the appropriate output tensor.
    Tensor info_tmp = at::zeros({}, at::kInt);  // CPU scalar tensor
    if (get_pivots) {
      Tensor piv_tmp = at::empty({k}, at::kInt);  // CPU pivot buffer
      magmaLu<scalar_t>(
          m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>());
      pivots.copy_(piv_tmp);
    } else {
      magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>());
    }
    infos.copy_(info_tmp);
  } else {
    // Batched path: pinned-host array of per-matrix pointers.
    auto self_matrix_stride = matrixStride(self);
    magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
    scalar_t** self_array;
    ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      self_array[i] = &self_data[i * self_matrix_stride];
    }
    MAGMAQueue magma_queue(self.get_device());
    // Same comment as in the case of single matrix above.
    if (get_pivots) {
      auto pivots_data = pivots.data_ptr<magma_int_t>();
      auto pivots_matrix_stride = pivots.size(-1);
      magma_int_t** pivots_array;
      ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
      for (int64_t i = 0; i < batch_size; i++) {
        pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
      }
      magmaLuBatched<scalar_t>(
          m, n, self_array, m, pivots_array,
          infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
    } else {
      magmaLuNoPivBatched<scalar_t>(
          m, n, self_array, m, infos.data_ptr<magma_int_t>(),
          batch_size, magma_queue);
    }
  }
#endif
}
// LU-with-info entry point: returns (factorized copy, pivots, infos).
// Pivots are pre-filled with the identity permutation 1..k so they are valid
// for the empty-input and no-pivot cases.
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
  TORCH_CHECK(self.dim() >= 2,
           "expected tensor with 2 or more dimensions, got size: ", self.sizes(),
           " instead");
  auto m = self.size(-2);
  auto n = self.size(-1);
  // NOTE(review): '::min' appears to be a hipify artifact — confirm intended.
  auto k = ::min(m, n);
  // Pivot tensor shape: batch dims + {k}; infos shape: batch dims only.
  auto req_size = self.sizes().vec();
  req_size.pop_back();
  req_size.back() = k;
  Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
  req_size.pop_back();
  auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));
  Tensor self_working_copy;
  if (self.numel() == 0) {
    // Nothing to factorize; return an empty clone.
    self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  } else {
    self_working_copy = cloneBatchedColumnMajor(self);
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_cuda", [&]{
        apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
    });
  }
  if (check_errors) {
    if (self.dim() == 2) {
      singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true);
    } else {
      batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true);
    }
  }
  return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
  Solves the triangular system op(A) X = B in-place: 'b' holds B on entry and
  the solution X on exit. 'A' is the (upper or lower) triangular coefficient
  matrix; 'unitriangular' marks an implicit unit diagonal. Column-major
  (MAGMA) layout is assumed for both tensors.
*/
template <typename scalar_t>
static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
  magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
  magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
  auto A_data = A.data_ptr<scalar_t>();
  auto b_data = b.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
  magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
  MAGMAQueue magma_queue(b.get_device());
  // batch_size == 1 implies that:
  // 1. the RHS and LHS tensors have 2 dimensions, or
  // 2. the RHS and LHS tensors have more than 2 dimensions but all batch dimensions are 1
  if (batch_size == 1) {
    magmaTriangularSolve<scalar_t>(
        uplo, trans, diag, n, nrhs, A_data, n, b_data, n, magma_queue);
  } else {
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);
    scalar_t** A_array;
    scalar_t** b_array;
    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
    }
    // Fix: a second MAGMAQueue for the same device was constructed here,
    // shadowing the one created above; the outer queue is reused instead.
    constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** A_array_cur = &A_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];
      magmaTriangularSolveBatched<scalar_t>(
          uplo, trans, diag, n, nrhs, A_array_cur,
          n, b_array_cur, n, batch_limit, magma_queue);
    }
    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    if (batch_size % batch_limit != 0) {
      magmaTriangularSolveBatched<scalar_t>(
          uplo, trans, diag, n, nrhs, &A_array[mini_idx],
          n, &b_array[mini_idx], n, batch_size % batch_limit, magma_queue);
    }
  }
#endif
}
// Triangular solve entry point: runs MAGMA on column-major copies of both
// operands and returns (solution, copy of A).
std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A,
                                                         bool upper, bool transpose, bool unitriangular) {
  auto rhs_copy = cloneBatchedColumnMajor(self);
  auto mat_copy = cloneBatchedColumnMajor(A);
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{
    apply_triangular_solve<scalar_t>(rhs_copy, mat_copy, upper, transpose, unitriangular);
  });
  return std::make_tuple(rhs_copy, mat_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
  QR decomposition via MAGMA's geqrf/orgqr. 'R' is computed in-place from the
  input copy using geqrf2_gpu; 'Q' (if 'compute_q') is formed by re-running
  geqrf_gpu and then orgqr. One status code per matrix is written to 'infos';
  processing stops at the first failure.
*/
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t q_size_minus_2, int64_t r_size_minus_1, int64_t n_columns,
                     bool compute_q, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
  magma_int_t m = magma_int_cast(q_size_minus_2, "Q.size(-2)");
  magma_int_t n = magma_int_cast(r_size_minus_1, "R.size(-1)");
  auto r_data = R.data_ptr<scalar_t>();
  auto r_matrix_stride = matrixStride(R);
  magma_int_t k = m < n ? m : n;
  magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
  int64_t batch_size = batchCount(R);
  // magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
  // The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
  Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
  Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
  scalar_t* tau_data = tau.data_ptr<scalar_t>();
  scalar_t* work_data = work.data_ptr<scalar_t>();
  // This phase computes R (the raw version)
  // This uses MAGMA's ?geqrf2_gpu function
  magma_int_t info = 0;
  for (int64_t i = 0; i < batch_size; i++) {
    scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
    magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
    infos[i] = info;
    if (info != 0) {
      return;  // bail out on first failure; caller inspects 'infos'
    }
  }
  if (!compute_q) {
    // this is for mode='r'
    return;
  }
  // This phase computes Q (the raw version)
  // We require to perform ?geqrf_gpu again due to this bug in MAGMA:
  // - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
  // - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
  // Refer to the below link for more details:
  // http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
  auto q_data = Q.data_ptr<scalar_t>();
  auto q_matrix_stride = matrixStride(Q);
  for (int64_t i = 0; i < batch_size; i++) {
    scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
    magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
    infos[i] = info;
    if (info != 0) {
      return;
    }
    // Expand the reflectors into the first n_columns columns of Q.
    magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
    infos[i] = info;
    if (info != 0) {
      return;
    }
  }
#endif
}
// QR entry point: parses 'mode' into (compute_q, reduced), sets up working
// copies with the geometry required by apply_qr, and narrows/triangularizes
// the results into the final (Q, R) pair.
std::tuple<Tensor,Tensor> _linalg_qr_helper_cuda(const Tensor& self, std::string mode) {
  bool compute_q, reduced;
  std::tie(compute_q, reduced) = _parse_qr_mode(mode);
  std::vector<int64_t> infos(batchCount(self), 0);
  // Setup input geometry and inputs for apply_qr
  std::vector<int64_t> q_sizes, q_strides;
  int64_t n_columns_q;
  std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, reduced);
  Tensor q_working_copy, r_working_copy;
  // If there are no elements, then we simply return a pair of tensors of required dimensions
  if (self.numel() == 0) {
    int64_t n = self.size(-1);
    r_working_copy = at::empty({n_columns_q, n}, self.options());
    if (compute_q) {
      int64_t n_rows_q = q_sizes[self.dim() - 2];
      // Q of an empty input is the identity of the appropriate size.
      q_working_copy = at::eye(n_rows_q, n_columns_q, self.options());
    } else {
      q_working_copy = at::empty({0}, self.options());
    }
    return std::make_tuple(q_working_copy, r_working_copy);
  }
  if (compute_q) {
    // Q is allocated with the (possibly wider) geometry required by orgqr and
    // seeded with the input in its leading columns.
    q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
    q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
  } else {
    q_working_copy = at::empty({0}, self.options());
  }
  r_working_copy = cloneBatchedColumnMajor(self);
  int64_t m = q_sizes[self.dim() - 2];
  int64_t n = r_working_copy.size(-1);
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
    apply_qr<scalar_t>(q_working_copy, r_working_copy, m, n, n_columns_q, compute_q, infos);
  });
  if (self.dim() > 2) {
    batchCheckErrors(infos, "qr_cuda");
  } else {
    singleCheckErrors(infos[0], "qr_cuda");
  }
  if (compute_q) {
    q_working_copy = q_working_copy.narrow(-1, 0, n_columns_q);
  }
  // R is the upper-triangular part of the factored copy's leading rows.
  r_working_copy = r_working_copy.narrow(-2, 0, n_columns_q).triu();
  return std::make_tuple(q_working_copy, r_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
  Symmetric/Hermitian eigendecomposition via MAGMA's syev/heev drivers.
  Eigenvalues go into 'eigvals' (real-valued, type value_t); if 'eigenvectors'
  is set, 'self' is overwritten with the eigenvectors. One status code per
  matrix lands in 'infos'; the loop stops at the first failure.
*/
template <typename scalar_t>
static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("symeig: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  auto self_data = self.data_ptr<scalar_t>();
  auto eigvals_data = eigvals.data_ptr<value_t>();
  auto self_matrix_stride = matrixStride(self);
  auto eigvals_stride = eigvals.size(-1);
  int64_t batch_size = batchCount(self);
  magma_int_t n = magma_int_cast(self.size(-1), "n");
  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
  magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec;
  // Pinned-host scratch matrix required by the MAGMA driver.
  scalar_t* wA;
  ALLOCATE_ARRAY(wA, scalar_t, n * n);
  magma_int_t info;
  // Run once, first to get the optimum work sizes.
  // Since we deal with batches of matrices with the same dimensions, doing this outside
  // the loop saves (batch_size - 1) workspace queries which would provide the same result
  // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
  // (lwork/liwork/lrwork == -1 is LAPACK's workspace-query convention).
  magma_int_t lwork = -1;
  scalar_t wkopt;
  magma_int_t liwork = -1;
  magma_int_t iwkopt;
  magma_int_t lrwork = -1;
  value_t rwkopt;
  magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, &info);
  scalar_t* work;
  magma_int_t* iwork;
  lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
  liwork = magma_int_cast(iwkopt, "iwork_size");
  ALLOCATE_ARRAY(work, scalar_t, lwork);
  ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
  // Complex inputs additionally need a real-valued workspace (rwork).
  value_t* rwork = nullptr;
  c10::Storage storage_rwork;
  if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
    lrwork = magma_int_cast(rwkopt, "rwork_size");
    storage_rwork = pin_memory<value_t>(lrwork);
    rwork = static_cast<value_t*>(storage_rwork.data());
  }
  for (int64_t i = 0; i < batch_size; i++) {
    scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
    value_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride];
    magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr,
                                   wA, n, work, lwork, rwork, lrwork, iwork, liwork, &info);
    infos[i] = info;
    if (info != 0) {
      return;  // bail out on first failure; caller inspects 'infos'
    }
  }
#endif
}
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_sizes = self.sizes().vec();
self_sizes.pop_back();
ScalarType dtype = toValueType(typeMetaToScalarType(self.dtype()));
// magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
// The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigvalenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of
// dimensions on the CUDA (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(self_sizes, self.options().dtype(dtype))
: at::empty(self_sizes, self.options().dtype(dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos[0], "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// magmaEig uses a hybrid CPU-GPU algorithm, which takes and return CPU
// memory. So, we accept a GPU tensor, copy it to CPU memory, and later copy
// the returned values from CPU to GPU. See also magmaSymeig, which uses a
// similar approach.
// Runs MAGMA's geev driver on a single CPU-resident square matrix.
//   self         - input matrix (must already live on the CPU; see the assert)
//   eigenvectors - whether right eigenvectors are requested
//   out_eigvals  - receives eigenvalues (layout chosen by the caller per dtype)
//   out_eigvecs  - receives right eigenvectors when requested
//   info_ptr     - receives the MAGMA status code (only written when n > 0)
template <typename scalar_t>
static void apply_eig(const Tensor& self, bool eigenvectors, Tensor& out_eigvals, Tensor& out_eigvecs,
int64_t *info_ptr) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT(self.device() == at::kCPU, "Internal error: apply_eig needs a CPU tensor");
// value_t is the real type underlying scalar_t (used for the complex rwork buffer).
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto self_data = self.data_ptr<scalar_t>();
auto out_eigvals_data = out_eigvals.data_ptr<scalar_t>();
scalar_t *wr = out_eigvals_data;
// vr/ldvr stay NULL/1 when eigenvectors are not requested (geev convention).
scalar_t *vr_data = NULL;
magma_int_t ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = out_eigvecs.data_ptr<scalar_t>();
ldvr = n;
}
// rwork is only needed by the complex geev drivers.
value_t *rwork_data = nullptr;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
ALLOCATE_ARRAY(rwork_data, value_t, n*2);
}
if (n > 0) {
// call magmaEig once to get the optimal size of work_data (lwork == -1 is a query)
scalar_t wkopt;
magma_int_t info;
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, &wkopt, -1, rwork_data, &info);
magma_int_t lwork = static_cast<magma_int_t>(real_impl<scalar_t, value_t>(wkopt));
// call it a 2nd time to do the actual work
scalar_t *work_data = nullptr;
ALLOCATE_ARRAY(work_data, scalar_t, lwork);
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, work_data, lwork, rwork_data, &info);
*info_ptr = info;
}
#endif
}
/*
* Internal helper; like eig_cuda but:
* 1. assume that self is a square matrix of side "n"
* 2. return CPU tensors (because this is what magmaEig returns), which will be copied to GPU memory
* by the caller
*/
std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvectors) {
int64_t n = self.size(-1);
// copy self to pinned CPU memory
auto self_working_copy = at::empty_strided(
{n, n}, // square matrix
{1, n}, // column-ordered, as magmaEig expects
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
// tensors holding the results. We use empty_strided to make them column-ordered
auto options = self.options().device(at::kCPU).memory_format(LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor out_eigvals;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
out_eigvals = at::empty({n}, options);
} else {
out_eigvals = at::empty_strided({n, 2}, {1, n}, options);
}
auto out_eigvecs = eigenvectors
? at::empty_strided({n, n}, {1, n}, options)
: Tensor();
int64_t info;
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "eig_cuda", [&]{
apply_eig<scalar_t>(self_working_copy, eigenvectors, out_eigvals, out_eigvecs, &info);
});
singleCheckErrors(info, "eig_cuda");
return std::tuple<Tensor, Tensor>(out_eigvals, out_eigvecs);
}
REGISTER_DISPATCH(eig_stub, &eig_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ syevd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// This function computes eigenvalues 'w' and eigenvectors 'v' of the tensor 'self'
// compute_eigenvectors controls whether eigenvectors should be computed
// uplo controls the portion of input matrix to consider in computations, allowed values are "u", "U", "l", "L"
// '_syevd_helper_cuda' normalizes uplo and delegates to '_symeig_helper_cuda', which prepares the input for 'apply_symeig' and checks for possible errors using 'infos'
// See also CPU implementation in aten/src/ATen/native/BatchLinearAlgebra.cpp
// Thin shim over _symeig_helper_cuda that translates the string UPLO argument
// into the boolean form the symeig helper expects.
std::tuple<Tensor, Tensor> _syevd_helper_cuda(const Tensor& self, bool compute_eigenvectors, std::string uplo_str) {
  // NumPy allows lowercase input for the UPLO argument; normalize before comparing.
  // It is assumed that uplo_str is either "U" or "L" (possibly lowercase).
  const bool upper = std::toupper(uplo_str[0]) == 'U';
  return _symeig_helper_cuda(self, compute_eigenvectors, upper);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes the singular value decomposition self = U * diag(S) * VT for a
// batch of matrices using MAGMA's gesdd driver.
//   jobchar selects which singular vectors are produced:
//     'A' -> all, 'S' -> reduced ("some"), 'N' -> none.
//   The buffers are expected to be column-major and CPU/pinned-resident
//   (see _svd_helper_cuda_legacy, which sets them up); they are overwritten
//   in place. Per-matrix MAGMA status codes are written into 'infos'.
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
                      char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  // value_t is the real type underlying scalar_t; singular values are always real.
  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  auto self_data = self.data_ptr<scalar_t>();
  auto U_data = U.data_ptr<scalar_t>();
  auto S_data = S.data_ptr<value_t>();
  auto VT_data = VT.data_ptr<scalar_t>();
  auto self_stride = matrixStride(self);
  auto U_stride = matrixStride(U);
  auto S_stride = S.size(-1);
  auto VT_stride = matrixStride(VT);
  auto batchsize = batchCount(self);

  magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
  magma_int_t m = magma_int_cast(self.size(-2), "m");
  magma_int_t n = magma_int_cast(self.size(-1), "n");
  // Use std::min: the bare global-namespace ::min is a non-portable
  // CUDA-header extension (and a hipify artifact here).
  auto mn = std::min(m, n);

  c10::Storage storage_rwork;
  value_t* rwork = nullptr;

  // gesdd integer workspace: 8*min(m,n), per the LAPACK gesdd contract.
  magma_int_t* iwork;
  ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
  if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
    // Complex gesdd additionally requires a real workspace; keep it pinned
    // since MAGMA uses a hybrid CPU-GPU algorithm.
    auto lrwork = computeLRWorkDim(jobchar, m, n);
    storage_rwork = pin_memory<value_t>(lrwork);
    rwork = static_cast<value_t*>(storage_rwork.data());
  }

  magma_int_t info = 0;
  // Run once, first to get the optimum work size (lwork == -1 is a query).
  // Since we deal with batches of matrices with the same dimensions, doing this outside
  // the loop saves (batch_size - 1) workspace queries which would provide the same result
  // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
  magma_int_t lwork = -1;
  scalar_t wkopt;
  magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, rwork, iwork, &info);
  lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
  scalar_t* work;
  ALLOCATE_ARRAY(work, scalar_t, lwork);

  for (int64_t i = 0; i < batchsize; i++) {
    scalar_t* self_working_ptr = &self_data[i * self_stride];
    value_t* S_working_ptr = &S_data[i * S_stride];
    scalar_t* U_working_ptr = &U_data[i * U_stride];
    scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
    // Compute S, U (optionally), VT (optionally)
    magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, m,
                                S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, rwork, iwork, &info);
    infos[i] = info;
    // Stop at the first failing matrix; remaining infos keep their initial value.
    if (info != 0) {
      return;
    }
  }
#endif
}
// MAGMA-based SVD path: stages the input in pinned column-major CPU memory,
// runs apply_svd, checks per-matrix status codes, and moves the results back
// to the input's device. Returns (U, S, V) — note V, not V^T/V^H.
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_legacy(const Tensor& self, bool some, bool compute_uv) {
  std::vector<int64_t> infos(batchCount(self), 0);
  int64_t m = self.size(-2), n = self.size(-1);
  // std::min instead of the non-portable bare ::min (hipify artifact).
  int64_t k = std::min(m, n);
  char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';

  Tensor U_working_copy, S_working_copy, VT_working_copy;
  std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);

  if (self.numel() > 0) {
    // The input matrix, U, S and VT have to reside in pinned memory.
    // Additionally, the input and U have to be in column major format.
    // _create_U_S_VT takes care of a part of these requirements (for U, S and VT);
    // for the input matrix, these requirements are taken care of below.
    // Specify column-major strides for the pinned working copy.
    auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
    self_col_major_strides[self.dim() - 2] = 1;
    self_col_major_strides[self.dim() - 1] = m;
    // Create strided tensor in pinned memory
    auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
                                               at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
    self_working_copy.copy_(self);

    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
      apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
    });

    if (self.dim() > 2) {
      batchCheckErrors(infos, "svd_cuda");
    } else {
      singleCheckErrors(infos[0], "svd_cuda");
    }

    // Move results back to the original device, preserving strides.
    U_working_copy = same_stride_to(U_working_copy, self.options());
    S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
    VT_working_copy = same_stride_to(VT_working_copy, self.options());

    if (compute_uv) {
      if (some) {
        // Reduced SVD: only the first k = min(m, n) rows of VT are meaningful.
        VT_working_copy = VT_working_copy.narrow(-2, 0, k);
      }
    } else {
      VT_working_copy.zero_();
      U_working_copy.zero_();
    }
  } else {
    // Empty input: return correctly-shaped zeroed tensors on the input's device.
    U_working_copy = same_stride_to(U_working_copy, self.options()).zero_();
    S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
    VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_();
  }
  // So far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
  // Note that the 'apply_svd' routine returns VT = V^T (for real inputs) or VT = V^H (for complex inputs), not V.
  VT_working_copy = VT_working_copy.conj();
  VT_working_copy.transpose_(-2, -1);
  return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
// Entry point for SVD on CUDA tensors: uses the cuSOLVER-based implementation
// when PyTorch was built with cuSOLVER support, otherwise falls back to the
// MAGMA-based legacy path above. Both return (U, S, V).
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
#ifdef USE_CUSOLVER
return _svd_helper_cuda_lib(self, some, compute_uv);
#else
return _svd_helper_cuda_legacy(self, some, compute_uv);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves A X = B in-place in 'b', given the LU factorization of A ('lu' plus
// 'pivots'). For a single (2D) right-hand side a one-shot magmaLuSolve call is
// used; batched inputs go through the batched driver in chunks of at most
// 65535 matrices. The final MAGMA status code is written to 'info'.
template <typename scalar_t>
static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("lu_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
auto n = lu.size(-2);
auto nrhs = b.size(-1);
int info_tmp = 0;
if (b.dim() == 2) {
// Single-matrix path: magmaLuSolve takes the pivot array from CPU memory.
Tensor pivots_tmp = pivots.cpu();
magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
info = info_tmp;
} else {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
// Build per-matrix pointer arrays for the batched MAGMA interface.
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
// NOTE: mini_idx is deliberately declared outside the loop — its value after
// the loop marks where the leftover (partial) chunk below starts.
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
// Abort on the first chunk that reports an error.
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaLuSolveBatched<scalar_t>(
n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
// Solves A X = B given the LU factorization (LU_data, LU_pivots) of A and
// returns X. Raises via TORCH_CHECK if MAGMA reports an invalid argument.
Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) {
  int64_t info = 0;
  // Check for empty inputs before making any working copies: the original code
  // cloned both tensors first and then threw the clones away on this path.
  if (self.numel() == 0 || LU_data.numel() == 0) {
    return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  }
  auto self_working_copy = cloneBatchedColumnMajor(self);
  auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data);
  auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous();
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{
    apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info);
  });
  TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info);
  return self_working_copy;
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
| f17c0a70e112c190a0345a06cc176a3864c01f20.cu | #include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/cuda/MiscUtils.h>
#include <ATen/native/Resize.h>
#include <ATen/native/BatchLinearAlgebra.h>
#include <ATen/native/cuda/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THC/THC.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma_types.h>
#include <magma_v2.h>
// Compile-time record of whether this build has MAGMA support.
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
// ---------------------------------------------------------------------------
// Forward declarations of typed MAGMA wrappers. Each template below is
// explicitly specialized later in this file for double and float (and, where
// MAGMA supports it, c10::complex<double> / c10::complex<float>), dispatching
// to the matching magma_{d,s,z,c}* routine.
// ---------------------------------------------------------------------------
// gesv: LU-based linear solve, single and batched.
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
// getrf: LU factorization, with and without pivoting, single and batched.
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
// getri: matrix inverse from an LU factorization (plus workspace sizing helper).
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
// potrs / potrf: Cholesky solve and factorization, single and batched.
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
// trsm-style triangular solves, single and batched.
template <class scalar_t>
void magmaTriangularSolve(
magma_uplo_t uplo,
magma_trans_t trans,
magma_diag_t diag,
magma_int_t m,
magma_int_t n,
scalar_t* dA,
magma_int_t ldda,
scalar_t* dB,
magma_int_t lddb,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
// geqrf/orgqr: QR factorization and explicit-Q construction.
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
// Eigendecomposition (syevd/heevd and geev) and SVD (gesdd).
template<class scalar_t, class value_t=scalar_t>
void magmaSymeig(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaEig(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, scalar_t *A, magma_int_t lda,
scalar_t *w, scalar_t *VL, magma_int_t ldvl,
scalar_t *VR, magma_int_t ldvr, scalar_t *work, magma_int_t lwork,
value_t *rwork,
magma_int_t *info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
// getrs: solve from an existing LU factorization, single and batched.
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
// magmaSolve specializations: single-matrix LU-based solve via magma_?gesv_gpu.
// NOTE(review): MagmaStreamSyncGuard presumably orders the synchronous MAGMA
// call with respect to ATen's current CUDA stream — confirm against its definition.
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
// Complex variants reinterpret c10::complex<T> as MAGMA's complex types
// (identical layout: two contiguous real components).
template<>
void magmaSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesv_gpu(n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesv_gpu(n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
// magmaSolveBatched specializations: batched gesv on the given MAGMA queue;
// per-matrix status codes are written to dinfo_array (device memory).
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue())
;
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_zgesv_batched(n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_cgesv_batched(n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
// magmaLu specializations: single-matrix partial-pivoting LU (getrf) on
// device memory; pivots and status are returned through ipiv/info.
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
// magmaLuBatched specializations: batched getrf with per-matrix pivot arrays
// and per-matrix status codes, executed on the supplied MAGMA queue.
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
// magmaLuNoPiv specializations: single-matrix LU factorization without
// pivoting (getrf_nopiv) on device memory.
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
// magmaLuNoPivBatched specializations: batched pivot-free LU on the supplied
// MAGMA queue; per-matrix status codes land in info_array.
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
// magmaGetriOptimalBlocksize specializations: query MAGMA's preferred block
// size for getri, used to size the dwork buffer passed to magmaGetri.
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>(
    magma_int_t n) {
return magma_get_zgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>(
    magma_int_t n) {
return magma_get_cgetri_nb(n);
}
// magmaGetri specializations: matrix inverse from an existing LU
// factorization (getri) with caller-provided device workspace dwork of
// length lwork.
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetri<c10::complex<double>>(
    magma_int_t n,
    c10::complex<double>* dA,
    magma_int_t ldda,
    magma_int_t* ipiv,
    c10::complex<double>* dwork,
    magma_int_t lwork,
    magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetri_gpu(
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaDoubleComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetri<c10::complex<float>>(
    magma_int_t n,
    c10::complex<float>* dA,
    magma_int_t ldda,
    magma_int_t* ipiv,
    c10::complex<float>* dwork,
    magma_int_t lwork,
    magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetri_gpu(
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaFloatComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
// magmaGetriBatched specializations: batched out-of-place inverse — input LU
// factors in dA_array, inverses written to dinvA_array, per-matrix status in
// info_array, all on the supplied MAGMA queue.
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<double>>(
    magma_int_t n,
    c10::complex<double>** dA_array,
    magma_int_t ldda,
    magma_int_t** ipiv_array,
    c10::complex<double>** dinvA_array,
    magma_int_t lddia,
    magma_int_t* info_array,
    magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
magma_zgetri_outofplace_batched(
n,
reinterpret_cast<magmaDoubleComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<float>>(
    magma_int_t n,
    c10::complex<float>** dA_array,
    magma_int_t ldda,
    magma_int_t** ipiv_array,
    c10::complex<float>** dinvA_array,
    magma_int_t lddia,
    magma_int_t* info_array,
    magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
magma_cgetri_outofplace_batched(
n,
reinterpret_cast<magmaFloatComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaFloatComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
// magmaCholeskySolve<T>: solves A X = B (n-by-n SPD/HPD A given by its
// Cholesky factor in `dA`, `uplo` selecting which triangle) for `nrhs`
// right-hand sides, overwriting `dB`. Forwards to magma_[sdcz]potrs_gpu.
template<>
void magmaCholeskySolve<double>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
    double* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<float>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
    float* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
    c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zpotrs_gpu(uplo, n, nrhs,
    reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
    reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
    c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cpotrs_gpu(uplo, n, nrhs,
    reinterpret_cast<magmaFloatComplex*>(dA), ldda,
    reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaCholeskySolveBatched<T>: batched Cholesky solve via
// magma_[sdcz]potrs_batched. Unlike the single-matrix variant, MAGMA here
// returns its status as the function's return value, which is stored into the
// `info` out-reference (a single code for the whole batch).
template<>
void magmaCholeskySolveBatched<double>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
    double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
    float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
    c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_zpotrs_batched(uplo, n, nrhs,
    reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
    reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
    c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_cpotrs_batched(uplo, n, nrhs,
    reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
    reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaCholesky<T>: in-place Cholesky factorization of one n-by-n matrix
// (triangle selected by `uplo`), forwarding to magma_[sdcz]potrf_gpu.
// MAGMA's status code is written to `*info`.
template<>
void magmaCholesky<double>(
    magma_uplo_t uplo, magma_int_t n, double* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dpotrf_gpu(uplo, n, dA, ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<float>(
    magma_uplo_t uplo, magma_int_t n, float* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_spotrf_gpu(uplo, n, dA, ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaCholeskyBatched<T>: batched in-place Cholesky factorization via
// magma_[sdcz]potrf_batched; one status code per matrix goes to `info_array`.
template<>
void magmaCholeskyBatched<double>(
    magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
    magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaTriangularSolve<T>: single left-sided triangular solve
// op(A) * X = B with unit alpha; X overwrites dB. Forwards to
// magma_[sdcz]trsm on the queue held by `magma_queue`.
template <>
void magmaTriangularSolve<double>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
    double* dB, magma_int_t lddb, const MAGMAQueue& magma_queue) {
  magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1,
              dA, ldda, dB, lddb, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaTriangularSolve<float>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
    float* dB, magma_int_t lddb, const MAGMAQueue& magma_queue) {
  magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1,
              dA, ldda, dB, lddb, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaTriangularSolve<c10::complex<double>>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
    c10::complex<double>* dB, magma_int_t lddb, const MAGMAQueue& magma_queue) {
  // MAGMA's complex trsm takes alpha as a struct; build the unit scalar.
  magmaDoubleComplex one({1, 0});
  magma_ztrsm(MagmaLeft, uplo, trans, diag, m, n, one,
              reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
              reinterpret_cast<magmaDoubleComplex*>(dB), lddb,
              magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaTriangularSolve<c10::complex<float>>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
    c10::complex<float>* dB, magma_int_t lddb, const MAGMAQueue& magma_queue) {
  // Unit alpha, complex<float> flavor.
  magmaFloatComplex one({1, 0});
  magma_ctrsm(MagmaLeft, uplo, trans, diag, m, n, one,
              reinterpret_cast<magmaFloatComplex*>(dA), ldda,
              reinterpret_cast<magmaFloatComplex*>(dB), lddb,
              magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaTriangularSolveBatched<T>: batched left-sided triangular solve with
// unit alpha (X overwrites each dB in the batch), forwarding to
// magmablas_[sdcz]trsm_batched on `magma_queue`.
template<>
void magmaTriangularSolveBatched<double>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magmablas_dtrsm_batched(
      MagmaLeft, uplo, trans, diag, m, n, 1,
      dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magmablas_strsm_batched(
      MagmaLeft, uplo, trans, diag, m, n, 1,
      dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<double>>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  // Unit alpha as MAGMA's complex struct type.
  magmaDoubleComplex one({1, 0});
  magmablas_ztrsm_batched(
      MagmaLeft, uplo, trans, diag, m, n, one,
      reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
      reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb,
      batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<float>>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magmaFloatComplex one({1, 0});
  magmablas_ctrsm_batched(
      MagmaLeft, uplo, trans, diag, m, n, one,
      reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
      reinterpret_cast<magmaFloatComplex**>(dB_array), lddb,
      batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
// Returns MAGMA's optimal blocking factor for the [sdcz]geqrf QR routines;
// used when sizing workspace for magmaGeqrf below.
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
  return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
  return magma_get_sgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
    magma_int_t m,
    magma_int_t n) {
  return magma_get_zgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
    magma_int_t m,
    magma_int_t n) {
  return magma_get_cgeqrf_nb(m, n);
}
// magmaGeqrf<T>: QR factorization of an m-by-n matrix in place. When
// `is_v2` is false it calls magma_[sdcz]geqrf_gpu, which also fills the `dT`
// workspace; when `is_v2` is true it calls the magma_[sdcz]geqrf2_gpu
// variant, which takes no `dT` argument (the parameter is then unused).
// Householder scalars are written to `tau`, MAGMA status to `*info`.
template<>
void magmaGeqrf<double>(
    magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
    double* tau, double* dT, magma_int_t* info, bool is_v2) {
  MagmaStreamSyncGuard guard;
  if (!is_v2) {
    magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
  } else {
    magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
  }
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGeqrf<float>(
    magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
    float* tau, float* dT, magma_int_t* info, bool is_v2) {
  MagmaStreamSyncGuard guard;
  if (!is_v2) {
    magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
  } else {
    magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
  }
  AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGeqrf<c10::complex<double>>(
    magma_int_t m,
    magma_int_t n,
    c10::complex<double>* dA,
    magma_int_t ldda,
    c10::complex<double>* tau,
    c10::complex<double>* dT,
    magma_int_t* info,
    bool is_v2) {
  MagmaStreamSyncGuard guard;
  if (!is_v2) {
    magma_zgeqrf_gpu(
        m,
        n,
        reinterpret_cast<magmaDoubleComplex*>(dA),
        ldda,
        reinterpret_cast<magmaDoubleComplex*>(tau),
        reinterpret_cast<magmaDoubleComplex*>(dT),
        info);
  } else {
    magma_zgeqrf2_gpu(
        m,
        n,
        reinterpret_cast<magmaDoubleComplex*>(dA),
        ldda,
        reinterpret_cast<magmaDoubleComplex*>(tau),
        info);
  }
  AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGeqrf<c10::complex<float>>(
    magma_int_t m,
    magma_int_t n,
    c10::complex<float>* dA,
    magma_int_t ldda,
    c10::complex<float>* tau,
    c10::complex<float>* dT,
    magma_int_t* info,
    bool is_v2) {
  MagmaStreamSyncGuard guard;
  if (!is_v2) {
    magma_cgeqrf_gpu(
        m,
        n,
        reinterpret_cast<magmaFloatComplex*>(dA),
        ldda,
        reinterpret_cast<magmaFloatComplex*>(tau),
        reinterpret_cast<magmaFloatComplex*>(dT),
        info);
  } else {
    magma_cgeqrf2_gpu(
        m,
        n,
        reinterpret_cast<magmaFloatComplex*>(dA),
        ldda,
        reinterpret_cast<magmaFloatComplex*>(tau),
        info);
  }
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaOrgqr<T>: forms the explicit orthogonal/unitary matrix Q from the
// output of magmaGeqrf (reflectors in `dA`, scalars in `tau`, workspace in
// `dT`, blocking factor `nb`). Real types map to magma_[sd]orgqr_gpu; complex
// types map to the unitary counterparts magma_[zc]ungqr_gpu.
template<>
void magmaOrgqr<double>(
    magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
    double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaOrgqr<float>(
    magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
    float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaOrgqr<c10::complex<double>>(
    magma_int_t m,
    magma_int_t n,
    magma_int_t k,
    c10::complex<double>* dA,
    magma_int_t ldda,
    c10::complex<double>* tau,
    c10::complex<double>* dT,
    magma_int_t nb,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zungqr_gpu(
      m,
      n,
      k,
      reinterpret_cast<magmaDoubleComplex*>(dA),
      ldda,
      reinterpret_cast<magmaDoubleComplex*>(tau),
      reinterpret_cast<magmaDoubleComplex*>(dT),
      nb,
      info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaOrgqr<c10::complex<float>>(
    magma_int_t m,
    magma_int_t n,
    magma_int_t k,
    c10::complex<float>* dA,
    magma_int_t ldda,
    c10::complex<float>* tau,
    c10::complex<float>* dT,
    magma_int_t nb,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cungqr_gpu(
      m,
      n,
      k,
      reinterpret_cast<magmaFloatComplex*>(dA),
      ldda,
      reinterpret_cast<magmaFloatComplex*>(tau),
      reinterpret_cast<magmaFloatComplex*>(dT),
      nb,
      info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaSymeig<T, value_t>: symmetric/Hermitian eigendecomposition via MAGMA's
// divide-and-conquer drivers. Real types use magma_[sd]syevd_gpu, which takes
// no real workspace (`rwork`/`lrwork` are accepted for a uniform signature and
// explicitly ignored); complex types use magma_[zc]heevd_gpu, which does
// consume `rwork`/`lrwork`. Eigenvalues go to `w`, status to `*info`.
template<>
void magmaSymeig<double>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
    double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  (void)rwork;  // unused
  (void)lrwork;  // unused
  MagmaStreamSyncGuard guard;
  magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<float>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
    float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  (void)rwork;  // unused
  (void)lrwork;  // unused
  MagmaStreamSyncGuard guard;
  magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<c10::complex<double>, double>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
    double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zheevd_gpu(
      jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
      ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<c10::complex<float>, float>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
    float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cheevd_gpu(
      jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
      ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaEig<T, value_t>: general (non-symmetric) eigendecomposition via
// magma_[sdcz]geev. These CPU-interface routines take host-side matrices.
// Real variants split `w` into wr (real parts) and wi (imaginary parts) at
// offset n — the caller must therefore provide `w` with room for 2*n real
// values — and ignore `rwork`. Complex variants write eigenvalues directly
// into `w` and use `rwork` as real scratch.
template<>
void magmaEig<double>(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
    double *A, magma_int_t lda,
    double *w,
    double *VL, magma_int_t ldvl,
    double *VR, magma_int_t ldvr,
    double *work, magma_int_t lwork,
    double *rwork,
    magma_int_t *info) {
  MagmaStreamSyncGuard guard;
  // magma [sd]geev wants to separate output arrays: wr and wi for the real
  // and imaginary parts
  double *wr = w;
  double *wi = w + n;
  (void)rwork; // unused
  magma_dgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<float>(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
    float *A, magma_int_t lda,
    float *w,
    float *VL, magma_int_t ldvl,
    float *VR, magma_int_t ldvr,
    float *work, magma_int_t lwork,
    float *rwork,
    magma_int_t *info) {
  MagmaStreamSyncGuard guard;
  float *wr = w;
  float *wi = w + n;
  (void)rwork; // unused
  magma_sgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<c10::complex<double>, double>(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
    c10::complex<double> *A, magma_int_t lda,
    c10::complex<double> *w,
    c10::complex<double> *VL, magma_int_t ldvl,
    c10::complex<double> *VR, magma_int_t ldvr,
    c10::complex<double> *work, magma_int_t lwork,
    double *rwork,
    magma_int_t *info) {
  MagmaStreamSyncGuard guard;
  magma_zgeev(jobvl, jobvr, n,
    reinterpret_cast<magmaDoubleComplex*>(A), lda,
    reinterpret_cast<magmaDoubleComplex*>(w),
    reinterpret_cast<magmaDoubleComplex*>(VL), ldvl,
    reinterpret_cast<magmaDoubleComplex*>(VR), ldvr,
    reinterpret_cast<magmaDoubleComplex*>(work), lwork,
    rwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<c10::complex<float>, float>(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
    c10::complex<float> *A, magma_int_t lda,
    c10::complex<float> *w,
    c10::complex<float> *VL, magma_int_t ldvl,
    c10::complex<float> *VR, magma_int_t ldvr,
    c10::complex<float> *work, magma_int_t lwork,
    float *rwork,
    magma_int_t *info) {
  MagmaStreamSyncGuard guard;
  magma_cgeev(jobvl, jobvr, n,
    reinterpret_cast<magmaFloatComplex*>(A), lda,
    reinterpret_cast<magmaFloatComplex*>(w),
    reinterpret_cast<magmaFloatComplex*>(VL), ldvl,
    reinterpret_cast<magmaFloatComplex*>(VR), ldvr,
    reinterpret_cast<magmaFloatComplex*>(work), lwork,
    rwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaSvd<T, value_t>: singular value decomposition via the
// divide-and-conquer drivers magma_[sdcz]gesdd (host-side matrices).
// Singular values go to `s`, left/right factors to `U`/`VT`. Real variants
// take no real workspace and ignore `rwork`; complex variants use it.
template<>
void magmaSvd<double>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
    magma_int_t lda, double* s, double* U, magma_int_t ldu,
    double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
    double *rwork, magma_int_t* iwork, magma_int_t* info) {
  (void)rwork; // unused
  MagmaStreamSyncGuard guard;
  magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<float>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
    magma_int_t lda, float* s, float* U, magma_int_t ldu,
    float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
    float* rwork, magma_int_t* iwork, magma_int_t* info) {
  (void)rwork; // unused
  MagmaStreamSyncGuard guard;
  magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<c10::complex<float>, float>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
    magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
    c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
    float *rwork, magma_int_t* iwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
    reinterpret_cast<magmaFloatComplex*>(U), ldu,
    reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
    reinterpret_cast<magmaFloatComplex*>(work), lwork,
    rwork, iwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<c10::complex<double>, double>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
    magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
    c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
    double *rwork, magma_int_t* iwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
    reinterpret_cast<magmaDoubleComplex*>(U), ldu,
    reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
    reinterpret_cast<magmaDoubleComplex*>(work), lwork,
    rwork, iwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaLuSolve<T>: solves A X = B from an existing LU factorization (factors
// in `dA`, pivots in `ipiv`), overwriting `dB`. Forwards to
// magma_[sdcz]getrs_gpu with no transposition (MagmaNoTrans).
template<>
void magmaLuSolve<double>(
    magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
    double* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<float>(
    magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
    float* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<c10::complex<double>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv,
    c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv,
    c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaLuSolveBatched<T>: batched LU-based solve via magma_[sdcz]getrs_batched
// (no transposition). MAGMA returns a single status for the whole batch,
// stored into the `info` out-reference.
template<>
void magmaLuSolveBatched<double>(
    magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    double** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
    magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    float** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<double>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_zgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_cgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
#endif
// Declares pinned (page-locked) host storage for `size` elements of `type`
// and points the already-declared pointer `name` at it. The `storage_##name`
// local keeps the buffer alive until the end of the enclosing scope.
// NOTE(review): pin_memory<> is declared elsewhere in this file — presumably
// it allocates via the CUDA pinned-host allocator; verify at its definition.
#define ALLOCATE_ARRAY(name, type, size) \
  auto storage_##name = pin_memory<type>(size); \
  name = static_cast<type*>(storage_##name.data());
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves the linear system(s) A x = b, overwriting `b` with the solution.
// Single-matrix inputs (b.dim() == 2) go through magmaSolve; batched inputs
// go through magmaSolveBatched in chunks of at most 65535 matrices per call.
// Per-matrix MAGMA status codes are written into `infos` (moved to CPU in the
// single-matrix path, as magmaSolve requires). Both `A` and `b` must be in
// column-major (batched) layout; `A` is clobbered by the factorization.
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, Tensor& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto A_data = A.data_ptr<scalar_t>();
  auto b_data = b.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
  // Leading dimension must be >= 1 even when n == 0.
  magma_int_t lda = std::max(magma_int_t{1}, n);
  if (b.dim() == 2) {
    auto ipiv = at::empty({n}, at::kInt);
    infos = infos.to(at::kCPU); // magmaSolve requires infos tensor to live on CPU
    magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
                        b_data, lda, infos.data_ptr<magma_int_t>());
  } else {
    auto infos_data = infos.data_ptr<magma_int_t>();
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);
    magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
    // Per-batch pivot storage plus pointer tables (pinned host memory via
    // ALLOCATE_ARRAY) that MAGMA's batched interface expects.
    magma_int_t* ipiv_data;
    magma_int_t** ipiv_array;
    scalar_t** A_array;
    scalar_t** b_array;
    ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
    ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
      ipiv_array[i] = &ipiv_data[i * n];
    }
    MAGMAQueue magma_queue(b.get_device());
    constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 possible
    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** A_array_cur = &A_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];
      magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
      magma_int_t* info_array_cur = &infos_data[mini_idx];
      magmaSolveBatched<scalar_t>(
          n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
          info_array_cur, batch_limit, magma_queue);
    }
    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    // (mini_idx already points past the last full mini-batch here)
    if (batch_size % batch_limit != 0) {
      magmaSolveBatched<scalar_t>(
          n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
          &infos_data[mini_idx], batch_size % batch_limit, magma_queue);
    }
  }
#endif
}
// Entry point for torch.solve on CUDA: clones `self` (RHS) and `A` into
// column-major working copies, dispatches apply_solve over floating and
// complex dtypes, then surfaces MAGMA error codes — per matrix for batched
// inputs, or a single code otherwise. Returns (solution, LU of A).
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
  auto self_working_copy = cloneBatchedColumnMajor(self);
  auto A_working_copy = cloneBatchedColumnMajor(A);
  // At least one info slot even for an empty batch.
  auto infos = at::empty({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{
    apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
  });
  if (self.dim() > 2) {
    batchCheckErrors(infos, "solve_cuda");
  } else {
    singleCheckErrors(infos.item().toInt(), "solve_cuda");
  }
  return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// This is a type dispatching helper function for 'apply_solve'
Tensor& _linalg_solve_out_helper_cuda(Tensor& result, Tensor& input, Tensor& infos) {
  // 'result' and 'input' should be in column major order (it should be checked before calling this function)
  // the content of 'result', 'input' and 'infos' is overwritten by 'apply_solve'
  // 'result' should contain data of 'other' tensor (right-hand-side of the linear system of equations)
  // 'input' should contain data of original 'input' tensor (left-hand-side of the linear system)
  // Fix: the dispatch tag previously said "linalg_solve_out_cpu", which would
  // mislabel unsupported-dtype error messages coming from this CUDA path.
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_solve_out_cuda", [&]{
    apply_solve<scalar_t>(result, input, infos);
  });
  return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of n-by-n matrix 'self', it is saved to 'self_inv'.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
'infos_lu' is for holding magmaLU errors, and 'infos_getri' is for holding magmaGetri errors
For more information see MAGMA's documentation for GETRI and GETRF routines.
*/
// Batched matrix inverse: LU-factorizes each matrix of `self` in place
// (magmaLuBatched), then forms the inverses out-of-place into `self_inv`
// (magmaGetriBatched), chunked to at most 65535 matrices per MAGMA call.
// Per-matrix status codes go to `infos_lu` and `infos_getri` respectively;
// both tensors must have one magma_int_t slot per matrix.
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data_ptr<scalar_t>();
  auto self_mat_stride = matrixStride(self);
  auto self_inv_data = self_inv.data_ptr<scalar_t>();
  auto self_inv_mat_stride = matrixStride(self_inv);
  auto infos_lu_data = infos_lu.data_ptr<magma_int_t>();
  auto infos_getri_data = infos_getri.data_ptr<magma_int_t>();
  magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
  // MAGMA does not work with batch_size == 0, let's return early in this case
  if (batch_size == 0) {
    return;
  }
  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
  magma_int_t lda = std::max<magma_int_t>(1, n);
  // Pivot storage plus MAGMA's pointer tables, in pinned host memory.
  magma_int_t* ipiv_data;
  magma_int_t** ipiv_array;
  scalar_t** self_array;
  scalar_t** self_inv_array;
  ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * lda);
  ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
  ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
  ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
  // Set up the created arrays
  for (int64_t i = 0; i < batch_size; i++) {
    self_array[i] = &self_data[i * self_mat_stride];
    self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
    ipiv_array[i] = &ipiv_data[i * n];
  }
  MAGMAQueue magma_queue(self.get_device());
  // LU factorization runs over the full batch in one call.
  magmaLuBatched<scalar_t>(
    n, n, self_array, lda, ipiv_array, infos_lu_data,
    batch_size, magma_queue);
  constexpr int64_t batch_limit = 65535;
  // Compute as many batches of 65535 possible
  // The number of "mini"-batches are floor(batch_size / batch_limit)
  // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
  int64_t mini_batches = batch_size / batch_limit, mini_idx;
  for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
    scalar_t** self_array_cur = &self_array[mini_idx];
    scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
    magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
    magma_int_t* info_array_cur_getri = &infos_getri_data[mini_idx];
    magmaGetriBatched<scalar_t>(
      n, self_array_cur, lda, ipiv_array_cur, self_inv_array_cur,
      lda, info_array_cur_getri, batch_limit, magma_queue);
  }
  // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
  // which concisely is equal to batch_size % batch_limit
  // (mini_idx already points past the last full mini-batch here)
  if (batch_size % batch_limit != 0) {
    magmaGetriBatched<scalar_t>(
      n, &self_array[mini_idx], lda, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
      lda, &infos_getri_data[mini_idx], batch_size % batch_limit, magma_queue);
  }
#endif
}
// Computes the inverse of a single n-by-n matrix in-place via MAGMA's
// LU factorization (getrf) followed by getri.
// 'self' must be a column-major CUDA matrix; it is overwritten with its inverse.
// 'infos_lu'/'infos_getri' receive the status codes of the two phases.
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
// LAPACK convention: lda must be at least max(1, n); guards the n == 0 case
magma_int_t lda = std::max<magma_int_t>(1, n);
// workspace size for getri, using MAGMA's recommended block size
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
// magmaLu and magmaGetri requires infos tensor to live on CPU
// (the reference parameters are re-bound to the CPU copies, so the caller
// observes the CPU tensors afterwards)
infos_lu = infos_lu.to(at::kCPU);
infos_getri = infos_getri.to(at::kCPU);
Tensor ipiv = at::empty({lda}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
// Phase 1: LU factorization in-place, pivot indices stored in 'ipiv'
magmaLu<scalar_t>(n, n, self_data, lda, ipiv.data_ptr<magma_int_t>(), infos_lu.data_ptr<magma_int_t>());
// Phase 2: inverse from the LU factors, 'dwork' is device scratch space
magmaGetri<scalar_t>(
n, self_data, lda, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, infos_getri.data_ptr<magma_int_t>());
#endif
}
// MAGMA-backed matrix inverse: returns a new column-major tensor holding
// inverse(self), dispatching to the batched or single-matrix kernel.
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
  // Output buffer; both kernels write the inverse into a column-major clone.
  auto result = cloneBatchedColumnMajor(self);
  const bool batched = self.dim() > 2;
  if (batched) {
    const int64_t n_infos = std::max<int64_t>(1, batchCount(self));
    auto infos_lu = at::zeros({n_infos}, self.options().dtype(kInt));
    auto infos_getri = at::zeros({n_infos}, self.options().dtype(kInt));
    // the batched kernel consumes a separate LU working buffer
    auto lu_working_copy = cloneBatchedColumnMajor(self);
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_batched_inverse<scalar_t>(lu_working_copy, result, infos_lu, infos_getri);
    });
    batchCheckErrors(infos_lu, "inverse_cuda");
    batchCheckErrors(infos_getri, "inverse_cuda");
  } else {
    // magmaLu and magmaGetri requires infos tensor to live on CPU
    auto infos_lu = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
    auto infos_getri = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_single_inverse<scalar_t>(result, infos_lu, infos_getri);
    });
    singleCheckErrors(infos_lu.item().toInt(), "inverse_cuda");
    singleCheckErrors(infos_getri.item().toInt(), "inverse_cuda");
  }
  return result;
}
// Backend selector for matrix inverse.
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
  // Prefer cuSOLVER/cuBLAS for a single matrix, a tiny batch, or when MAGMA
  // is unavailable at runtime; otherwise use the MAGMA path.
  const bool prefer_lib =
      (self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_;
  if (prefer_lib) {
    return _inverse_helper_cuda_lib(self);   // cusolver or cublas
  }
  return _inverse_helper_cuda_legacy(self);  // magma-cuda
#else
  return _inverse_helper_cuda_legacy(self);  // magma-cuda
#endif
}
// This is a type dispatching helper function for 'apply_batched_inverse' and 'singleCheckErrors'
Tensor& _linalg_inv_out_helper_cuda_legacy(Tensor& result, Tensor& infos_lu, Tensor& infos_getri) {
  // 'result' holds the matrices to invert in column-major order and is
  // overwritten in-place with the inverse.
  const bool is_batched = result.dim() > 2;
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
    if (is_batched) {
      // the batched kernel LU-factorizes its first argument in-place,
      // so factorize a copy and write the inverse into 'result'
      auto lu_working_copy = cloneBatchedColumnMajor(result);
      apply_batched_inverse<scalar_t>(lu_working_copy, result, infos_lu, infos_getri);
    } else {
      apply_single_inverse<scalar_t>(result, infos_lu, infos_getri);
    }
  });
  return result;
}
// This is a MAGMA/cuSOLVER dispatching helper function
Tensor& _linalg_inv_out_helper_cuda(Tensor &result, Tensor& infos_lu, Tensor& infos_getri) {
  // This function calculates the inverse matrix in-place
  // result should be in column major order and contain matrices to invert
  // infos_lu / infos_getri receive the status codes of the two phases
#ifdef USE_CUSOLVER
  if ((result.dim() == 2) || (/* result.dim() > 2 && */ batchCount(result) <= 2) || !use_magma_) {
    return _linalg_inv_out_helper_cuda_lib(result, infos_lu, infos_getri);  // cusolver or cublas
  } else {
    return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri);  // magma-cuda
  }
#else
  return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri);  // magma-cuda
#endif
  // NOTE: a trailing 'return result;' used to follow here; it was unreachable
  // (every path above already returns) and has been removed.
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves A X = B for X given a precomputed Cholesky factorization of A.
// 'b' holds the right-hand sides and is overwritten with the solution X;
// 'A' holds the Cholesky factor ('upper' selects which triangle is valid).
// Both tensors must be column-major CUDA tensors.
// 'info' receives 0 on success or the negated index of an illegal argument.
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
// LAPACK convention: lda must be at least max(1, n)
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
// single-matrix path: use the non-batched hybrid driver
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, lda,
b_data, lda, &info_tmp);
info = info_tmp;
} else {
// batched path: build arrays of per-matrix device pointers for MAGMA
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
// MAGMA's batched routines accept at most this many matrices per call
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, lda, b_array_cur, lda,
info_tmp, batch_limit, magma_queue);
// stop at the first reported error
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], lda, &b_array[mini_idx], lda,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
// Entry point for cholesky_solve: clones both operands into column-major
// buffers (MAGMA solves in-place on Fortran layout) and returns the solution.
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
  int64_t info = 0;
  auto rhs_copy = cloneBatchedColumnMajor(self);
  auto factor_copy = cloneBatchedColumnMajor(A);
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
    apply_cholesky_solve<scalar_t>(rhs_copy, factor_copy, upper, info);
  });
  TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
  return rhs_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes the Cholesky factorization of each matrix in 'self' in-place.
// 'upper' selects which triangle MAGMA fills. 'infos' receives one status
// code per matrix (0 = success, as with LAPACK potrf).
// 'self' must be a batched column-major CUDA tensor.
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
// LAPACK convention: lda must be at least max(1, n)
auto lda = std::max<magma_int_t>(1, n);
if (self.dim() == 2) {
// single-matrix path uses the hybrid (non-batched) driver
magma_int_t info = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info);
infos[0] = info;
} else {
// batched path: one device pointer and one info slot per matrix
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t* info_array;
scalar_t** self_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
int64_t batch_limit = self.is_complex() ? 65535 : 262140;
// Compute as many batches of 262140 possible
// 262140 is the size of the largest batch of matrices that can be run with
// violating maximum kernel configuration
// For complex input the batch limit is 65535 (determined experimentally, see https://github.com/pytorch/pytorch/pull/47047#discussion_r516086923 for more information)
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, lda, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaCholeskyBatched<scalar_t>(
uplo, n, &self_array[mini_idx], lda, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
// copy the per-matrix status codes out to the caller's vector
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
// Entry point for cholesky: prepares a (padded) column-major working buffer,
// runs the factorization, checks errors, and returns the requested triangle.
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
Tensor result;
if (self.dim() > 2) {
// MAGMA's batched cholesky operator has an off-by-one error causing IMA
// (see https://github.com/pytorch/pytorch/issues/42666). This code is based
// on the #cloneBatchedColumnMajor function however it pads the input with
// one extra element utilizing the fact that the resize_as_ method preserves
// the storage even if it's larger than the new sizes. This way if MAGMA
// reads off bounds it will still be valid user memory.
const Tensor input = upper ? self : self.transpose(-1, -2);
result = at::empty(input.numel() + 1, input.options());
result.resize_as_(input).copy_(input).transpose_(-1, -2);
} else {
result = cloneBatchedColumnMajor(upper ? self.transpose(-1, -2) : self);
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
self.scalar_type(), "cholesky_cuda", [&] {
// 'false' (lower) is deliberately always passed here: the transposes
// applied above and on return convert the computation to the
// requested triangle
apply_cholesky<scalar_t>(result, false, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "cholesky_cuda");
} else {
singleCheckErrors(infos[0], "cholesky_cuda");
}
return upper ? result.transpose_(-1, -2) : result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of a symmetric (Hermitian) positive-definite matrix n-by-n matrix 'input' using the Cholesky solver
This is an in-place routine, content of 'input' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
MAGMA requires 'infos' to reside in CPU memory.
For more information see MAGMA's documentation for POTRS routine.
*/
template <typename scalar_t>
static void apply_cholesky_inverse(Tensor& input, Tensor& infos, bool upper) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "cholesky_inverse: MAGMA library not found in compilation. Please rebuild with MAGMA.");
#else
// magmaCholeskyInverse (magma_dpotri_gpu) is slow because internally
// it transfers data several times between GPU and CPU and calls lapack routine on CPU
// using magmaCholeskySolveBatched is a lot faster
// note that magmaCholeskySolve is also slow
// 'input' is modified in-place we need to clone it and replace with a diagonal matrix
// for apply_cholesky_solve
auto input_working_copy = cloneBatchedColumnMajor(input);
// 'input' tensor has to be a batch of diagonal matrix
// (solving A X = I yields X = A^-1)
input.fill_(0);
input.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
Tensor result_u, input_u;
if (input.dim() == 2) {
// unsqueezing here so that the batched version is used
result_u = input.unsqueeze(0);
input_u = input_working_copy.unsqueeze(0);
} else {
result_u = input;
input_u = input_working_copy;
}
// magma's potrs_batched doesn't take matrix-wise array of ints as an 'info' argument
// it returns a single 'magma_int_t'
// if info = 0 the operation is successful, if info = -i, the i-th parameter had an illegal value.
int64_t info_tmp = 0;
apply_cholesky_solve<scalar_t>(result_u, input_u, upper, info_tmp);
// broadcast the single status code to every entry of 'infos'
infos.fill_(info_tmp);
#endif
}
// This is a type dispatching helper function for 'apply_cholesky_inverse'
// 'result' must contain the Cholesky factors; 'infos' receives the status
// code; 'upper' selects which triangle of 'result' is valid.
Tensor& cholesky_inverse_kernel_impl(Tensor &result, Tensor& infos, bool upper) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
// the content of result is overwritten by 'apply_cholesky_inverse'
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_inverse_out_cuda", [&]{
apply_cholesky_inverse<scalar_t>(result, infos, upper);
});
return result;
}
REGISTER_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes the LU factorization of 'self' in-place.
// When 'get_pivots' is true the partially-pivoted factorization is used and
// pivot indices are written into 'pivots'; otherwise the no-pivot variant
// runs. 'infos' receives the status codes. 'self' must be a column-major
// CUDA tensor with m = size(-2) and n = size(-1).
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
// number of pivots for an m-by-n factorization
magma_int_t k = std::min(m, n);
if (self.dim() == 2) {
// If `pivots` is defined, then we have to compute them.
// magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
// the partially-pivoted LU decomposition with / without pivots.
// The driver routines magma_(d/s)getrf_(nopiv_)gpu accepts a tensor on the CPU for pivots.
// The data is later copied back to the appropriate output tensor.
Tensor info_tmp = at::zeros({}, at::kInt);
if (get_pivots) {
Tensor piv_tmp = at::empty({k}, at::kInt);
magmaLu<scalar_t>(
m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>());
pivots.copy_(piv_tmp);
} else {
magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>());
}
infos.copy_(info_tmp);
} else {
// batched path: one device pointer per matrix
auto self_matrix_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_matrix_stride];
}
MAGMAQueue magma_queue(self.get_device());
// Same comment as in the case of single matrix above.
if (get_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_matrix_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
}
magmaLuBatched<scalar_t>(
m, n, self_array, m, pivots_array,
infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(
m, n, self_array, m, infos.data_ptr<magma_int_t>(),
batch_size, magma_queue);
}
}
#endif
}
// Entry point for torch.lu: returns (LU factors, pivots, infos).
// 'pivot' selects the partially-pivoted variant; 'check_errors' raises on
// non-zero info codes.
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
  TORCH_CHECK(self.dim() >= 2,
           "expected tensor with 2 or more dimensions, got size: ", self.sizes(),
           " instead");
  const auto m = self.size(-2);
  const auto n = self.size(-1);
  const auto k = std::min(m, n);
  // pivots get shape self.shape[:-2] + (k,); infos get shape self.shape[:-2]
  auto shape = self.sizes().vec();
  shape.pop_back();   // drop n
  shape.back() = k;   // replace m with k
  // identity permutation 1..k, broadcast over the batch dimensions
  Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(shape).contiguous();
  shape.pop_back();   // drop k, leaving the batch shape
  auto infos_tensor = at::zeros(shape, self.options().dtype(at::kInt));
  Tensor working_copy;
  if (self.numel() == 0) {
    // nothing to factorize for empty input
    working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  } else {
    working_copy = cloneBatchedColumnMajor(self);
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_cuda", [&]{
      apply_lu<scalar_t>(working_copy, pivots_tensor, infos_tensor, pivot);
    });
  }
  if (check_errors) {
    if (self.dim() == 2) {
      singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true);
    } else {
      batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true);
    }
  }
  return std::make_tuple(working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves the triangular system op(A) X = B in-place: 'b' holds the
// right-hand sides and is overwritten with the solution X. 'A' is the
// triangular coefficient matrix ('upper' selects the referenced triangle,
// 'transpose' selects op(A) = A^T, 'unitriangular' assumes a unit diagonal).
// Both tensors must be column-major CUDA tensors. No info parameter is
// threaded through: the MAGMA calls below take no status output.
template <typename scalar_t>
static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
  magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
  magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;

  auto A_data = A.data_ptr<scalar_t>();
  auto b_data = b.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
  magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");

  MAGMAQueue magma_queue(b.get_device());

  // batch_size == 1 implies that:
  // 1. the RHS and LHS tensors have 2 dimensions, or
  // 2. the RHS and LHS tensors have more than 2 dimensions but all batch dimensions are 1
  if (batch_size == 1) {
    magmaTriangularSolve<scalar_t>(
        uplo, trans, diag, n, nrhs, A_data, n, b_data, n, magma_queue);
  } else {
    // batched path: build arrays of per-matrix device pointers for MAGMA
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);

    scalar_t** A_array;
    scalar_t** b_array;
    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
    }

    // BUGFIX: a second 'MAGMAQueue magma_queue(b.get_device());' used to be
    // constructed here, shadowing the queue created above. The redundant
    // declaration is removed and the outer queue is reused.

    // MAGMA's batched routines accept at most this many matrices per call
    constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 possible
    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** A_array_cur = &A_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];

      magmaTriangularSolveBatched<scalar_t>(
          uplo, trans, diag, n, nrhs, A_array_cur,
          n, b_array_cur, n, batch_limit, magma_queue);
    }

    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    if (batch_size % batch_limit != 0) {
      magmaTriangularSolveBatched<scalar_t>(
          uplo, trans, diag, n, nrhs, &A_array[mini_idx],
          n, &b_array[mini_idx], n, batch_size % batch_limit, magma_queue);
    }
  }
#endif
}
// Entry point for triangular_solve: both operands are cloned into
// column-major buffers (MAGMA solves in-place) and returned as
// (solution, cloned coefficient matrix).
std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A,
                                                         bool upper, bool transpose, bool unitriangular) {
  auto rhs_copy = cloneBatchedColumnMajor(self);
  auto coeff_copy = cloneBatchedColumnMajor(A);
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{
    apply_triangular_solve<scalar_t>(rhs_copy, coeff_copy, upper, transpose, unitriangular);
  });
  return std::tuple<Tensor, Tensor>(rhs_copy, coeff_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes the (raw) QR decomposition of each matrix in the batch.
// R is computed first via ?geqrf2_gpu directly in 'R'; when 'compute_q'
// is set, Q is rebuilt via ?geqrf_gpu + ?orgqr_gpu in 'Q' (see the comment
// below for why two geqrf variants are needed). 'infos' receives one status
// code per matrix; processing stops at the first non-zero info.
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t q_size_minus_2, int64_t r_size_minus_1, int64_t n_columns,
bool compute_q, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_int_t m = magma_int_cast(q_size_minus_2, "Q.size(-2)");
magma_int_t n = magma_int_cast(r_size_minus_1, "R.size(-1)");
auto r_data = R.data_ptr<scalar_t>();
auto r_matrix_stride = matrixStride(R);
// number of elementary reflectors
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
infos[i] = info;
if (info != 0) {
return;
}
}
if (!compute_q) {
// this is for mode='r'
return;
}
// This phase computes Q (the raw version)
// We require to perform ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
auto q_data = Q.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
infos[i] = info;
if (info != 0) {
return;
}
// expand the reflectors into the first n_columns columns of Q
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
// Entry point for linalg.qr: parses the mode string, sets up the working
// geometry, handles empty inputs, runs 'apply_qr', and narrows the raw Q/R
// outputs to their final shapes.
std::tuple<Tensor,Tensor> _linalg_qr_helper_cuda(const Tensor& self, std::string mode) {
bool compute_q, reduced;
std::tie(compute_q, reduced) = _parse_qr_mode(mode);
std::vector<int64_t> infos(batchCount(self), 0);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, reduced);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
int64_t n = self.size(-1);
r_working_copy = at::empty({n_columns_q, n}, self.options());
if (compute_q) {
int64_t n_rows_q = q_sizes[self.dim() - 2];
// Q of an empty input is the identity of the requested shape
q_working_copy = at::eye(n_rows_q, n_columns_q, self.options());
} else {
q_working_copy = at::empty({0}, self.options());
}
return std::make_tuple(q_working_copy, r_working_copy);
}
if (compute_q) {
// Q working buffer is wider than self; copy self into its leading columns
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
} else {
q_working_copy = at::empty({0}, self.options());
}
r_working_copy = cloneBatchedColumnMajor(self);
int64_t m = q_sizes[self.dim() - 2];
int64_t n = r_working_copy.size(-1);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, m, n, n_columns_q, compute_q, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "qr_cuda");
} else {
singleCheckErrors(infos[0], "qr_cuda");
}
// trim the raw outputs to their final shapes; triu() zeroes the
// below-diagonal reflectors left in R by geqrf
if (compute_q) {
q_working_copy = q_working_copy.narrow(-1, 0, n_columns_q);
}
r_working_copy = r_working_copy.narrow(-2, 0, n_columns_q).triu();
return std::make_tuple(q_working_copy, r_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes eigenvalues (and, if 'eigenvectors' is set, eigenvectors) of each
// symmetric / Hermitian matrix in 'self' using MAGMA's symmetric
// eigendecomposition driver. Eigenvalues (always real) go into 'eigvals';
// when eigenvectors are requested, 'self' is overwritten with them.
// 'infos' receives one status code per matrix; the loop stops at the first
// non-zero info.
template <typename scalar_t>
static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("symeig: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
// value_t is the real type underlying scalar_t (e.g. float for complex<float>)
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto eigvals_data = eigvals.data_ptr<value_t>();
auto self_matrix_stride = matrixStride(self);
auto eigvals_stride = eigvals.size(-1);
int64_t batch_size = batchCount(self);
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec;
// n-by-n scratch buffer required by magmaSymeig
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, n * n);
magma_int_t info;
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, &info);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
liwork = magma_int_cast(iwkopt, "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
// complex dtypes additionally need a real-valued workspace
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
lrwork = magma_int_cast(rwkopt, "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
value_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride];
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr,
wA, n, work, lwork, rwork, lrwork, iwork, liwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
// Entry point for symeig: allocates the eigenvalue buffer (on CPU, as the
// hybrid MAGMA driver requires), runs 'apply_symeig', checks errors, and
// moves the results back to the input's device.
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
// eigvals shape = self.shape[:-1] (batch dims + n)
auto self_sizes = self.sizes().vec();
self_sizes.pop_back();
// eigenvalues of a Hermitian matrix are real, so use the real value type
ScalarType dtype = toValueType(typeMetaToScalarType(self.dtype()));
// magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
// The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigvalenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of
// dimensions on the CUDA (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(self_sizes, self.options().dtype(dtype))
: at::empty(self_sizes, self.options().dtype(dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos[0], "symeig_cuda");
}
// the working copy holds the eigenvectors only when they were requested
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// magmaEig uses a hybrid CPU-GPU algorithm, which takes and return CPU
// memory. So, we accept a GPU tensor, copy it to CPU memory, and later copy
// the returned values from CPU to GPU. See also magmaSymeig, which uses a
// similar approach.
// Computes eigenvalues (and optionally right eigenvectors) of a square
// matrix via MAGMA's general eigensolver. 'self' must already be a CPU
// column-major tensor (asserted below); results are written into the
// preallocated CPU tensors 'out_eigvals' / 'out_eigvecs'.
// NOTE(review): when n == 0 the body below is skipped entirely and
// *info_ptr is left unwritten — callers must initialize their info
// variable before calling.
template <typename scalar_t>
static void apply_eig(const Tensor& self, bool eigenvectors, Tensor& out_eigvals, Tensor& out_eigvecs,
int64_t *info_ptr) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT(self.device() == at::kCPU, "Internal error: apply_eig needs a CPU tensor");
// value_t is the real type underlying scalar_t (e.g. float for complex<float>)
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto self_data = self.data_ptr<scalar_t>();
auto out_eigvals_data = out_eigvals.data_ptr<scalar_t>();
scalar_t *wr = out_eigvals_data;
// right-eigenvector output; only used when jobvr == MagmaVec
scalar_t *vr_data = NULL;
magma_int_t ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = out_eigvecs.data_ptr<scalar_t>();
ldvr = n;
}
// complex dtypes additionally need a real-valued workspace
value_t *rwork_data = nullptr;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
ALLOCATE_ARRAY(rwork_data, value_t, n*2);
}
if (n > 0) {
// call magmaEig once to get the optimal size of work_data
scalar_t wkopt;
magma_int_t info;
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, &wkopt, -1, rwork_data, &info);
magma_int_t lwork = static_cast<magma_int_t>(real_impl<scalar_t, value_t>(wkopt));
// call it a 2nd time to to the actual work
scalar_t *work_data = nullptr;
ALLOCATE_ARRAY(work_data, scalar_t, lwork);
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, work_data, lwork, rwork_data, &info);
*info_ptr = info;
}
#endif
}
/*
* Internal helper; like eig_cuda but:
* 1. assume that self is a square matrix of side "n"
* 2. return CPU tensors (because this is what magmaEig returns), which will be copied to GPU memory
* by the caller
*/
// Runs the MAGMA general eigensolver on a pinned-CPU copy of 'self' and
// returns (eigenvalues, eigenvectors) as CPU tensors (the caller copies
// them back to the GPU). 'eigenvectors' selects whether the right
// eigenvectors are computed.
std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvectors) {
  int64_t n = self.size(-1);
  // copy self to pinned CPU memory
  auto self_working_copy = at::empty_strided(
      {n, n}, // square matrix
      {1, n}, // column-ordered, as magmaEig expects
      at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
  self_working_copy.copy_(self);
  // tensors holding the results. We use empty_strided to make them column-ordered
  auto options = self.options().device(at::kCPU).memory_format(LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  Tensor out_eigvals;
  if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
    // complex dtype: eigenvalues fit in a single complex vector
    out_eigvals = at::empty({n}, options);
  } else {
    // real dtype: eigenvalues are returned as (real, imag) column pairs
    out_eigvals = at::empty_strided({n, 2}, {1, n}, options);
  }
  auto out_eigvecs = eigenvectors
      ? at::empty_strided({n, n}, {1, n}, options)
      : Tensor();
  // BUGFIX: 'info' must be zero-initialized — apply_eig leaves *info_ptr
  // untouched when n == 0, and singleCheckErrors would otherwise read an
  // indeterminate value.
  int64_t info = 0;
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "eig_cuda", [&]{
    apply_eig<scalar_t>(self_working_copy, eigenvectors, out_eigvals, out_eigvecs, &info);
  });
  singleCheckErrors(info, "eig_cuda");
  return std::tuple<Tensor, Tensor>(out_eigvals, out_eigvecs);
}
REGISTER_DISPATCH(eig_stub, &eig_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ syevd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// This function computes eigenvalues 'w' and eigenvectors 'v' of the tensor 'self'
// compute_eigenvectors controls whether eigenvectors should be computed
// uplo controls the portion of input matrix to consider in computations, allowed values are "u", "U", "l", "L"
// '_symeig_helper_cuda' prepares correct input for 'apply_symeig' and checks for possible errors using 'infos'
// See also CPU implementation in aten/src/ATen/native/BatchLinearAlgebra.cpp
// syevd is symeig with a string UPLO argument; normalize it and forward.
std::tuple<Tensor, Tensor> _syevd_helper_cuda(const Tensor& self, bool compute_eigenvectors, std::string uplo_str) {
  // NumPy allows lowercase input for UPLO argument
  // It is assumed that uplo_str is either "U" or "L"
  const bool upper = std::toupper(uplo_str[0]) == 'U';
  return _symeig_helper_cuda(self, compute_eigenvectors, upper);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes the singular value decomposition self = U * diag(S) * V^T for
// each matrix in the batch using MAGMA's divide-and-conquer SVD driver.
// jobchar: 'A' = all singular vectors, 'S' = the reduced set, anything else
// = singular values only. 'infos' receives one status code per matrix; the
// loop stops at the first non-zero info.
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
// value_t is the real type underlying scalar_t; singular values are real
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto mn = std::min(m, n);
c10::Storage storage_rwork;
value_t* rwork = nullptr;
// integer workspace required by the divide-and-conquer algorithm
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
// complex dtypes additionally need a real-valued workspace
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
auto lrwork = computeLRWorkDim(jobchar, m, n);
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, rwork, iwork, &info);
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
value_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, m,
S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, rwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
// MAGMA-based backend for torch.svd. Copies `self` into a column-major,
// pinned-memory working buffer (MAGMA's hybrid driver reads host memory),
// runs apply_svd per dtype, checks the per-matrix status codes, then moves
// U, S, V back to `self`'s device. Returns (U, S, V) -- note V, not VT: the
// conjugate-transpose adjustment happens at the end.
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_legacy(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = std::min(m, n);
// 'S' = reduced SVD, 'A' = full SVD, 'N' = singular values only.
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
if (self.numel() > 0) {
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
// For the input matrix, this requirements are being taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
                                           at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
  apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
// Batched inputs report all failing matrices; a single matrix reports one code.
if (self.dim() > 2) {
  batchCheckErrors(infos, "svd_cuda");
} else {
  singleCheckErrors(infos[0], "svd_cuda");
}
// Move the results from pinned CPU memory back to the input's device.
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (compute_uv) {
  if (some) {
    // Reduced SVD: keep only the first k = min(m, n) rows of VT.
    VT_working_copy = VT_working_copy.narrow(-2, 0, k);
  }
} else {
  // Singular values only: U and VT were scratch space; zero them out.
  VT_working_copy.zero_();
  U_working_copy.zero_();
}
} else {
// Empty input: return zeroed U/V and S on the right device.
U_working_copy = same_stride_to(U_working_copy, self.options()).zero_();
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_();
}
// so far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
// Note that the 'apply_svd' routine returns VT = V^T (for real inputs) or VT = V^H (for complex inputs), not V.
VT_working_copy = VT_working_copy.conj();
VT_working_copy.transpose_(-2, -1);
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
// Entry point for torch.svd on CUDA: prefers the cuSOLVER backend when it was
// compiled in, otherwise falls back to the MAGMA-based legacy implementation.
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
#ifdef USE_CUSOLVER
return _svd_helper_cuda_lib(self, some, compute_uv);
#else
return _svd_helper_cuda_legacy(self, some, compute_uv);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves A x = b in-place for each matrix in `b`, given the LU factorization
// of A in `lu` and its pivot indices in `pivots` (as produced by LU/getrf).
// `b` is overwritten with the solution; the last MAGMA status code observed
// (first non-zero one in the batched path) is reported through `info`.
template <typename scalar_t>
static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("lu_solve: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
auto n = lu.size(-2);
auto nrhs = b.size(-1);
int info_tmp = 0;
if (b.dim() == 2) {
// Single-matrix path: the non-batched MAGMA solver reads pivots from
// host memory, so copy them to the CPU first.
Tensor pivots_tmp = pivots.cpu();
magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
info = info_tmp;
} else {
// Batched path: build per-matrix pointer arrays for the batched MAGMA API.
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
// Maximum number of matrices MAGMA's batched kernels accept per call.
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
magmaLuSolveBatched<scalar_t>(
    n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n,
    info_tmp, batch_limit, magma_queue);
// Stop early on the first failure; the code is propagated via `info` below.
if (info_tmp != 0) {
  break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaLuSolveBatched<scalar_t>(
    n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n,
    info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
// Solves A X = B given A's LU factorization (LU_data, LU_pivots). Returns the
// solution in a freshly allocated column-major tensor; inputs are not modified.
Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) {
  // Early out before cloning: for empty inputs the result is all zeros, so
  // there is no point materializing the column-major working copies first
  // (the original code cloned unconditionally and then discarded the clones).
  if (self.numel() == 0 || LU_data.numel() == 0) {
    return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  }
  int64_t info = 0;
  // MAGMA expects column-major inputs; the RHS copy is overwritten in-place
  // with the solution by apply_lu_solve.
  auto self_working_copy = cloneBatchedColumnMajor(self);
  auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data);
  auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous();
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{
    apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info);
  });
  TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info);
  return self_working_copy;
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
d7af4757a2a6b5de826209efc3268dbee24fe336.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include "cutil.h"
#include "cutil_inline_runtime.h"
#include "Postprocess.h"
#define USE_SHARED_MEM 0
#define FILTER_SIZE (5*5) // 5x5 kernel filter
#define BLOCK_SIZE 16 // block size
__constant__ float kernelFilter_D[FILTER_SIZE];
__constant__ int indexOffsetsU_D[25];
__constant__ int indexOffsetsV_D[25];
__constant__ float invScale_D;
__constant__ float offset_D;
texture<uchar4, hipTextureType2D, hipReadModeElementType> texRef;
// Clamps `value` into [min, max] and converts the result to type R.
template< typename R, typename T >
__device__ R Clamp( T value, T min, T max )
{
    // Pull the value into range, then cast exactly once on return.
    T clamped = value;
    if ( clamped < min )
    {
        clamped = min;
    }
    if ( clamped > max )
    {
        clamped = max;
    }
    return (R)clamped;
}
// Applies the 5x5 filter held in constant memory (kernelFilter_D, with texel
// offsets indexOffsetsU_D / indexOffsetsV_D) to the texture bound to texRef,
// then scales by invScale_D, adds offset_D and clamps each channel into `dst`.
// Expected launch: 2D grid of BLOCK_SIZE x BLOCK_SIZE blocks covering an
// imgWidth x imgHeight image; dst must hold imgWidth * imgHeight uchar4s.
__global__ void PostprocessKernel( uchar4* dst, unsigned int imgWidth, unsigned int imgHeight )
{
    unsigned int tx = threadIdx.x;
    unsigned int ty = threadIdx.y;
    unsigned int bw = blockDim.x;
    unsigned int bh = blockDim.y;
    // Non-normalized U, V coordinates of input texture for current thread.
    unsigned int u = ( bw * blockIdx.x ) + tx;
    unsigned int v = ( bh * blockIdx.y ) + ty;
    // Early-out if we are beyond the texture coordinates for our texture.
    // Bug fix: valid indices are 0..imgWidth-1 / 0..imgHeight-1, so the guard
    // must use >=. The previous `>` let threads with u == imgWidth write out
    // of bounds at dst[( v * imgWidth ) + imgWidth].
    if ( u >= imgWidth || v >= imgHeight ) return;
#if USE_SHARED_MEM
    // NOTE(review): the early return above precedes __syncthreads() below, so
    // boundary blocks would hit a divergent barrier if this path were enabled
    // (USE_SHARED_MEM is 0 today) -- restructure the guard before enabling it.
    __shared__ uchar4 sTex[BLOCK_SIZE+4][BLOCK_SIZE+4]; // 20 * 20 * 4 Bytes = 1,600 Bytes ~= 1.5 KB
    // U, V, coordinates relative to the shared memory block
    unsigned int sU = tx + 2;
    unsigned int sV = ty + 2;
    // Load the current (center) pixel into shared memory
    sTex[sU][sV] = tex2D( texRef, u, v );
    if ( tx < 2 )
    {
        // Left-edge
        sTex[tx][sV] = tex2D( texRef, u - 2, v );
        // Right-edge
        sTex[BLOCK_SIZE+sU][sV] = tex2D( texRef, u + BLOCK_SIZE, v );
    }
    if ( ty < 2 )
    {
        // Top-edge
        sTex[sU][ty] = tex2D( texRef, u, v - 2 );
        // Bottom-edge
        sTex[sU][BLOCK_SIZE+sV] = tex2D( texRef, u, v + BLOCK_SIZE );
    }
    if ( tx < 2 && ty < 2 ) // Corners
    {
        // Top-left
        sTex[tx][ty] = tex2D(texRef, u - 2, v - 2 );
        // Top-right
        sTex[BLOCK_SIZE + sU][ty] = tex2D( texRef, u + BLOCK_SIZE, v - 2 );
        // Bottom-left
        sTex[tx][BLOCK_SIZE + sV] = tex2D( texRef, u - 2, v + BLOCK_SIZE );
        // Bottom-right
        sTex[BLOCK_SIZE + sU][BLOCK_SIZE + sV] = tex2D( texRef, u + BLOCK_SIZE, v + BLOCK_SIZE );
    }
    __syncthreads();
#endif
    unsigned int index = ( v * imgWidth ) + u;
    // Accumulate the weighted 5x5 neighborhood in float precision.
    float4 tempColor = make_float4(0, 0, 0, 1);
    for ( int i = 0; i < FILTER_SIZE; ++i )
    {
#if USE_SHARED_MEM
        uchar4 color = sTex[sU + indexOffsetsU_D[i]][sV + indexOffsetsV_D[i]];
#else
        // Reads can fall outside the image at borders; behavior depends on the
        // texture's addressing mode -- confirm in the texture setup.
        uchar4 color = tex2D( texRef, u + indexOffsetsU_D[i], v + indexOffsetsV_D[i] );
#endif
        tempColor.x += color.x * kernelFilter_D[i];
        tempColor.y += color.y * kernelFilter_D[i];
        tempColor.z += color.z * kernelFilter_D[i];
    }
    // Normalize by the filter scale, apply the offset and clamp to [0, 255].
    dst[index] = make_uchar4( Clamp<unsigned char>(tempColor.x * invScale_D + offset_D, 0.0f, 255.0f), Clamp<unsigned char>(tempColor.y * invScale_D + offset_D, 0.0f, 255.0f), Clamp<unsigned char>(tempColor.z * invScale_D + offset_D, 0.0f, 255.0f), 1 );
}
// Scratch device buffer receiving the kernel output before it is copied into
// the destination graphics array; cached across calls and re-allocated only
// when the image size changes.
uchar4* g_dstBuffer = NULL;
size_t g_BufferSize = 0;  // size in bytes of g_dstBuffer (0 = not yet allocated)
// Host entry point for the postprocess effect (HIP build).
// Convolves the image held by the `src` graphics resource with the 5x5 filter
// `filter_H` (25 row-major coefficients) and writes the result into the `dst`
// graphics resource. Each channel is multiplied by 1/scale (scale == 0 is
// treated as 1) and `offset_H` is added before clamping in the kernel.
void PostprocessCUDA( cudaGraphicsResource_t& dst, cudaGraphicsResource_t& src, unsigned int width, unsigned int height, float* filter_H, float scale, float offset_H )
{
// Avoid divide by zero error:
float invScale_H = ( scale == 0.0f ) ? 1.0f : 1.0f / scale;
// Column (U) offsets of the 5x5 neighborhood, row-major order.
int indexOffsetsU_H[] = {
-2, -1, 0, 1, 2,
-2, -1, 0, 1, 2,
-2, -1, 0, 1, 2,
-2, -1, 0, 1, 2,
-2, -1, 0, 1, 2,
};
// Row (V) offsets of the 5x5 neighborhood, row-major order.
int indexOffsetsV_H[] = {
-2, -2, -2, -2, -2,
-1, -1, -1, -1, -1,
0, 0, 0, 0, 0,
1, 1, 1, 1, 1,
2, 2, 2, 2, 2,
};
// Copy the scale and offset to the device for use by the kernel.
cutilSafeCall( hipMemcpyToSymbol( invScale_D, &invScale_H, sizeof(float), 0, hipMemcpyHostToDevice ) );
cutilSafeCall( hipMemcpyToSymbol( offset_D, &offset_H, sizeof(float), 0, hipMemcpyHostToDevice ) );
// Copy the data in the filter to the constant device variable.
cutilSafeCall( hipMemcpyToSymbol( kernelFilter_D, filter_H, FILTER_SIZE * sizeof(float), 0, hipMemcpyHostToDevice ) );
// Copy the index offset arrays to constant memory
cutilSafeCall( hipMemcpyToSymbol( indexOffsetsU_D, indexOffsetsU_H, 25 * sizeof(int), 0, hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpyToSymbol( indexOffsetsV_D, indexOffsetsV_H, 25 * sizeof(int), 0, hipMemcpyHostToDevice) );
cudaGraphicsResource_t resources[2] = { src, dst };
// Map the resources so they can be used in the kernel.
cutilSafeCall( hipGraphicsMapResources( 2, resources ) );
hipArray* srcArray;
hipArray* dstArray;
// Get a device pointer to the OpenGL buffers
cutilSafeCall( hipGraphicsSubResourceGetMappedArray( &srcArray, src, 0, 0 ) );
cutilSafeCall( hipGraphicsSubResourceGetMappedArray( &dstArray, dst, 0, 0 ) );
// Map the source texture to a texture reference.
cutilSafeCall( hipBindTextureToArray( texRef, srcArray ) );
// Destination buffer to store the result of the postprocess effect.
size_t bufferSize = width * height * sizeof(uchar4);
if ( g_BufferSize != bufferSize )
{
if ( g_dstBuffer != NULL )
{
hipFree( g_dstBuffer );
}
// Only re-allocate the global memory buffer if the screen size changes,
// or it has never been allocated before (g_BufferSize is still 0)
g_BufferSize = bufferSize;
cutilSafeCall( hipMalloc( &g_dstBuffer, g_BufferSize ) );
}
// Compute the grid size (round up so the grid covers the whole image).
size_t blocksW = (size_t)ceilf( width / (float)BLOCK_SIZE );
size_t blocksH = (size_t)ceilf( height / (float)BLOCK_SIZE );
dim3 gridDim( blocksW, blocksH, 1 );
dim3 blockDim( BLOCK_SIZE, BLOCK_SIZE, 1 );
hipLaunchKernelGGL(( PostprocessKernel), dim3(gridDim), dim3(blockDim) , 0, 0, g_dstBuffer, width, height );
// Copy the destination back to the source array
cutilSafeCall( hipMemcpyToArray( dstArray, 0, 0, g_dstBuffer, bufferSize, hipMemcpyDeviceToDevice ) );
// Unbind the texture reference
cutilSafeCall( hipUnbindTexture( texRef ) );
// Unmap the resources again so the texture can be rendered in OpenGL
cutilSafeCall( hipGraphicsUnmapResources( 2, resources ) );
} | d7af4757a2a6b5de826209efc3268dbee24fe336.cu | #include <cuda_runtime_api.h>
#include "cutil.h"
#include "cutil_inline_runtime.h"
#include "Postprocess.h"
#define USE_SHARED_MEM 0
#define FILTER_SIZE (5*5) // 5x5 kernel filter
#define BLOCK_SIZE 16 // block size
__constant__ float kernelFilter_D[FILTER_SIZE];
__constant__ int indexOffsetsU_D[25];
__constant__ int indexOffsetsV_D[25];
__constant__ float invScale_D;
__constant__ float offset_D;
texture<uchar4, cudaTextureType2D, cudaReadModeElementType> texRef;
// Clamps `value` into [min, max] and converts the result to type R.
template< typename R, typename T >
__device__ R Clamp( T value, T min, T max )
{
    // Pull the value into range, then cast exactly once on return.
    T clamped = value;
    if ( clamped < min )
    {
        clamped = min;
    }
    if ( clamped > max )
    {
        clamped = max;
    }
    return (R)clamped;
}
// Applies the 5x5 filter held in constant memory (kernelFilter_D, with texel
// offsets indexOffsetsU_D / indexOffsetsV_D) to the texture bound to texRef,
// then scales by invScale_D, adds offset_D and clamps each channel into `dst`.
// Expected launch: 2D grid of BLOCK_SIZE x BLOCK_SIZE blocks covering an
// imgWidth x imgHeight image; dst must hold imgWidth * imgHeight uchar4s.
__global__ void PostprocessKernel( uchar4* dst, unsigned int imgWidth, unsigned int imgHeight )
{
    unsigned int tx = threadIdx.x;
    unsigned int ty = threadIdx.y;
    unsigned int bw = blockDim.x;
    unsigned int bh = blockDim.y;
    // Non-normalized U, V coordinates of input texture for current thread.
    unsigned int u = ( bw * blockIdx.x ) + tx;
    unsigned int v = ( bh * blockIdx.y ) + ty;
    // Early-out if we are beyond the texture coordinates for our texture.
    // Bug fix: valid indices are 0..imgWidth-1 / 0..imgHeight-1, so the guard
    // must use >=. The previous `>` let threads with u == imgWidth write out
    // of bounds at dst[( v * imgWidth ) + imgWidth].
    if ( u >= imgWidth || v >= imgHeight ) return;
#if USE_SHARED_MEM
    // NOTE(review): the early return above precedes __syncthreads() below, so
    // boundary blocks would hit a divergent barrier if this path were enabled
    // (USE_SHARED_MEM is 0 today) -- restructure the guard before enabling it.
    __shared__ uchar4 sTex[BLOCK_SIZE+4][BLOCK_SIZE+4]; // 20 * 20 * 4 Bytes = 1,600 Bytes ~= 1.5 KB
    // U, V, coordinates relative to the shared memory block
    unsigned int sU = tx + 2;
    unsigned int sV = ty + 2;
    // Load the current (center) pixel into shared memory
    sTex[sU][sV] = tex2D( texRef, u, v );
    if ( tx < 2 )
    {
        // Left-edge
        sTex[tx][sV] = tex2D( texRef, u - 2, v );
        // Right-edge
        sTex[BLOCK_SIZE+sU][sV] = tex2D( texRef, u + BLOCK_SIZE, v );
    }
    if ( ty < 2 )
    {
        // Top-edge
        sTex[sU][ty] = tex2D( texRef, u, v - 2 );
        // Bottom-edge
        sTex[sU][BLOCK_SIZE+sV] = tex2D( texRef, u, v + BLOCK_SIZE );
    }
    if ( tx < 2 && ty < 2 ) // Corners
    {
        // Top-left
        sTex[tx][ty] = tex2D(texRef, u - 2, v - 2 );
        // Top-right
        sTex[BLOCK_SIZE + sU][ty] = tex2D( texRef, u + BLOCK_SIZE, v - 2 );
        // Bottom-left
        sTex[tx][BLOCK_SIZE + sV] = tex2D( texRef, u - 2, v + BLOCK_SIZE );
        // Bottom-right
        sTex[BLOCK_SIZE + sU][BLOCK_SIZE + sV] = tex2D( texRef, u + BLOCK_SIZE, v + BLOCK_SIZE );
    }
    __syncthreads();
#endif
    unsigned int index = ( v * imgWidth ) + u;
    // Accumulate the weighted 5x5 neighborhood in float precision.
    float4 tempColor = make_float4(0, 0, 0, 1);
    for ( int i = 0; i < FILTER_SIZE; ++i )
    {
#if USE_SHARED_MEM
        uchar4 color = sTex[sU + indexOffsetsU_D[i]][sV + indexOffsetsV_D[i]];
#else
        // Reads can fall outside the image at borders; behavior depends on the
        // texture's addressing mode -- confirm in the texture setup.
        uchar4 color = tex2D( texRef, u + indexOffsetsU_D[i], v + indexOffsetsV_D[i] );
#endif
        tempColor.x += color.x * kernelFilter_D[i];
        tempColor.y += color.y * kernelFilter_D[i];
        tempColor.z += color.z * kernelFilter_D[i];
    }
    // Normalize by the filter scale, apply the offset and clamp to [0, 255].
    dst[index] = make_uchar4( Clamp<unsigned char>(tempColor.x * invScale_D + offset_D, 0.0f, 255.0f), Clamp<unsigned char>(tempColor.y * invScale_D + offset_D, 0.0f, 255.0f), Clamp<unsigned char>(tempColor.z * invScale_D + offset_D, 0.0f, 255.0f), 1 );
}
// Scratch device buffer receiving the kernel output before it is copied into
// the destination graphics array; cached across calls and re-allocated only
// when the image size changes.
uchar4* g_dstBuffer = NULL;
size_t g_BufferSize = 0;  // size in bytes of g_dstBuffer (0 = not yet allocated)
// Host entry point for the postprocess effect (CUDA build).
// Convolves the image held by the `src` graphics resource with the 5x5 filter
// `filter_H` (25 row-major coefficients) and writes the result into the `dst`
// graphics resource. Each channel is multiplied by 1/scale (scale == 0 is
// treated as 1) and `offset_H` is added before clamping in the kernel.
void PostprocessCUDA( cudaGraphicsResource_t& dst, cudaGraphicsResource_t& src, unsigned int width, unsigned int height, float* filter_H, float scale, float offset_H )
{
// Avoid divide by zero error:
float invScale_H = ( scale == 0.0f ) ? 1.0f : 1.0f / scale;
// Column (U) offsets of the 5x5 neighborhood, row-major order.
int indexOffsetsU_H[] = {
-2, -1, 0, 1, 2,
-2, -1, 0, 1, 2,
-2, -1, 0, 1, 2,
-2, -1, 0, 1, 2,
-2, -1, 0, 1, 2,
};
// Row (V) offsets of the 5x5 neighborhood, row-major order.
int indexOffsetsV_H[] = {
-2, -2, -2, -2, -2,
-1, -1, -1, -1, -1,
0, 0, 0, 0, 0,
1, 1, 1, 1, 1,
2, 2, 2, 2, 2,
};
// Copy the scale and offset to the device for use by the kernel.
cutilSafeCall( cudaMemcpyToSymbol( invScale_D, &invScale_H, sizeof(float), 0, cudaMemcpyHostToDevice ) );
cutilSafeCall( cudaMemcpyToSymbol( offset_D, &offset_H, sizeof(float), 0, cudaMemcpyHostToDevice ) );
// Copy the data in the filter to the constant device variable.
cutilSafeCall( cudaMemcpyToSymbol( kernelFilter_D, filter_H, FILTER_SIZE * sizeof(float), 0, cudaMemcpyHostToDevice ) );
// Copy the index offset arrays to constant memory
cutilSafeCall( cudaMemcpyToSymbol( indexOffsetsU_D, indexOffsetsU_H, 25 * sizeof(int), 0, cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpyToSymbol( indexOffsetsV_D, indexOffsetsV_H, 25 * sizeof(int), 0, cudaMemcpyHostToDevice) );
cudaGraphicsResource_t resources[2] = { src, dst };
// Map the resources so they can be used in the kernel.
cutilSafeCall( cudaGraphicsMapResources( 2, resources ) );
cudaArray* srcArray;
cudaArray* dstArray;
// Get a device pointer to the OpenGL buffers
cutilSafeCall( cudaGraphicsSubResourceGetMappedArray( &srcArray, src, 0, 0 ) );
cutilSafeCall( cudaGraphicsSubResourceGetMappedArray( &dstArray, dst, 0, 0 ) );
// Map the source texture to a texture reference.
cutilSafeCall( cudaBindTextureToArray( texRef, srcArray ) );
// Destination buffer to store the result of the postprocess effect.
size_t bufferSize = width * height * sizeof(uchar4);
if ( g_BufferSize != bufferSize )
{
if ( g_dstBuffer != NULL )
{
cudaFree( g_dstBuffer );
}
// Only re-allocate the global memory buffer if the screen size changes,
// or it has never been allocated before (g_BufferSize is still 0)
g_BufferSize = bufferSize;
cutilSafeCall( cudaMalloc( &g_dstBuffer, g_BufferSize ) );
}
// Compute the grid size (round up so the grid covers the whole image).
size_t blocksW = (size_t)ceilf( width / (float)BLOCK_SIZE );
size_t blocksH = (size_t)ceilf( height / (float)BLOCK_SIZE );
dim3 gridDim( blocksW, blocksH, 1 );
dim3 blockDim( BLOCK_SIZE, BLOCK_SIZE, 1 );
PostprocessKernel<<< gridDim, blockDim >>>( g_dstBuffer, width, height );
// Copy the destination back to the source array
cutilSafeCall( cudaMemcpyToArray( dstArray, 0, 0, g_dstBuffer, bufferSize, cudaMemcpyDeviceToDevice ) );
// Unbind the texture reference
cutilSafeCall( cudaUnbindTexture( texRef ) );
// Unmap the resources again so the texture can be rendered in OpenGL
cutilSafeCall( cudaGraphicsUnmapResources( 2, resources ) );
}
2b52183a7074fbc2580f3ebabfc6d06b38c8467d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <raft/linalg/cublas_wrappers.h>
#include <test_utils.h>
#include <cuda_utils.cuh>
#include <cuml/common/logger.hpp>
#include <cuml/decomposition/pca_mg.hpp>
#include <matrix/matrix.cuh>
#include <opg/linalg/gemm.hpp>
#include <opg/matrix/matrix_utils.hpp>
#include "test_opg_utils.h"
#include <common/cumlHandle.hpp>
#include <raft/comms/mpi_comms.hpp>
namespace MLCommon {
namespace Test {
namespace opg {
// Test configuration for one multi-GPU (OPG) PCA run.
struct PCAOpgParams {
int M;                         // rows of the input matrix
int N;                         // columns of the input matrix
int N_components;              // number of principal components to compute
ML::mg_solver algorithm;       // solver variant under test
std::vector<int> partSizes;    // row count of each matrix partition
std::vector<int> ranksOwners;  // owning rank per partition (taken mod world size)
Matrix::Layout layout;         // memory layout of the partitions
unsigned long long int seed;   // base RNG seed (offset by rank in SetUp)
};
// Smoke test for multi-GPU PCA: builds a randomized, rank-partitioned input
// matrix and runs ML::PCA::opg::fit, logging the results at debug level.
// No numerical verification is performed here (see the TEST_P bodies).
template <typename T>
class PCAOpgTest : public testing::TestWithParam<PCAOpgParams> {
public:
void SetUp() {
params = GetParam();
// Attach an MPI-backed communicator to the raft handle.
raft::comms::initialize_mpi_comms(&handle, MPI_COMM_WORLD);
// Prepare resource
const raft::comms::comms_t& comm = handle.get_comms();
stream = handle.get_stream();
const auto allocator = handle.get_device_allocator();
hipblasHandle_t cublasHandle = handle.get_cublas_handle();
myRank = comm.get_rank();
totalRanks = comm.get_size();
// Offset the seed by rank so each rank draws different random data.
raft::random::Rng r(params.seed + myRank);
CUBLAS_CHECK(hipblasSetStream(cublasHandle, stream));
if (myRank == 0) {
std::cout << "Testing PCA of " << params.M << " x " << params.N
          << " matrix" << std::endl;
}
// Prepare X matrix: describe how the rows are partitioned across ranks.
std::vector<Matrix::RankSizePair*> totalPartsToRanks;
for (int i = 0; i < params.partSizes.size(); i++) {
// NOTE(review): these RankSizePair allocations are never deleted --
// leaked per test run; presumably acceptable for a test, but confirm.
Matrix::RankSizePair* rspt = new Matrix::RankSizePair(
  params.ranksOwners[i] % totalRanks, params.partSizes[i]);
totalPartsToRanks.push_back(rspt);
}
Matrix::PartDescriptor desc(params.M, params.N, totalPartsToRanks,
                            comm.get_rank(), params.layout);
std::vector<Matrix::Data<T>*> inParts;
// Allocate this rank's partitions and fill them with random values
// parameterized by 10..20 (presumably uniform bounds -- confirm in
// Matrix::opg::randomize).
Matrix::opg::allocate(handle, inParts, desc, myRank, stream);
Matrix::opg::randomize(handle, r, inParts, desc, myRank, stream, T(10.0),
                       T(20.0));
handle.wait_on_user_stream();
// PCA hyper-parameters for this run.
prmsPCA.n_rows = params.M;
prmsPCA.n_cols = params.N;
prmsPCA.n_components = params.N_components;
prmsPCA.whiten = false;
prmsPCA.n_iterations = 100;
prmsPCA.tol = 0.01;
prmsPCA.algorithm = params.algorithm;
// Output buffers for the fit results.
device_buffer<T> components(allocator, stream,
                            prmsPCA.n_components * prmsPCA.n_cols);
device_buffer<T> explained_var(allocator, stream, prmsPCA.n_components);
device_buffer<T> explained_var_ratio(allocator, stream,
                                     prmsPCA.n_components);
device_buffer<T> singular_vals(allocator, stream, prmsPCA.n_components);
device_buffer<T> mu(allocator, stream, prmsPCA.n_cols);
device_buffer<T> noise_vars(allocator, stream, prmsPCA.n_components);
ML::PCA::opg::fit(handle, inParts, desc, components.data(),
                  explained_var.data(), explained_var_ratio.data(),
                  singular_vals.data(), mu.data(), noise_vars.data(),
                  prmsPCA, false);
CUML_LOG_DEBUG(raft::arr2Str(singular_vals.data(), params.N_components,
                             "Singular Vals", stream)
                 .c_str());
CUML_LOG_DEBUG(raft::arr2Str(explained_var.data(), params.N_components,
                             "Explained Variance", stream)
                 .c_str());
CUML_LOG_DEBUG(raft::arr2Str(explained_var_ratio.data(),
                             params.N_components,
                             "Explained Variance Ratio", stream)
                 .c_str());
CUML_LOG_DEBUG(raft::arr2Str(components.data(),
                             params.N_components * params.N, "Components",
                             stream)
                 .c_str());
Matrix::opg::deallocate(handle, inParts, desc, myRank, stream);
}
protected:
PCAOpgParams params;      // current test configuration
raft::handle_t handle;    // raft resources (comms, stream, allocator)
hipStream_t stream;       // work stream taken from the handle
int myRank;               // this process' MPI rank
int totalRanks;           // MPI world size
ML::paramsPCAMG prmsPCA;  // parameters passed to PCA::opg::fit
};
// Three configurations of the same 20x4 problem, rows split 11/9 across two
// ranks, one configuration per supported solver variant.
const std::vector<PCAOpgParams> inputs = {
  {20, 4, 2, ML::mg_solver::COV_EIG_JACOBI, {11, 9}, {1, 0}, Matrix::LayoutColMajor, 223548ULL},
  {20, 4, 2, ML::mg_solver::COV_EIG_DQ, {11, 9}, {1, 0}, Matrix::LayoutColMajor, 223548ULL},
  {20, 4, 2, ML::mg_solver::QR, {11, 9}, {1, 0}, Matrix::LayoutColMajor, 223548ULL}};
typedef PCAOpgTest<float> PCAOpgTestF;
// Currently only checks that SetUp's fit() ran without crashing.
TEST_P(PCAOpgTestF, Result) {
  if (myRank == 0) {
    // We should be inverse transforming and checking against the original
    // data here. Github reference: https://github.com/rapidsai/cuml/issues/2474
    ASSERT_TRUE(true);
  }
}
INSTANTIATE_TEST_CASE_P(PCAOpgTest, PCAOpgTestF, ::testing::ValuesIn(inputs));
typedef PCAOpgTest<double> PCAOpgTestD;
// Currently only checks that SetUp's fit() ran without crashing.
TEST_P(PCAOpgTestD, Result) {
  if (myRank == 0) {
    // We should be inverse transforming and checking against the original
    // data here. Github reference: https://github.com/rapidsai/cuml/issues/2474
    ASSERT_TRUE(true);
  }
}
INSTANTIATE_TEST_CASE_P(PCAOpgTest, PCAOpgTestD, ::testing::ValuesIn(inputs));
} // end namespace opg
} // end namespace Test
} // end namespace MLCommon
| 2b52183a7074fbc2580f3ebabfc6d06b38c8467d.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <raft/linalg/cublas_wrappers.h>
#include <test_utils.h>
#include <cuda_utils.cuh>
#include <cuml/common/logger.hpp>
#include <cuml/decomposition/pca_mg.hpp>
#include <matrix/matrix.cuh>
#include <opg/linalg/gemm.hpp>
#include <opg/matrix/matrix_utils.hpp>
#include "test_opg_utils.h"
#include <common/cumlHandle.hpp>
#include <raft/comms/mpi_comms.hpp>
namespace MLCommon {
namespace Test {
namespace opg {
// Test configuration for one multi-GPU (OPG) PCA run.
struct PCAOpgParams {
int M;                         // rows of the input matrix
int N;                         // columns of the input matrix
int N_components;              // number of principal components to compute
ML::mg_solver algorithm;       // solver variant under test
std::vector<int> partSizes;    // row count of each matrix partition
std::vector<int> ranksOwners;  // owning rank per partition (taken mod world size)
Matrix::Layout layout;         // memory layout of the partitions
unsigned long long int seed;   // base RNG seed (offset by rank in SetUp)
};
// Smoke test for multi-GPU PCA: builds a randomized, rank-partitioned input
// matrix and runs ML::PCA::opg::fit, logging the results at debug level.
// No numerical verification is performed here (see the TEST_P bodies).
template <typename T>
class PCAOpgTest : public testing::TestWithParam<PCAOpgParams> {
public:
void SetUp() {
params = GetParam();
// Attach an MPI-backed communicator to the raft handle.
raft::comms::initialize_mpi_comms(&handle, MPI_COMM_WORLD);
// Prepare resource
const raft::comms::comms_t& comm = handle.get_comms();
stream = handle.get_stream();
const auto allocator = handle.get_device_allocator();
cublasHandle_t cublasHandle = handle.get_cublas_handle();
myRank = comm.get_rank();
totalRanks = comm.get_size();
// Offset the seed by rank so each rank draws different random data.
raft::random::Rng r(params.seed + myRank);
CUBLAS_CHECK(cublasSetStream(cublasHandle, stream));
if (myRank == 0) {
std::cout << "Testing PCA of " << params.M << " x " << params.N
          << " matrix" << std::endl;
}
// Prepare X matrix: describe how the rows are partitioned across ranks.
std::vector<Matrix::RankSizePair*> totalPartsToRanks;
for (int i = 0; i < params.partSizes.size(); i++) {
// NOTE(review): these RankSizePair allocations are never deleted --
// leaked per test run; presumably acceptable for a test, but confirm.
Matrix::RankSizePair* rspt = new Matrix::RankSizePair(
  params.ranksOwners[i] % totalRanks, params.partSizes[i]);
totalPartsToRanks.push_back(rspt);
}
Matrix::PartDescriptor desc(params.M, params.N, totalPartsToRanks,
                            comm.get_rank(), params.layout);
std::vector<Matrix::Data<T>*> inParts;
// Allocate this rank's partitions and fill them with random values
// parameterized by 10..20 (presumably uniform bounds -- confirm in
// Matrix::opg::randomize).
Matrix::opg::allocate(handle, inParts, desc, myRank, stream);
Matrix::opg::randomize(handle, r, inParts, desc, myRank, stream, T(10.0),
                       T(20.0));
handle.wait_on_user_stream();
// PCA hyper-parameters for this run.
prmsPCA.n_rows = params.M;
prmsPCA.n_cols = params.N;
prmsPCA.n_components = params.N_components;
prmsPCA.whiten = false;
prmsPCA.n_iterations = 100;
prmsPCA.tol = 0.01;
prmsPCA.algorithm = params.algorithm;
// Output buffers for the fit results.
device_buffer<T> components(allocator, stream,
                            prmsPCA.n_components * prmsPCA.n_cols);
device_buffer<T> explained_var(allocator, stream, prmsPCA.n_components);
device_buffer<T> explained_var_ratio(allocator, stream,
                                     prmsPCA.n_components);
device_buffer<T> singular_vals(allocator, stream, prmsPCA.n_components);
device_buffer<T> mu(allocator, stream, prmsPCA.n_cols);
device_buffer<T> noise_vars(allocator, stream, prmsPCA.n_components);
ML::PCA::opg::fit(handle, inParts, desc, components.data(),
                  explained_var.data(), explained_var_ratio.data(),
                  singular_vals.data(), mu.data(), noise_vars.data(),
                  prmsPCA, false);
CUML_LOG_DEBUG(raft::arr2Str(singular_vals.data(), params.N_components,
                             "Singular Vals", stream)
                 .c_str());
CUML_LOG_DEBUG(raft::arr2Str(explained_var.data(), params.N_components,
                             "Explained Variance", stream)
                 .c_str());
CUML_LOG_DEBUG(raft::arr2Str(explained_var_ratio.data(),
                             params.N_components,
                             "Explained Variance Ratio", stream)
                 .c_str());
CUML_LOG_DEBUG(raft::arr2Str(components.data(),
                             params.N_components * params.N, "Components",
                             stream)
                 .c_str());
Matrix::opg::deallocate(handle, inParts, desc, myRank, stream);
}
protected:
PCAOpgParams params;      // current test configuration
raft::handle_t handle;    // raft resources (comms, stream, allocator)
cudaStream_t stream;      // work stream taken from the handle
int myRank;               // this process' MPI rank
int totalRanks;           // MPI world size
ML::paramsPCAMG prmsPCA;  // parameters passed to PCA::opg::fit
};
// Three configurations of the same 20x4 problem, rows split 11/9 across two
// ranks, one configuration per supported solver variant.
const std::vector<PCAOpgParams> inputs = {
  {20, 4, 2, ML::mg_solver::COV_EIG_JACOBI, {11, 9}, {1, 0}, Matrix::LayoutColMajor, 223548ULL},
  {20, 4, 2, ML::mg_solver::COV_EIG_DQ, {11, 9}, {1, 0}, Matrix::LayoutColMajor, 223548ULL},
  {20, 4, 2, ML::mg_solver::QR, {11, 9}, {1, 0}, Matrix::LayoutColMajor, 223548ULL}};
typedef PCAOpgTest<float> PCAOpgTestF;
// Currently only checks that SetUp's fit() ran without crashing.
TEST_P(PCAOpgTestF, Result) {
  if (myRank == 0) {
    // We should be inverse transforming and checking against the original
    // data here. Github reference: https://github.com/rapidsai/cuml/issues/2474
    ASSERT_TRUE(true);
  }
}
INSTANTIATE_TEST_CASE_P(PCAOpgTest, PCAOpgTestF, ::testing::ValuesIn(inputs));
typedef PCAOpgTest<double> PCAOpgTestD;
// Currently only checks that SetUp's fit() ran without crashing.
TEST_P(PCAOpgTestD, Result) {
  if (myRank == 0) {
    // We should be inverse transforming and checking against the original
    // data here. Github reference: https://github.com/rapidsai/cuml/issues/2474
    ASSERT_TRUE(true);
  }
}
INSTANTIATE_TEST_CASE_P(PCAOpgTest, PCAOpgTestD, ::testing::ValuesIn(inputs));
} // end namespace opg
} // end namespace Test
} // end namespace MLCommon
|
5bf5ebdd7d5ae405c192036802ba0429f16f5004.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include "book.h"
#define SIZE (100*1024*1024)
#define ELEMENTS (SIZE / sizeof(unsigned int))
#define HASH_ENTRIES 1024
// One key/value node in a bucket's singly linked chain.
struct Entry {
unsigned int key;
void* value;
Entry* next;  // next entry in the same bucket (NULL terminates the chain)
};
// Chained hash table backed by a preallocated entry pool (no per-insert malloc).
struct Table {
size_t count;     // number of buckets
Entry** entries;  // bucket heads, length == count
Entry* pool;      // preallocated storage for every entry
Entry* firstFree; // bump pointer into pool for the next insert
};
// Maps a key to its bucket index in [0, count) using simple modulo hashing.
size_t hash(unsigned int key, size_t count) {
    const size_t bucket = key % count;
    return bucket;
}
// Walks every bucket chain and checks the table's integrity:
// (1) each entry sits in the bucket its key hashes to, and
// (2) the total number of entries equals ELEMENTS.
void verify_table(const Table& table) {
    int count = 0;
    for (size_t i = 0; i < table.count; ++i) {
        Entry* current = table.entries[i];
        while (current != NULL) {
            ++count;
            if (hash(current->key, table.count) != i)
                // Bug fix: the old format string printed the void* value with
                // %d and size_t with %ld; print the key with %u and use %zu.
                printf("%u hashed to %zu, but was located at %zu\n", current->key, hash(current->key, table.count), i);
            current = current->next;
        }
    }
    if (count != ELEMENTS)
        // %zu matches the size_t type of the ELEMENTS expression.
        printf("%d elements found in hash table. Should be %zu\n", count, (size_t)ELEMENTS);
    else
        printf("ALL %d elements found in hash table.\n", count);  // typo fix: "tale" -> "table"
}
// Sets up `table` with `entries` empty buckets (zeroed by calloc) and a
// preallocated pool large enough for `elements` insertions.
void initialize_table(Table& table, int entries, int elements) {
    table.count = entries;
    table.entries = static_cast<Entry**>(calloc(entries, sizeof(Entry*)));
    table.pool = static_cast<Entry*>(malloc(sizeof(Entry) * elements));
    table.firstFree = table.pool;
}
// Releases the bucket array and the entry pool. Individual entries live inside
// the pool, so no per-entry free is needed.
void free_table(Table& table) {
free(table.entries);
free(table.pool);
}
// Inserts (key, value) at the head of the bucket chain for `key`.
// Entries come from the preallocated pool, so this never allocates.
void add_to_table(Table& table, unsigned int key, void* value) {
    // Step 1: pick the bucket for this key.
    const size_t bucket = hash(key, table.count);
    // Step 2: take the next unused entry from the pool and fill it in.
    Entry* entry = table.firstFree++;
    entry->key = key;
    entry->value = value;
    // Step 3: link it at the front of the bucket's chain.
    entry->next = table.entries[bucket];
    table.entries[bucket] = entry;
}
// Builds a 100 MB block of random unsigned ints, inserts every element into
// the CPU-side chained hash table, reports elapsed time and verifies the table.
int main(void) {
unsigned int* buffer = (unsigned int*)big_random_block(SIZE);
clock_t start, stop;
start = clock();
Table table;
initialize_table(table, HASH_ENTRIES, ELEMENTS);
for (int i = 0; i < ELEMENTS; ++i) {
// Values are unused in this benchmark; only keys matter.
add_to_table(table, buffer[i], (void*)NULL);
}
stop = clock();
float elapsedTime = (float)(stop - start) / (float)CLOCKS_PER_SEC * 1000.0f;
printf("Time to hash: %3.1f ms\n", elapsedTime);
verify_table(table);
free_table(table);
free(buffer);
return 0;
} | 5bf5ebdd7d5ae405c192036802ba0429f16f5004.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include "book.h"
#define SIZE (100*1024*1024)
#define ELEMENTS (SIZE / sizeof(unsigned int))
#define HASH_ENTRIES 1024
// One key/value node in a bucket's singly linked list.
struct Entry {
  unsigned int key;  // the key that was hashed to pick the bucket
  void* value;       // user payload (may be NULL)
  Entry* next;       // next node in the same bucket, or NULL
};
// Chained hash table whose Entry nodes are bump-allocated from a single
// pre-sized pool; buckets are singly linked lists.
struct Table {
  size_t count;      // number of buckets
  Entry** entries;   // bucket heads (NULL = empty)
  Entry* pool;       // backing storage for all nodes
  Entry* firstFree;  // next unused pool slot
};
// Bucket index for `key` in a table with `count` buckets (plain modulo).
size_t hash(unsigned int key, size_t count) {
  size_t bucket = key % count;
  return bucket;
}
// Checks table consistency: every entry must live in the bucket its key
// hashes to, and the total entry count must equal ELEMENTS. Results are
// reported on stdout.
// Fixes vs. the original: the misplaced-entry message printed the void* value
// with %d (undefined behavior) instead of the key, size_t arguments used %ld
// instead of %zu, and the success message misspelled "table" as "tale".
void verify_table(const Table& table) {
  int count = 0;
  for (size_t i = 0; i < table.count; ++i) {
    Entry* current = table.entries[i];
    while (current != NULL) {
      ++count;
      if (hash(current->key, table.count) != i)
        printf("%u hashed to %zu, but was located at %zu\n", current->key,
               hash(current->key, table.count), i);
      current = current->next;
    }
  }
  if (count != ELEMENTS)
    printf("%d elements found in hash table. Should be %zu\n", count,
           (size_t)ELEMENTS);
  else
    printf("ALL %d elements found in hash table.\n", count);
}
// Prepares an empty table: a zero-filled bucket array plus a pool of
// `elements` nodes handed out sequentially by add_to_table.
void initialize_table(Table& table, int entries, int elements) {
  table.entries = (Entry**)calloc(entries, sizeof(Entry*));
  table.pool = (Entry*)malloc(sizeof(Entry) * elements);
  table.firstFree = table.pool;
  table.count = entries;
}
// Tears down the allocations from initialize_table; entries all live inside
// the pool, so two free() calls cover the whole table.
void free_table(Table& table) {
  free(table.pool);
  free(table.entries);
}
// Inserts (key, value) at the head of its bucket's list.
// NOTE(review): the pool pointer is advanced without a bounds check -- the
// caller must not insert more than the pool's `elements` items; confirm.
void add_to_table(Table& table, unsigned int key, void* value) {
  // Step 1: pick the bucket for this key.
  size_t hashValue = hash(key, table.count);
  // Step 2: take the next unused node from the pool and fill it in.
  Entry* location = table.firstFree++;
  location->key = key;
  location->value = value;
  // Step 3: push the node onto the front of the bucket's list.
  location->next = table.entries[hashValue];
  table.entries[hashValue] = location;
}
// CPU-side benchmark: hash ELEMENTS random keys into the table, report the
// elapsed wall-clock time, verify the table, and clean up.
int main(void) {
  unsigned int* buffer = (unsigned int*)big_random_block(SIZE);
  clock_t start = clock();
  Table table;
  initialize_table(table, HASH_ENTRIES, ELEMENTS);
  for (int i = 0; i < ELEMENTS; ++i)
    add_to_table(table, buffer[i], (void*)NULL);
  clock_t stop = clock();
  float elapsedTime = (float)(stop - start) / (float)CLOCKS_PER_SEC * 1000.0f;
  printf("Time to hash: %3.1f ms\n", elapsedTime);
  verify_table(table);
  free_table(table);
  free(buffer);
  return 0;
}
3f2af129b940ca02273147c3ba9b3a18308addef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017 XGBoost contributors
*/
#include <xgboost/tree_updater.h>
#include <memory>
#include <utility>
#include <vector>
#include "../common/compressed_iterator.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "param.h"
#include "updater_gpu_common.cuh"
namespace xgboost {
namespace tree {
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
typedef bst_gpair_integer gpair_sum_t;
static const ncclDataType_t nccl_sum_t = ncclInt64;
// Empty tag type carrying a compile-time block size so overload resolution
// can select a kernel specialisation (see FindSplitSpecialize below).
template <int N>
struct Int {};
// Device-side view of one shard of the quantised (binned) feature matrix.
// gidx_buffer holds bit-packed bin indices; `gidx` decompresses them on read.
struct DeviceGMat {
  dh::dvec<common::compressed_byte_t> gidx_buffer;  // packed bin indices
  common::CompressedIterator<uint32_t> gidx;        // reader over gidx_buffer
  dh::dvec<size_t> row_ptr;  // CSR row offsets, rebased to this shard
  // Copies rows [row_begin, row_end) / elements [element_begin, element_end)
  // of the host GHistIndexMatrix into this device's buffers. Both dvecs must
  // already be allocated (enforced by the CHECKs); n_bins is the symbol
  // count used for the compressed encoding.
  void Init(int device_idx, const common::GHistIndexMatrix& gmat,
            bst_ulong element_begin, bst_ulong element_end, bst_ulong row_begin,
            bst_ulong row_end, int n_bins) {
    dh::safe_cuda(hipSetDevice(device_idx));
    CHECK(gidx_buffer.size()) << "gidx_buffer must be externally allocated";
    CHECK_EQ(row_ptr.size(), (row_end - row_begin) + 1)
        << "row_ptr must be externally allocated";
    // Compress the shard's bin indices on the host, then upload in one shot.
    common::CompressedBufferWriter cbw(n_bins);
    std::vector<common::compressed_byte_t> host_buffer(gidx_buffer.size());
    cbw.Write(host_buffer.data(), gmat.index.begin() + element_begin,
              gmat.index.begin() + element_end);
    gidx_buffer = host_buffer;
    gidx = common::CompressedIterator<uint32_t>(gidx_buffer.data(), n_bins);
    // row_ptr
    thrust::copy(gmat.row_ptr.data() + row_begin,
                 gmat.row_ptr.data() + row_end + 1, row_ptr.tbegin());
    // normalise row_ptr: subtract the shard's first offset so row offsets
    // index into this device's local element range
    size_t start = gmat.row_ptr[row_begin];
    thrust::transform(row_ptr.tbegin(), row_ptr.tend(), row_ptr.tbegin(),
                      [=] __device__(size_t val) { return val - start; });
  }
};
// Thin device view over one GPU's histogram array: (node, bin) -> gpair_sum_t.
struct HistHelper {
  gpair_sum_t* d_hist;  // flat [node * n_bins + bin] histogram storage
  int n_bins;
  __host__ __device__ HistHelper(gpair_sum_t* ptr, int n_bins)
      : d_hist(ptr), n_bins(n_bins) {}
  // Atomically accumulates one gradient pair into bin `gidx` of node `nidx`.
  // gpair_sum_t (bst_gpair_integer) holds two 64-bit integer fields, so the
  // two halves are added with separate 64-bit integer atomicAdds; the pair is
  // therefore not updated atomically as a unit, but each field's sum is exact.
  __device__ void Add(bst_gpair gpair, int gidx, int nidx) const {
    int hist_idx = nidx * n_bins + gidx;
    auto dst_ptr = reinterpret_cast<unsigned long long int*>(&d_hist[hist_idx]); // NOLINT
    gpair_sum_t tmp(gpair.GetGrad(), gpair.GetHess());
    auto src_ptr = reinterpret_cast<gpair_sum_t::value_t*>(&tmp);
    atomicAdd(dst_ptr, static_cast<unsigned long long int>(*src_ptr)); // NOLINT
    atomicAdd(dst_ptr + 1, static_cast<unsigned long long int>(*(src_ptr + 1))); // NOLINT
  }
  // Plain (non-atomic) read of one bin of one node.
  __device__ gpair_sum_t Get(int gidx, int nidx) const {
    return d_hist[nidx * n_bins + gidx];
  }
};
// Per-device histogram storage for every node of the tree, flat [node][bin]
// layout. `data` must be allocated by the bulk allocator before Init is
// called.
struct DeviceHist {
  int n_bins;
  dh::dvec<gpair_sum_t> data;
  // Records the bin count; the storage itself is external (see the CHECK).
  void Init(int n_bins_in) {
    this->n_bins = n_bins_in;
    CHECK(!data.empty()) << "DeviceHist must be externally allocated";
  }
  // Zeroes the histogram on `device_idx`. The hipSetDevice return value is
  // now checked through dh::safe_cuda, consistent with every other call site
  // in this file (the original silently ignored it).
  void Reset(int device_idx) {
    dh::safe_cuda(hipSetDevice(device_idx));
    data.fill(gpair_sum_t());
  }
  HistHelper GetBuilder() { return HistHelper(data.data(), n_bins); }
  // Pointer to the first bin of the first node on level `depth`.
  gpair_sum_t* GetLevelPtr(int depth) {
    return data.data() + n_nodes(depth - 1) * n_bins;
  }
  // Number of gpair_sum_t entries covering one whole level.
  int LevelSize(int depth) { return n_bins * n_nodes_level(depth); }
};
// Best split found so far for one node; lives in shared memory inside
// find_split_kernel.
struct SplitCandidate {
  float loss_chg;     // gain of the best split so far (-FLT_MAX = none yet)
  bool missing_left;  // send rows with missing values to the left child?
  float fvalue;       // split threshold
  int findex;         // feature index (-1 = no split recorded)
  gpair_sum_t left_sum;   // gradient sums of the would-be children; only
  gpair_sum_t right_sum;  // meaningful once findex >= 0
  __host__ __device__ SplitCandidate()
      : loss_chg(-FLT_MAX), missing_left(true), fvalue(0), findex(-1) {}
  // Replaces the stored split iff the candidate has strictly higher gain and
  // both children satisfy min_child_weight.
  __device__ void Update(float loss_chg_in, bool missing_left_in,
                         float fvalue_in, int findex_in,
                         gpair_sum_t left_sum_in, gpair_sum_t right_sum_in,
                         const GPUTrainingParam& param) {
    if (loss_chg_in > loss_chg &&
        left_sum_in.GetHess() >= param.min_child_weight &&
        right_sum_in.GetHess() >= param.min_child_weight) {
      loss_chg = loss_chg_in;
      missing_left = missing_left_in;
      fvalue = fvalue_in;
      left_sum = left_sum_in;
      right_sum = right_sum_in;
      findex = findex_in;
    }
  }
  // A split is only applied when it strictly improves the loss.
  __device__ bool IsValid() const { return loss_chg > 0.0f; }
};
// Stateful prefix functor for cub's ExclusiveScan: returns the total
// accumulated over previous tiles, then folds in the new tile aggregate.
// NOTE(review): the call operator takes/returns bst_gpair while the running
// total is gpair_sum_t, so implicit conversions happen on both lines --
// confirm they are lossless for the integer-gradient representation.
struct GpairCallbackOp {
  // Running prefix
  gpair_sum_t running_total;
  // Constructor
  __device__ GpairCallbackOp() : running_total(gpair_sum_t()) {}
  __device__ bst_gpair operator()(bst_gpair block_aggregate) {
    gpair_sum_t old_prefix = running_total;
    running_total += block_aggregate;
    return old_prefix;
  }
};
// Evaluates every candidate split for each node on the current tree level and
// writes the chosen split plus the two child nodes into d_nodes.
// Launch shape: one block per level node (gridDim.x == n_nodes_level(depth));
// BLOCK_THREADS threads cooperate over the histogram bins of one feature at a
// time. d_level_hist points at this level's slice of the histogram;
// d_feature_segments gives [begin, end) bin ranges per (node, feature).
template <int BLOCK_THREADS>
__global__ void find_split_kernel(
    const gpair_sum_t* d_level_hist, int* d_feature_segments, int depth,
    int n_features, int n_bins, DeviceDenseNode* d_nodes,
    int nodes_offset_device, float* d_fidx_min_map, float* d_gidx_fvalue_map,
    GPUTrainingParam gpu_param, bool* d_left_child_smallest_temp,
    bool colsample, int* d_feature_flags) {
  typedef hipcub::KeyValuePair<int, float> ArgMaxT;
  typedef hipcub::BlockScan<gpair_sum_t, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>
      BlockScanT;
  typedef hipcub::BlockReduce<ArgMaxT, BLOCK_THREADS> MaxReduceT;
  typedef hipcub::BlockReduce<gpair_sum_t, BLOCK_THREADS> SumReduceT;
  // The three collectives are used at disjoint times, so their scratch
  // space can share one union in shared memory.
  union TempStorage {
    typename BlockScanT::TempStorage scan;
    typename MaxReduceT::TempStorage max_reduce;
    typename SumReduceT::TempStorage sum_reduce;
  };
  // Per-block shared state: best split so far, current feature's gradient
  // total, and the thread index that holds the best gain.
  __shared__ cub::Uninitialized<SplitCandidate> uninitialized_split;
  SplitCandidate& split = uninitialized_split.Alias();
  __shared__ cub::Uninitialized<gpair_sum_t> uninitialized_sum;
  gpair_sum_t& shared_sum = uninitialized_sum.Alias();
  __shared__ ArgMaxT block_max;
  __shared__ TempStorage temp_storage;
  if (threadIdx.x == 0) {
    split = SplitCandidate();
  }
  __syncthreads();
  // below two are for accessing full-sized node list stored on each device
  // always one block per node, BLOCK_THREADS threads per block
  int level_node_idx = blockIdx.x + nodes_offset_device;
  int node_idx = n_nodes(depth - 1) + level_node_idx;
  for (int fidx = 0; fidx < n_features; fidx++) {
    // Skip features dropped by column sampling for this level.
    if (colsample && d_feature_flags[fidx] == 0) continue;
    int begin = d_feature_segments[level_node_idx * n_features + fidx];
    int end = d_feature_segments[level_node_idx * n_features + fidx + 1];
    // First pass: block-wide reduction of all the feature's bins to get the
    // feature's total gradient sum (needed for the missing-value term).
    gpair_sum_t feature_sum = gpair_sum_t();
    for (int reduce_begin = begin; reduce_begin < end;
         reduce_begin += BLOCK_THREADS) {
      bool thread_active = reduce_begin + threadIdx.x < end;
      // Scan histogram
      gpair_sum_t bin = thread_active ? d_level_hist[reduce_begin + threadIdx.x]
                                      : gpair_sum_t();
      feature_sum +=
          SumReduceT(temp_storage.sum_reduce).Reduce(bin, hipcub::Sum());
    }
    if (threadIdx.x == 0) {
      shared_sum = feature_sum;
    }
    // __syncthreads(); // no need to synch because below there is a Scan
    // NOTE(review): the omitted barrier relies on the BlockScan below
    // synchronising block-wide before shared_sum is read -- confirm this
    // holds for the cub version in use.
    GpairCallbackOp prefix_op = GpairCallbackOp();
    // Second pass: exclusive prefix-scan over the bins; each thread then
    // evaluates the split "left = everything strictly below my bin".
    for (int scan_begin = begin; scan_begin < end;
         scan_begin += BLOCK_THREADS) {
      bool thread_active = scan_begin + threadIdx.x < end;
      gpair_sum_t bin = thread_active ? d_level_hist[scan_begin + threadIdx.x]
                                      : gpair_sum_t();
      BlockScanT(temp_storage.scan)
          .ExclusiveScan(bin, bin, hipcub::Sum(), prefix_op);
      // Calculate gain
      gpair_sum_t parent_sum = gpair_sum_t(d_nodes[node_idx].sum_gradients);
      float parent_gain = d_nodes[node_idx].root_gain;
      // Gradient mass of rows with a missing value for this feature.
      gpair_sum_t missing = parent_sum - shared_sum;
      bool missing_left;
      float gain = thread_active
                       ? loss_chg_missing(bin, missing, parent_sum, parent_gain,
                                          gpu_param, missing_left)
                       : -FLT_MAX;
      __syncthreads();
      // Find thread with best gain
      ArgMaxT tuple(threadIdx.x, gain);
      ArgMaxT best =
          MaxReduceT(temp_storage.max_reduce).Reduce(tuple, hipcub::ArgMax());
      if (threadIdx.x == 0) {
        block_max = best;
      }
      __syncthreads();
      // Best thread updates split
      if (threadIdx.x == block_max.key) {
        float fvalue;
        int gidx = (scan_begin - (level_node_idx * n_bins)) + threadIdx.x;
        if (threadIdx.x == 0 &&
            begin == scan_begin) {  // check at start of first tile
          // First bin of the feature: threshold below the minimum value.
          fvalue = d_fidx_min_map[fidx];
        } else {
          fvalue = d_gidx_fvalue_map[gidx - 1];
        }
        gpair_sum_t left = missing_left ? bin + missing : bin;
        gpair_sum_t right = parent_sum - left;
        split.Update(gain, missing_left, fvalue, fidx, left, right, gpu_param);
      }
      __syncthreads();
    }  // end scan
  }    // end over features
  // Create node
  if (threadIdx.x == 0 && split.IsValid()) {
    d_nodes[node_idx].SetSplit(split.fvalue, split.findex,
                               split.missing_left ? LeftDir : RightDir);
    DeviceDenseNode& left_child = d_nodes[left_child_nidx(node_idx)];
    DeviceDenseNode& right_child = d_nodes[right_child_nidx(node_idx)];
    bool& left_child_smallest = d_left_child_smallest_temp[node_idx];
    left_child =
        DeviceDenseNode(split.left_sum, left_child_nidx(node_idx), gpu_param);
    right_child =
        DeviceDenseNode(split.right_sum, right_child_nidx(node_idx), gpu_param);
    // Record smallest node (used later by the histogram subtraction trick).
    if (split.left_sum.GetHess() <= split.right_sum.GetHess()) {
      left_child_smallest = true;
    } else {
      left_child_smallest = false;
    }
  }
}
// GPU tree updater that grows the tree one level at a time from
// quantised-feature histograms, sharding rows across multiple GPUs and
// reducing per-level histograms with NCCL.
class GPUHistMaker : public TreeUpdater {
 public:
  // Members are fully initialised lazily in InitData on the first Update.
  GPUHistMaker()
      : initialised(false),
        is_dense(false),
        p_last_fmat_(nullptr),
        prediction_cache_initialised(false) {}
~GPUHistMaker() {
if (initialised) {
for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
ncclCommDestroy(comms[d_idx]);
dh::safe_cuda(hipSetDevice(dList[d_idx]));
dh::safe_cuda(hipStreamDestroy(*(streams[d_idx])));
}
for (int num_d = 1; num_d <= n_devices;
++num_d) { // loop over number of devices used
for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
ncclCommDestroy(find_split_comms[num_d - 1][d_idx]);
}
}
}
}
void Init(
const std::vector<std::pair<std::string, std::string>>& args) override {
param.InitAllowUnknown(args);
CHECK(param.max_depth < 16) << "Tree depth too large.";
CHECK(param.max_depth != 0) << "Tree depth cannot be 0.";
CHECK(param.grow_policy != TrainParam::kLossGuide)
<< "Loss guided growth policy not supported. Use CPU algorithm.";
this->param = param;
CHECK(param.n_gpus != 0) << "Must have at least one device";
}
void Update(const std::vector<bst_gpair>& gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
GradStats::CheckInfo(dmat->info());
// rescale learning rate according to size of trees
float lr = param.learning_rate;
param.learning_rate = lr / trees.size();
// build tree
try {
for (size_t i = 0; i < trees.size(); ++i) {
this->UpdateTree(gpair, dmat, trees[i]);
}
} catch (const std::exception& e) {
LOG(FATAL) << "GPU plugin exception: " << e.what() << std::endl;
}
param.learning_rate = lr;
}
  // One-time (first call) and per-iteration setup. First call: builds the
  // quantile cuts and binned matrix on the CPU, creates NCCL communicators
  // and streams, shards rows/elements across devices, and bulk-allocates all
  // per-device buffers. Every call: resets node/position buffers, copies and
  // subsamples this iteration's gradients, and zeroes the histograms.
  void InitData(const std::vector<bst_gpair>& gpair, DMatrix& fmat,  // NOLINT
                const RegTree& tree) {
    dh::Timer time1;
    // set member num_rows and n_devices for rest of GPUHistBuilder members
    info = &fmat.info();
    num_rows = info->num_row;
    n_devices = dh::n_devices(param.n_gpus, num_rows);
    if (!initialised) {
      // reset static timers used across iterations
      cpu_init_time = 0;
      gpu_init_time = 0;
      cpu_time.reset();
      gpu_time = 0;
      // set dList member: physical device ids, starting at param.gpu_id
      dList.resize(n_devices);
      for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
        int device_idx = (param.gpu_id + d_idx) % dh::n_visible_devices();
        dList[d_idx] = device_idx;
      }
      // initialize nccl
      comms.resize(n_devices);
      streams.resize(n_devices);
      dh::safe_nccl(ncclCommInitAll(comms.data(), n_devices,
                                    dList.data()));  // initialize communicator
                                                     // (One communicator per
                                                     // process)
      // printf("# NCCL: Using devices\n");
      for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
        // NOTE(review): these malloc'd stream holders must be freed in the
        // destructor alongside hipStreamDestroy.
        streams[d_idx] =
            reinterpret_cast<hipStream_t*>(malloc(sizeof(hipStream_t)));
        dh::safe_cuda(hipSetDevice(dList[d_idx]));
        dh::safe_cuda(hipStreamCreate(streams[d_idx]));
        int cudaDev;
        int rank;
        hipDeviceProp_t prop;
        dh::safe_nccl(ncclCommCuDevice(comms[d_idx], &cudaDev));
        dh::safe_nccl(ncclCommUserRank(comms[d_idx], &rank));
        dh::safe_cuda(hipGetDeviceProperties(&prop, cudaDev));
        // printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
        //       prop.pciBusID, prop.name);
        // hipDriverGetVersion(&driverVersion);
        // hipRuntimeGetVersion(&runtimeVersion);
        // Require compute capability >= 3.5.
        std::ostringstream oss;
        oss << "CUDA Capability Major/Minor version number: " << prop.major
            << "." << prop.minor << " is insufficient.  Need >=3.5.";
        int failed = prop.major < 3 || prop.major == 3 && prop.minor < 5;
        CHECK(failed == 0) << oss.str();
      }
      // local find_split group of comms for each case of reduced number of
      // GPUs to use
      find_split_comms.resize(
          n_devices,
          std::vector<ncclComm_t>(n_devices));  // TODO(JCM): Excessive, but
                                                // ok, and best to do
                                                // here instead of
                                                // repeatedly
      for (int num_d = 1; num_d <= n_devices;
           ++num_d) {  // loop over number of devices used
        dh::safe_nccl(
            ncclCommInitAll(find_split_comms[num_d - 1].data(), num_d,
                            dList.data()));  // initialize communicator
                                             // (One communicator per
                                             // process)
      }
      is_dense = info->num_nonzero == info->num_col * info->num_row;
      dh::Timer time0;
      // Build quantile sketch / histogram cuts on the CPU.
      hmat_.Init(&fmat, param.max_bin);
      cpu_init_time += time0.elapsedSeconds();
      if (param.debug_verbose) {  // Only done once for each training session
        LOG(CONSOLE) << "[GPU Plug-in] CPU Time for hmat_.Init "
                     << time0.elapsedSeconds() << " sec";
        fflush(stdout);
      }
      time0.reset();
      gmat_.cut = &hmat_;
      cpu_init_time += time0.elapsedSeconds();
      if (param.debug_verbose) {  // Only done once for each training session
        LOG(CONSOLE) << "[GPU Plug-in] CPU Time for gmat_.cut "
                     << time0.elapsedSeconds() << " sec";
        fflush(stdout);
      }
      time0.reset();
      // Bin the whole input matrix against the cuts.
      gmat_.Init(&fmat);
      cpu_init_time += time0.elapsedSeconds();
      if (param.debug_verbose) {  // Only done once for each training session
        LOG(CONSOLE) << "[GPU Plug-in] CPU Time for gmat_.Init() "
                     << time0.elapsedSeconds() << " sec";
        fflush(stdout);
      }
      time0.reset();
      if (param.debug_verbose) {  // Only done once for each training session
        LOG(CONSOLE)
            << "[GPU Plug-in] CPU Time for hmat_.Init, gmat_.cut, gmat_.Init "
            << cpu_init_time << " sec";
        fflush(stdout);
      }
      int n_bins = hmat_.row_ptr.back();
      int n_features = hmat_.row_ptr.size() - 1;
      // deliniate data onto multiple gpus: contiguous row shards of roughly
      // equal size, with matching element (CSR) ranges
      device_row_segments.push_back(0);
      device_element_segments.push_back(0);
      bst_uint offset = 0;
      bst_uint shard_size =
          std::ceil(static_cast<double>(num_rows) / n_devices);
      for (int d_idx = 0; d_idx < n_devices; d_idx++) {
        int device_idx = dList[d_idx];
        offset += shard_size;
        offset = std::min(offset, num_rows);
        device_row_segments.push_back(offset);
        device_element_segments.push_back(gmat_.row_ptr[offset]);
      }
      // Build feature segments: per (level node, feature) bin ranges used by
      // find_split_kernel
      std::vector<int> h_feature_segments;
      for (int node = 0; node < n_nodes_level(param.max_depth - 1); node++) {
        for (int fidx = 0; fidx < n_features; fidx++) {
          h_feature_segments.push_back(hmat_.row_ptr[fidx] + node * n_bins);
        }
      }
      h_feature_segments.push_back(n_nodes_level(param.max_depth - 1) * n_bins);
      // Construct feature map: bin index -> feature index
      std::vector<int> h_gidx_feature_map(n_bins);
      for (int fidx = 0; fidx < n_features; fidx++) {
        for (int i = hmat_.row_ptr[fidx]; i < hmat_.row_ptr[fidx + 1]; i++) {
          h_gidx_feature_map[i] = fidx;
        }
      }
      int level_max_bins = n_nodes_level(param.max_depth - 1) * n_bins;
      // allocate unique common data that reside on master device (NOTE: None
      // currently)
      // int master_device=dList[0];
      // ba.allocate(master_device, );
      // allocate vectors across all devices
      temp_memory.resize(n_devices);
      hist_vec.resize(n_devices);
      nodes.resize(n_devices);
      nodes_temp.resize(n_devices);
      nodes_child_temp.resize(n_devices);
      left_child_smallest.resize(n_devices);
      left_child_smallest_temp.resize(n_devices);
      feature_flags.resize(n_devices);
      fidx_min_map.resize(n_devices);
      feature_segments.resize(n_devices);
      prediction_cache.resize(n_devices);
      position.resize(n_devices);
      position_tmp.resize(n_devices);
      device_matrix.resize(n_devices);
      device_gpair.resize(n_devices);
      gidx_feature_map.resize(n_devices);
      gidx_fvalue_map.resize(n_devices);
      // Largest power of two no bigger than n_devices, capped by the number
      // of nodes on the deepest level.
      int find_split_n_devices = std::pow(2, std::floor(std::log2(n_devices)));
      find_split_n_devices =
          std::min(n_nodes_level(param.max_depth), find_split_n_devices);
      int max_num_nodes_device =
          n_nodes_level(param.max_depth) / find_split_n_devices;
      // num_rows_segment: for sharding rows onto gpus for splitting data
      // num_elements_segment: for sharding rows (of elements) onto gpus for
      // splitting data
      // max_num_nodes_device: for sharding nodes onto gpus for split finding
      // All other variables have full copy on gpu, with copy either being
      // identical or just current portion (like for histogram) before
      // AllReduce
      for (int d_idx = 0; d_idx < n_devices; d_idx++) {
        int device_idx = dList[d_idx];
        bst_uint num_rows_segment =
            device_row_segments[d_idx + 1] - device_row_segments[d_idx];
        bst_ulong num_elements_segment =
            device_element_segments[d_idx + 1] - device_element_segments[d_idx];
        // Single bulk allocation of every per-device buffer.
        ba.allocate(
            device_idx, param.silent, &(hist_vec[d_idx].data),
            n_nodes(param.max_depth - 1) * n_bins, &nodes[d_idx],
            n_nodes(param.max_depth), &nodes_temp[d_idx], max_num_nodes_device,
            &nodes_child_temp[d_idx], max_num_nodes_device,
            &left_child_smallest[d_idx], n_nodes(param.max_depth),
            &left_child_smallest_temp[d_idx], max_num_nodes_device,
            &feature_flags[d_idx],
            n_features,  // may change but same on all devices
            &fidx_min_map[d_idx],
            hmat_.min_val.size(),  // constant and same on all devices
            &feature_segments[d_idx],
            h_feature_segments.size(),  // constant and same on all devices
            &prediction_cache[d_idx], num_rows_segment, &position[d_idx],
            num_rows_segment, &position_tmp[d_idx], num_rows_segment,
            &device_gpair[d_idx], num_rows_segment,
            &device_matrix[d_idx].gidx_buffer,
            common::CompressedBufferWriter::CalculateBufferSize(
                num_elements_segment,
                n_bins),  // constant and same on all devices
            &device_matrix[d_idx].row_ptr, num_rows_segment + 1,
            &gidx_feature_map[d_idx],
            n_bins,  // constant and same on all devices
            &gidx_fvalue_map[d_idx],
            hmat_.cut.size());  // constant and same on all devices
        // Copy Host to Device (assumes comes after ba.allocate that sets
        // device)
        device_matrix[d_idx].Init(
            device_idx, gmat_, device_element_segments[d_idx],
            device_element_segments[d_idx + 1], device_row_segments[d_idx],
            device_row_segments[d_idx + 1], n_bins);
        gidx_feature_map[d_idx] = h_gidx_feature_map;
        gidx_fvalue_map[d_idx] = hmat_.cut;
        feature_segments[d_idx] = h_feature_segments;
        fidx_min_map[d_idx] = hmat_.min_val;
        // Initialize, no copy
        hist_vec[d_idx].Init(n_bins);     // init host object
        prediction_cache[d_idx].fill(0);  // init device object (assumes comes
                                          // after ba.allocate that sets device)
        feature_flags[d_idx].fill(
            1);  // init device object (assumes comes after
                 // ba.allocate that sets device)
      }
    }
    // copy or init to do every iteration
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(hipSetDevice(device_idx));
      nodes[d_idx].fill(DeviceDenseNode());
      nodes_temp[d_idx].fill(DeviceDenseNode());
      nodes_child_temp[d_idx].fill(DeviceDenseNode());
      position[d_idx].fill(0);
      // This device's slice of the gradients, then row subsampling.
      device_gpair[d_idx].copy(gpair.begin() + device_row_segments[d_idx],
                               gpair.begin() + device_row_segments[d_idx + 1]);
      subsample_gpair(&device_gpair[d_idx], param.subsample,
                      device_row_segments[d_idx]);
      hist_vec[d_idx].Reset(device_idx);
      // left_child_smallest and left_child_smallest_temp don't need to be
      // initialized
    }
    dh::synchronize_n_devices(n_devices, dList);
    if (!initialised) {
      gpu_init_time = time1.elapsedSeconds() - cpu_init_time;
      gpu_time = -cpu_init_time;
      if (param.debug_verbose) {  // Only done once for each training session
        LOG(CONSOLE) << "[GPU Plug-in] Time for GPU operations during First "
                        "Call to InitData() "
                     << gpu_init_time << " sec";
        fflush(stdout);
      }
    }
    p_last_fmat_ = &fmat;
    initialised = true;
  }
  // Accumulates the gradient histogram for every active node on `depth`:
  // each device bins its own row shard, the per-level slices are AllReduced
  // across devices with NCCL, and finally the "subtraction trick" fills the
  // larger sibling of each node pair as parent minus smaller sibling.
  void BuildHist(int depth) {
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      size_t begin = device_element_segments[d_idx];
      size_t end = device_element_segments[d_idx + 1];
      size_t row_begin = device_row_segments[d_idx];
      size_t row_end = device_row_segments[d_idx + 1];
      auto d_gidx = device_matrix[d_idx].gidx;
      auto d_row_ptr = device_matrix[d_idx].row_ptr.tbegin();
      auto d_position = position[d_idx].data();
      auto d_gpair = device_gpair[d_idx].data();
      auto d_left_child_smallest = left_child_smallest[d_idx].data();
      auto hist_builder = hist_vec[d_idx].GetBuilder();
      // One lambda invocation per (element, owning row) pair of the shard.
      dh::TransformLbs(
          device_idx, &temp_memory[d_idx], end - begin, d_row_ptr,
          row_end - row_begin, is_dense,
          [=] __device__(size_t local_idx, int local_ridx) {
            int nidx = d_position[local_ridx];  // OPTMARK: latency
            if (!is_active(nidx, depth)) return;
            // Only increment smallest node
            bool is_smallest = (d_left_child_smallest[parent_nidx(nidx)] &&
                                is_left_child(nidx)) ||
                               (!d_left_child_smallest[parent_nidx(nidx)] &&
                                !is_left_child(nidx));
            if (!is_smallest && depth > 0) return;
            int gidx = d_gidx[local_idx];
            bst_gpair gpair = d_gpair[local_ridx];
            hist_builder.Add(gpair, gidx,
                             nidx);  // OPTMARK: This is slow, could use
                                     // shared memory or cache results
                                     // intead of writing to global
                                     // memory every time in atomic way.
          });
    }
    dh::synchronize_n_devices(n_devices, dList);
    // time.printElapsed("Add Time");
    // (in-place) reduce each element of histogram (for only current level)
    // across multiple gpus
    // TODO(JCM): use out of place with pre-allocated buffer, but then have to
    // copy
    // back on device
    // fprintf(stderr,"sizeof(bst_gpair)/sizeof(float)=%d\n",sizeof(bst_gpair)/sizeof(float));
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(hipSetDevice(device_idx));
      // Count is expressed in 64-bit integer lanes (nccl_sum_t == ncclInt64).
      dh::safe_nccl(ncclAllReduce(
          reinterpret_cast<const void*>(hist_vec[d_idx].GetLevelPtr(depth)),
          reinterpret_cast<void*>(hist_vec[d_idx].GetLevelPtr(depth)),
          hist_vec[d_idx].LevelSize(depth) * sizeof(gpair_sum_t) /
              sizeof(gpair_sum_t::value_t),
          nccl_sum_t, ncclSum, comms[d_idx], *(streams[d_idx])));
    }
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(hipSetDevice(device_idx));
      dh::safe_cuda(hipStreamSynchronize(*(streams[d_idx])));
    }
    // if no NCCL, then presume only 1 GPU, then already correct
    // time.printElapsed("Reduce-Add Time");
    // Subtraction trick (applied to all devices in same way -- to avoid doing
    // on master and then Bcast)
    if (depth > 0) {
      for (int d_idx = 0; d_idx < n_devices; d_idx++) {
        int device_idx = dList[d_idx];
        dh::safe_cuda(hipSetDevice(device_idx));
        auto hist_builder = hist_vec[d_idx].GetBuilder();
        auto d_left_child_smallest = left_child_smallest[d_idx].data();
        int n_sub_bins = (n_nodes_level(depth) / 2) * hist_builder.n_bins;
        dh::launch_n(device_idx, n_sub_bins, [=] __device__(int idx) {
          // Pick the larger sibling of the pair: parent - other = this.
          int nidx = n_nodes(depth - 1) + ((idx / hist_builder.n_bins) * 2);
          bool left_smallest = d_left_child_smallest[parent_nidx(nidx)];
          if (left_smallest) {
            nidx++;  // If left is smallest switch to right child
          }
          int gidx = idx % hist_builder.n_bins;
          gpair_sum_t parent = hist_builder.Get(gidx, parent_nidx(nidx));
          int other_nidx = left_smallest ? nidx - 1 : nidx + 1;
          gpair_sum_t other = hist_builder.Get(gidx, other_nidx);
          gpair_sum_t sub = parent - other;
          hist_builder.Add(
              bst_gpair(sub.GetGrad(), sub.GetHess()), gidx,
              nidx);  // OPTMARK: This is slow, could use shared
                      // memory or cache results intead of writing to
                      // global memory every time in atomic way.
        });
      }
      dh::synchronize_n_devices(n_devices, dList);
    }
  }
// Block-size search range for find_split_kernel: specialisations are
// generated in steps of CHUNK threads from MIN up to the hardware cap.
#define MIN_BLOCK_THREADS 32
#define CHUNK_BLOCK_THREADS 32
// MAX_BLOCK_THREADS of 1024 is hard-coded maximum block size due
// to CUDA capability 35 and above requirement
// for Maximum number of threads per block
#define MAX_BLOCK_THREADS 1024
  // Launches the split search for `depth` with the smallest specialised
  // block size that covers param.max_bin.
  void FindSplit(int depth) {
    // Specialised based on max_bins
    this->FindSplitSpecialize(depth, Int<MIN_BLOCK_THREADS>());
  }
  // Compile-time search: if BLOCK_THREADS bins per tile is enough for
  // param.max_bin, launch; otherwise recurse with a larger specialisation.
  template <int BLOCK_THREADS>
  void FindSplitSpecialize(int depth, Int<BLOCK_THREADS>) {
    if (param.max_bin <= BLOCK_THREADS) {
      LaunchFindSplit<BLOCK_THREADS>(depth);
    } else {
      this->FindSplitSpecialize(depth,
                                Int<BLOCK_THREADS + CHUNK_BLOCK_THREADS>());
    }
  }
  // Recursion terminator: at the 1024-thread hardware cap, launch regardless.
  void FindSplitSpecialize(int depth, Int<MAX_BLOCK_THREADS>) {
    this->LaunchFindSplit<MAX_BLOCK_THREADS>(depth);
  }
  // Launches find_split_kernel on every device: one block per node of the
  // level; each device evaluates the full node set (redundantly), so no
  // result exchange is required afterwards.
  template <int BLOCK_THREADS>
  void LaunchFindSplit(int depth) {
    bool colsample =
        param.colsample_bylevel < 1.0 || param.colsample_bytree < 1.0;
    int num_nodes_device = n_nodes_level(depth);
    const int GRID_SIZE = num_nodes_device;
    // all GPUs do same work
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(hipSetDevice(device_idx));
      int nodes_offset_device = 0;
      hipLaunchKernelGGL(( find_split_kernel<BLOCK_THREADS>), dim3(GRID_SIZE), dim3(BLOCK_THREADS), 0, 0,
          hist_vec[d_idx].GetLevelPtr(depth), feature_segments[d_idx].data(),
          depth, (info->num_col), (hmat_.row_ptr.back()), nodes[d_idx].data(),
          nodes_offset_device, fidx_min_map[d_idx].data(),
          gidx_fvalue_map[d_idx].data(), GPUTrainingParam(param),
          left_child_smallest[d_idx].data(), colsample,
          feature_flags[d_idx].data());
    }
    // NOTE: No need to syncrhonize with host as all above pure P2P ops or
    // on-device ops
  }
  // Computes the global gradient sum (per-device thrust reductions combined
  // on the host) and writes the identical root node on every device.
  void InitFirstNode(const std::vector<bst_gpair>& gpair) {
    // Perform asynchronous reduction on each gpu
    std::vector<bst_gpair> device_sums(n_devices);
#pragma omp parallel for num_threads(n_devices)
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(hipSetDevice(device_idx));
      auto begin = device_gpair[d_idx].tbegin();
      auto end = device_gpair[d_idx].tend();
      bst_gpair init = bst_gpair();
      auto binary_op = thrust::plus<bst_gpair>();
      device_sums[d_idx] = thrust::reduce(begin, end, init, binary_op);
    }
    // Combine the shard totals on the host.
    bst_gpair sum = bst_gpair();
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      sum += device_sums[d_idx];
    }
    // Setup first node so all devices have same first node (here done same on
    // all devices, or could have done one device and Bcast if worried about
    // exact precision issues)
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      auto d_nodes = nodes[d_idx].data();
      auto gpu_param = GPUTrainingParam(param);
      dh::launch_n(device_idx, 1, [=] __device__(int idx) {
        bst_gpair sum_gradients = sum;
        d_nodes[idx] = DeviceDenseNode(sum_gradients, 0, gpu_param);
      });
    }
    // synch all devices to host before moving on (No, can avoid because
    // BuildHist calls another kernel in default stream)
    // dh::synchronize_n_devices(n_devices, dList);
  }
void UpdatePosition(int depth) {
if (is_dense) {
this->UpdatePositionDense(depth);
} else {
this->UpdatePositionSparse(depth);
}
}
  // Dense-layout row routing: every row stores all n_columns bins, so the
  // split feature's bin is read directly at row * n_columns + fidx, and each
  // active row moves to the left or right child of its current node.
  void UpdatePositionDense(int depth) {
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      auto d_position = position[d_idx].data();
      DeviceDenseNode* d_nodes = nodes[d_idx].data();
      auto d_gidx_fvalue_map = gidx_fvalue_map[d_idx].data();
      auto d_gidx = device_matrix[d_idx].gidx;
      int n_columns = info->num_col;
      size_t begin = device_row_segments[d_idx];
      size_t end = device_row_segments[d_idx + 1];
      dh::launch_n(device_idx, end - begin, [=] __device__(size_t local_idx) {
        int pos = d_position[local_idx];
        if (!is_active(pos, depth)) {
          return;
        }
        DeviceDenseNode node = d_nodes[pos];
        if (node.IsLeaf()) {
          return;
        }
        // Dense layout: bin of the split feature for this row.
        int gidx = d_gidx[local_idx * static_cast<size_t>(n_columns) +
                          static_cast<size_t>(node.fidx)];
        float fvalue = d_gidx_fvalue_map[gidx];
        if (fvalue <= node.fvalue) {
          d_position[local_idx] = left_child_nidx(pos);
        } else {
          d_position[local_idx] = right_child_nidx(pos);
        }
      });
    }
    dh::synchronize_n_devices(n_devices, dList);
    // dh::safe_cuda(hipDeviceSynchronize());
  }
  // Sparse-layout row routing, two phases: first every active row is tagged
  // with its node's default (missing-value) direction; then rows that do have
  // a value for the split feature overwrite that with the actual comparison
  // result. position_tmp is used so phase two reads consistent old positions.
  void UpdatePositionSparse(int depth) {
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      auto d_position = position[d_idx].data();
      auto d_position_tmp = position_tmp[d_idx].data();
      DeviceDenseNode* d_nodes = nodes[d_idx].data();
      auto d_gidx_feature_map = gidx_feature_map[d_idx].data();
      auto d_gidx_fvalue_map = gidx_fvalue_map[d_idx].data();
      auto d_gidx = device_matrix[d_idx].gidx;
      auto d_row_ptr = device_matrix[d_idx].row_ptr.tbegin();
      size_t row_begin = device_row_segments[d_idx];
      size_t row_end = device_row_segments[d_idx + 1];
      size_t element_begin = device_element_segments[d_idx];
      size_t element_end = device_element_segments[d_idx + 1];
      // Update missing direction
      dh::launch_n(device_idx, row_end - row_begin,
                   [=] __device__(int local_idx) {
                     int pos = d_position[local_idx];
                     if (!is_active(pos, depth)) {
                       d_position_tmp[local_idx] = pos;
                       return;
                     }
                     DeviceDenseNode node = d_nodes[pos];
                     if (node.IsLeaf()) {
                       d_position_tmp[local_idx] = pos;
                       return;
                     } else if (node.dir == LeftDir) {
                       d_position_tmp[local_idx] = pos * 2 + 1;
                     } else {
                       d_position_tmp[local_idx] = pos * 2 + 2;
                     }
                   });
      // Update node based on fvalue where exists
      // OPTMARK: This kernel is very inefficient for both compute and memory,
      // dominated by memory dependency / access patterns
      dh::TransformLbs(
          device_idx, &temp_memory[d_idx], element_end - element_begin,
          d_row_ptr, row_end - row_begin, is_dense,
          [=] __device__(size_t local_idx, int local_ridx) {
            int pos = d_position[local_ridx];
            if (!is_active(pos, depth)) {
              return;
            }
            DeviceDenseNode node = d_nodes[pos];
            if (node.IsLeaf()) {
              return;
            }
            int gidx = d_gidx[local_idx];
            int findex =
                d_gidx_feature_map[gidx];  // OPTMARK: slowest global
                                           // memory access, maybe setup
                                           // position, gidx, etc. as
                                           // combined structure?
            if (findex == node.fidx) {
              float fvalue = d_gidx_fvalue_map[gidx];
              if (fvalue <= node.fvalue) {
                d_position_tmp[local_ridx] = left_child_nidx(pos);
              } else {
                d_position_tmp[local_ridx] = right_child_nidx(pos);
              }
            }
          });
      // Commit the new positions for this device.
      position[d_idx] = position_tmp[d_idx];
    }
    dh::synchronize_n_devices(n_devices, dList);
  }
  // Samples the per-tree feature subset (colsample_bytree). No-op when both
  // colsample parameters are 1.0.
  void ColSampleTree() {
    if (param.colsample_bylevel == 1.0 && param.colsample_bytree == 1.0) return;
    feature_set_tree.resize(info->num_col);
    std::iota(feature_set_tree.begin(), feature_set_tree.end(), 0);
    feature_set_tree = col_sample(feature_set_tree, param.colsample_bytree);
  }
  // Samples the per-level feature subset from the tree-level subset and
  // mirrors the resulting 0/1 feature flags to every device.
  void ColSampleLevel() {
    if (param.colsample_bylevel == 1.0 && param.colsample_bytree == 1.0) return;
    feature_set_level.resize(feature_set_tree.size());
    feature_set_level = col_sample(feature_set_tree, param.colsample_bylevel);
    std::vector<int> h_feature_flags(info->num_col, 0);
    for (auto fidx : feature_set_level) {
      h_feature_flags[fidx] = 1;
    }
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(hipSetDevice(device_idx));
      feature_flags[d_idx] = h_feature_flags;
    }
    dh::synchronize_n_devices(n_devices, dList);
  }
  // Applies the just-built tree's leaf weights (scaled by the learning rate)
  // to the cached predictions, entirely on device, then copies the updated
  // shard back into out_preds. Returns false when the cache cannot be used
  // (different matrix or no tree built yet).
  bool UpdatePredictionCache(const DMatrix* data,
                             std::vector<bst_float>* p_out_preds) override {
    std::vector<bst_float>& out_preds = *p_out_preds;
    if (nodes.empty() || !p_last_fmat_ || data != p_last_fmat_) {
      return false;
    }
    if (!prediction_cache_initialised) {
      // Seed the device cache with the caller's current predictions, one
      // row shard per device.
      for (int d_idx = 0; d_idx < n_devices; d_idx++) {
        int device_idx = dList[d_idx];
        size_t row_begin = device_row_segments[d_idx];
        size_t row_end = device_row_segments[d_idx + 1];
        prediction_cache[d_idx].copy(out_preds.begin() + row_begin,
                                     out_preds.begin() + row_end);
      }
      prediction_cache_initialised = true;
    }
    dh::synchronize_n_devices(n_devices, dList);
    float eps = param.learning_rate;
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      size_t row_begin = device_row_segments[d_idx];
      size_t row_end = device_row_segments[d_idx + 1];
      auto d_nodes = nodes[d_idx].data();
      auto d_position = position[d_idx].data();
      auto d_prediction_cache = prediction_cache[d_idx].data();
      // Each row adds the weight of the leaf it ended up in.
      dh::launch_n(device_idx, prediction_cache[d_idx].size(),
                   [=] __device__(int local_idx) {
                     int pos = d_position[local_idx];
                     d_prediction_cache[local_idx] += d_nodes[pos].weight * eps;
                   });
      thrust::copy(prediction_cache[d_idx].tbegin(),
                   prediction_cache[d_idx].tend(), &out_preds[row_begin]);
    }
    dh::synchronize_n_devices(n_devices, dList);
    return true;
  }
// Grow one complete tree on the GPU(s), then copy the finished dense tree
// from the master device back into the host RegTree.
void UpdateTree(const std::vector<bst_gpair>& gpair, DMatrix* p_fmat,
                RegTree* p_tree) {
  dh::Timer tree_timer;
  this->InitData(gpair, *p_fmat, *p_tree);
  this->InitFirstNode(gpair);
  this->ColSampleTree();
  // Grow level by level up to the configured maximum depth.
  for (int depth = 0; depth < param.max_depth; ++depth) {
    this->ColSampleLevel();
    this->BuildHist(depth);
    this->FindSplit(depth);
    this->UpdatePosition(depth);
  }
  // done with multi-GPU, pass back result from master to tree on host
  int master_device = dList[0];
  dh::safe_cuda(hipSetDevice(master_device));
  dense2sparse_tree(p_tree, nodes[0], param);
  gpu_time += tree_timer.elapsedSeconds();
  if (param.debug_verbose) {
    LOG(CONSOLE)
        << "[GPU Plug-in] Cumulative GPU Time excluding initial time "
        << (gpu_time - gpu_init_time) << " sec";
    fflush(stdout);
    LOG(CONSOLE) << "[GPU Plug-in] Cumulative CPU Time "
                 << cpu_time.elapsedSeconds() << " sec";
    LOG(CONSOLE)
        << "[GPU Plug-in] Cumulative CPU Time excluding initial time "
        << (cpu_time.elapsedSeconds() - cpu_init_time - gpu_time) << " sec";
    fflush(stdout);
  }
}
protected:
  TrainParam param;  // training hyper-parameters (depth, learning rate, ...)
  // std::unique_ptr<GPUHistBuilder> builder;
  common::HistCutMatrix hmat_;     // quantile cut values per feature (host)
  common::GHistIndexMatrix gmat_;  // quantised input matrix (host)
  MetaInfo* info;                  // meta info of the matrix being trained on
  bool initialised;                // one-time device setup completed?
  bool is_dense;                   // true when num_nonzero == rows * cols
  const DMatrix* p_last_fmat_;     // matrix used by the last UpdateTree call
  bool prediction_cache_initialised;
  // Bulk allocator that owns every device buffer below.
  dh::bulk_allocator<dh::memory_type::DEVICE> ba;
  std::vector<int> feature_set_tree;   // columns sampled for the current tree
  std::vector<int> feature_set_level;  // columns sampled for the current level
  bst_uint num_rows;
  int n_devices;  // number of GPUs actually used
  // below vectors are for each devices used
  std::vector<int> dList;                       // device ordinals
  std::vector<int> device_row_segments;         // row shard boundaries
  std::vector<size_t> device_element_segments;  // element shard boundaries
  std::vector<dh::CubMemory> temp_memory;       // scratch for CUB/LBS calls
  std::vector<DeviceHist> hist_vec;             // per-device histograms
  std::vector<dh::dvec<DeviceDenseNode>> nodes;            // full dense tree
  std::vector<dh::dvec<DeviceDenseNode>> nodes_temp;       // split-find temps
  std::vector<dh::dvec<DeviceDenseNode>> nodes_child_temp;
  std::vector<dh::dvec<bool>> left_child_smallest;       // per node
  std::vector<dh::dvec<bool>> left_child_smallest_temp;
  std::vector<dh::dvec<int>> feature_flags;     // 0/1 colsample mask
  std::vector<dh::dvec<float>> fidx_min_map;    // min fvalue per feature
  std::vector<dh::dvec<int>> feature_segments;  // histogram segment offsets
  std::vector<dh::dvec<bst_float>> prediction_cache;
  std::vector<dh::dvec<int>> position;      // current node index per row
  std::vector<dh::dvec<int>> position_tmp;  // double buffer for sparse update
  std::vector<DeviceGMat> device_matrix;    // quantised matrix shards
  std::vector<dh::dvec<bst_gpair>> device_gpair;  // gradient pair shards
  std::vector<dh::dvec<int>> gidx_feature_map;    // bin index -> feature
  std::vector<dh::dvec<float>> gidx_fvalue_map;   // bin index -> cut value
  std::vector<hipStream_t*> streams;  // one heap-allocated stream per device
  std::vector<ncclComm_t> comms;      // all-device NCCL communicators
  std::vector<std::vector<ncclComm_t>> find_split_comms;  // per subset size
  // Cumulative timing bookkeeping for debug_verbose output.
  double cpu_init_time;
  double gpu_init_time;
  dh::Timer cpu_time;
  double gpu_time;
};
// Register this updater so it can be selected as "grow_gpu_hist".
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
} // namespace tree
} // namespace xgboost
| 3f2af129b940ca02273147c3ba9b3a18308addef.cu | /*!
* Copyright 2017 XGBoost contributors
*/
#include <xgboost/tree_updater.h>
#include <memory>
#include <utility>
#include <vector>
#include "../common/compressed_iterator.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "param.h"
#include "updater_gpu_common.cuh"
namespace xgboost {
namespace tree {
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
typedef bst_gpair_integer gpair_sum_t;
static const ncclDataType_t nccl_sum_t = ncclInt64;
// Helper for explicit template specialisation
// Int<N> is an empty tag type: FindSplitSpecialize overloads on it to walk a
// compile-time chain of kernel block sizes (see FindSplit below).
template <int N>
struct Int {};
// Device-resident copy of one shard of the quantised (GHistIndex) matrix:
// bin indices packed into a compressed buffer plus a CSR row_ptr re-based so
// the shard's first element is offset 0.
struct DeviceGMat {
  dh::dvec<common::compressed_byte_t> gidx_buffer;  // packed bin indices
  common::CompressedIterator<uint32_t> gidx;        // reader over gidx_buffer
  dh::dvec<size_t> row_ptr;                         // shard-local CSR offsets
  // Fill this shard from the host gmat for rows [row_begin, row_end) /
  // elements [element_begin, element_end). gidx_buffer and row_ptr must be
  // pre-allocated by the caller (see ba.allocate in InitData).
  void Init(int device_idx, const common::GHistIndexMatrix& gmat,
            bst_ulong element_begin, bst_ulong element_end, bst_ulong row_begin,
            bst_ulong row_end, int n_bins) {
    dh::safe_cuda(cudaSetDevice(device_idx));
    CHECK(gidx_buffer.size()) << "gidx_buffer must be externally allocated";
    CHECK_EQ(row_ptr.size(), (row_end - row_begin) + 1)
        << "row_ptr must be externally allocated";
    // Compress the shard's bin indices on the host, then upload in one copy.
    common::CompressedBufferWriter cbw(n_bins);
    std::vector<common::compressed_byte_t> host_buffer(gidx_buffer.size());
    cbw.Write(host_buffer.data(), gmat.index.begin() + element_begin,
              gmat.index.begin() + element_end);
    gidx_buffer = host_buffer;
    gidx = common::CompressedIterator<uint32_t>(gidx_buffer.data(), n_bins);
    // row_ptr
    thrust::copy(gmat.row_ptr.data() + row_begin,
                 gmat.row_ptr.data() + row_end + 1, row_ptr.tbegin());
    // normalise row_ptr: subtract the shard's global start offset so the
    // local offsets begin at zero.
    size_t start = gmat.row_ptr[row_begin];
    thrust::transform(row_ptr.tbegin(), row_ptr.tend(), row_ptr.tbegin(),
                      [=] __device__(size_t val) { return val - start; });
  }
};
// Thin device-side view over one histogram: atomic accumulation of
// (grad, hess) pairs into flat [node][bin] slots.
struct HistHelper {
  gpair_sum_t* d_hist;  // flat per-(node, bin) array of integer pairs
  int n_bins;           // bins per node
  __host__ __device__ HistHelper(gpair_sum_t* ptr, int n_bins)
      : d_hist(ptr), n_bins(n_bins) {}
  // Atomically add one gradient pair into bin (nidx, gidx).
  // gpair_sum_t is bst_gpair_integer (64-bit integers; see the typedef and
  // ncclInt64 above). CUDA has no signed 64-bit atomicAdd, so each half is
  // added through an unsigned long long view — two's-complement wraparound
  // makes the result identical to signed addition.
  __device__ void Add(bst_gpair gpair, int gidx, int nidx) const {
    int hist_idx = nidx * n_bins + gidx;
    auto dst_ptr = reinterpret_cast<unsigned long long int*>(&d_hist[hist_idx]); // NOLINT
    gpair_sum_t tmp(gpair.GetGrad(), gpair.GetHess());
    auto src_ptr = reinterpret_cast<gpair_sum_t::value_t*>(&tmp);
    atomicAdd(dst_ptr, static_cast<unsigned long long int>(*src_ptr)); // NOLINT
    atomicAdd(dst_ptr + 1, static_cast<unsigned long long int>(*(src_ptr + 1))); // NOLINT
  }
  // Non-atomic read of bin (nidx, gidx); only safe after kernels writing the
  // histogram have completed.
  __device__ gpair_sum_t Get(int gidx, int nidx) const {
    return d_hist[nidx * n_bins + gidx];
  }
};
// Per-device histogram storage: one gpair_sum_t per (node, bin).
struct DeviceHist {
  int n_bins;  // bins per node
  dh::dvec<gpair_sum_t> data;  // externally allocated via ba.allocate
  // Record the bin count; storage must already exist.
  void Init(int n_bins_in) {
    this->n_bins = n_bins_in;
    CHECK(!data.empty()) << "DeviceHist must be externally allocated";
  }
  // Zero the whole histogram on the given device.
  void Reset(int device_idx) {
    // Wrap in dh::safe_cuda like every other call site — the bare
    // cudaSetDevice here silently discarded its error code.
    dh::safe_cuda(cudaSetDevice(device_idx));
    data.fill(gpair_sum_t());
  }
  HistHelper GetBuilder() { return HistHelper(data.data(), n_bins); }
  // Pointer to the first bin of the first node of `depth`.
  gpair_sum_t* GetLevelPtr(int depth) {
    return data.data() + n_nodes(depth - 1) * n_bins;
  }
  // Number of histogram entries covering all nodes of one level.
  int LevelSize(int depth) { return n_bins * n_nodes_level(depth); }
};
// Best split found so far for one node; updated cooperatively from the
// split-finding kernel.
struct SplitCandidate {
  float loss_chg;     // gain of the best split (-FLT_MAX when none yet)
  bool missing_left;  // route missing values to the left child?
  float fvalue;       // split threshold
  int findex;         // feature index of the split
  gpair_sum_t left_sum;
  gpair_sum_t right_sum;
  __host__ __device__ SplitCandidate()
      : loss_chg(-FLT_MAX), missing_left(true), fvalue(0), findex(-1) {}
  // Replace the candidate only when the new split improves the gain and
  // both children satisfy the minimum hessian (min_child_weight) bound.
  __device__ void Update(float loss_chg_in, bool missing_left_in,
                         float fvalue_in, int findex_in,
                         gpair_sum_t left_sum_in, gpair_sum_t right_sum_in,
                         const GPUTrainingParam& param) {
    const bool improves = loss_chg_in > loss_chg;
    const bool feasible =
        left_sum_in.GetHess() >= param.min_child_weight &&
        right_sum_in.GetHess() >= param.min_child_weight;
    if (!improves || !feasible) {
      return;
    }
    loss_chg = loss_chg_in;
    missing_left = missing_left_in;
    fvalue = fvalue_in;
    findex = findex_in;
    left_sum = left_sum_in;
    right_sum = right_sum_in;
  }
  // Usable only when it strictly improves the loss.
  __device__ bool IsValid() const { return loss_chg > 0.0f; }
};
// Prefix callback for CUB's multi-tile ExclusiveScan: carries the running
// total across successive BLOCK_THREADS-wide tiles of the histogram.
struct GpairCallbackOp {
  gpair_sum_t running_total;  // sum of all previously scanned tiles
  __device__ GpairCallbackOp() : running_total(gpair_sum_t()) {}
  // Invoked by CUB with the current tile's aggregate: returns the prefix to
  // apply to this tile and folds the aggregate into the running total.
  __device__ bst_gpair operator()(bst_gpair block_aggregate) {
    const gpair_sum_t old_prefix = running_total;
    running_total += block_aggregate;
    return old_prefix;
  }
};
// One CUDA block per node of the current level. Each block scans that node's
// histogram feature-by-feature with CUB reductions/scans and records the best
// split directly into d_nodes (plus the two child nodes). Requires
// BLOCK_THREADS >= param.max_bin (enforced by FindSplitSpecialize).
template <int BLOCK_THREADS>
__global__ void find_split_kernel(
    const gpair_sum_t* d_level_hist, int* d_feature_segments, int depth,
    int n_features, int n_bins, DeviceDenseNode* d_nodes,
    int nodes_offset_device, float* d_fidx_min_map, float* d_gidx_fvalue_map,
    GPUTrainingParam gpu_param, bool* d_left_child_smallest_temp,
    bool colsample, int* d_feature_flags) {
  typedef cub::KeyValuePair<int, float> ArgMaxT;
  typedef cub::BlockScan<gpair_sum_t, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>
      BlockScanT;
  typedef cub::BlockReduce<ArgMaxT, BLOCK_THREADS> MaxReduceT;
  typedef cub::BlockReduce<gpair_sum_t, BLOCK_THREADS> SumReduceT;
  // The three CUB algorithms are used at disjoint times, so their scratch
  // space can share one shared-memory union.
  union TempStorage {
    typename BlockScanT::TempStorage scan;
    typename MaxReduceT::TempStorage max_reduce;
    typename SumReduceT::TempStorage sum_reduce;
  };
  __shared__ cub::Uninitialized<SplitCandidate> uninitialized_split;
  SplitCandidate& split = uninitialized_split.Alias();
  __shared__ cub::Uninitialized<gpair_sum_t> uninitialized_sum;
  gpair_sum_t& shared_sum = uninitialized_sum.Alias();
  __shared__ ArgMaxT block_max;
  __shared__ TempStorage temp_storage;
  if (threadIdx.x == 0) {
    split = SplitCandidate();
  }
  __syncthreads();
  // below two are for accessing full-sized node list stored on each device
  // always one block per node, BLOCK_THREADS threads per block
  int level_node_idx = blockIdx.x + nodes_offset_device;
  int node_idx = n_nodes(depth - 1) + level_node_idx;
  for (int fidx = 0; fidx < n_features; fidx++) {
    if (colsample && d_feature_flags[fidx] == 0) continue;
    // [begin, end) delimits this node's histogram bins for feature fidx.
    int begin = d_feature_segments[level_node_idx * n_features + fidx];
    int end = d_feature_segments[level_node_idx * n_features + fidx + 1];
    // Pass 1: total gradient sum of the feature (used to derive the
    // gradient mass of rows missing this feature).
    gpair_sum_t feature_sum = gpair_sum_t();
    for (int reduce_begin = begin; reduce_begin < end;
         reduce_begin += BLOCK_THREADS) {
      bool thread_active = reduce_begin + threadIdx.x < end;
      // Scan histogram
      gpair_sum_t bin = thread_active ? d_level_hist[reduce_begin + threadIdx.x]
                                      : gpair_sum_t();
      feature_sum +=
          SumReduceT(temp_storage.sum_reduce).Reduce(bin, cub::Sum());
    }
    if (threadIdx.x == 0) {
      shared_sum = feature_sum;
    }
    // __syncthreads(); // no need to synch because below there is a Scan
    // NOTE(review): the read of shared_sum below relies on a barrier inside
    // CUB's ExclusiveScan executing before any thread reads it — confirm
    // this holds for the CUB version in use.
    GpairCallbackOp prefix_op = GpairCallbackOp();
    // Pass 2: exclusive prefix scan over the bins; each thread's `bin` then
    // holds the gradient sum strictly left of its split point.
    for (int scan_begin = begin; scan_begin < end;
         scan_begin += BLOCK_THREADS) {
      bool thread_active = scan_begin + threadIdx.x < end;
      gpair_sum_t bin = thread_active ? d_level_hist[scan_begin + threadIdx.x]
                                      : gpair_sum_t();
      BlockScanT(temp_storage.scan)
          .ExclusiveScan(bin, bin, cub::Sum(), prefix_op);
      // Calculate gain
      gpair_sum_t parent_sum = gpair_sum_t(d_nodes[node_idx].sum_gradients);
      float parent_gain = d_nodes[node_idx].root_gain;
      gpair_sum_t missing = parent_sum - shared_sum;
      bool missing_left;
      float gain = thread_active
                       ? loss_chg_missing(bin, missing, parent_sum, parent_gain,
                                          gpu_param, missing_left)
                       : -FLT_MAX;
      __syncthreads();
      // Find thread with best gain
      ArgMaxT tuple(threadIdx.x, gain);
      ArgMaxT best =
          MaxReduceT(temp_storage.max_reduce).Reduce(tuple, cub::ArgMax());
      if (threadIdx.x == 0) {
        block_max = best;
      }
      __syncthreads();
      // Best thread updates split
      if (threadIdx.x == block_max.key) {
        float fvalue;
        int gidx = (scan_begin - (level_node_idx * n_bins)) + threadIdx.x;
        if (threadIdx.x == 0 &&
            begin == scan_begin) {  // check at start of first tile
          // First bin splits at the feature's minimum value.
          fvalue = d_fidx_min_map[fidx];
        } else {
          // Otherwise split at the cut value of the previous bin.
          fvalue = d_gidx_fvalue_map[gidx - 1];
        }
        gpair_sum_t left = missing_left ? bin + missing : bin;
        gpair_sum_t right = parent_sum - left;
        split.Update(gain, missing_left, fvalue, fidx, left, right, gpu_param);
      }
      __syncthreads();
    }  // end scan
  }  // end over features
  // Create node
  if (threadIdx.x == 0 && split.IsValid()) {
    d_nodes[node_idx].SetSplit(split.fvalue, split.findex,
                               split.missing_left ? LeftDir : RightDir);
    DeviceDenseNode& left_child = d_nodes[left_child_nidx(node_idx)];
    DeviceDenseNode& right_child = d_nodes[right_child_nidx(node_idx)];
    bool& left_child_smallest = d_left_child_smallest_temp[node_idx];
    left_child =
        DeviceDenseNode(split.left_sum, left_child_nidx(node_idx), gpu_param);
    right_child =
        DeviceDenseNode(split.right_sum, right_child_nidx(node_idx), gpu_param);
    // Record smallest node (by hessian mass) so BuildHist can apply the
    // subtraction trick on the next level.
    if (split.left_sum.GetHess() <= split.right_sum.GetHess()) {
      left_child_smallest = true;
    } else {
      left_child_smallest = false;
    }
  }
}
class GPUHistMaker : public TreeUpdater {
public:
// All device state (NCCL, streams, buffers) is created lazily in InitData().
GPUHistMaker()
    : initialised(false),
      is_dense(false),
      p_last_fmat_(nullptr),
      prediction_cache_initialised(false) {}
// Tear down the NCCL communicators and per-device streams that InitData()
// created. Nothing to do if InitData() never ran.
~GPUHistMaker() {
  if (initialised) {
    for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
      ncclCommDestroy(comms[d_idx]);
      dh::safe_cuda(cudaSetDevice(dList[d_idx]));
      dh::safe_cuda(cudaStreamDestroy(*(streams[d_idx])));
      // The stream handle itself was malloc'ed in InitData — release it
      // (previously leaked).
      free(streams[d_idx]);
    }
    for (int num_d = 1; num_d <= n_devices;
         ++num_d) {  // loop over number of devices used
      // InitData only initialised the first num_d communicators of each
      // row (ncclCommInitAll(..., num_d, ...)); destroying the remaining
      // default-constructed handles was undefined behaviour.
      for (int d_idx = 0; d_idx < num_d; ++d_idx) {
        ncclCommDestroy(find_split_comms[num_d - 1][d_idx]);
      }
    }
  }
}
// TreeUpdater interface: parse and validate the training parameters.
void Init(
    const std::vector<std::pair<std::string, std::string>>& args) override {
  param.InitAllowUnknown(args);
  CHECK(param.max_depth < 16) << "Tree depth too large.";
  CHECK(param.max_depth != 0) << "Tree depth cannot be 0.";
  CHECK(param.grow_policy != TrainParam::kLossGuide)
      << "Loss guided growth policy not supported. Use CPU algorithm.";
  // (removed `this->param = param;` — a self-assignment with no effect)
  CHECK(param.n_gpus != 0) << "Must have at least one device";
}
// TreeUpdater interface: grow every tree in `trees` on the GPU(s).
void Update(const std::vector<bst_gpair>& gpair, DMatrix* dmat,
            const std::vector<RegTree*>& trees) override {
  GradStats::CheckInfo(dmat->info());
  // Rescale the learning rate so the group of trees acts like a single one,
  // restoring the original value afterwards.
  const float lr = param.learning_rate;
  param.learning_rate = lr / trees.size();
  try {
    for (RegTree* tree : trees) {
      this->UpdateTree(gpair, dmat, tree);
    }
  } catch (const std::exception& e) {
    LOG(FATAL) << "GPU plugin exception: " << e.what() << std::endl;
  }
  param.learning_rate = lr;
}
// Build / refresh all state needed to grow one tree.
// First call only: selects devices, creates NCCL communicators and streams,
// runs the CPU quantile sketch (hmat_/gmat_), shards rows across devices and
// bulk-allocates every device buffer. Every call: resets per-tree state
// (nodes, positions, gradient pairs, histograms) and applies row subsampling.
void InitData(const std::vector<bst_gpair>& gpair, DMatrix& fmat,  // NOLINT
              const RegTree& tree) {
  dh::Timer time1;
  // set member num_rows and n_devices for rest of GPUHistBuilder members
  info = &fmat.info();
  num_rows = info->num_row;
  n_devices = dh::n_devices(param.n_gpus, num_rows);
  if (!initialised) {
    // reset static timers used across iterations
    cpu_init_time = 0;
    gpu_init_time = 0;
    cpu_time.reset();
    gpu_time = 0;
    // set dList member
    dList.resize(n_devices);
    for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
      int device_idx = (param.gpu_id + d_idx) % dh::n_visible_devices();
      dList[d_idx] = device_idx;
    }
    // initialize nccl
    comms.resize(n_devices);
    streams.resize(n_devices);
    dh::safe_nccl(ncclCommInitAll(comms.data(), n_devices,
                                  dList.data()));  // initialize communicator
                                                   // (One communicator per
                                                   // process)
    // printf("# NCCL: Using devices\n");
    for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
      // One stream per device, heap-allocated (freed in the destructor).
      streams[d_idx] =
          reinterpret_cast<cudaStream_t*>(malloc(sizeof(cudaStream_t)));
      dh::safe_cuda(cudaSetDevice(dList[d_idx]));
      dh::safe_cuda(cudaStreamCreate(streams[d_idx]));
      int cudaDev;
      int rank;
      cudaDeviceProp prop;
      dh::safe_nccl(ncclCommCuDevice(comms[d_idx], &cudaDev));
      dh::safe_nccl(ncclCommUserRank(comms[d_idx], &rank));
      dh::safe_cuda(cudaGetDeviceProperties(&prop, cudaDev));
      // printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
      // prop.pciBusID, prop.name);
      // cudaDriverGetVersion(&driverVersion);
      // cudaRuntimeGetVersion(&runtimeVersion);
      // Reject devices below compute capability 3.5.
      std::ostringstream oss;
      oss << "CUDA Capability Major/Minor version number: " << prop.major
          << "." << prop.minor << " is insufficient. Need >=3.5.";
      int failed = prop.major < 3 || prop.major == 3 && prop.minor < 5;
      CHECK(failed == 0) << oss.str();
    }
    // local find_split group of comms for each case of reduced number of
    // GPUs to use
    find_split_comms.resize(
        n_devices,
        std::vector<ncclComm_t>(n_devices));  // TODO(JCM): Excessive, but
                                              // ok, and best to do
                                              // here instead of
                                              // repeatedly
    for (int num_d = 1; num_d <= n_devices;
         ++num_d) {  // loop over number of devices used
      dh::safe_nccl(
          ncclCommInitAll(find_split_comms[num_d - 1].data(), num_d,
                          dList.data()));  // initialize communicator
                                           // (One communicator per
                                           // process)
    }
    is_dense = info->num_nonzero == info->num_col * info->num_row;
    dh::Timer time0;
    // CPU-side quantile sketch: per-feature cut points.
    hmat_.Init(&fmat, param.max_bin);
    cpu_init_time += time0.elapsedSeconds();
    if (param.debug_verbose) {  // Only done once for each training session
      LOG(CONSOLE) << "[GPU Plug-in] CPU Time for hmat_.Init "
                   << time0.elapsedSeconds() << " sec";
      fflush(stdout);
    }
    time0.reset();
    gmat_.cut = &hmat_;
    cpu_init_time += time0.elapsedSeconds();
    if (param.debug_verbose) {  // Only done once for each training session
      LOG(CONSOLE) << "[GPU Plug-in] CPU Time for gmat_.cut "
                   << time0.elapsedSeconds() << " sec";
      fflush(stdout);
    }
    time0.reset();
    // Quantise the input matrix against the cuts.
    gmat_.Init(&fmat);
    cpu_init_time += time0.elapsedSeconds();
    if (param.debug_verbose) {  // Only done once for each training session
      LOG(CONSOLE) << "[GPU Plug-in] CPU Time for gmat_.Init() "
                   << time0.elapsedSeconds() << " sec";
      fflush(stdout);
    }
    time0.reset();
    if (param.debug_verbose) {  // Only done once for each training session
      LOG(CONSOLE)
          << "[GPU Plug-in] CPU Time for hmat_.Init, gmat_.cut, gmat_.Init "
          << cpu_init_time << " sec";
      fflush(stdout);
    }
    int n_bins = hmat_.row_ptr.back();
    int n_features = hmat_.row_ptr.size() - 1;
    // deliniate data onto multiple gpus: near-equal row shards, with element
    // boundaries taken from the CSR row pointer.
    device_row_segments.push_back(0);
    device_element_segments.push_back(0);
    bst_uint offset = 0;
    bst_uint shard_size =
        std::ceil(static_cast<double>(num_rows) / n_devices);
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      offset += shard_size;
      offset = std::min(offset, num_rows);
      device_row_segments.push_back(offset);
      device_element_segments.push_back(gmat_.row_ptr[offset]);
    }
    // Build feature segments: per (node, feature) offsets into a level's
    // histogram, as consumed by find_split_kernel.
    std::vector<int> h_feature_segments;
    for (int node = 0; node < n_nodes_level(param.max_depth - 1); node++) {
      for (int fidx = 0; fidx < n_features; fidx++) {
        h_feature_segments.push_back(hmat_.row_ptr[fidx] + node * n_bins);
      }
    }
    h_feature_segments.push_back(n_nodes_level(param.max_depth - 1) * n_bins);
    // Construct feature map: bin index -> owning feature.
    std::vector<int> h_gidx_feature_map(n_bins);
    for (int fidx = 0; fidx < n_features; fidx++) {
      for (int i = hmat_.row_ptr[fidx]; i < hmat_.row_ptr[fidx + 1]; i++) {
        h_gidx_feature_map[i] = fidx;
      }
    }
    // NOTE(review): level_max_bins is never used below — candidate for
    // removal.
    int level_max_bins = n_nodes_level(param.max_depth - 1) * n_bins;
    // allocate unique common data that reside on master device (NOTE: None
    // currently)
    // int master_device=dList[0];
    // ba.allocate(master_device, );
    // allocate vectors across all devices
    temp_memory.resize(n_devices);
    hist_vec.resize(n_devices);
    nodes.resize(n_devices);
    nodes_temp.resize(n_devices);
    nodes_child_temp.resize(n_devices);
    left_child_smallest.resize(n_devices);
    left_child_smallest_temp.resize(n_devices);
    feature_flags.resize(n_devices);
    fidx_min_map.resize(n_devices);
    feature_segments.resize(n_devices);
    prediction_cache.resize(n_devices);
    position.resize(n_devices);
    position_tmp.resize(n_devices);
    device_matrix.resize(n_devices);
    device_gpair.resize(n_devices);
    gidx_feature_map.resize(n_devices);
    gidx_fvalue_map.resize(n_devices);
    // Largest power of two <= n_devices, capped by the node count of the
    // deepest level; used to size per-device node shards for split finding.
    int find_split_n_devices = std::pow(2, std::floor(std::log2(n_devices)));
    find_split_n_devices =
        std::min(n_nodes_level(param.max_depth), find_split_n_devices);
    int max_num_nodes_device =
        n_nodes_level(param.max_depth) / find_split_n_devices;
    // num_rows_segment: for sharding rows onto gpus for splitting data
    // num_elements_segment: for sharding rows (of elements) onto gpus for
    // splitting data
    // max_num_nodes_device: for sharding nodes onto gpus for split finding
    // All other variables have full copy on gpu, with copy either being
    // identical or just current portion (like for histogram) before
    // AllReduce
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      bst_uint num_rows_segment =
          device_row_segments[d_idx + 1] - device_row_segments[d_idx];
      bst_ulong num_elements_segment =
          device_element_segments[d_idx + 1] - device_element_segments[d_idx];
      // One bulk allocation per device covering every buffer it needs.
      ba.allocate(
          device_idx, param.silent, &(hist_vec[d_idx].data),
          n_nodes(param.max_depth - 1) * n_bins, &nodes[d_idx],
          n_nodes(param.max_depth), &nodes_temp[d_idx], max_num_nodes_device,
          &nodes_child_temp[d_idx], max_num_nodes_device,
          &left_child_smallest[d_idx], n_nodes(param.max_depth),
          &left_child_smallest_temp[d_idx], max_num_nodes_device,
          &feature_flags[d_idx],
          n_features,  // may change but same on all devices
          &fidx_min_map[d_idx],
          hmat_.min_val.size(),  // constant and same on all devices
          &feature_segments[d_idx],
          h_feature_segments.size(),  // constant and same on all devices
          &prediction_cache[d_idx], num_rows_segment, &position[d_idx],
          num_rows_segment, &position_tmp[d_idx], num_rows_segment,
          &device_gpair[d_idx], num_rows_segment,
          &device_matrix[d_idx].gidx_buffer,
          common::CompressedBufferWriter::CalculateBufferSize(
              num_elements_segment,
              n_bins),  // constant and same on all devices
          &device_matrix[d_idx].row_ptr, num_rows_segment + 1,
          &gidx_feature_map[d_idx],
          n_bins,  // constant and same on all devices
          &gidx_fvalue_map[d_idx],
          hmat_.cut.size());  // constant and same on all devices
      // Copy Host to Device (assumes comes after ba.allocate that sets
      // device)
      device_matrix[d_idx].Init(
          device_idx, gmat_, device_element_segments[d_idx],
          device_element_segments[d_idx + 1], device_row_segments[d_idx],
          device_row_segments[d_idx + 1], n_bins);
      gidx_feature_map[d_idx] = h_gidx_feature_map;
      gidx_fvalue_map[d_idx] = hmat_.cut;
      feature_segments[d_idx] = h_feature_segments;
      fidx_min_map[d_idx] = hmat_.min_val;
      // Initialize, no copy
      hist_vec[d_idx].Init(n_bins);     // init host object
      prediction_cache[d_idx].fill(0);  // init device object (assumes comes
                                        // after ba.allocate that sets device)
      feature_flags[d_idx].fill(
          1);  // init device object (assumes comes after
               // ba.allocate that sets device)
    }
  }
  // copy or init to do every iteration: fresh nodes/positions/gradients and
  // a zeroed histogram for the new tree, with optional row subsampling.
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    dh::safe_cuda(cudaSetDevice(device_idx));
    nodes[d_idx].fill(DeviceDenseNode());
    nodes_temp[d_idx].fill(DeviceDenseNode());
    nodes_child_temp[d_idx].fill(DeviceDenseNode());
    position[d_idx].fill(0);
    device_gpair[d_idx].copy(gpair.begin() + device_row_segments[d_idx],
                             gpair.begin() + device_row_segments[d_idx + 1]);
    subsample_gpair(&device_gpair[d_idx], param.subsample,
                    device_row_segments[d_idx]);
    hist_vec[d_idx].Reset(device_idx);
    // left_child_smallest and left_child_smallest_temp don't need to be
    // initialized
  }
  dh::synchronize_n_devices(n_devices, dList);
  if (!initialised) {
    gpu_init_time = time1.elapsedSeconds() - cpu_init_time;
    gpu_time = -cpu_init_time;
    if (param.debug_verbose) {  // Only done once for each training session
      LOG(CONSOLE) << "[GPU Plug-in] Time for GPU operations during First "
                      "Call to InitData() "
                   << gpu_init_time << " sec";
      fflush(stdout);
    }
  }
  p_last_fmat_ = &fmat;
  initialised = true;
}
// Accumulate the gradient histogram for every active node of `depth`:
// each device builds partial histograms from its row shard, the partials are
// summed across devices with NCCL, and the sibling of each built child is
// derived via the parent-minus-sibling subtraction trick.
void BuildHist(int depth) {
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    size_t begin = device_element_segments[d_idx];
    size_t end = device_element_segments[d_idx + 1];
    size_t row_begin = device_row_segments[d_idx];
    size_t row_end = device_row_segments[d_idx + 1];
    auto d_gidx = device_matrix[d_idx].gidx;
    auto d_row_ptr = device_matrix[d_idx].row_ptr.tbegin();
    auto d_position = position[d_idx].data();
    auto d_gpair = device_gpair[d_idx].data();
    auto d_left_child_smallest = left_child_smallest[d_idx].data();
    auto hist_builder = hist_vec[d_idx].GetBuilder();
    // One atomic Add per (element, its row's node).
    dh::TransformLbs(
        device_idx, &temp_memory[d_idx], end - begin, d_row_ptr,
        row_end - row_begin, is_dense,
        [=] __device__(size_t local_idx, int local_ridx) {
          int nidx = d_position[local_ridx];  // OPTMARK: latency
          if (!is_active(nidx, depth)) return;
          // Only increment smallest node
          bool is_smallest = (d_left_child_smallest[parent_nidx(nidx)] &&
                              is_left_child(nidx)) ||
                             (!d_left_child_smallest[parent_nidx(nidx)] &&
                              !is_left_child(nidx));
          if (!is_smallest && depth > 0) return;
          int gidx = d_gidx[local_idx];
          bst_gpair gpair = d_gpair[local_ridx];
          hist_builder.Add(gpair, gidx,
                           nidx);  // OPTMARK: This is slow, could use
                                   // shared memory or cache results
                                   // intead of writing to global
                                   // memory every time in atomic way.
        });
  }
  dh::synchronize_n_devices(n_devices, dList);
  // time.printElapsed("Add Time");
  // (in-place) reduce each element of histogram (for only current level)
  // across multiple gpus
  // TODO(JCM): use out of place with pre-allocated buffer, but then have to
  // copy
  // back on device
  // fprintf(stderr,"sizeof(bst_gpair)/sizeof(float)=%d\n",sizeof(bst_gpair)/sizeof(float));
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    dh::safe_cuda(cudaSetDevice(device_idx));
    // Element count is in int64 units: bytes of the level / bytes per value.
    dh::safe_nccl(ncclAllReduce(
        reinterpret_cast<const void*>(hist_vec[d_idx].GetLevelPtr(depth)),
        reinterpret_cast<void*>(hist_vec[d_idx].GetLevelPtr(depth)),
        hist_vec[d_idx].LevelSize(depth) * sizeof(gpair_sum_t) /
            sizeof(gpair_sum_t::value_t),
        nccl_sum_t, ncclSum, comms[d_idx], *(streams[d_idx])));
  }
  // Wait for every device's allreduce before the histogram is consumed.
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    dh::safe_cuda(cudaSetDevice(device_idx));
    dh::safe_cuda(cudaStreamSynchronize(*(streams[d_idx])));
  }
  // if no NCCL, then presume only 1 GPU, then already correct
  // time.printElapsed("Reduce-Add Time");
  // Subtraction trick (applied to all devices in same way -- to avoid doing
  // on master and then Bcast)
  if (depth > 0) {
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(cudaSetDevice(device_idx));
      auto hist_builder = hist_vec[d_idx].GetBuilder();
      auto d_left_child_smallest = left_child_smallest[d_idx].data();
      int n_sub_bins = (n_nodes_level(depth) / 2) * hist_builder.n_bins;
      // For each sibling pair, fill the larger child's histogram as
      // parent - smaller child.
      dh::launch_n(device_idx, n_sub_bins, [=] __device__(int idx) {
        int nidx = n_nodes(depth - 1) + ((idx / hist_builder.n_bins) * 2);
        bool left_smallest = d_left_child_smallest[parent_nidx(nidx)];
        if (left_smallest) {
          nidx++;  // If left is smallest switch to right child
        }
        int gidx = idx % hist_builder.n_bins;
        gpair_sum_t parent = hist_builder.Get(gidx, parent_nidx(nidx));
        int other_nidx = left_smallest ? nidx - 1 : nidx + 1;
        gpair_sum_t other = hist_builder.Get(gidx, other_nidx);
        gpair_sum_t sub = parent - other;
        hist_builder.Add(
            bst_gpair(sub.GetGrad(), sub.GetHess()), gidx,
            nidx);  // OPTMARK: This is slow, could use shared
                    // memory or cache results intead of writing to
                    // global memory every time in atomic way.
      });
    }
    dh::synchronize_n_devices(n_devices, dList);
  }
}
#define MIN_BLOCK_THREADS 32
#define CHUNK_BLOCK_THREADS 32
// MAX_BLOCK_THREADS of 1024 is hard-coded maximum block size due
// to CUDA capability 35 and above requirement
// for Maximum number of threads per block
#define MAX_BLOCK_THREADS 1024
// Entry point for split finding: start the compile-time specialisation
// chain at the smallest block size; it grows until param.max_bin fits.
void FindSplit(int depth) {
  // Specialised based on max_bins
  this->FindSplitSpecialize(depth, Int<MIN_BLOCK_THREADS>());
}
// Try block size BLOCK_THREADS; if max_bin does not fit, recurse at compile
// time with the next size (in CHUNK_BLOCK_THREADS steps).
template <int BLOCK_THREADS>
void FindSplitSpecialize(int depth, Int<BLOCK_THREADS>) {
  if (param.max_bin <= BLOCK_THREADS) {
    LaunchFindSplit<BLOCK_THREADS>(depth);
  } else {
    this->FindSplitSpecialize(depth,
                              Int<BLOCK_THREADS + CHUNK_BLOCK_THREADS>());
  }
}
// Base case: the largest legal CUDA block size — launch unconditionally.
void FindSplitSpecialize(int depth, Int<MAX_BLOCK_THREADS>) {
  this->LaunchFindSplit<MAX_BLOCK_THREADS>(depth);
}
// Launch find_split_kernel on every device with one block per node of the
// level. Each device evaluates the full level (identical work), so no
// cross-device exchange is needed afterwards.
template <int BLOCK_THREADS>
void LaunchFindSplit(int depth) {
  bool colsample =
      param.colsample_bylevel < 1.0 || param.colsample_bytree < 1.0;
  int num_nodes_device = n_nodes_level(depth);
  const int GRID_SIZE = num_nodes_device;
  // all GPUs do same work
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    dh::safe_cuda(cudaSetDevice(device_idx));
    int nodes_offset_device = 0;
    find_split_kernel<BLOCK_THREADS><<<GRID_SIZE, BLOCK_THREADS>>>(
        hist_vec[d_idx].GetLevelPtr(depth), feature_segments[d_idx].data(),
        depth, (info->num_col), (hmat_.row_ptr.back()), nodes[d_idx].data(),
        nodes_offset_device, fidx_min_map[d_idx].data(),
        gidx_fvalue_map[d_idx].data(), GPUTrainingParam(param),
        left_child_smallest[d_idx].data(), colsample,
        feature_flags[d_idx].data());
    // Kernel launches do not return a status on their own; surface
    // launch-configuration errors immediately.
    dh::safe_cuda(cudaGetLastError());
  }
  // NOTE: No need to synchronize with host as all above pure P2P ops or
  // on-device ops
}
// Compute the global gradient-pair sum and write the root node (node 0)
// identically on every device.
void InitFirstNode(const std::vector<bst_gpair>& gpair) {
  // Perform asynchronous reduction on each gpu
  std::vector<bst_gpair> device_sums(n_devices);
#pragma omp parallel for num_threads(n_devices)
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    dh::safe_cuda(cudaSetDevice(device_idx));
    auto begin = device_gpair[d_idx].tbegin();
    auto end = device_gpair[d_idx].tend();
    bst_gpair init = bst_gpair();
    auto binary_op = thrust::plus<bst_gpair>();
    device_sums[d_idx] = thrust::reduce(begin, end, init, binary_op);
  }
  // Combine the per-device partial sums on the host.
  bst_gpair sum = bst_gpair();
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    sum += device_sums[d_idx];
  }
  // Setup first node so all devices have same first node (here done same on
  // all devices, or could have done one device and Bcast if worried about
  // exact precision issues)
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    auto d_nodes = nodes[d_idx].data();
    auto gpu_param = GPUTrainingParam(param);
    dh::launch_n(device_idx, 1, [=] __device__(int idx) {
      bst_gpair sum_gradients = sum;
      d_nodes[idx] = DeviceDenseNode(sum_gradients, 0, gpu_param);
    });
  }
  // synch all devices to host before moving on (No, can avoid because
  // BuildHist calls another kernel in default stream)
  // dh::synchronize_n_devices(n_devices, dList);
}
// Route the per-row position update to the dense or sparse implementation.
void UpdatePosition(int depth) {
  if (!is_dense) {
    this->UpdatePositionSparse(depth);
    return;
  }
  this->UpdatePositionDense(depth);
}
// Move every row from its current node to the matching child, for dense
// data: each row stores exactly n_columns elements, so the value of the
// split feature can be indexed directly.
void UpdatePositionDense(int depth) {
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    auto d_position = position[d_idx].data();
    DeviceDenseNode* d_nodes = nodes[d_idx].data();
    auto d_gidx_fvalue_map = gidx_fvalue_map[d_idx].data();
    auto d_gidx = device_matrix[d_idx].gidx;
    int n_columns = info->num_col;
    size_t begin = device_row_segments[d_idx];
    size_t end = device_row_segments[d_idx + 1];
    dh::launch_n(device_idx, end - begin, [=] __device__(size_t local_idx) {
      int pos = d_position[local_idx];
      if (!is_active(pos, depth)) {
        return;
      }
      DeviceDenseNode node = d_nodes[pos];
      if (node.IsLeaf()) {
        return;
      }
      // Dense layout: element (row, feature) is at row * n_columns + feature.
      int gidx = d_gidx[local_idx * static_cast<size_t>(n_columns) +
                        static_cast<size_t>(node.fidx)];
      float fvalue = d_gidx_fvalue_map[gidx];
      if (fvalue <= node.fvalue) {
        d_position[local_idx] = left_child_nidx(pos);
      } else {
        d_position[local_idx] = right_child_nidx(pos);
      }
    });
  }
  dh::synchronize_n_devices(n_devices, dList);
  // dh::safe_cuda(cudaDeviceSynchronize());
}
// Move every row from its current node to the matching child, for sparse
// data. Two passes into a double buffer: first every active row follows its
// node's default (missing-value) direction, then rows that actually contain
// the split feature are corrected from the feature value.
void UpdatePositionSparse(int depth) {
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    auto d_position = position[d_idx].data();
    auto d_position_tmp = position_tmp[d_idx].data();
    DeviceDenseNode* d_nodes = nodes[d_idx].data();
    auto d_gidx_feature_map = gidx_feature_map[d_idx].data();
    auto d_gidx_fvalue_map = gidx_fvalue_map[d_idx].data();
    auto d_gidx = device_matrix[d_idx].gidx;
    auto d_row_ptr = device_matrix[d_idx].row_ptr.tbegin();
    size_t row_begin = device_row_segments[d_idx];
    size_t row_end = device_row_segments[d_idx + 1];
    size_t element_begin = device_element_segments[d_idx];
    size_t element_end = device_element_segments[d_idx + 1];
    // Pass 1: default/missing direction per row.
    dh::launch_n(device_idx, row_end - row_begin,
                 [=] __device__(int local_idx) {
                   int pos = d_position[local_idx];
                   if (!is_active(pos, depth)) {
                     d_position_tmp[local_idx] = pos;
                     return;
                   }
                   DeviceDenseNode node = d_nodes[pos];
                   if (node.IsLeaf()) {
                     d_position_tmp[local_idx] = pos;
                     return;
                   } else if (node.dir == LeftDir) {
                     // Use the shared child-index helpers instead of the
                     // hand-written pos * 2 + 1 / + 2, for consistency with
                     // the fvalue pass below and UpdatePositionDense.
                     d_position_tmp[local_idx] = left_child_nidx(pos);
                   } else {
                     d_position_tmp[local_idx] = right_child_nidx(pos);
                   }
                 });
    // Pass 2: rows that contain the split feature override the default.
    // OPTMARK: This kernel is very inefficient for both compute and memory,
    // dominated by memory dependency / access patterns
    dh::TransformLbs(
        device_idx, &temp_memory[d_idx], element_end - element_begin,
        d_row_ptr, row_end - row_begin, is_dense,
        [=] __device__(size_t local_idx, int local_ridx) {
          int pos = d_position[local_ridx];
          if (!is_active(pos, depth)) {
            return;
          }
          DeviceDenseNode node = d_nodes[pos];
          if (node.IsLeaf()) {
            return;
          }
          int gidx = d_gidx[local_idx];
          int findex =
              d_gidx_feature_map[gidx];  // OPTMARK: slowest global
                                         // memory access, maybe setup
                                         // position, gidx, etc. as
                                         // combined structure?
          if (findex == node.fidx) {
            float fvalue = d_gidx_fvalue_map[gidx];
            if (fvalue <= node.fvalue) {
              d_position_tmp[local_ridx] = left_child_nidx(pos);
            } else {
              d_position_tmp[local_ridx] = right_child_nidx(pos);
            }
          }
        });
    // Publish the new positions (device-to-device copy).
    position[d_idx] = position_tmp[d_idx];
  }
  dh::synchronize_n_devices(n_devices, dList);
}
// Draw the per-tree column subset used when colsample_bytree is active.
// No-op when neither column-sampling knob is turned on.
void ColSampleTree() {
  const bool sampling_disabled =
      param.colsample_bylevel == 1.0 && param.colsample_bytree == 1.0;
  if (sampling_disabled) {
    return;
  }
  // Start from the identity list of all columns, then subsample it.
  feature_set_tree.resize(info->num_col);
  std::iota(feature_set_tree.begin(), feature_set_tree.end(), 0);
  feature_set_tree = col_sample(feature_set_tree, param.colsample_bytree);
}
// Draws the per-level column subset (sampled from the per-tree subset) and
// uploads it to every device as a 0/1 flag vector indexed by feature id.
void ColSampleLevel() {
if (param.colsample_bylevel == 1.0 && param.colsample_bytree == 1.0) return;
feature_set_level.resize(feature_set_tree.size());
feature_set_level = col_sample(feature_set_tree, param.colsample_bylevel);
// Build a dense host-side flag array: 1 = feature usable at this level.
std::vector<int> h_feature_flags(info->num_col, 0);
for (auto fidx : feature_set_level) {
h_feature_flags[fidx] = 1;
}
// Copy the flags to each participating device (device selected first so the
// dvec assignment lands on the right GPU).
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(cudaSetDevice(device_idx));
feature_flags[d_idx] = h_feature_flags;
}
dh::synchronize_n_devices(n_devices, dList);
}
// Fast-path prediction update: instead of re-running full prediction, adds
// learning_rate * leaf_weight of the tree just built to each cached row
// prediction. Returns false (caller falls back to full prediction) when no
// tree has been built or `data` is not the matrix the trees were trained on.
bool UpdatePredictionCache(const DMatrix* data,
std::vector<bst_float>* p_out_preds) override {
std::vector<bst_float>& out_preds = *p_out_preds;
if (nodes.empty() || !p_last_fmat_ || data != p_last_fmat_) {
return false;
}
// One-time seeding of the per-device caches from the caller's predictions,
// split by each device's row segment.
if (!prediction_cache_initialised) {
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
// NOTE(review): device_idx is computed but no cudaSetDevice is issued
// here — presumably dvec::copy targets its own device; verify.
int device_idx = dList[d_idx];
size_t row_begin = device_row_segments[d_idx];
size_t row_end = device_row_segments[d_idx + 1];
prediction_cache[d_idx].copy(out_preds.begin() + row_begin,
out_preds.begin() + row_end);
}
prediction_cache_initialised = true;
}
dh::synchronize_n_devices(n_devices, dList);
float eps = param.learning_rate;
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
size_t row_begin = device_row_segments[d_idx];
size_t row_end = device_row_segments[d_idx + 1];
auto d_nodes = nodes[d_idx].data();
auto d_position = position[d_idx].data();
auto d_prediction_cache = prediction_cache[d_idx].data();
// Each row adds the (learning-rate scaled) weight of the node it ended up
// in after the last UpdatePosition pass.
dh::launch_n(device_idx, prediction_cache[d_idx].size(),
[=] __device__(int local_idx) {
int pos = d_position[local_idx];
d_prediction_cache[local_idx] += d_nodes[pos].weight * eps;
});
// Copy the updated device cache back into the caller's host vector.
thrust::copy(prediction_cache[d_idx].tbegin(),
prediction_cache[d_idx].tend(), &out_preds[row_begin]);
}
dh::synchronize_n_devices(n_devices, dList);
return true;
}
// Builds one tree on the GPU(s): data init, root node, per-level column
// sampling / histogram build / split finding / row repartitioning, then
// converts the dense device tree on the master GPU back to the host RegTree.
void UpdateTree(const std::vector<bst_gpair>& gpair, DMatrix* p_fmat,
RegTree* p_tree) {
dh::Timer time0;
this->InitData(gpair, *p_fmat, *p_tree);
this->InitFirstNode(gpair);
this->ColSampleTree();
// Level-by-level (breadth-first) growth up to max_depth.
for (int depth = 0; depth < param.max_depth; depth++) {
this->ColSampleLevel();
this->BuildHist(depth);
this->FindSplit(depth);
this->UpdatePosition(depth);
}
// done with multi-GPU, pass back result from master to tree on host
int master_device = dList[0];
dh::safe_cuda(cudaSetDevice(master_device));
dense2sparse_tree(p_tree, nodes[0], param);
// Accumulate GPU build time and optionally report timing breakdowns.
gpu_time += time0.elapsedSeconds();
if (param.debug_verbose) {
LOG(CONSOLE)
<< "[GPU Plug-in] Cumulative GPU Time excluding initial time "
<< (gpu_time - gpu_init_time) << " sec";
fflush(stdout);
}
if (param.debug_verbose) {
LOG(CONSOLE) << "[GPU Plug-in] Cumulative CPU Time "
<< cpu_time.elapsedSeconds() << " sec";
LOG(CONSOLE)
<< "[GPU Plug-in] Cumulative CPU Time excluding initial time "
<< (cpu_time.elapsedSeconds() - cpu_init_time - gpu_time) << " sec";
fflush(stdout);
}
}
protected:
TrainParam param;
// std::unique_ptr<GPUHistBuilder> builder;
common::HistCutMatrix hmat_;
common::GHistIndexMatrix gmat_;
MetaInfo* info;
bool initialised;
bool is_dense;
const DMatrix* p_last_fmat_;
bool prediction_cache_initialised;
dh::bulk_allocator<dh::memory_type::DEVICE> ba;
std::vector<int> feature_set_tree;
std::vector<int> feature_set_level;
bst_uint num_rows;
int n_devices;
// below vectors are for each devices used
std::vector<int> dList;
std::vector<int> device_row_segments;
std::vector<size_t> device_element_segments;
std::vector<dh::CubMemory> temp_memory;
std::vector<DeviceHist> hist_vec;
std::vector<dh::dvec<DeviceDenseNode>> nodes;
std::vector<dh::dvec<DeviceDenseNode>> nodes_temp;
std::vector<dh::dvec<DeviceDenseNode>> nodes_child_temp;
std::vector<dh::dvec<bool>> left_child_smallest;
std::vector<dh::dvec<bool>> left_child_smallest_temp;
std::vector<dh::dvec<int>> feature_flags;
std::vector<dh::dvec<float>> fidx_min_map;
std::vector<dh::dvec<int>> feature_segments;
std::vector<dh::dvec<bst_float>> prediction_cache;
std::vector<dh::dvec<int>> position;
std::vector<dh::dvec<int>> position_tmp;
std::vector<DeviceGMat> device_matrix;
std::vector<dh::dvec<bst_gpair>> device_gpair;
std::vector<dh::dvec<int>> gidx_feature_map;
std::vector<dh::dvec<float>> gidx_fvalue_map;
std::vector<cudaStream_t*> streams;
std::vector<ncclComm_t> comms;
std::vector<std::vector<ncclComm_t>> find_split_comms;
double cpu_init_time;
double gpu_init_time;
dh::Timer cpu_time;
double gpu_time;
};
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
} // namespace tree
} // namespace xgboost
|
33b7d8f162203b503023a56981f06c6201e4ef8b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Fill kernel: each thread writes the constant k at its own global index.
// NOTE(review): there is no bounds guard — the launch configuration must
// cover exactly gridDim.x * blockDim.x elements of px, or it writes OOB.
__global__ void dev_const(float *px, float k) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
px[tid] = k;
} | 33b7d8f162203b503023a56981f06c6201e4ef8b.cu | #include "includes.h"
// Fill kernel: each thread writes the constant k at its own global index.
// NOTE(review): no bounds guard — caller must launch exactly one thread per
// element of px.
__global__ void dev_const(float *px, float k) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
px[tid] = k;
} |
36edfa1c573e7ddc30ec21c956fc4f018b9a7190.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "copyBiasToOutputs.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated microbenchmark driver: sweeps matrix sizes x block shapes,
// launching copyBiasToOutputs 1 warm-up + 10 + 1000 timed iterations and
// printing [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
// argv[1] = number of matrix sizes to sweep (must be <= 7).
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *ptrbias = NULL;
// NOTE(review): size argument is XSIZE*YSIZE *bytes*, not
// XSIZE*YSIZE*sizeof(float) — likely undersized; and the buffers are
// never freed (leaks once per configuration). Verify against the kernel.
hipMalloc(&ptrbias, XSIZE*YSIZE);
float *ptroutput = NULL;
hipMalloc(&ptroutput, XSIZE*YSIZE);
const int size1 = 1;
const int size2 = 1;
const int nOutputPlane = 1;
const int linestride = 1;
const int imstride = 1;
// Round the problem size up to a multiple of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// Warm-up launch (hipFree(0) forces context creation first).
hipFree(0);hipLaunchKernelGGL((
copyBiasToOutputs), dim3(gridBlock),dim3(threadBlock), 0, 0, ptrbias,ptroutput,size1,size2,nOutputPlane,linestride,imstride);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
copyBiasToOutputs), dim3(gridBlock),dim3(threadBlock), 0, 0, ptrbias,ptroutput,size1,size2,nOutputPlane,linestride,imstride);
}
// Timed region: 1000 launches. NOTE(review): no device synchronize
// before/after the timed loop, so this measures launch overhead plus
// whatever happens to complete — confirm intent.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
copyBiasToOutputs), dim3(gridBlock),dim3(threadBlock), 0, 0, ptrbias,ptroutput,size1,size2,nOutputPlane,linestride,imstride);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 36edfa1c573e7ddc30ec21c956fc4f018b9a7190.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "copyBiasToOutputs.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated microbenchmark driver (CUDA original of the hipified copy
// above): sweeps matrix sizes x block shapes, launching copyBiasToOutputs
// with 1 warm-up, 10 extra warm-up and 1000 timed iterations, printing
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]. argv[1] = matrix count (<= 7).
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *ptrbias = NULL;
// NOTE(review): size is XSIZE*YSIZE bytes, not floats, and the buffers
// are never freed — likely undersized allocation plus a per-config leak.
cudaMalloc(&ptrbias, XSIZE*YSIZE);
float *ptroutput = NULL;
cudaMalloc(&ptroutput, XSIZE*YSIZE);
const int size1 = 1;
const int size2 = 1;
const int nOutputPlane = 1;
const int linestride = 1;
const int imstride = 1;
// Round the problem size up to a multiple of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// cudaFree(0) forces context creation before the warm-up launch.
cudaFree(0);
copyBiasToOutputs<<<gridBlock,threadBlock>>>(ptrbias,ptroutput,size1,size2,nOutputPlane,linestride,imstride);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
copyBiasToOutputs<<<gridBlock,threadBlock>>>(ptrbias,ptroutput,size1,size2,nOutputPlane,linestride,imstride);
}
// Timed region: 1000 launches; no sync around the loop, so this mostly
// measures launch/queueing cost — NOTE(review): confirm intent.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
copyBiasToOutputs<<<gridBlock,threadBlock>>>(ptrbias,ptroutput,size1,size2,nOutputPlane,linestride,imstride);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d32c189c3745e4f899ee3b890e2d3d0684b1d9e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <Windows.h>
#include <iostream>
using namespace std;
// Enumerates all CUDA-capable devices and prints their properties (name,
// compute capability, clock, memory sizes, SM counts, thread/grid limits).
// NOTE(review): the label strings in this hipified copy were stripped to
// empty/garbled literals during conversion; only the numeric values are
// meaningful. The originals (see the CUDA twin below) are Chinese labels.
int main()
{
hipDeviceProp_t prop;
hipError_t cudaStatus;
int count;
cudaStatus = hipGetDeviceCount( &count );
if (cudaStatus != hipSuccess)
{
// NOTE(review): the failing call here is hipGetDeviceCount, but the
// message (kept byte-identical) says "hipMalloc failed".
cout << "hipMalloc failed!" << endl;
}
for (int i=0; i< count; i++)
{
cudaStatus = hipGetDeviceProperties( &prop, i );
if (cudaStatus != hipSuccess)
{
cout << "hipGetDeviceProperties failed!" << endl;
}
// Header for device i, then one line per property.
cout << " --- " << i << " ---" << "\n" << endl;
cout << "" << prop.name << "\n" << endl;
cout << "" << prop.major << "." << prop.minor << "\n" << endl;
cout << "" << prop.clockRate << "\n" << endl;
// deviceOverlap: can copy and execute kernels concurrently.
if (prop.deviceOverlap)
{
cout << "cudaMemory()!" << "\n" << endl;
}
else
{
cout << "cudaMemory()!" << "\n" << endl;
}
// kernelExecTimeoutEnabled: watchdog limit on kernel run time.
if (prop.kernelExecTimeoutEnabled)
{
cout << "!" << "\n" << endl;
}
else
{
cout << "!" << "\n" << endl;
}
cout << "\n" << " ------" << "\n" << endl;
cout << "" << prop.totalGlobalMem << "\n" << endl;
cout << "" << prop.totalConstMem << "\n" << endl;
cout << "" << prop.memPitch << "\n" << endl;
cout << "" << prop.textureAlignment << "\n" << endl;
cout << "\n" << " ------" << "\n" << endl;
cout << "" << prop.multiProcessorCount << "\n" << endl;
cout << "():" << prop.sharedMemPerBlock << "\n" << endl;
cout << "32" << prop.regsPerBlock << "\n" << endl;
cout << "" << prop.warpSize << "\n" << endl;
cout << "" << prop.maxThreadsPerBlock << "\n" << endl;
cout << "(Block)" << "(" << prop.maxThreadsDim[0] << "," << prop.maxThreadsDim[1] << "," << prop.maxThreadsDim[2] << ")" << "\n" << endl;
cout << "(Grid)" << "(" << prop.maxGridSize[0] << "," << prop.maxGridSize[1] << "," << prop.maxGridSize[2] << ")" << "\n" << endl;
}
// Keep the console window open (Windows-only helper).
Sleep(200000);
return 0;
}
| d32c189c3745e4f899ee3b890e2d3d0684b1d9e5.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <Windows.h>
#include <iostream>
using namespace std;
// Enumerates all CUDA-capable devices and prints their properties. The
// user-facing labels are Chinese string literals and are runtime output, so
// they are kept byte-identical; English meaning is given in the comments.
int main()
{
cudaDeviceProp prop;
cudaError_t cudaStatus;
int count;
cudaStatus = cudaGetDeviceCount( &count );
if (cudaStatus != cudaSuccess)
{
// NOTE(review): the failing call is cudaGetDeviceCount, but the message
// (kept as-is) says cudaMalloc failed.
cout << "cudaMalloc failed!" << endl;
}
for (int i=0; i< count; i++)
{
cudaStatus = cudaGetDeviceProperties( &prop, i );
if (cudaStatus != cudaSuccess)
{
cout << "cudaGetDeviceProperties failed!" << endl;
}
// "Device i info" header, then name / compute capability / clock rate.
cout << " --- 第" << i << "个设备信息 ---" << "\n" << endl;
cout << "显卡名字:" << prop.name << "\n" << endl;
cout << "计算能力:" << prop.major << "." << prop.minor << "\n" << endl;
cout << "时钟频率:" << prop.clockRate << "\n" << endl;
// deviceOverlap: whether copy and kernel execution can overlap.
if (prop.deviceOverlap)
{
cout << "是否可以同时执行cudaMemory()调用和一个核函数调用:是!" << "\n" << endl;
}
else
{
cout << "是否可以同时执行cudaMemory()调用和一个核函数调用:否!" << "\n" << endl;
}
// kernelExecTimeoutEnabled: watchdog run-time limit on kernels.
if (prop.kernelExecTimeoutEnabled)
{
cout << "设备上执行的核函数是否存在运行时限制:是!" << "\n" << endl;
}
else
{
cout << "设备上执行的核函数是否存在运行时限制:否!" << "\n" << endl;
}
// Memory section: global mem, constant mem, memPitch, texture alignment.
cout << "\n" << " ---设备内存信息---" << "\n" << endl;
cout << "全局内存总量(字节):" << prop.totalGlobalMem << "\n" << endl;
cout << "常量内存总量(字节):" << prop.totalConstMem << "\n" << endl;
cout << "内存复制中的最大修正值(字节):" << prop.memPitch << "\n" << endl;
cout << "设备的纹理对齐要求:" << prop.textureAlignment << "\n" << endl;
// Multiprocessor section: SM count, shared mem/regs per block, warp size,
// max threads per block, max block and grid dimensions.
cout << "\n" << " ---设备的多处理器信息---" << "\n" << endl;
cout << "流处理器数量:" << prop.multiProcessorCount << "\n" << endl;
cout << "一个线程块可使用的最大共享内存数量(字节):" << prop.sharedMemPerBlock << "\n" << endl;
cout << "一个线程块可使用的32位寄存器数量:" << prop.regsPerBlock << "\n" << endl;
cout << "一个线程束中包含的线程个数:" << prop.warpSize << "\n" << endl;
cout << "一个线程块中包含的最大线程数量:" << prop.maxThreadsPerBlock << "\n" << endl;
cout << "线程块(Block)维数:" << "(" << prop.maxThreadsDim[0] << "," << prop.maxThreadsDim[1] << "," << prop.maxThreadsDim[2] << ")" << "\n" << endl;
cout << "线程格(Grid)维数:" << "(" << prop.maxGridSize[0] << "," << prop.maxGridSize[1] << "," << prop.maxGridSize[2] << ")" << "\n" << endl;
}
// Keep the console window open (Windows-only helper).
Sleep(200000);
return 0;
}
|
63f2070872f1da0d9b91fbc51363be0f3c03cbf2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <rocblas.h>
#include "gpu_pc_v2_func.h"
// For each of `field` fields, generates `rule` strictly increasing random
// thresholds and lays them out in tree[i] as an implicit (array-heap style)
// complete binary search tree, filling levels bottom-up.
// Preconditions implied by the math: rule + 1 is a power of two
// (level = log2(rule+1) must be exact). `int temp[rule]` is a VLA — a
// compiler extension, not standard C++.
void tree_gen(int** tree, int field, int rule){
for(int i = 0; i < field; i++){
// tree[i][0] seeds the sorted sequence; it is overwritten below when the
// sorted values are redistributed into heap order.
tree[i][0] = rand() % 100;
int temp[rule];
temp[0] = tree[i][0];
// temp = sorted thresholds with random positive gaps (1..20).
for (int j = 1; j < rule; j++){
temp[j] = temp[j-1] + rand() % 20 + 1;
}
int temp_index = rule-1, tree_index = rule -1, level = log(rule+1) / log(2);
int step_index = level;
// Walk levels from deepest to root: every pass copies each `step`-th
// sorted value (right to left) into the next free tree slots, which
// presumably realises the BST level order — TODO confirm for all sizes.
while (step_index >= 1){
int step = pow(2, (level - step_index + 1));
while (temp_index >= 0){
tree[i][tree_index] = temp[temp_index];
temp_index -= step;
tree_index--;
}
step_index--;
temp_index = rule - 1 - (pow(2, level - step_index) - 1);
}
}
cout<<"... Tree Gen ..."<<endl;
}
// Generates random packet headers: one value in [0, 6000) per (field, packet)
// pair. The `tree` argument is accepted for interface symmetry with the other
// generators but does not constrain the values produced.
void header_gen(int** headers, int** tree, int field, int packet_num){
    for (int f = 0; f < field; ++f){
        int* row = headers[f];
        for (int p = 0; p < packet_num; ++p){
            row[p] = rand() % 6000;
        }
    }
    cout<<"... Header Gen ..."<<endl;
}
// Fills the rule bit-vector table with random words and resets every
// per-packet result to -1 (all bits set), the neutral value for the AND-merge.
void bv_gen(long int** bv, long int* bv_final, int packet_num){
    for (int word = 0; word < int_count; ++word){
        for (int row = 0; row < FIELD*(RULE+1); ++row){
            bv[row][word] = rand() % 1000000;
        }
    }
    for (int pkt = 0; pkt < packet_num; ++pkt){
        bv_final[pkt] = -1;
    }
    cout<<"... BV Gen ..."<<endl;
}
// Fills the flattened "short" (int-word) bit-vector table with small random
// words and sets every per-packet result to 1.
void bv_gen_short(int* bv, int* bv_final, int packet_num){
    const int total_words = FIELD*(RULE + 1)*int_count;
    for (int idx = 0; idx < total_words; ++idx){
        bv[idx] = rand() % 5;
    }
    for (int pkt = 0; pkt < packet_num; ++pkt){
        bv_final[pkt] = 1;
    }
    cout<<"... BV_Short Gen ..."<<endl;
}
// Debug dump of the generated inputs/outputs. `type` selects what to print,
// bit-mask style (valid range 1..15):
//   type % 2 == 1 -> decision trees
//   type % 4 >= 2 -> packet headers
//   type % 8 >= 4 -> rule bit-vectors
//   type > 7      -> final per-packet results
void data_test(int** tree, int** headers, long int** bv, int* bv_final, int packet_num, int type){
    // Reject out-of-range selectors. Fixed: the original used bitwise `|`
    // between boolean comparisons; logical `||` states the intent and
    // short-circuits. Result is unchanged because the comparisons bind
    // tighter than `|`.
    if (type > 15 || type == 0){
        return;
    }
    if (type % 2 == 1){
        cout<<"Tree: "<<endl;
        for(int i = 0; i < RULE; i++){
            cout<<"Line: "<<i<<": ";
            for(int j = 0; j < FIELD; j++){
                cout<<tree[j][i]<<" ";
            }
            cout<<endl;
        }
    }
    // Equivalent to the original (type % 4 == 2 | type % 4 == 3).
    if (type % 4 >= 2){
        cout<<endl<<"Headers: "<<endl;
        for(int i = 0; i < packet_num; i++){
            cout<<"Header "<<i<<": ";
            for(int j = 0; j < FIELD; j++){
                cout<<headers[j][i]<<" ";
            }
            cout<<endl;
        }
    }
    // Equivalent to the original equality chain over 4|5|6|7.
    if (type % 8 >= 4){
        cout<<endl<<"bv: "<<endl;
        for(int i = 0; i < ALLRULE; i++){
            cout<<"Line "<<i<<": ";
            for (int j = 0; j < FIELD*(RULE+1); j++){
                cout<<bv[j][i]<<" ";
            }
            cout<<endl;
        }
    }
    if (type > 7){
        cout<<endl<<"bv_final: "<<endl;
        for(int i = 0; i < packet_num; i++){
            cout<<bv_final[i]<<" ";
        }
        cout<<endl;
    }
    cout<<"============== End of Print =============="<<endl;
}
// Per-thread packet classification against the implicit binary search trees.
// The whole tree table (FIELD*RULE ints) is staged cooperatively into shared
// memory with a block_dim stride, then each thread descends one tree:
// child = 2i+1 if header <= key else 2i+2 (branchless form). The leaf index
// written out is i - RULE.
// NOTE(review): the tree row is chosen by global_thread_id / packet_num,
// which assumes headers are laid out field-major with exactly packet_num
// entries per field and the launch covers FIELD*packet_num threads — confirm
// against the host-side launch. There is no bounds guard on the output.
__global__ void packet_classify(int* gpu_tree, int* gpu_headers, int* gpu_match_result, int packet_num){
__shared__ int gpu_tree_shared[FIELD*RULE];
int level = 0;
// Cooperative block-strided copy of the tree into shared memory.
while(level * block_dim + threadIdx.x < FIELD * RULE){
gpu_tree_shared[level * block_dim + threadIdx.x] = gpu_tree[level * block_dim + threadIdx.x];
level++;
}
__syncthreads();
int i = 0;
while (i < RULE){
i = 2 * i + (gpu_headers[blockDim.x * blockIdx.x + threadIdx.x] <= gpu_tree_shared[(blockDim.x * blockIdx.x + threadIdx.x) / packet_num * RULE+i]) * 1 + (gpu_headers[blockDim.x * blockIdx.x + threadIdx.x] > gpu_tree_shared[(blockDim.x * blockIdx.x + threadIdx.x) / packet_num * RULE+i]) * 2;
}
gpu_match_result[blockDim.x * blockIdx.x + threadIdx.x] = i - RULE;
}
// Fused classify+merge kernel using int ("short") bit-vectors. Each thread
// owns one packet: it walks the binary decision tree of every field and ANDs
// the matched rule word into a single int result.
//
// Shared staging: threads [0, FIELD*RULE) copy the tree; threads from
// FIELD*RULE up to FIELD*(RULE+1)*int_count copy bit-vector words.
// NOTE(review): that window only covers the first
// FIELD*(RULE+1)*int_count - FIELD*RULE words, and the per-field lookups
// (`index % FIELD * RULE + i`, `gpu_bv_shared[i - RULE]`) do not add a
// per-field offset — verify against the intended data layout. There is also
// no `index < packet_num` guard, so the launch must not overshoot.
__global__ void pc_short(int* gpu_tree, int* gpu_headers, int* gpu_bv, int* gpu_bv_final, int packet_num){
__shared__ int gpu_tree_shared[FIELD*RULE];
__shared__ int gpu_bv_shared[FIELD*(RULE+1)*int_count];
if (threadIdx.x < FIELD * RULE){
gpu_tree_shared[threadIdx.x] = gpu_tree[threadIdx.x];
}
if (threadIdx.x >= FIELD * RULE && threadIdx.x <= FIELD * (RULE + 1) * int_count){
gpu_bv_shared[threadIdx.x - FIELD * RULE] = gpu_bv[threadIdx.x - FIELD * RULE];
}
__syncthreads();
int index = blockDim.x*blockIdx.x + threadIdx.x;
// BUG FIX: `partial_result` was declared __shared__, so every thread in the
// block initialised and AND-ed a single accumulator concurrently without any
// synchronisation — a data race that corrupts results. The accumulator is
// strictly per-packet (per-thread), so it must live in a register.
int partial_result = 0xffffffff;
for (int j = 0; j < FIELD; j++){
int i = 0;
// Branchless BST descent: left child 2i+1 if header <= key, else 2i+2.
while (i < RULE){
i = 2 * i + (gpu_headers[index * FIELD + j] <= gpu_tree_shared[index % FIELD * RULE + i]) * 1 + (gpu_headers[index * FIELD + j] > gpu_tree_shared[index % FIELD * RULE + i]) * 2;
}
partial_result &= gpu_bv_shared[i - RULE];
}
gpu_bv_final[ index ] = partial_result;
}
// Merge kernel, two phases:
//   1) one thread per (packet, word): AND the same word of the 15 matched
//      rule bit-vectors of that packet into gpu_merge_result.
//   2) one thread per packet: AND all int_count words of that packet's merge
//      result into a single value in gpu_bv_final.
// NOTE(review): phase 2 may read words produced by threads of *other* blocks;
// __syncthreads() only synchronises within a block, so this is only safe when
// packet_num * int_count fits in one block — confirm the launch config.
// The field count is hard-coded to 15 to preserve the original behaviour.
__global__ void packet_merge(long int* gpu_bv, int* gpu_match_result, long int* gpu_merge_result, long int*gpu_bv_final, int packet_num){
int index = blockDim.x * blockIdx.x + threadIdx.x;
int packetIdx = index/int_count;
int word = index%int_count;
// Phase 1: fold the 15 per-field matches word-wise.
long int acc = gpu_bv[gpu_match_result[packetIdx*15]*int_count + word];
for (int f = 1; f < 15; f++){
acc &= gpu_bv[gpu_match_result[packetIdx*15 + f]*int_count + word];
}
gpu_merge_result[index] = acc;
__syncthreads();
// Phase 2: fold each packet's words. BUG FIX: the original unrolled
// expression mixed `*int_count` and `%int_count` when indexing
// gpu_merge_result, so most of its 32 terms read words belonging to the
// wrong packet; every term must be packet*int_count + w. The loop also
// honours int_count instead of hard-coding 32 terms.
if (index < packet_num){
long int result = gpu_merge_result[index*int_count];
for (int w = 1; w < int_count; w++){
result &= gpu_merge_result[index*int_count + w];
}
gpu_bv_final[index] = result;
}
}
// Host-side (pthread) merge worker: for each of this thread's BATCH packets,
// AND-combines, word by word, the bit-vectors of the packet's per-field match
// results into merge_result. As written only fields 0..5 are combined — the
// terms for fields 6..14 are commented out, as is an early-exit variant that
// stops once the running AND reaches zero.
void merge(void* foo){
pthread_param_C* param = (pthread_param_C*) foo;
for (int i = 0; i < param->BATCH; i++){
//cout<<"[ Merge ] Thread: "<<param->thread_id<<", header # "<<i<<endl;
for (int j = 0; j < int_count; j++){
/*long int merge_partial = 0xffffffffffffffff;
for (int k = 0; k < FIELD; k++){
merge_partial &= param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + k]][j];
if (merge_partial == 0){
break;
}
}
if (merge_partial != 0){
param->merge_result[(param->thread_id * param->BATCH + i) * int_count + j] = merge_partial;
break;
}*/
param->merge_result[(param->thread_id * param->BATCH + i) * int_count + j] = param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 0]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 1]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 2]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 3]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 4]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 5]][j];/*
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 6]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 7]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 8]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 9]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 10]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 11]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 12]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 13]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 14]][j];*/
}
}
//cout<<"Thread "<<param->thread_id<<" finish!"<<endl;
}
// Stub pthread worker: unpacks its parameter struct but performs no
// partial-merge work yet (param is intentionally unused).
void partial_merge(void* foo){
pthread_param_P* param = (pthread_param_P*) foo;
}
// Stub pthread worker: unpacks its parameter struct but performs no
// final-merge work yet (param is intentionally unused).
void final_merge(void* foo){
pthread_param_F* param = (pthread_param_F*) foo;
}
| 63f2070872f1da0d9b91fbc51363be0f3c03cbf2.cu | #include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <cublas.h>
#include "gpu_pc_v2_func.h"
// CUDA-side twin of tree_gen: per field, generates `rule` strictly increasing
// random thresholds and lays them out in tree[i] as an implicit complete BST
// (array-heap order), filling levels bottom-up. Implied precondition:
// rule + 1 is a power of two. `int temp[rule]` is a VLA (compiler extension).
void tree_gen(int** tree, int field, int rule){
for(int i = 0; i < field; i++){
// Seed value; overwritten when sorted values are redistributed below.
tree[i][0] = rand() % 100;
int temp[rule];
temp[0] = tree[i][0];
// Sorted thresholds with random positive gaps (1..20).
for (int j = 1; j < rule; j++){
temp[j] = temp[j-1] + rand() % 20 + 1;
}
int temp_index = rule-1, tree_index = rule -1, level = log(rule+1) / log(2);
int step_index = level;
// Deepest level first: each pass copies every `step`-th sorted value
// (right to left) into the next free tree slots — presumably realising
// BST level order; TODO confirm for all sizes.
while (step_index >= 1){
int step = pow(2, (level - step_index + 1));
while (temp_index >= 0){
tree[i][tree_index] = temp[temp_index];
temp_index -= step;
tree_index--;
}
step_index--;
temp_index = rule - 1 - (pow(2, level - step_index) - 1);
}
}
cout<<"... Tree Gen ..."<<endl;
}
// Generates random packet headers in [0, 6000) per (field, packet) pair.
// `tree` is accepted for interface symmetry but does not constrain values.
void header_gen(int** headers, int** tree, int field, int packet_num){
for (int i = 0; i < field; i++){
for(int j = 0; j < packet_num; j++){
headers[i][j] = rand() % 6000;
}
}
cout<<"... Header Gen ..."<<endl;
}
// Fills the rule bit-vector table with random words and resets every
// per-packet result to -1 (all bits set), neutral for the AND-merge.
void bv_gen(long int** bv, long int* bv_final, int packet_num){
for (int i = 0; i < int_count; i++){
for (int j = 0; j < FIELD*(RULE+1); j++){
bv[j][i] = rand() % 1000000;
}
}
for(int i = 0; i < packet_num; i++){
bv_final[i] = -1;
}
cout<<"... BV Gen ..."<<endl;
}
// Fills the flattened "short" (int-word) bit-vector table with small random
// words and sets every per-packet result to 1.
void bv_gen_short(int* bv, int* bv_final, int packet_num){
for (int i = 0; i < FIELD*(RULE + 1)*int_count; i++){
bv[i] = rand() % 5;
}
for(int i = 0; i < packet_num; i++){
bv_final[i] = 1;
}
cout<<"... BV_Short Gen ..."<<endl;
}
// Debug dump of the generated inputs/outputs. `type` selects what to print,
// bit-mask style (valid range 1..15):
//   type % 2 == 1 -> decision trees
//   type % 4 >= 2 -> packet headers
//   type % 8 >= 4 -> rule bit-vectors
//   type > 7      -> final per-packet results
void data_test(int** tree, int** headers, long int** bv, int* bv_final, int packet_num, int type){
    // Reject out-of-range selectors. Fixed: the original used bitwise `|`
    // between boolean comparisons; logical `||` states the intent and
    // short-circuits. Result is unchanged because the comparisons bind
    // tighter than `|`.
    if (type > 15 || type == 0){
        return;
    }
    if (type % 2 == 1){
        cout<<"Tree: "<<endl;
        for(int i = 0; i < RULE; i++){
            cout<<"Line: "<<i<<": ";
            for(int j = 0; j < FIELD; j++){
                cout<<tree[j][i]<<" ";
            }
            cout<<endl;
        }
    }
    // Equivalent to the original (type % 4 == 2 | type % 4 == 3).
    if (type % 4 >= 2){
        cout<<endl<<"Headers: "<<endl;
        for(int i = 0; i < packet_num; i++){
            cout<<"Header "<<i<<": ";
            for(int j = 0; j < FIELD; j++){
                cout<<headers[j][i]<<" ";
            }
            cout<<endl;
        }
    }
    // Equivalent to the original equality chain over 4|5|6|7.
    if (type % 8 >= 4){
        cout<<endl<<"bv: "<<endl;
        for(int i = 0; i < ALLRULE; i++){
            cout<<"Line "<<i<<": ";
            for (int j = 0; j < FIELD*(RULE+1); j++){
                cout<<bv[j][i]<<" ";
            }
            cout<<endl;
        }
    }
    if (type > 7){
        cout<<endl<<"bv_final: "<<endl;
        for(int i = 0; i < packet_num; i++){
            cout<<bv_final[i]<<" ";
        }
        cout<<endl;
    }
    cout<<"============== End of Print =============="<<endl;
}
// Per-thread packet classification against the implicit binary search trees.
// The tree table is staged cooperatively into shared memory (block_dim
// stride); each thread then descends one tree branchlessly (child 2i+1 if
// header <= key else 2i+2) and writes the leaf index i - RULE.
// NOTE(review): tree row selection via global_thread_id / packet_num assumes
// field-major header layout with packet_num entries per field and a launch of
// exactly FIELD*packet_num threads — confirm. No bounds guard on the output.
__global__ void packet_classify(int* gpu_tree, int* gpu_headers, int* gpu_match_result, int packet_num){
__shared__ int gpu_tree_shared[FIELD*RULE];
int level = 0;
// Cooperative block-strided copy of the tree into shared memory.
while(level * block_dim + threadIdx.x < FIELD * RULE){
gpu_tree_shared[level * block_dim + threadIdx.x] = gpu_tree[level * block_dim + threadIdx.x];
level++;
}
__syncthreads();
int i = 0;
while (i < RULE){
i = 2 * i + (gpu_headers[blockDim.x * blockIdx.x + threadIdx.x] <= gpu_tree_shared[(blockDim.x * blockIdx.x + threadIdx.x) / packet_num * RULE+i]) * 1 + (gpu_headers[blockDim.x * blockIdx.x + threadIdx.x] > gpu_tree_shared[(blockDim.x * blockIdx.x + threadIdx.x) / packet_num * RULE+i]) * 2;
}
gpu_match_result[blockDim.x * blockIdx.x + threadIdx.x] = i - RULE;
}
// Fused classify+merge kernel using int ("short") bit-vectors. Each thread
// owns one packet: it walks the binary decision tree of every field and ANDs
// the matched rule word into a single int result.
//
// Shared staging: threads [0, FIELD*RULE) copy the tree; threads from
// FIELD*RULE up to FIELD*(RULE+1)*int_count copy bit-vector words.
// NOTE(review): that window only covers the first
// FIELD*(RULE+1)*int_count - FIELD*RULE words, and the per-field lookups
// (`index % FIELD * RULE + i`, `gpu_bv_shared[i - RULE]`) do not add a
// per-field offset — verify against the intended data layout. There is also
// no `index < packet_num` guard, so the launch must not overshoot.
__global__ void pc_short(int* gpu_tree, int* gpu_headers, int* gpu_bv, int* gpu_bv_final, int packet_num){
__shared__ int gpu_tree_shared[FIELD*RULE];
__shared__ int gpu_bv_shared[FIELD*(RULE+1)*int_count];
if (threadIdx.x < FIELD * RULE){
gpu_tree_shared[threadIdx.x] = gpu_tree[threadIdx.x];
}
if (threadIdx.x >= FIELD * RULE && threadIdx.x <= FIELD * (RULE + 1) * int_count){
gpu_bv_shared[threadIdx.x - FIELD * RULE] = gpu_bv[threadIdx.x - FIELD * RULE];
}
__syncthreads();
int index = blockDim.x*blockIdx.x + threadIdx.x;
// BUG FIX: `partial_result` was declared __shared__, so every thread in the
// block initialised and AND-ed a single accumulator concurrently without any
// synchronisation — a data race that corrupts results. The accumulator is
// strictly per-packet (per-thread), so it must live in a register.
int partial_result = 0xffffffff;
for (int j = 0; j < FIELD; j++){
int i = 0;
// Branchless BST descent: left child 2i+1 if header <= key, else 2i+2.
while (i < RULE){
i = 2 * i + (gpu_headers[index * FIELD + j] <= gpu_tree_shared[index % FIELD * RULE + i]) * 1 + (gpu_headers[index * FIELD + j] > gpu_tree_shared[index % FIELD * RULE + i]) * 2;
}
partial_result &= gpu_bv_shared[i - RULE];
}
gpu_bv_final[ index ] = partial_result;
}
// Merge kernel, two phases:
//   1) one thread per (packet, word): AND the same word of the 15 matched
//      rule bit-vectors of that packet into gpu_merge_result.
//   2) one thread per packet: AND all int_count words of that packet's merge
//      result into a single value in gpu_bv_final.
// NOTE(review): phase 2 may read words produced by threads of *other* blocks;
// __syncthreads() only synchronises within a block, so this is only safe when
// packet_num * int_count fits in one block — confirm the launch config.
// The field count is hard-coded to 15 to preserve the original behaviour.
__global__ void packet_merge(long int* gpu_bv, int* gpu_match_result, long int* gpu_merge_result, long int*gpu_bv_final, int packet_num){
int index = blockDim.x * blockIdx.x + threadIdx.x;
int packetIdx = index/int_count;
int word = index%int_count;
// Phase 1: fold the 15 per-field matches word-wise.
long int acc = gpu_bv[gpu_match_result[packetIdx*15]*int_count + word];
for (int f = 1; f < 15; f++){
acc &= gpu_bv[gpu_match_result[packetIdx*15 + f]*int_count + word];
}
gpu_merge_result[index] = acc;
__syncthreads();
// Phase 2: fold each packet's words. BUG FIX: the original unrolled
// expression mixed `*int_count` and `%int_count` when indexing
// gpu_merge_result, so most of its 32 terms read words belonging to the
// wrong packet; every term must be packet*int_count + w. The loop also
// honours int_count instead of hard-coding 32 terms.
if (index < packet_num){
long int result = gpu_merge_result[index*int_count];
for (int w = 1; w < int_count; w++){
result &= gpu_merge_result[index*int_count + w];
}
gpu_bv_final[index] = result;
}
}
// Worker routine: for every header in this thread's batch, intersect the
// six matched bitmaps selected by match_result, word by word, and store the
// result in merge_result. `foo` carries the pthread_param_C payload.
void merge(void* foo){
    pthread_param_C* param = (pthread_param_C*) foo;
    for (int i = 0; i < param->BATCH; i++){
        // Row of match_result / merge_result owned by this header.
        const int header = param->thread_id * param->BATCH + i;
        for (int j = 0; j < int_count; j++){
            // AND the j-th word of the first six matched source bitmaps.
            long int acc = param->merge_source[param->match_result[header * FIELD + 0]][j];
            for (int f = 1; f < 6; f++){
                acc &= param->merge_source[param->match_result[header * FIELD + f]][j];
            }
            param->merge_result[header * int_count + j] = acc;
        }
    }
}
// Stub pthread work item for the partial-merge stage; not implemented yet.
// `foo` will carry a pthread_param_P* payload once the stage is written.
// (The old body declared an unused local, which only produced a compiler
// warning.)
void partial_merge(void* foo){
    (void)foo; // intentionally unused until this stage is implemented
}
// Stub pthread work item for the final-merge stage; not implemented yet.
// `foo` will carry a pthread_param_F* payload once the stage is written.
// (The old body declared an unused local, which only produced a compiler
// warning.)
void final_merge(void* foo){
    (void)foo; // intentionally unused until this stage is implemented
}
|
680105522dd386b771862bf0727cb922d2bb783d.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <vector>
#include <numeric>
#include "cuda_error_check.h"
#include "implementation.h"
#include "utils.h"
#include "sequential.cpp"
#include <uchar.h>
using namespace std;
int main(int argc, char** argv){
try {
//declare and initialize variabls
string usage =
"\tCommand line arguments:\n\
Input file: E.g., --input in.txt\n\
Output path: E.g., --output out.txt\n\
Block size: E.g., --bsize 1024\n\
Block count: E.g., --bcount 2\n\
Method: E.g., --method 1 or 2\n";
string inputFileName;
string outputFileName;
ifstream inputFile;
ofstream outputFile;
int bsize = 0, bcount = 0;
int method = 0;
int deviceID = 0;
hipDeviceProp_t deviceProp;
char* deviceName = NULL;
// int numberOfChildren = 1009; // use a large prime number
cout<<"testing number of children"<<NUM_CHILDREN<<endl;
//check that CUDA is supported and get the name of the device
CUDAErrorCheck(hipSetDevice(deviceID));
CUDAErrorCheck(hipGetDeviceProperties(&deviceProp, deviceID));
deviceName = deviceProp.name;
//parse program arguments
for( int i = 1; i < argc; i++ ){
if ( !strcmp(argv[i], "--input") && i != argc-1 ) {
inputFileName = string(argv[i+1]);
inputFile.open(inputFileName.c_str());
} else if( !strcmp(argv[i], "--output") && i != argc-1 ) {
outputFileName = string(argv[i+1]);
outputFile.open(outputFileName.c_str());
} else if( !strcmp(argv[i], "--bsize") && i != argc-1 ) {
bsize = atoi( argv[i+1] );
} else if( !strcmp(argv[i], "--bcount") && i != argc-1 ) {
bcount = atoi( argv[i+1] );
} else if( !strcmp(argv[i], "--method") && i != argc-1 ) {
method = atoi( argv[i+1] );
}
}
//verify program arguments
if(!inputFile){
throw runtime_error("Failed to open specified file: " + inputFileName);
}
if(!outputFile){
throw runtime_error("Failed to open specified file: " + outputFileName);
}
if(!inputFile.is_open() || !outputFile.is_open()){
cerr << "Usage: " << usage;
throw runtime_error("Initialization error happened: input/output file");
}
if(bsize <= 0 || bcount <= 0){
cerr << "Usage: " << usage;
throw runtime_error("Initialization error happened: block size/count");
}
if(method == 0){
cerr << "Usage: " << usage;
throw runtime_error("Initialization error happened: method");
}
//parse input file
vector<string> strings = parseFile(inputFile);
inputFile.close();
char16_t* text; //all the strings concatenated into a single string
int* indices; //the starting index of each string
int* suffixes; //the starting index of each suffix
int totalLength; //length of text (includes term sequence)
int numStrings; //number of strings
int numSuffixes; //number of suffixes
//parseStrings(strings,text,indices,suffixes,totalLength,numStrings,numSuffixes);
preParseStrings(strings,text,indices,
suffixes,totalLength,numStrings,numSuffixes,NUM_CHILDREN);
//print program properties
cout << "Device: " << deviceName;
cout << ", bsize: " << bsize << ", bcount: " << bcount;
cout << ", method: " << method << endl;
cout << "Input file: " << inputFileName;
cout << ", Number of strings: " << numStrings;
cout << ", Number of suffixes: " << numSuffixes;
cout << ", total length: " << totalLength << endl;
cout << "text is: ";
for(int i=0;i<totalLength;i++){
cout<<text[i]<<" ";
}
// test char16_t
// char16_t* testChar;
// testChar = (char16_t*)malloc(2+1);
// char16_t* testChar = (char16_t*)malloc(2+1);
// testChar[0] = 999;
// testChar[1] = 'a';
// cout << ", first test char is: " <<testChar[0] <<", second test char is: " <<testChar[1] << endl;
// int lengthNewText = totalLength/numberOfChildren;
// cout<<"length of text is: "<< length<<endl;
CUDAErrorCheck(hipDeviceSetLimit(hipLimitMallocHeapSize, 1000000000));
CUDAErrorCheck(hipDeviceSetLimit(hipLimitStackSize, 50000));
size_t limit = 0;
hipDeviceGetLimit(&limit, hipLimitMallocHeapSize);
printf("hipLimitMallocHeapSize: %u\n", (unsigned)limit);
hipDeviceGetLimit(&limit, hipLimitStackSize);
printf("hipLimitStackSize: %u\n", (unsigned)limit);
print_seq_runtime(text);
char16_t* output = NULL;
//process method
switch(method){
case 1:
// output = impl1(text,
// indices,
// totalLength,
// numStrings,
// bsize, bcount);
// cout << "text is:"<<text<<"\n";
// cout << "indices 2 is:"<<indices[1]<<"\n";
//// cout << "text is:"<<text;
break;
case 2:
output = impl2(text,
indices,
suffixes,
totalLength,
numStrings,
numSuffixes,
bsize, bcount);
break;
default:
cout << "Method " << method << " does not exist. Try method 1 or 2.\n";
break;
}
saveResults(outputFile, output);
outputFile.close();
//clean program memory
free(text);
free(indices);
free(suffixes);
CUDAErrorCheck(hipDeviceReset());
} catch(const exception& e){
cerr << e.what() << endl;
return EXIT_FAILURE;
} catch(...) {
cerr << "An exception has occurred." << endl;
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
| 680105522dd386b771862bf0727cb922d2bb783d.cu | #include <iostream>
#include <fstream>
#include <vector>
#include <numeric>
#include "cuda_error_check.h"
#include "implementation.h"
#include "utils.h"
#include "sequential.cpp"
#include <uchar.h>
using namespace std;
int main(int argc, char** argv){
try {
//declare and initialize variabls
string usage =
"\tCommand line arguments:\n\
Input file: E.g., --input in.txt\n\
Output path: E.g., --output out.txt\n\
Block size: E.g., --bsize 1024\n\
Block count: E.g., --bcount 2\n\
Method: E.g., --method 1 or 2\n";
string inputFileName;
string outputFileName;
ifstream inputFile;
ofstream outputFile;
int bsize = 0, bcount = 0;
int method = 0;
int deviceID = 0;
cudaDeviceProp deviceProp;
char* deviceName = NULL;
// int numberOfChildren = 1009; // use a large prime number
cout<<"testing number of children"<<NUM_CHILDREN<<endl;
//check that CUDA is supported and get the name of the device
CUDAErrorCheck(cudaSetDevice(deviceID));
CUDAErrorCheck(cudaGetDeviceProperties(&deviceProp, deviceID));
deviceName = deviceProp.name;
//parse program arguments
for( int i = 1; i < argc; i++ ){
if ( !strcmp(argv[i], "--input") && i != argc-1 ) {
inputFileName = string(argv[i+1]);
inputFile.open(inputFileName.c_str());
} else if( !strcmp(argv[i], "--output") && i != argc-1 ) {
outputFileName = string(argv[i+1]);
outputFile.open(outputFileName.c_str());
} else if( !strcmp(argv[i], "--bsize") && i != argc-1 ) {
bsize = atoi( argv[i+1] );
} else if( !strcmp(argv[i], "--bcount") && i != argc-1 ) {
bcount = atoi( argv[i+1] );
} else if( !strcmp(argv[i], "--method") && i != argc-1 ) {
method = atoi( argv[i+1] );
}
}
//verify program arguments
if(!inputFile){
throw runtime_error("Failed to open specified file: " + inputFileName);
}
if(!outputFile){
throw runtime_error("Failed to open specified file: " + outputFileName);
}
if(!inputFile.is_open() || !outputFile.is_open()){
cerr << "Usage: " << usage;
throw runtime_error("Initialization error happened: input/output file");
}
if(bsize <= 0 || bcount <= 0){
cerr << "Usage: " << usage;
throw runtime_error("Initialization error happened: block size/count");
}
if(method == 0){
cerr << "Usage: " << usage;
throw runtime_error("Initialization error happened: method");
}
//parse input file
vector<string> strings = parseFile(inputFile);
inputFile.close();
char16_t* text; //all the strings concatenated into a single string
int* indices; //the starting index of each string
int* suffixes; //the starting index of each suffix
int totalLength; //length of text (includes term sequence)
int numStrings; //number of strings
int numSuffixes; //number of suffixes
//parseStrings(strings,text,indices,suffixes,totalLength,numStrings,numSuffixes);
preParseStrings(strings,text,indices,
suffixes,totalLength,numStrings,numSuffixes,NUM_CHILDREN);
//print program properties
cout << "Device: " << deviceName;
cout << ", bsize: " << bsize << ", bcount: " << bcount;
cout << ", method: " << method << endl;
cout << "Input file: " << inputFileName;
cout << ", Number of strings: " << numStrings;
cout << ", Number of suffixes: " << numSuffixes;
cout << ", total length: " << totalLength << endl;
cout << "text is: ";
for(int i=0;i<totalLength;i++){
cout<<text[i]<<" ";
}
// test char16_t
// char16_t* testChar;
// testChar = (char16_t*)malloc(2+1);
// char16_t* testChar = (char16_t*)malloc(2+1);
// testChar[0] = 999;
// testChar[1] = 'a';
// cout << ", first test char is: " <<testChar[0] <<", second test char is: " <<testChar[1] << endl;
// int lengthNewText = totalLength/numberOfChildren;
// cout<<"length of text is: "<< length<<endl;
CUDAErrorCheck(cudaDeviceSetLimit(cudaLimitMallocHeapSize, 1000000000));
CUDAErrorCheck(cudaDeviceSetLimit(cudaLimitStackSize, 50000));
size_t limit = 0;
cudaDeviceGetLimit(&limit, cudaLimitMallocHeapSize);
printf("cudaLimitMallocHeapSize: %u\n", (unsigned)limit);
cudaDeviceGetLimit(&limit, cudaLimitStackSize);
printf("cudaLimitStackSize: %u\n", (unsigned)limit);
print_seq_runtime(text);
char16_t* output = NULL;
//process method
switch(method){
case 1:
// output = impl1(text,
// indices,
// totalLength,
// numStrings,
// bsize, bcount);
// cout << "text is:"<<text<<"\n";
// cout << "indices 2 is:"<<indices[1]<<"\n";
//// cout << "text is:"<<text;
break;
case 2:
output = impl2(text,
indices,
suffixes,
totalLength,
numStrings,
numSuffixes,
bsize, bcount);
break;
default:
cout << "Method " << method << " does not exist. Try method 1 or 2.\n";
break;
}
saveResults(outputFile, output);
outputFile.close();
//clean program memory
free(text);
free(indices);
free(suffixes);
CUDAErrorCheck(cudaDeviceReset());
} catch(const exception& e){
cerr << e.what() << endl;
return EXIT_FAILURE;
} catch(...) {
cerr << "An exception has occurred." << endl;
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
|
294992e7ad32253b4530c4fb2acf9647b62915a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "image.h"
#ifdef CUDA_ENABLED
// Zero every pixel of an nx-by-ny image stored row-major in `pixels`.
// Launched on a 2D grid; threads that fall outside the image do nothing.
CUDA_GLOBAL void cudaResetImageKernel(vec3* pixels, int nx, int ny)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < nx && row < ny)
    {
        pixels[row * nx + col] = vec3(0.0f, 0.0f, 0.0f);
    }
}
#endif // CUDA_ENABLED
#ifdef CUDA_ENABLED
// Clear the image's device pixel buffer by launching cudaResetImageKernel
// over the whole nx-by-ny grid with tx-by-ty threads per block.
void Image::cudaResetImage()
{
    // Ceil-divide so the grid covers the image exactly; the old nx / tx + 1
    // always launched one redundant block row/column whenever nx, ny were
    // multiples of the block size (harmless thanks to the kernel's bounds
    // check, but wasteful).
    dim3 blocks((nx + tx - 1) / tx, (ny + ty - 1) / ty);
    dim3 threads(tx, ty);
    cudaResetImageKernel << <blocks, threads >> > (pixels, nx, ny);
    checkCudaErrors(hipGetLastError());
    checkCudaErrors(hipDeviceSynchronize());
}
#endif // CUDA_ENABLED
| 294992e7ad32253b4530c4fb2acf9647b62915a4.cu | #include "image.h"
#ifdef CUDA_ENABLED
// Zero every pixel of an nx-by-ny image stored row-major in `pixels`.
// Launched on a 2D grid; threads that fall outside the image do nothing.
CUDA_GLOBAL void cudaResetImageKernel(vec3* pixels, int nx, int ny)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < nx && row < ny)
    {
        pixels[row * nx + col] = vec3(0.0f, 0.0f, 0.0f);
    }
}
#endif // CUDA_ENABLED
#ifdef CUDA_ENABLED
// Clear the image's device pixel buffer by launching cudaResetImageKernel
// over the whole nx-by-ny grid with tx-by-ty threads per block.
void Image::cudaResetImage()
{
    // Ceil-divide so the grid covers the image exactly; the old nx / tx + 1
    // always launched one redundant block row/column whenever nx, ny were
    // multiples of the block size (harmless thanks to the kernel's bounds
    // check, but wasteful).
    dim3 blocks((nx + tx - 1) / tx, (ny + ty - 1) / ty);
    dim3 threads(tx, ty);
    cudaResetImageKernel << <blocks, threads >> > (pixels, nx, ny);
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaDeviceSynchronize());
}
#endif // CUDA_ENABLED
|
60a43d5f8df5526aa3638cdebf8d9c278e0604a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* calcPressureWS.cu
*
* Created on: 17-12-2013
* Author: Kamil Szewc
*/
#include "../../sph.h"
#include "../../hlp.h"
// Equation-of-state pressure update: for every particle,
//   p = b * ((d / (di * o))^gamma - 1)
// computed with a grid-stride loop over all par->N particles.
__global__ void calcPressureWSDP(Particle *p, Parameters *par)
{
	const unsigned int stride = blockDim.x * gridDim.x;
	for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < par->N; i += stride) {
		p[i].p = p[i].b * (pow(p[i].d / (p[i].di * p[i].o), p[i].gamma) - 1.0);
	}
}
| 60a43d5f8df5526aa3638cdebf8d9c278e0604a2.cu | /*
* calcPressureWS.cu
*
* Created on: 17-12-2013
* Author: Kamil Szewc
*/
#include "../../sph.h"
#include "../../hlp.h"
// Equation-of-state pressure update: for every particle,
//   p = b * ((d / (di * o))^gamma - 1)
// computed with a grid-stride loop over all par->N particles.
__global__ void calcPressureWSDP(Particle *p, Parameters *par)
{
	const unsigned int stride = blockDim.x * gridDim.x;
	for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < par->N; i += stride) {
		p[i].p = p[i].b * (pow(p[i].d / (p[i].di * p[i].o), p[i].gamma) - 1.0);
	}
}
|
0f395c6a0d6dda287bbddfe4f86603af082ba577.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/layer.hpp"
#include "caffe/layers/flex_lstm_layer.hpp"
namespace caffe {
// Decompose a flat thread index into (batch_idx, grid, i, j, k) coordinates
// within a batch of `ntypes` subgrids of side `subgrid_dim`. The layout is
// batch-major:
//   tidx = (((batch_idx*ntypes + grid)*subgrid_dim + i)*subgrid_dim + j)
//          * subgrid_dim + k
__device__ void flat_idx_to_subgrid(unsigned tidx, unsigned subgrid_dim,
    unsigned batch_size, unsigned ntypes, unsigned& i, unsigned& j, unsigned& k,
    unsigned& grid, unsigned& batch_idx) {
  unsigned rem = tidx;
  k = rem % subgrid_dim;
  rem /= subgrid_dim;
  j = rem % subgrid_dim;
  rem /= subgrid_dim;
  i = rem % subgrid_dim;
  rem /= subgrid_dim;
  grid = rem % ntypes;
  rem /= ntypes;
  batch_idx = rem % batch_size;
}
// Forward kernel for the strided-cube access pattern: copy the subcube that
// belongs to `current_timestep` out of the full dim^3 per-type grids (src)
// into the compact per-timestep blob (dest). One thread per subcube element
// across the whole batch.
template <typename Dtype>
__global__ void LSTMFlexForward(const int nthreads, const Dtype* src, Dtype* dest,
AccessPattern pattern, unsigned batch_size, unsigned ntypes, unsigned
subgrid_dim, unsigned dim, unsigned current_timestep, unsigned cube_stride,
unsigned example_size) {
//strided cube version:
//use the current_timestep to find the location of the first value in
//the subcube we're going to use at this timestep; this is our starting
//offset
unsigned overall_size = dim * dim * dim;
// factor = number of subcube positions along each axis; the timestep index
// is decoded base-`factor` into the (x, y, z) origin of the subcube.
unsigned factor = (((dim - subgrid_dim) / cube_stride) + 1);
unsigned x_offset = ((current_timestep / (factor * factor)) % factor) * cube_stride;
unsigned y_offset = ((current_timestep / factor) % factor) * cube_stride;
unsigned z_offset = (current_timestep % factor) * cube_stride;
unsigned subgrid_count = batch_size * ntypes * subgrid_dim * subgrid_dim * subgrid_dim;
CUDA_KERNEL_LOOP(tidx, subgrid_count) {
//where in the grid is this index?
unsigned i;
unsigned j;
unsigned k;
unsigned grid;
unsigned batch_idx;
flat_idx_to_subgrid(tidx, subgrid_dim, batch_size, ntypes, i, j, k,
grid, batch_idx);
//what overall index does that correspond to?
// Flat index within the compact subcube blob...
unsigned subgrid_idx = (((batch_idx * ntypes + grid) * subgrid_dim + i) *
subgrid_dim + j) * subgrid_dim + k;
// ...and the matching index within the full input blob (offset by the
// subcube origin for this timestep).
unsigned overall_idx = batch_idx * example_size + grid * overall_size +
x_offset * dim * dim + y_offset * dim + z_offset +
((i * dim) + j) * dim + k;
dest[subgrid_idx] = src[overall_idx];
}
}
// Backward kernel for the strided-cube access pattern. For each element of
// the current timestep's subcube this thread:
//   1) scatter-adds the per-timestep diff (partial_diff) into total_diff at
//      the matching position of the full dim^3 input, and
//   2) if there is a previous timestep, rewrites dest with the input value
//      from the *previous* subcube, so the layers that run earlier in the
//      backward pass see the input they were originally given.
// NOTE(review): atomicAdd on double requires compute capability 6.0+ —
// confirm Dtype/architecture support.
template <typename Dtype>
__global__ void LSTMFlexBackward(const int nthreads, const Dtype* src, Dtype* dest,
    Dtype* total_diff, const Dtype* partial_diff, AccessPattern pattern, unsigned batch_size,
    unsigned ntypes, unsigned subgrid_dim, unsigned dim, unsigned current_timestep,
    unsigned cube_stride, unsigned example_size) {
  unsigned overall_size = dim * dim * dim;
  // Subcube origin for the current timestep (used to accumulate diff);
  // factor is the number of subcube positions along each axis.
  unsigned factor = (((dim - subgrid_dim) / cube_stride) + 1);
  unsigned x_offset = ((current_timestep / (factor * factor)) % factor) * cube_stride;
  unsigned y_offset = ((current_timestep / factor) % factor) * cube_stride;
  unsigned z_offset = (current_timestep % factor) * cube_stride;
  // Subcube origin for the previous timestep (only valid when
  // current_timestep > 0).
  unsigned x_offset_prev;
  unsigned y_offset_prev;
  unsigned z_offset_prev;
  if (current_timestep > 0) {
    // BUGFIX: the old code set each *_prev = *_offset - cube_stride, which
    // is only correct when all three coordinates change between consecutive
    // timesteps, and underflowed the unsigned offset whenever an axis was
    // already 0. Decode timestep current_timestep-1 with the same base-
    // `factor` formula used above (matching LSTMFlexForward).
    unsigned prev_timestep = current_timestep - 1;
    x_offset_prev = ((prev_timestep / (factor * factor)) % factor) * cube_stride;
    y_offset_prev = ((prev_timestep / factor) % factor) * cube_stride;
    z_offset_prev = (prev_timestep % factor) * cube_stride;
  }
  unsigned subgrid_count = batch_size * ntypes * subgrid_dim * subgrid_dim * subgrid_dim;
  CUDA_KERNEL_LOOP(tidx, subgrid_count) {
    // Decompose the flat index into subgrid coordinates.
    unsigned i;
    unsigned j;
    unsigned k;
    unsigned grid;
    unsigned batch_idx;
    flat_idx_to_subgrid(tidx, subgrid_dim, batch_size, ntypes, i, j, k,
        grid, batch_idx);
    // Flat index within the compact subcube blob...
    unsigned subgrid_idx = (((batch_idx * ntypes + grid) * subgrid_dim + i) *
        subgrid_dim + j) * subgrid_dim + k;
    // ...and the matching index within the full input blob.
    unsigned overall_idx = batch_idx * example_size + grid * overall_size +
        x_offset * dim * dim + y_offset * dim + z_offset +
        ((i * dim) + j) * dim + k;
    // Accumulate this timestep's diff into the full-input accumulator.
    atomicAdd(&total_diff[overall_idx], partial_diff[subgrid_idx]);
    if (current_timestep > 0) {
      // Rewind the data blob to the previous timestep's subcube.
      unsigned overall_idx_prev = batch_idx * example_size + grid * overall_size +
          x_offset_prev * dim * dim + y_offset_prev * dim + z_offset_prev +
          ((i * dim) + j) * dim + k;
      dest[subgrid_idx] = src[overall_idx_prev];
    }
  }
}
// Stage the input for the current timestep: gather the active subcube from
// the full input blob (bottom[0]) into the per-timestep blob (top[0]).
template <typename Dtype>
void LSTMDataGetterLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int count = top[0]->count();
  const Dtype* src = bottom[0]->gpu_data();
  Dtype* dest = top[0]->mutable_gpu_data();
  switch(pattern) {
    case AccessPatterns::strided_cube:
      {
        // One thread per subcube element across the whole batch.
        unsigned subgrid_size = batch_size * ntypes * subgrid_dim * subgrid_dim * subgrid_dim;
        hipLaunchKernelGGL(( LSTMFlexForward<Dtype>), dim3(CAFFE_GET_BLOCKS(subgrid_size)),
            dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, src, dest, pattern, batch_size, ntypes,
            subgrid_dim, dim, current_timestep, cube_stride, example_size);
        break;
      }
    default:
      {
        assert(pattern < AccessPatterns::num_patterns);
      }
  }
  CUDA_POST_KERNEL_CHECK;
}
// Backward pass: accumulate this timestep's diff (top[0]) into the running
// diff for the full input (bottom[0]), and rewind top[0]'s data to the
// previous timestep's subcube so earlier layers back-propagate against the
// input they actually saw during the forward pass.
template <typename Dtype>
void LSTMDataGetterLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  Dtype* total_diff = bottom[0]->mutable_gpu_diff();
  const Dtype* partial_diff = top[0]->gpu_diff();
  if (current_timestep == num_timesteps-1) {
    // First backward step over the sequence: zero the accumulator.
    // BUGFIX: hipMemset takes a size in *bytes*; the old code passed the
    // element count and so only cleared count/sizeof(Dtype) of the buffer,
    // leaving stale diff in the rest.
    CUDA_CHECK(hipMemset(total_diff, 0, bottom[0]->count() * sizeof(Dtype)));
  }
  const int count = top[0]->count();
  const Dtype* src = bottom[0]->gpu_data();
  Dtype* dest = top[0]->mutable_gpu_data();
  //- use diff computed for the per-timestep blob to compute the relevant part
  //of the diff we're building up for the full input
  //
  //- also update the data blob contents to be correct for the *previous*
  //timestep - by the time the DataGetter layer is hit during backward, the
  //blobs that need current_x set for this timestep have already computed
  //their diffs with it.
  switch(pattern) {
    case AccessPatterns::strided_cube:
      {
        unsigned subgrid_size = batch_size * ntypes * subgrid_dim * subgrid_dim * subgrid_dim;
        hipLaunchKernelGGL(( LSTMFlexBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(subgrid_size)),
            dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, src, dest, total_diff, partial_diff, pattern,
            batch_size, ntypes, subgrid_dim, dim, current_timestep, cube_stride,
            example_size);
        break;
      }
    default:
      {
        assert(pattern < AccessPatterns::num_patterns);
      }
  }
  CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(LSTMDataGetterLayer);
} // namespace caffe
| 0f395c6a0d6dda287bbddfe4f86603af082ba577.cu | #include "caffe/layer.hpp"
#include "caffe/layers/flex_lstm_layer.hpp"
namespace caffe {
// Decompose a flat thread index into (batch_idx, grid, i, j, k) coordinates
// within a batch of `ntypes` subgrids of side `subgrid_dim`. The layout is
// batch-major:
//   tidx = (((batch_idx*ntypes + grid)*subgrid_dim + i)*subgrid_dim + j)
//          * subgrid_dim + k
__device__ void flat_idx_to_subgrid(unsigned tidx, unsigned subgrid_dim,
    unsigned batch_size, unsigned ntypes, unsigned& i, unsigned& j, unsigned& k,
    unsigned& grid, unsigned& batch_idx) {
  unsigned rem = tidx;
  k = rem % subgrid_dim;
  rem /= subgrid_dim;
  j = rem % subgrid_dim;
  rem /= subgrid_dim;
  i = rem % subgrid_dim;
  rem /= subgrid_dim;
  grid = rem % ntypes;
  rem /= ntypes;
  batch_idx = rem % batch_size;
}
// Forward kernel for the strided-cube access pattern: copy the subcube that
// belongs to `current_timestep` out of the full dim^3 per-type grids (src)
// into the compact per-timestep blob (dest). One thread per subcube element
// across the whole batch.
template <typename Dtype>
__global__ void LSTMFlexForward(const int nthreads, const Dtype* src, Dtype* dest,
AccessPattern pattern, unsigned batch_size, unsigned ntypes, unsigned
subgrid_dim, unsigned dim, unsigned current_timestep, unsigned cube_stride,
unsigned example_size) {
//strided cube version:
//use the current_timestep to find the location of the first value in
//the subcube we're going to use at this timestep; this is our starting
//offset
unsigned overall_size = dim * dim * dim;
// factor = number of subcube positions along each axis; the timestep index
// is decoded base-`factor` into the (x, y, z) origin of the subcube.
unsigned factor = (((dim - subgrid_dim) / cube_stride) + 1);
unsigned x_offset = ((current_timestep / (factor * factor)) % factor) * cube_stride;
unsigned y_offset = ((current_timestep / factor) % factor) * cube_stride;
unsigned z_offset = (current_timestep % factor) * cube_stride;
unsigned subgrid_count = batch_size * ntypes * subgrid_dim * subgrid_dim * subgrid_dim;
CUDA_KERNEL_LOOP(tidx, subgrid_count) {
//where in the grid is this index?
unsigned i;
unsigned j;
unsigned k;
unsigned grid;
unsigned batch_idx;
flat_idx_to_subgrid(tidx, subgrid_dim, batch_size, ntypes, i, j, k,
grid, batch_idx);
//what overall index does that correspond to?
// Flat index within the compact subcube blob...
unsigned subgrid_idx = (((batch_idx * ntypes + grid) * subgrid_dim + i) *
subgrid_dim + j) * subgrid_dim + k;
// ...and the matching index within the full input blob (offset by the
// subcube origin for this timestep).
unsigned overall_idx = batch_idx * example_size + grid * overall_size +
x_offset * dim * dim + y_offset * dim + z_offset +
((i * dim) + j) * dim + k;
dest[subgrid_idx] = src[overall_idx];
}
}
// Backward kernel for the strided-cube access pattern. For each element of
// the current timestep's subcube this thread:
//   1) scatter-adds the per-timestep diff (partial_diff) into total_diff at
//      the matching position of the full dim^3 input, and
//   2) if there is a previous timestep, rewrites dest with the input value
//      from the *previous* subcube, so the layers that run earlier in the
//      backward pass see the input they were originally given.
// NOTE(review): atomicAdd on double requires compute capability 6.0+ —
// confirm Dtype/architecture support.
template <typename Dtype>
__global__ void LSTMFlexBackward(const int nthreads, const Dtype* src, Dtype* dest,
    Dtype* total_diff, const Dtype* partial_diff, AccessPattern pattern, unsigned batch_size,
    unsigned ntypes, unsigned subgrid_dim, unsigned dim, unsigned current_timestep,
    unsigned cube_stride, unsigned example_size) {
  unsigned overall_size = dim * dim * dim;
  // Subcube origin for the current timestep (used to accumulate diff);
  // factor is the number of subcube positions along each axis.
  unsigned factor = (((dim - subgrid_dim) / cube_stride) + 1);
  unsigned x_offset = ((current_timestep / (factor * factor)) % factor) * cube_stride;
  unsigned y_offset = ((current_timestep / factor) % factor) * cube_stride;
  unsigned z_offset = (current_timestep % factor) * cube_stride;
  // Subcube origin for the previous timestep (only valid when
  // current_timestep > 0).
  unsigned x_offset_prev;
  unsigned y_offset_prev;
  unsigned z_offset_prev;
  if (current_timestep > 0) {
    // BUGFIX: the old code set each *_prev = *_offset - cube_stride, which
    // is only correct when all three coordinates change between consecutive
    // timesteps, and underflowed the unsigned offset whenever an axis was
    // already 0. Decode timestep current_timestep-1 with the same base-
    // `factor` formula used above (matching LSTMFlexForward).
    unsigned prev_timestep = current_timestep - 1;
    x_offset_prev = ((prev_timestep / (factor * factor)) % factor) * cube_stride;
    y_offset_prev = ((prev_timestep / factor) % factor) * cube_stride;
    z_offset_prev = (prev_timestep % factor) * cube_stride;
  }
  unsigned subgrid_count = batch_size * ntypes * subgrid_dim * subgrid_dim * subgrid_dim;
  CUDA_KERNEL_LOOP(tidx, subgrid_count) {
    // Decompose the flat index into subgrid coordinates.
    unsigned i;
    unsigned j;
    unsigned k;
    unsigned grid;
    unsigned batch_idx;
    flat_idx_to_subgrid(tidx, subgrid_dim, batch_size, ntypes, i, j, k,
        grid, batch_idx);
    // Flat index within the compact subcube blob...
    unsigned subgrid_idx = (((batch_idx * ntypes + grid) * subgrid_dim + i) *
        subgrid_dim + j) * subgrid_dim + k;
    // ...and the matching index within the full input blob.
    unsigned overall_idx = batch_idx * example_size + grid * overall_size +
        x_offset * dim * dim + y_offset * dim + z_offset +
        ((i * dim) + j) * dim + k;
    // Accumulate this timestep's diff into the full-input accumulator.
    atomicAdd(&total_diff[overall_idx], partial_diff[subgrid_idx]);
    if (current_timestep > 0) {
      // Rewind the data blob to the previous timestep's subcube.
      unsigned overall_idx_prev = batch_idx * example_size + grid * overall_size +
          x_offset_prev * dim * dim + y_offset_prev * dim + z_offset_prev +
          ((i * dim) + j) * dim + k;
      dest[subgrid_idx] = src[overall_idx_prev];
    }
  }
}
// Stage the input for the current timestep: gather the active subcube from
// the full input blob (bottom[0]) into the per-timestep blob (top[0]).
template <typename Dtype>
void LSTMDataGetterLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int count = top[0]->count();
  const Dtype* src = bottom[0]->gpu_data();
  Dtype* dest = top[0]->mutable_gpu_data();
  switch(pattern) {
    case AccessPatterns::strided_cube:
      {
        // One thread per subcube element across the whole batch.
        unsigned subgrid_size = batch_size * ntypes * subgrid_dim * subgrid_dim * subgrid_dim;
        LSTMFlexForward<Dtype><<<CAFFE_GET_BLOCKS(subgrid_size),
            CAFFE_CUDA_NUM_THREADS>>>(count, src, dest, pattern, batch_size, ntypes,
            subgrid_dim, dim, current_timestep, cube_stride, example_size);
        break;
      }
    default:
      {
        assert(pattern < AccessPatterns::num_patterns);
      }
  }
  CUDA_POST_KERNEL_CHECK;
}
// Backward pass: accumulate this timestep's diff (top[0]) into the running
// diff for the full input (bottom[0]), and rewind top[0]'s data to the
// previous timestep's subcube so earlier layers back-propagate against the
// input they actually saw during the forward pass.
template <typename Dtype>
void LSTMDataGetterLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  Dtype* total_diff = bottom[0]->mutable_gpu_diff();
  const Dtype* partial_diff = top[0]->gpu_diff();
  if (current_timestep == num_timesteps-1) {
    // First backward step over the sequence: zero the accumulator.
    // BUGFIX: cudaMemset takes a size in *bytes*; the old code passed the
    // element count and so only cleared count/sizeof(Dtype) of the buffer,
    // leaving stale diff in the rest.
    CUDA_CHECK(cudaMemset(total_diff, 0, bottom[0]->count() * sizeof(Dtype)));
  }
  const int count = top[0]->count();
  const Dtype* src = bottom[0]->gpu_data();
  Dtype* dest = top[0]->mutable_gpu_data();
  //- use diff computed for the per-timestep blob to compute the relevant part
  //of the diff we're building up for the full input
  //
  //- also update the data blob contents to be correct for the *previous*
  //timestep - by the time the DataGetter layer is hit during backward, the
  //blobs that need current_x set for this timestep have already computed
  //their diffs with it.
  switch(pattern) {
    case AccessPatterns::strided_cube:
      {
        unsigned subgrid_size = batch_size * ntypes * subgrid_dim * subgrid_dim * subgrid_dim;
        LSTMFlexBackward<Dtype><<<CAFFE_GET_BLOCKS(subgrid_size),
            CAFFE_CUDA_NUM_THREADS>>>(count, src, dest, total_diff, partial_diff, pattern,
            batch_size, ntypes, subgrid_dim, dim, current_timestep, cube_stride,
            example_size);
        break;
      }
    default:
      {
        assert(pattern < AccessPatterns::num_patterns);
      }
  }
  CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(LSTMDataGetterLayer);
} // namespace caffe
|
841c19d8c7fcfbe7346c7c68a29d054c14b63b8b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "helpers.h"
__constant__ float ckernel[81];
// Tiled 2D convolution over a multi-channel image.
// Layout (from the indexing below): `input` is height x width x channels
// (interleaved), `output` is height x width x kernels, and `kernel` holds
// `kernels` filters, each channels x (2*k_width+1) x (2*k_width+1).
// Each block stages its tile plus a k_width-wide halo, per channel, into
// dynamic shared memory `sdata`, then each thread computes one output pixel
// for the filter selected by blockIdx.z. Out-of-image halo reads are zero.
// Required dynamic shared memory:
//   channels * (blockDim.x + 2*k_width) * (blockDim.y + 2*k_width) * sizeof(float).
__global__ void conv_cuda(float *input, float *output, int width, int height,
float *kernel, int channels, int k_width,
int kernels) {
int k = blockIdx.z;
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
int output_idx = i * width * kernels + j * kernels + k;
extern __shared__ float sdata[];
// Elements in one channel's padded 2D plane of shared memory.
int smem_2d_size = (blockDim.x + 2 * k_width) * (blockDim.y + 2 * k_width);
if (threadIdx.y < k_width) {
// Top Overhang
int smem_x = threadIdx.x + k_width;
int smem_y = threadIdx.y;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y - k_width;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] = (gmem_y < 0) ? 0 : input[gmem_index];
}
// Top Left
if (threadIdx.x < k_width) {
int smem_x = threadIdx.x;
int smem_y = threadIdx.y;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x - k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y - k_width;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] = (gmem_x < 0 || gmem_y < 0) ? 0 : input[gmem_index];
}
}
// Top Right
if (threadIdx.y < k_width && threadIdx.x >= blockDim.x - k_width) {
int smem_x = threadIdx.x + 2 * k_width;
int smem_y = threadIdx.y;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x + k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y - k_width;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] =
(gmem_x >= width || gmem_y < 0) ? 0 : input[gmem_index];
}
}
}
// Copy GMEm to SMEM here
// Left Overhang
if (threadIdx.x < k_width) {
int smem_x = threadIdx.x;
int smem_y = threadIdx.y + k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x - k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] = (gmem_x < 0) ? 0 : input[gmem_index];
}
}
// Copy the block data
int smem_x = threadIdx.x + k_width;
int smem_y = threadIdx.y + k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] =
(gmem_x >= width || gmem_y >= height) ? 0 : input[gmem_index];
}
// Bottom
if (threadIdx.y >= blockDim.y - k_width) {
int smem_x = threadIdx.x + k_width;
int smem_y = threadIdx.y + 2 * k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y + k_width;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] = (gmem_y >= height) ? 0 : input[gmem_index];
}
// Bottom Left
if (threadIdx.x < k_width && threadIdx.y >= blockDim.y - k_width) {
int smem_x = threadIdx.x;
int smem_y = threadIdx.y + 2 * k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x - k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y + k_width;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] =
(gmem_x < 0 || gmem_y >= height) ? 0 : input[gmem_index];
}
}
}
// Right
if (threadIdx.x >= blockDim.x - k_width) {
int smem_x = threadIdx.x + 2 * k_width;
int smem_y = threadIdx.y + k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x + k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] = (gmem_x >= width) ? 0 : input[gmem_index];
}
}
// Bottom Right
if (threadIdx.x >= blockDim.x - k_width &&
threadIdx.y >= blockDim.y - k_width) {
int smem_x = threadIdx.x + 2 * k_width;
int smem_y = threadIdx.y + 2 * k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x + k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y + k_width;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
sdata[smem_index] =
(gmem_x >= width || gmem_y >= height) ? 0 : input[gmem_index];
}
}
// All threads of the block (including ones whose output pixel is out of
// bounds) participate in the loads above and must reach this barrier; the
// bounds check is deliberately deferred until after it.
__syncthreads();
if (i >= height || j >= width) {
return;
}
// Accumulate the filter response for this output pixel: sum over channels
// and the (2*k_width+1)^2 window read back from shared memory.
float tmp_output = 0;
for (int c = 0; c < channels; c++) {
for (int k_i = 0; k_i <= 2 * k_width; k_i++) {
for (int k_j = 0; k_j <= 2 * k_width; k_j++) {
smem_x = threadIdx.x + k_j;
smem_y = threadIdx.y + k_i;
int smem_index =
c * smem_2d_size + smem_x + smem_y * (blockDim.x + 2 * k_width);
int kernel_index =
k * channels * (2 * k_width + 1) * (2 * k_width + 1) +
c * (2 * k_width + 1) * (2 * k_width + 1) +
k_i * (2 * k_width + 1) + k_j;
tmp_output += sdata[smem_index] * kernel[kernel_index];
}
}
}
output[output_idx] = tmp_output;
return;
}
int main(int argc, char *argv[]) {
  // End-to-end driver: load an image, convolve it on the GPU with a fixed
  // 3x3 filter replicated across 3 kernels x 3 channels, save the result.
  char *outputfile = (char *)"cuda_out_reorder.png";
  // Check input image name
  if (argc < 2) {
    std::cout << "No file input" << std::endl;
    return 0;
  }
  //
  // Check if the filename is valid
  char *filename = argv[1];
  std::cout << argv[1] << std::endl;
  // Load Image
  cv::Mat image;
  image = load_image(filename);
  if (image.empty()) {
    std::cout << "File not exist" << std::endl;
    return 0;
  }
  //==================================
  // Define I/O sizes
  //==================================
  int padding = 1;
  int channels = 3;
  int height = image.rows;
  int width = image.cols;
  int kernels = 3;
  std::cout << "Image dims (HxW)is " << height << "x" << width << std::endl;
  int height_padded = height + 2 * padding;
  int width_padded = width + 2 * padding;
  int input_bytes = channels * height * width * sizeof(float);
  int output_bytes = channels * height * width * sizeof(float);
  std::cout << "Padded dims is " << height_padded << "x" << width_padded
            << std::endl;
  float *h_input = (float *)image.data;
  float *h_output;
  h_output = (float *)malloc(output_bytes);
  float *d_input;
  float *d_output;
  hipMalloc((void **)&d_input, input_bytes);
  hipMalloc((void **)&d_output, output_bytes);
  hipMemcpy(d_input, h_input, input_bytes, hipMemcpyHostToDevice);
  // invoke Kernel
  int bx = 64;
  int by = 16;
  dim3 block(bx, by); // you will want to configure this
  dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y,
            3);
  printf("Grid : {%d, %d, %d} blocks. Blocks : {%d, %d} threads.\n", grid.x,
         grid.y, grid.z, block.x, block.y);
  //==================================
  // Define Kernel data
  //==================================
  // Mystery kernel
  const float kernel_template[3][3] = {{1, 1, 1}, {1, -8, 1}, {1, 1, 1}};
  float *d_kernel;
  float h_kernel[3][3][3][3];
  int kernel_bytes = 3 * 3 * 3 * 3 * sizeof(float);
  // Replicate the same 3x3 template for every (kernel, channel) pair.
  for (int kernel = 0; kernel < 3; ++kernel) {
    for (int channel = 0; channel < 3; ++channel) {
      for (int row = 0; row < 3; ++row) {
        for (int column = 0; column < 3; ++column) {
          h_kernel[kernel][channel][row][column] = kernel_template[row][column];
        }
      }
    }
  }
  hipMalloc((void **)&d_kernel, kernel_bytes);
  hipMemcpy(d_kernel, h_kernel, kernel_bytes, hipMemcpyHostToDevice);
  hipMemcpyToSymbol(ckernel, &h_kernel, kernel_bytes);
  int k_size = 3;
  int k_width = (k_size - 1) / 2;
  // Dynamic shared memory: padded tile (k_width halo per side) per channel.
  int smem_size =
      (bx + 2 * k_width) * (by + 2 * k_width) * channels * sizeof(float);
  printf("SMEM size is %d \n", (bx + 2 * k_width) * (by + 2 * k_width));
  //==================================
  // GPU Convolution
  //==================================
  printf("Start conv\n");
  double timeStampA = getTimeStamp();
  hipLaunchKernelGGL(( conv_cuda), dim3(grid), dim3(block), smem_size, 0, d_input, d_output, width, height,
                     d_kernel, 3, k_width, kernels);
  hipDeviceSynchronize();
  double timeStampB = getTimeStamp();
  // Fix: use the output buffer's size constant for the D2H copy (the two
  // values are equal here, but the destination is h_output/output_bytes).
  hipMemcpy(h_output, d_output, output_bytes, hipMemcpyDeviceToHost);
  //==================================
  // Collect data
  //==================================
  // Print result
  std::cout << "Total convolution time: " << timeStampB - timeStampA
            << std::endl;
  std::cout << "Save Output to " << outputfile << std::endl;
  save_image(outputfile, h_output, height, width);
  hipFree(d_input);
  hipFree(d_output);
  hipFree(d_kernel);
  hipDeviceReset();
  // Fix: h_output came from malloc(), so release it with free();
  // delete[] on a malloc'd pointer is undefined behavior.
  free(h_output);
  return 0;
} | 841c19d8c7fcfbe7346c7c68a29d054c14b63b8b.cu | #include "helpers.h"
__constant__ float ckernel[81];
// Direct 2-D convolution of an interleaved (HWC) float image.
// Launch layout: blockIdx.{x,y} tile the output plane; blockIdx.z selects
// the output feature map k. Dynamic shared memory must hold
//   channels * (blockDim.x + 2*k_width) * (blockDim.y + 2*k_width) floats:
// each block stages its tile plus a k_width-wide halo per channel
// (zero-filled where the halo falls outside the image) before the
// multiply-accumulate loop.
// NOTE(review): the halo loads use single-sided guards (e.g. only
// `gmem_x < 0` on the left strip), assuming the orthogonal coordinate is in
// range; border blocks may read slightly out of bounds there — confirm.
__global__ void conv_cuda(float *input, float *output, int width, int height,
                          float *kernel, int channels, int k_width,
                          int kernels) {
  int k = blockIdx.z;                             // output feature map
  int j = threadIdx.x + blockIdx.x * blockDim.x;  // output column
  int i = threadIdx.y + blockIdx.y * blockDim.y;  // output row
  int output_idx = i * width * kernels + j * kernels + k;
  extern __shared__ float sdata[];
  // Elements in one channel plane of the shared tile (tile + halo).
  int smem_2d_size = (blockDim.x + 2 * k_width) * (blockDim.y + 2 * k_width);
  if (threadIdx.y < k_width) {
    // Top Overhang
    int smem_x = threadIdx.x + k_width;
    int smem_y = threadIdx.y;
    int gmem_x = blockIdx.x * blockDim.x + threadIdx.x;
    int gmem_y = blockIdx.y * blockDim.y + threadIdx.y - k_width;
    for (int c = 0; c < channels; c++) {
      int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
      int smem_index =
          (smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
      sdata[smem_index] = (gmem_y < 0) ? 0 : input[gmem_index];
    }
    // Top Left
    if (threadIdx.x < k_width) {
      int smem_x = threadIdx.x;
      int smem_y = threadIdx.y;
      int gmem_x = blockIdx.x * blockDim.x + threadIdx.x - k_width;
      int gmem_y = blockIdx.y * blockDim.y + threadIdx.y - k_width;
      for (int c = 0; c < channels; c++) {
        int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
        int smem_index =
            (smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
        sdata[smem_index] = (gmem_x < 0 || gmem_y < 0) ? 0 : input[gmem_index];
      }
    }
    // Top Right
    if (threadIdx.y < k_width && threadIdx.x >= blockDim.x - k_width) {
      int smem_x = threadIdx.x + 2 * k_width;
      int smem_y = threadIdx.y;
      int gmem_x = blockIdx.x * blockDim.x + threadIdx.x + k_width;
      int gmem_y = blockIdx.y * blockDim.y + threadIdx.y - k_width;
      for (int c = 0; c < channels; c++) {
        int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
        int smem_index =
            (smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
        sdata[smem_index] =
            (gmem_x >= width || gmem_y < 0) ? 0 : input[gmem_index];
      }
    }
  }
  // Copy GMEM to SMEM here
  // Left Overhang
  if (threadIdx.x < k_width) {
    int smem_x = threadIdx.x;
    int smem_y = threadIdx.y + k_width;
    int gmem_x = blockIdx.x * blockDim.x + threadIdx.x - k_width;
    int gmem_y = blockIdx.y * blockDim.y + threadIdx.y;
    for (int c = 0; c < channels; c++) {
      int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
      int smem_index =
          (smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
      sdata[smem_index] = (gmem_x < 0) ? 0 : input[gmem_index];
    }
  }
  // Copy the block data
  int smem_x = threadIdx.x + k_width;
  int smem_y = threadIdx.y + k_width;
  int gmem_x = blockIdx.x * blockDim.x + threadIdx.x;
  int gmem_y = blockIdx.y * blockDim.y + threadIdx.y;
  for (int c = 0; c < channels; c++) {
    int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
    int smem_index =
        (smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
    sdata[smem_index] =
        (gmem_x >= width || gmem_y >= height) ? 0 : input[gmem_index];
  }
  // Bottom
  if (threadIdx.y >= blockDim.y - k_width) {
    int smem_x = threadIdx.x + k_width;
    int smem_y = threadIdx.y + 2 * k_width;
    int gmem_x = blockIdx.x * blockDim.x + threadIdx.x;
    int gmem_y = blockIdx.y * blockDim.y + threadIdx.y + k_width;
    for (int c = 0; c < channels; c++) {
      int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
      int smem_index =
          (smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
      sdata[smem_index] = (gmem_y >= height) ? 0 : input[gmem_index];
    }
    // Bottom Left
    if (threadIdx.x < k_width && threadIdx.y >= blockDim.y - k_width) {
      int smem_x = threadIdx.x;
      int smem_y = threadIdx.y + 2 * k_width;
      int gmem_x = blockIdx.x * blockDim.x + threadIdx.x - k_width;
      int gmem_y = blockIdx.y * blockDim.y + threadIdx.y + k_width;
      for (int c = 0; c < channels; c++) {
        int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
        int smem_index =
            (smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
        sdata[smem_index] =
            (gmem_x < 0 || gmem_y >= height) ? 0 : input[gmem_index];
      }
    }
  }
  // Right
  if (threadIdx.x >= blockDim.x - k_width) {
    int smem_x = threadIdx.x + 2 * k_width;
    int smem_y = threadIdx.y + k_width;
    int gmem_x = blockIdx.x * blockDim.x + threadIdx.x + k_width;
    int gmem_y = blockIdx.y * blockDim.y + threadIdx.y;
    for (int c = 0; c < channels; c++) {
      int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
      int smem_index =
          (smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
      sdata[smem_index] = (gmem_x >= width) ? 0 : input[gmem_index];
    }
  }
  // Bottom Right
  if (threadIdx.x >= blockDim.x - k_width &&
      threadIdx.y >= blockDim.y - k_width) {
    int smem_x = threadIdx.x + 2 * k_width;
    int smem_y = threadIdx.y + 2 * k_width;
    int gmem_x = blockIdx.x * blockDim.x + threadIdx.x + k_width;
    int gmem_y = blockIdx.y * blockDim.y + threadIdx.y + k_width;
    for (int c = 0; c < channels; c++) {
      int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
      int smem_index =
          (smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
      sdata[smem_index] =
          (gmem_x >= width || gmem_y >= height) ? 0 : input[gmem_index];
    }
  }
  __syncthreads();  // tile + halo fully staged before any thread reads it
  if (i >= height || j >= width) {
    return;
  }
  float tmp_output = 0;
  // Accumulate over channels and the (2*k_width+1)^2 filter window.
  for (int c = 0; c < channels; c++) {
    for (int k_i = 0; k_i <= 2 * k_width; k_i++) {
      for (int k_j = 0; k_j <= 2 * k_width; k_j++) {
        smem_x = threadIdx.x + k_j;
        smem_y = threadIdx.y + k_i;
        int smem_index =
            c * smem_2d_size + smem_x + smem_y * (blockDim.x + 2 * k_width);
        int kernel_index =
            k * channels * (2 * k_width + 1) * (2 * k_width + 1) +
            c * (2 * k_width + 1) * (2 * k_width + 1) +
            k_i * (2 * k_width + 1) + k_j;
        tmp_output += sdata[smem_index] * kernel[kernel_index];
      }
    }
  }
  output[output_idx] = tmp_output;
  return;
}
int main(int argc, char *argv[]) {
  // End-to-end driver: load an image, convolve it on the GPU with a fixed
  // 3x3 filter replicated across 3 kernels x 3 channels, save the result.
  char *outputfile = (char *)"cuda_out_reorder.png";
  // Check input image name
  if (argc < 2) {
    std::cout << "No file input" << std::endl;
    return 0;
  }
  //
  // Check if the filename is valid
  char *filename = argv[1];
  std::cout << argv[1] << std::endl;
  // Load Image
  cv::Mat image;
  image = load_image(filename);
  if (image.empty()) {
    std::cout << "File not exist" << std::endl;
    return 0;
  }
  //==================================
  // Define I/O sizes
  //==================================
  int padding = 1;
  int channels = 3;
  int height = image.rows;
  int width = image.cols;
  int kernels = 3;
  std::cout << "Image dims (HxW)is " << height << "x" << width << std::endl;
  int height_padded = height + 2 * padding;
  int width_padded = width + 2 * padding;
  int input_bytes = channels * height * width * sizeof(float);
  int output_bytes = channels * height * width * sizeof(float);
  std::cout << "Padded dims is " << height_padded << "x" << width_padded
            << std::endl;
  float *h_input = (float *)image.data;
  float *h_output;
  h_output = (float *)malloc(output_bytes);
  float *d_input;
  float *d_output;
  cudaMalloc((void **)&d_input, input_bytes);
  cudaMalloc((void **)&d_output, output_bytes);
  cudaMemcpy(d_input, h_input, input_bytes, cudaMemcpyHostToDevice);
  // invoke Kernel
  int bx = 64;
  int by = 16;
  dim3 block(bx, by); // you will want to configure this
  dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y,
            3);
  printf("Grid : {%d, %d, %d} blocks. Blocks : {%d, %d} threads.\n", grid.x,
         grid.y, grid.z, block.x, block.y);
  //==================================
  // Define Kernel data
  //==================================
  // Mystery kernel
  const float kernel_template[3][3] = {{1, 1, 1}, {1, -8, 1}, {1, 1, 1}};
  float *d_kernel;
  float h_kernel[3][3][3][3];
  int kernel_bytes = 3 * 3 * 3 * 3 * sizeof(float);
  // Replicate the same 3x3 template for every (kernel, channel) pair.
  for (int kernel = 0; kernel < 3; ++kernel) {
    for (int channel = 0; channel < 3; ++channel) {
      for (int row = 0; row < 3; ++row) {
        for (int column = 0; column < 3; ++column) {
          h_kernel[kernel][channel][row][column] = kernel_template[row][column];
        }
      }
    }
  }
  cudaMalloc((void **)&d_kernel, kernel_bytes);
  cudaMemcpy(d_kernel, h_kernel, kernel_bytes, cudaMemcpyHostToDevice);
  cudaMemcpyToSymbol(ckernel, &h_kernel, kernel_bytes);
  int k_size = 3;
  int k_width = (k_size - 1) / 2;
  // Dynamic shared memory: padded tile (k_width halo per side) per channel.
  int smem_size =
      (bx + 2 * k_width) * (by + 2 * k_width) * channels * sizeof(float);
  printf("SMEM size is %d \n", (bx + 2 * k_width) * (by + 2 * k_width));
  //==================================
  // GPU Convolution
  //==================================
  printf("Start conv\n");
  double timeStampA = getTimeStamp();
  conv_cuda<<<grid, block, smem_size>>>(d_input, d_output, width, height,
                                        d_kernel, 3, k_width, kernels);
  cudaDeviceSynchronize();
  double timeStampB = getTimeStamp();
  // Fix: use the output buffer's size constant for the D2H copy (the two
  // values are equal here, but the destination is h_output/output_bytes).
  cudaMemcpy(h_output, d_output, output_bytes, cudaMemcpyDeviceToHost);
  //==================================
  // Collect data
  //==================================
  // Print result
  std::cout << "Total convolution time: " << timeStampB - timeStampA
            << std::endl;
  std::cout << "Save Output to " << outputfile << std::endl;
  save_image(outputfile, h_output, height, width);
  cudaFree(d_input);
  cudaFree(d_output);
  cudaFree(d_kernel);
  cudaDeviceReset();
  // Fix: h_output came from malloc(), so release it with free();
  // delete[] on a malloc'd pointer is undefined behavior.
  free(h_output);
  return 0;
}
2588f341cba8adf9d6b7b53e48c24fa49a69c823.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void ComputeDerivativesKernel(int width, int height, int stride, float* Ix, float* Iy, float* Iz, hipTextureObject_t texSource, hipTextureObject_t texTarget)
{
	// One thread per pixel. Spatial gradients are built from a five-tap
	// difference stencil (taps at +-1 and +-2 samples, weights 8 and 1,
	// divided by 12) evaluated through the textures in normalized
	// coordinates, then averaged over the two frames; Iz holds the
	// source-minus-target temporal difference. The accumulation order of
	// the taps matches the original statement sequence so results stay
	// bit-identical.
	const int px = threadIdx.x + blockIdx.x * blockDim.x;
	const int py = threadIdx.y + blockIdx.y * blockDim.y;
	if (px >= width || py >= height) return;

	const float dx = 1.0f / (float)width;
	const float dy = 1.0f / (float)height;
	const float x = ((float)px + 0.5f) * dx;
	const float y = ((float)py + 0.5f) * dy;

	// x derivative, per frame
	float src = ((tex2D<float>(texSource, x + 2.0f * dx, y)
	            - tex2D<float>(texSource, x + 1.0f * dx, y) * 8.0f)
	            + tex2D<float>(texSource, x - 1.0f * dx, y) * 8.0f
	            - tex2D<float>(texSource, x - 2.0f * dx, y)) / 12.0f;
	float tgt = ((tex2D<float>(texTarget, x + 2.0f * dx, y)
	            - tex2D<float>(texTarget, x + 1.0f * dx, y) * 8.0f)
	            + tex2D<float>(texTarget, x - 1.0f * dx, y) * 8.0f
	            - tex2D<float>(texTarget, x - 2.0f * dx, y)) / 12.0f;
	float* IxRow = (float*)((char*)Ix + stride * py);  // pitched row base
	IxRow[px] = (src + tgt) * 0.5f;

	// t derivative
	float* IzRow = (float*)((char*)Iz + stride * py);
	IzRow[px] = tex2D<float>(texSource, x, y) - tex2D<float>(texTarget, x, y);

	// y derivative, per frame
	src = ((tex2D<float>(texSource, x, y + 2.0f * dy)
	      - tex2D<float>(texSource, x, y + 1.0f * dy) * 8.0f)
	      + tex2D<float>(texSource, x, y - 1.0f * dy) * 8.0f
	      - tex2D<float>(texSource, x, y - 2.0f * dy)) / 12.0f;
	tgt = ((tex2D<float>(texTarget, x, y + 2.0f * dy)
	      - tex2D<float>(texTarget, x, y + 1.0f * dy) * 8.0f)
	      + tex2D<float>(texTarget, x, y - 1.0f * dy) * 8.0f
	      - tex2D<float>(texTarget, x, y - 2.0f * dy)) / 12.0f;
	float* IyRow = (float*)((char*)Iy + stride * py);
	IyRow[px] = (src + tgt) * 0.5f;
} | 2588f341cba8adf9d6b7b53e48c24fa49a69c823.cu | #include "includes.h"
__global__ void ComputeDerivativesKernel(int width, int height, int stride, float* Ix, float* Iy, float* Iz, cudaTextureObject_t texSource, cudaTextureObject_t texTarget)
{
	// One thread per pixel. Spatial gradients are built from a five-tap
	// difference stencil (taps at +-1 and +-2 samples, weights 8 and 1,
	// divided by 12) evaluated through the textures in normalized
	// coordinates, then averaged over the two frames; Iz holds the
	// source-minus-target temporal difference. The accumulation order of
	// the taps matches the original statement sequence so results stay
	// bit-identical.
	const int px = threadIdx.x + blockIdx.x * blockDim.x;
	const int py = threadIdx.y + blockIdx.y * blockDim.y;
	if (px >= width || py >= height) return;

	const float dx = 1.0f / (float)width;
	const float dy = 1.0f / (float)height;
	const float x = ((float)px + 0.5f) * dx;
	const float y = ((float)py + 0.5f) * dy;

	// x derivative, per frame
	float src = ((tex2D<float>(texSource, x + 2.0f * dx, y)
	            - tex2D<float>(texSource, x + 1.0f * dx, y) * 8.0f)
	            + tex2D<float>(texSource, x - 1.0f * dx, y) * 8.0f
	            - tex2D<float>(texSource, x - 2.0f * dx, y)) / 12.0f;
	float tgt = ((tex2D<float>(texTarget, x + 2.0f * dx, y)
	            - tex2D<float>(texTarget, x + 1.0f * dx, y) * 8.0f)
	            + tex2D<float>(texTarget, x - 1.0f * dx, y) * 8.0f
	            - tex2D<float>(texTarget, x - 2.0f * dx, y)) / 12.0f;
	float* IxRow = (float*)((char*)Ix + stride * py);  // pitched row base
	IxRow[px] = (src + tgt) * 0.5f;

	// t derivative
	float* IzRow = (float*)((char*)Iz + stride * py);
	IzRow[px] = tex2D<float>(texSource, x, y) - tex2D<float>(texTarget, x, y);

	// y derivative, per frame
	src = ((tex2D<float>(texSource, x, y + 2.0f * dy)
	      - tex2D<float>(texSource, x, y + 1.0f * dy) * 8.0f)
	      + tex2D<float>(texSource, x, y - 1.0f * dy) * 8.0f
	      - tex2D<float>(texSource, x, y - 2.0f * dy)) / 12.0f;
	tgt = ((tex2D<float>(texTarget, x, y + 2.0f * dy)
	      - tex2D<float>(texTarget, x, y + 1.0f * dy) * 8.0f)
	      + tex2D<float>(texTarget, x, y - 1.0f * dy) * 8.0f
	      - tex2D<float>(texTarget, x, y - 2.0f * dy)) / 12.0f;
	float* IyRow = (float*)((char*)Iy + stride * py);
	IyRow[px] = (src + tgt) * 0.5f;
}
f90e7b9a1f5a5dbc49a35925fadd9a00781332d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "GNNConfig.h"
#include "../common/Time.h"
/*
* @author Vasileios Zois
* @email vzois@usc.edu
*
* CUDA neural network implementation
*/
#define MBATCH 128
#define TTILE 32
#define LTILE 32
#define DPT 4 //DATA PER THREADS
#define BSIZE 512
namespace gnn_kernels{
static __device__ hiprandState_t randDevStates[RAND_STATES];
	// Draws a uniform float from one of the RAND_STATES shared generator
	// states, selected by tid modulo RAND_STATES (hiprand_uniform yields a
	// value in (0, 1] per hipRAND semantics).
	// NOTE(review): threads whose tids collide modulo RAND_STATES update the
	// same state concurrently; the sequences may interleave — acceptable for
	// weight noise, but confirm this is intended.
	__device__ float cudaUniRand(unsigned int tid){
		return hiprand_uniform(&randDevStates[tid % RAND_STATES]);
	}
__global__ void cudaSetupRandStatesKernel(unsigned int seed){
int i = threadIdx.x + blockIdx.x * blockDim.x;
hiprand_init(seed, blockIdx.x, 0, &randDevStates[i]);
}
	// Host-side initializer: seeds the device RNG state pool with a random
	// seed drawn on the host (Utils::uni over the full unsigned range) and
	// blocks until the seeding kernel has finished.
	__host__ void cudaInitRandStates(){
		dim3 grid = grid_1D(RAND_STATES,RAND_BLOCK_THREADS);
		dim3 block = block_1D(RAND_BLOCK_THREADS);
		Utils<unsigned int> u;
		hipLaunchKernelGGL(( cudaSetupRandStatesKernel), dim3(grid),dim3(block), 0, 0, u.uni(UINT_MAX));
		handleDeviceErrors(hipDeviceSynchronize(),"Error initializing random states");
	}
/*
* Testing activation functions on kernels.
*/
	// Micro-benchmark kernel: each thread evaluates the activation functor's
	// forward function F blockDim.x times and accumulates the result.
	// NOTE(review): the accumulator `a` is never stored, so the compiler may
	// eliminate the whole loop; verify timings against SASS before trusting
	// this benchmark.
	template<typename ACT_F>
	__global__ void bench_test_activation(ACT_F F){
		int i = threadIdx.x + blockIdx.x * blockDim.x;
		float a = 0;
		for(int j = 0; j<blockDim.x;j++){
			a+= F.F((float)i*j);
		}
	}
/*
* Initialize matrices random weights
*/
	// One thread per weight of the rows x cols matrix W_j. The last column of
	// every row ((i+1) % cols == 0) is the bias weight and is zeroed; all
	// other weights get a symmetric uniform value in
	// +-sqrt(6)/(rows+cols), scaled from cudaUniRand's (0,1] draw.
	// NOTE(review): Glorot/Xavier initialization is usually
	// +-sqrt(6/(fan_in+fan_out)), i.e. the sqrt should cover the whole
	// fraction; here it covers only the 6 — confirm whether this narrower
	// range is intentional.
	template<typename DATA_T>
	__global__ void randomWeights(DATA_T *W_j,unsigned int rows, unsigned int cols){
		int i = blockIdx.x * blockDim.x + threadIdx.x;
		if( i < rows * cols){
			if((i+1) % cols == 0){
				W_j[i] = 0.0;
			}else{
				W_j[i] = (2.0 * cudaUniRand(i) - 1.0) * (sqrtf(6.0)/ (rows + cols));
			}
		}
	}
/*
* Load current batch of train examples.
* 1: First layer batch array.
* 2: Training example matrix.
* 3: Input layer dimension
* 4: Batch size dimension
* 5: Offset indicating the batch being loaded.
* Notes:
* Transpose version assumes that the training examples matrix is stored
* in a row-wise manner.
*/
	// Stages a bsize x clayer window of the row-major example matrix tEx
	// (car rows x dim cols, window origin at (voffset, hoffset)) through a
	// TILE x TILE shared tile and writes its transpose into A_j
	// (clayer x bsize). Requires blockDim == (TILE, TILE).
	// NOTE(review): tile cells skipped by the input guard stay uninitialized;
	// for ragged edge tiles the transposed store guard can still pass and
	// emit garbage — confirm callers only read within the loaded window.
	// Perf note: the transposed read stEx[tx*TILE+ty] is bank-conflicted for
	// TILE==32; padding the tile to TILE*(TILE+1) would remove that.
	template<typename DATA_T, unsigned int TILE>
	__global__ void loadT(
			DATA_T *A_j,
			DATA_T *tEx,
			unsigned int clayer, unsigned int bsize,
			unsigned int car, unsigned int dim,
			unsigned int voffset, unsigned int hoffset
			){
		__shared__ DATA_T stEx[TILE * TILE];
		int row = ( blockIdx.y * blockDim.y + threadIdx.y );
		int col = ( blockIdx.x * blockDim.x + threadIdx.x );
		if(voffset + row < car && col + hoffset < dim && row < bsize && col < clayer){
			stEx[threadIdx.y * TILE + threadIdx.x] = tEx[(row + voffset) * dim + (col + hoffset)];
			//stEx[threadIdx.y * TILE + threadIdx.x] = tEx[(voffset + row) * dim + (col + hoffset)];
		}
		__syncthreads();
		// Re-map indices into the transposed (clayer x bsize) output space.
		//col * bsize + row
		row = (blockIdx.x * blockDim.x + threadIdx.y);
		col = (blockIdx.y * blockDim.y + threadIdx.x);
		if( row < clayer && col < bsize){
			A_j[row * bsize + col] = stEx[threadIdx.x * TILE + threadIdx.y];
		}
	}
/*
* Compute matrix of activation values for a single layer of a given batch.
* 1: Current layer weight matrix.
* 2: Current layer matrix of activation vectors.
* 3: Next layer matrix of activation vectors.
* 4: W_j = nlayer x clayer , A_j = clayer x bsize, A_jj = nlayer x bsize.
* 5: Offset: 0 for hidden and output layer, corresponding row of training example matrix for input layer.
*/
	// Feed-forward step for one layer: A_jj = F(W_j * A_j + bias), computed
	// as a TILE x TILE shared-memory tiled multiply. W_j is
	// nlayer x (clayer+1) with the bias in the last column; A_j is
	// clayer x bsize; A_jj is nlayer x bsize. Requires blockDim == (TILE, TILE).
	// NOTE(review): the bias load W_j[row*(clayer+1)+clayer] is not guarded
	// by row < nlayer, so threads of a ragged bottom tile read past the
	// weight matrix (value discarded by the final guard) — confirm.
	template<typename DATA_T, typename ACT_F, unsigned int TILE>
	__global__ void mmul(
			DATA_T *A_jj,
			DATA_T *W_j,
			DATA_T *A_j,
			ACT_F F,
			unsigned int nlayer,
			unsigned int clayer,
			unsigned int bsize
			)
	{
		__shared__ DATA_T sWj[TILE * TILE];
		__shared__ DATA_T sAj[TILE * TILE];
		__shared__ DATA_T bias[TILE];
		int row = ( blockIdx.y * blockDim.y + threadIdx.y );
		int col = ( blockIdx.x * blockDim.x + threadIdx.x );
		if(threadIdx.x == 0) bias[threadIdx.y] = W_j[row * (clayer + 1) + clayer];
		__syncthreads();
		// Seed the accumulator with the bias so the tiled loop only sums W*A.
		DATA_T Ajj = bias[threadIdx.y];
		int loadOffset = threadIdx.y*TILE + threadIdx.x;
		for(int i = 0;i < ((clayer - 1) / TILE) + 1; i++){
			if( row < nlayer && (i * TILE + threadIdx.x ) < clayer)
				sWj[loadOffset] = W_j[ row * ( clayer + 1 ) + i * TILE + threadIdx.x];// clayer + 1 to avoid bias vector
			else sWj[loadOffset] = 0.0;
			if ( i*TILE + threadIdx.y < clayer && col < bsize )
				sAj[loadOffset] = A_j[(i * TILE + threadIdx.y) * bsize + col];
			else sAj[loadOffset] = 0.0;
			__syncthreads();
			for(int j = 0;j < TILE; j++){
				Ajj += sWj[threadIdx.y * TILE + j] * sAj[j * TILE + threadIdx.x];
			}
			__syncthreads();
		}
		if( row < nlayer && col < bsize )
			A_jj[row * bsize + col ] = F.F(Ajj);
	}
/*
* Kernel that computes the last layer difference between the batch activation matrix and the expected output
* matrix.
*/
template<typename DATA_T>
__global__ void outputD(
DATA_T *D_j,
DATA_T *ExA_j,
DATA_T *A_j,
unsigned int size
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < size){ D_j[i] = ExA_j[i] - A_j[i]; }
}
/*
* Transpose matrix multiplication.
* D_j = (W_j)^T . D_jj
*/
	// Backpropagation through the weights: D_j = (W_j)^T . D_jj, tiled in
	// shared memory. W_j is nlayer x clayer, D_jj is nlayer x bsize, and the
	// result D_j is clayer x bsize. Requires blockDim == (TILE, TILE).
	// The W tile is loaded with threadIdx.x indexing the column (colW) so the
	// global reads stay coalesced; the inner product then reads it through
	// sWj[j*TILE+ty], i.e. transposed.
	template<typename DATA_T, unsigned int TILE>
	__global__ void tmmul(
			DATA_T *D_j,
			DATA_T *W_j,
			DATA_T *D_jj,
			unsigned int clayer,
			unsigned int nlayer,
			unsigned int bsize
			)
	{
		__shared__ DATA_T sWj[TILE * TILE];
		__shared__ DATA_T sDjj[TILE * TILE];
		DATA_T Dj = 0.0;
		int colW = ( blockIdx.y * blockDim.y + threadIdx.x );// by * TILE + ty * clayer + threadIdx.x
		int colD = ( blockIdx.x * blockDim.x + threadIdx.x );
		int loadOffset = threadIdx.y*TILE + threadIdx.x;
		for(int i = 0; i < (nlayer - 1) / TILE + 1 ; i++){
			if( (i * TILE + threadIdx.y) < nlayer && colW < clayer)
				sWj[loadOffset] = W_j[ (i * TILE + threadIdx.y) * clayer + colW ];
			else
				sWj[loadOffset] = 0.0;
			if((i * TILE + threadIdx.y) < nlayer && colD < bsize)
				sDjj[loadOffset] = D_jj[ (i * TILE + threadIdx.y) * bsize + colD ];
			else
				sDjj[loadOffset] = 0.0;
			__syncthreads();
			for(int j=0;j<TILE;j++) Dj += sWj[j * TILE + threadIdx.y] * sDjj[j * TILE + threadIdx.x];
			__syncthreads();
		}
		int row = ( blockIdx.y * blockDim.y + threadIdx.y );
		if( row < clayer && colD < bsize) D_j[row * bsize + colD] = Dj;
	}
/*
* Hadamard product with derivative activation values
*/
	// Delta finalization: D_j *= F.D(W_j * A_j + bias), i.e. a Hadamard
	// product of the backpropagated delta with the activation derivative of
	// the recomputed pre-activation. Same tiling and operand shapes as mmul
	// (W_j: nlayer x (clayer+1) with bias column, A_j: clayer x bsize).
	// Requires blockDim == (TILE, TILE).
	// NOTE(review): as in mmul, the bias load is not guarded by
	// row < nlayer — ragged bottom tiles read past W_j; confirm.
	template<typename DATA_T, typename ACT_F, unsigned int TILE>
	__global__ void hmprod_mmul(
			DATA_T *D_j,
			DATA_T *W_j,
			DATA_T *A_j,
			ACT_F F,
			unsigned int nlayer,
			unsigned int clayer,
			unsigned int bsize
			)
	{
		__shared__ DATA_T sWj[TILE * TILE];
		__shared__ DATA_T sAj[TILE * TILE];
		__shared__ DATA_T bias[TILE];
		int row = ( blockIdx.y * blockDim.y + threadIdx.y );
		int col = ( blockIdx.x * blockDim.x + threadIdx.x );
		if(threadIdx.x == 0) bias[threadIdx.y] = W_j[row * (clayer + 1) + clayer];
		__syncthreads();
		// Accumulates the pre-activation z = W*A + b, starting from the bias.
		DATA_T Dj = bias[threadIdx.y];
		int loadOffset = threadIdx.y*TILE + threadIdx.x;
		for(int i = 0;i < ((clayer - 1) / TILE) + 1; i++){
			if( row < nlayer && (i * TILE + threadIdx.x ) < clayer)
				sWj[loadOffset] = W_j[ row * ( clayer + 1 ) + i * TILE + threadIdx.x];// clayer + 1 to avoid bias vector
			else sWj[loadOffset] = 0.0;
			if ( i*TILE + threadIdx.y < clayer && col < bsize )
				sAj[loadOffset] = A_j[(i * TILE + threadIdx.y) * bsize + col];
			else sAj[loadOffset] = 0.0;
			__syncthreads();
			for(int j = 0;j < TILE; j++) Dj += sWj[threadIdx.y * TILE + j] * sAj[j * TILE + threadIdx.x];
			__syncthreads();
		}
		if( row < nlayer && col < bsize )
			D_j[row * bsize + col ] *= F.D(Dj);
	}
template<typename DATA_T, typename ACT_F, unsigned int TILE>
__global__ void hmprod_mmul_sigmoid(
DATA_T *D_j,
DATA_T *A_j,
unsigned int nlayer,
unsigned int clayer,
unsigned int bsize
)
{
int row = ( blockIdx.y * blockDim.y + threadIdx.y );
int col = ( blockIdx.x * blockDim.x + threadIdx.x );
DATA_T Aj = A_j[row * bsize + col];
if( row < nlayer && col < bsize )
D_j[row * bsize + col ] *= Aj * (1-Aj);
}
/*
* Compute weight update matrices for the current batch.
* A = [ A ones(bsize) ]
* for i = 1 : dsz(2)
* W = W + D(:,i) * A(:,i)';
* end
* W ( nlayer x (clayer + 1))
*/
	// Weight update for one layer: W_j += (lrate / bsize) * D_jj . [A_j; 1]^T,
	// i.e. the batch-averaged outer product of the next-layer deltas
	// (nlayer x bsize) with the current-layer activations (clayer x bsize).
	// The A tile is loaded transposed, and out-of-range A entries are filled
	// with 1.0 so the extra (clayer+1)-th output column accumulates the bias
	// update. Requires blockDim == (TILE, TILE).
	template<typename DATA_T, unsigned int TILE>
	__global__ void tvecpvec(
			DATA_T *W_j,
			DATA_T *D_jj,
			DATA_T *A_j,
			unsigned int nlayer,
			unsigned int bsize,
			unsigned int clayer,
			float lrate
			){
		__shared__ DATA_T sDjj[TILE * TILE];
		__shared__ DATA_T sAj[TILE * TILE];
		DATA_T Wj = 0.0;
		int rowD = (blockIdx.y * blockDim.y + threadIdx.y);
		int rowA = (blockIdx.x * blockDim.x + threadIdx.y);
		for(int i = 0;i < (bsize - 1) / TILE + 1;i++){
			if(rowD < nlayer && (i*TILE + threadIdx.x) < bsize)
				sDjj[threadIdx.y * TILE + threadIdx.x] = D_jj[rowD * bsize + i*TILE + threadIdx.x];
			else
				sDjj[threadIdx.y * TILE + threadIdx.x] = 0.0;
			if(rowA < clayer && (i*TILE + threadIdx.x) < bsize)
				sAj[threadIdx.x * TILE + threadIdx.y] = A_j[rowA * bsize + i*TILE + threadIdx.x];
			else
				sAj[threadIdx.x * TILE + threadIdx.y] = 1.0;//Required to update bias weights//
			__syncthreads();
			for(int j = 0 ; j < TILE; j++)
				Wj += sDjj[threadIdx.y * TILE + j] * sAj[j * TILE + threadIdx.x];
			__syncthreads();
		}
		int col = (blockIdx.x * blockDim.x + threadIdx.x);
		// Scale by the learning rate averaged over the batch before applying.
		Wj *= (lrate / bsize);
		if( rowD < nlayer && col < clayer + 1)//clayer + 1 to update bias weights.
			W_j[rowD * (clayer + 1) + col] += Wj;
	}
template<typename DATA_T,unsigned int init>
__global__ void initVector(DATA_T *M, unsigned int rows, unsigned int cols){
int i = threadIdx.x + blockDim.x * blockIdx.x;
while( i < rows * cols){
if (init == ZEROS ) M[i] = 0.0;
else if (init == ONES) M[i] = 1.0;
else if (init == RANDOM) M[i] = cudaUniRand(i);
i+=gridDim.x * blockDim.x;
}
}
template<typename DATA_T>
__global__ void printGPU2(DATA_T *A, unsigned int row, unsigned int col){
for(int i =0;i<row;i++){
printf("[ ");
for(int j=0;j<col;j++){
printf("%.1f ", A[i*col + j]);
}
printf(" ]\n");
}
}
	// Host-side debug helper: copies a row x col device matrix to a freshly
	// allocated host buffer and prints it in MATLAB matrix syntax
	// ("[ ... ; ... ]") so the output can be pasted into Octave/MATLAB.
	// NOTE(review): AA is never released, so every call leaks row*col
	// elements; free it with the deallocator matching allocHostMem — confirm
	// which allocator that helper uses.
	template<typename DATA_T>
	__host__ void printGPU(DATA_T *A, unsigned int row, unsigned int col){
		DATA_T *AA;
		allocHostMem<DATA_T>(&AA,sizeof(DATA_T)*row*col,"Error allocating AA in printGPU");
		safeCpyToHost<DATA_T>(AA,A,sizeof(DATA_T)*row*col,"Error copying AA in printGPU");
		printf("[ ");
		for(int i =0;i<row;i++){
			for(int j=0;j<col;j++){
				printf("%.1f ", AA[i*col + j]);
			}
			printf(" ;\n");
		}
		printf(" ]\n");
	}
}
namespace gnn{
	// Initializes every layer's weight matrix with random values: seeds the
	// device RNG state pool once, then launches one randomWeights kernel per
	// weight matrix (layers-1 of them), synchronizing and checking errors
	// after each launch. Requires createLayers to have built `network` first.
	template<typename DATA_T, typename ACT_F>
	void GNeuralNetwork<DATA_T,ACT_F>::randomInit(){
		if(network == NULL) vz::error("Network architecture missing. Use createLayers first!");
		//std::cout<<"Initializing random weights: "<<std::endl;
		hipSetDevice(CUDA_DEVICE);
		gnn_kernels::cudaInitRandStates();
		for(int i = 0;i < layers-1;i++){
			//std::cout<<network[i].clayer << "{}" << network[i].nlayer << std::endl;
			unsigned int vector_size = network[i].nlayer * network[i].clayer;
			dim3 grid = grid_1D(vector_size,BSIZE);
			dim3 block = block_1D(BSIZE);
			hipLaunchKernelGGL(( gnn_kernels::randomWeights<DATA_T>), dim3(grid),dim3(block), 0, 0, network[i].W_j,network[i].nlayer,network[i].clayer);
			handleDeviceErrors(hipDeviceSynchronize(),"Error executing randomWeights kernel");
		}
	}
template<typename DATA_T, typename ACT_F>
void GNeuralNetwork<DATA_T,ACT_F>::train(){
hipSetDevice(CUDA_DEVICE);
if(network == NULL) vz::error("Network architecture missing. Use createLayers first!");
if(bsize == 0) vz::error("Batch size not set. Use setBatchSize first!");
unsigned int nbatch = this->transpose ? dimEx.first / this->bsize : dimEx.second / this->bsize;
for(int i = 0; i< nbatch; i++){
/*
* Load current batch of training examples.
*/
unsigned int bRow = i * this->bsize;
dim3 lgrid((batch[0].clayer-1)/LTILE + 1, (batch[0].bsize-1)/LTILE + 1);
dim3 lblock(LTILE,LTILE);
hipLaunchKernelGGL(( gnn_kernels::loadT<DATA_T,LTILE>), dim3(lgrid),dim3(lblock), 0, 0,
batch[0].A_j,dExamples,
batch[0].clayer,batch[0].bsize,
dimEx.first,dimEx.second,
bRow,0
);
handleDeviceErrors(hipDeviceSynchronize(),"Error executing loadT for X batch");
if(DEBUG_T){
//printf("A=");
gnn_kernels::printGPU(batch[0].A_j,batch[0].clayer,batch[0].bsize);
hipDeviceSynchronize();
printf("sum(sum(round(A - M(%d:%d,1:%d)')))\n",bRow + 1, (i+1)*batch[0].bsize,batch[0].clayer);
}
/*
* Neural network feed forward step.
* - W = ( nlayer x (clayer + 1) ), A(i) = ( clayer x bsize ) , A(i+1) = (nlayer x bsize)
* A[jj] = A[j] * W[j]
*/
for(int j = 0;j < this->layers - 1;j++){
dim3 agrid((batch[j+1].bsize - 1)/TTILE + 1, (batch[j+1].clayer - 1)/TTILE + 1);
dim3 ablock(TTILE,TTILE);
hipLaunchKernelGGL(( gnn_kernels::mmul<DATA_T,ACT_F,TTILE>), dim3(agrid),dim3(ablock), 0, 0,
batch[j+1].A_j,
network[j].W_j,
batch[j].A_j,
F,
network[j].nlayer,
network[j].clayer - 1,// Ignore bias vector from the multiplication//
batch[j].bsize
);
handleDeviceErrors(hipDeviceSynchronize(),"Error executing batch activation");
if(DEBUG_GNN){
printf("Ajj= ");
gnn_kernels::printGPU(batch[j+1].A_j,batch[j+1].clayer,batch[j+1].bsize);
hipDeviceSynchronize(); //printf("------------------>\n");
printf(";W= ");
gnn_kernels::printGPU(network[j].W_j,network[j].nlayer,network[j].clayer);
hipDeviceSynchronize(); //printf("------------------>\n");
printf(";Aj= ");
gnn_kernels::printGPU(batch[j].A_j,batch[j].clayer,batch[j].bsize);
hipDeviceSynchronize();
printf("Ejj=act(W,Aj,0);\n");
printf("diff0%d = sum(sum(round(Ejj-Ajj)))\n",j);
}
}
/*
* Output layer Delta computation.
* Dl = (Y - Al)
*
*/
dim3 ogrid = grid_1D(batch[layers-1].clayer * batch[layers-1].bsize, BSIZE);
dim3 oblock = block_1D(BSIZE);
bRow = i * this->bsize;
dim3 lygrid((batch[layers-1].clayer-1)/LTILE + 1, (batch[layers-1].bsize-1)/LTILE + 1);
dim3 lyblock(LTILE,LTILE);
hipLaunchKernelGGL(( gnn_kernels::loadT<DATA_T,LTILE>), dim3(lygrid),dim3(lyblock), 0, 0,
batch[layers-1].Y,dExamples,
batch[layers-1].clayer,batch[layers-1].bsize,
dimEx.first,dimEx.second,
bRow,batch[0].clayer
);
if(DEBUG_T){
handleDeviceErrors(hipDeviceSynchronize(),"Error executing loadT for Y batch");
printf("Y=");
gnn_kernels::printGPU(batch[layers-1].Y,batch[layers-1].clayer,batch[layers-1].bsize);
hipDeviceSynchronize();
printf("sum(sum(round(Y - M(%d:%d,%d:%d)')))\n",bRow + 1, (i+1)*batch[0].bsize,batch[0].clayer+1,batch[0].clayer+batch[layers-1].clayer);
}
hipLaunchKernelGGL(( gnn_kernels::outputD<DATA_T>), dim3(ogrid),dim3(oblock), 0, 0,
batch[layers-1].D_j,
batch[layers-1].Y,
batch[layers-1].A_j,// Dj = Y - Aj
batch[layers-1].clayer * batch[layers-1].bsize
);
handleDeviceErrors(hipDeviceSynchronize(),"Error executing outputD kernel");
if(DEBUG_GNN){
printf("Y=");
gnn_kernels::printGPU(batch[layers-1].Y,batch[layers-1].clayer,batch[layers-1].bsize);
hipDeviceSynchronize();
//if(DEBUG_GNN){
printf(";Aj=");
gnn_kernels::printGPU(batch[layers-1].A_j,batch[layers-1].clayer,batch[layers-1].bsize);
hipDeviceSynchronize();
printf(";Dl=");
gnn_kernels::printGPU(batch[layers-1].D_j,batch[layers-1].clayer,batch[layers-1].bsize);
hipDeviceSynchronize();
printf(";El = (Y - Aj);\n");
printf("diffY = sum(sum(round(El - Dl)))\n");
}
/*
* Backpropagation transpose matrix multiplication.
* for i = layers-1 : > 1 : i--
* batch[i-1].D_j = network[i-1].W_j * batch[i].D_j
* grid = (batch[i-1].bsize / TILE + 1), batch[i-1].clayer / TILE + 1
* block = (TILE, TILE)
* D[j] = <W[j] * D[jj]> .* F.D(W[j] * A[j])
*/
for(int j = layers-1; j > 1 ; j--){
//printf("BP(%d)\n",j);
dim3 dgrid((batch[j-1].bsize - 1) / TTILE + 1, (batch[j-1].clayer - 1) / TTILE + 1);
dim3 dblock(TTILE, TTILE);
hipLaunchKernelGGL(( gnn_kernels::tmmul<DATA_T,TTILE>), dim3(dgrid),dim3(dblock), 0, 0,
batch[j-1].D_j,//(clayer x bsize)
network[j-1].W_j,//(nlayer x clayer)
batch[j].D_j,// (nlayer x bsize)
network[j-1].clayer,
network[j-1].nlayer,
batch[j].bsize
);
handleDeviceErrors(hipDeviceSynchronize(),"Error executing tmmul kernel");
if(DEBUG_GNN){
printf("Djj=");
gnn_kernels::printGPU(batch[j-1].D_j,batch[j-1].clayer,batch[j-1].bsize);
hipDeviceSynchronize(); //printf("------------------>\n");
printf(";W=");
gnn_kernels::printGPU(network[j-1].W_j,network[j-1].nlayer,network[j-1].clayer);
hipDeviceSynchronize();//printf("------------------>\n");
printf(";Dj=");
gnn_kernels::printGPU(batch[j].D_j,batch[j].clayer,batch[j].bsize);
hipDeviceSynchronize();//printf("------------------>\n");
printf("Ejj=W' * Dj; \n");
printf("diff1%d = sum(sum(round(Ejj(1:%d,:) - Djj)))\n",j-1,network[j-1].clayer-1);
}
}
/*
* Final step for delta computation.
* //D[jj] = D[jj] .* F.D(W[j] * A(j))
*/
for(int j = 1; j < layers-1; j++){
dim3 dgrid((batch[j].bsize - 1) / TTILE + 1, (batch[j].clayer - 1) / TTILE + 1);
dim3 dblock(TTILE, TTILE);
if(DEBUG_GNN){
printf("Djj=");
gnn_kernels::printGPU(batch[j].D_j,batch[j].clayer,batch[j].bsize);
hipDeviceSynchronize(); //printf("------------------>\n");
printf(";Wj=");
gnn_kernels::printGPU(network[j-1].W_j,network[j-1].nlayer,network[j-1].clayer);
hipDeviceSynchronize();//printf("------------------>\n");
printf(";Aj=");
gnn_kernels::printGPU(batch[j-1].A_j,batch[j-1].clayer,batch[j-1].bsize);
hipDeviceSynchronize();//printf("------------------>\n");
}
if(strcmp(F.TAG,"Sigmoid")!=0){
hipLaunchKernelGGL(( gnn_kernels::hmprod_mmul<DATA_T,ACT_F,TTILE>), dim3(dgrid), dim3(dblock), 0, 0,
batch[j].D_j,
network[j-1].W_j,
batch[j-1].A_j,
F,
network[j-1].nlayer,
network[j-1].clayer-1,
batch[j-1].bsize
);
}else{
hipLaunchKernelGGL(( gnn_kernels::hmprod_mmul_sigmoid<DATA_T,ACT_F,TTILE>), dim3(dgrid), dim3(dblock), 0, 0,
batch[j].D_j,
batch[j].A_j,
network[j-1].nlayer,
network[j-1].clayer-1,
batch[j-1].bsize
);
}
handleDeviceErrors(hipDeviceSynchronize(),"Error executing tmmul kernel");//TODO not necessary
if(DEBUG_GNN){
printf(";Ejj=");
gnn_kernels::printGPU(batch[j].D_j,batch[j].clayer,batch[j].bsize);
hipDeviceSynchronize(); //printf("------------------>\n");
printf(";Djj = Djj .* act(Wj,Aj,1);\n");
printf("diff2%d=sum(sum(round(Ejj - Djj)))\n",j);
}
}
/*
* Weight and bias update
* W[j] = W[j] + (lrate/bsize) * Sum( D[jj] <> A[j] )
*/
for(int j = 0;j<layers-1; j++){
dim3 grid((network[j].clayer - 1)/TTILE + 1, (network[j].nlayer - 1)/TTILE + 1 );
dim3 block(TTILE,TTILE);
if(DEBUG_GNN){
printf("Wj=");
gnn_kernels::printGPU(network[j].W_j,network[j].nlayer,network[j].clayer);
hipDeviceSynchronize();
printf(";Djj=");
gnn_kernels::printGPU(batch[j+1].D_j,batch[j+1].clayer,batch[j+1].bsize);
hipDeviceSynchronize();
printf(";Aj=");
gnn_kernels::printGPU(batch[j].A_j,batch[j].clayer,batch[j].bsize);
hipDeviceSynchronize();
}
hipLaunchKernelGGL(( gnn_kernels::tvecpvec<DATA_T,TTILE>), dim3(grid),dim3(block), 0, 0,
network[j].W_j,
batch[j+1].D_j,
batch[j].A_j,
network[j].nlayer,
batch[j].bsize,
network[j].clayer-1,
this->lrate
);
handleDeviceErrors(hipDeviceSynchronize(),"Error executing tvecpvec kernel");//TODO not needed to wait
if(DEBUG_GNN){
printf("Ej=");
gnn_kernels::printGPU(network[j].W_j,network[j].nlayer,network[j].clayer);
hipDeviceSynchronize();
printf("diff3%d=sum(sum(round(Ej-tvecpvec(Wj,Djj,Aj,%f,%d))))\n",j,this->lrate,batch[j].bsize);
}
}
}
}
/*
 * Evaluate the trained network on the test set (dTest) and print accuracy.
 * Per batch: (1) transpose-load the examples into batch[0].A_j, (2) feed
 * forward through every layer, (3) transpose-load the expected outputs into
 * batch[layers-1].Y, (4) copy Y and the final activations to the host and
 * count examples whose arg-max prediction matches the label.
 */
template<typename DATA_T, typename ACT_F>
void GNeuralNetwork<DATA_T,ACT_F>::classify(){
    hipSetDevice(CUDA_DEVICE);
    if(network == NULL) vz::error("Network architecture missing. Use createLayers first!");
    if(bsize == 0) vz::error("Batch size not set. Use setBatchSize first!");
    // Whole batches only; a trailing partial batch is not evaluated.
    unsigned int nbatch = this->transpose ? dimT.first / this->bsize : dimT.second / this->bsize;
    unsigned int count = 0;
    if(DEBUG_GNN) std::cout<<dimT.first << "," << dimT.second << std::endl;
    if(DEBUG_GNN) std::cout<< "nbatch: " << nbatch << std::endl;
    DATA_T *Y,*A;
    allocHostMem<DATA_T>(&Y,sizeof(DATA_T)*this->bsize*batch[layers-1].clayer,"Error allocating mem for Y in classify");
    allocHostMem<DATA_T>(&A,sizeof(DATA_T)*this->bsize*batch[layers-1].clayer,"Error allocating mem for A in classify");
    for(unsigned int i = 0; i < nbatch; i++){
        /*
         * Load current batch of test examples.
         */
        if(this->transpose){
            unsigned int bRow = i * this->bsize;
            dim3 lgrid((batch[0].clayer-1)/TTILE + 1, (batch[0].bsize-1)/TTILE + 1);
            dim3 lblock(TTILE,TTILE);
            hipLaunchKernelGGL(( gnn_kernels::loadT<DATA_T,TTILE>), dim3(lgrid),dim3(lblock), 0, 0,
                batch[0].A_j,dTest,
                batch[0].clayer,batch[0].bsize,
                dimT.first,dimT.second,
                bRow,0);
            handleDeviceErrors(hipDeviceSynchronize(),"Error executing loadT for X batch on classify");
        }
        /*
         * Neural network feed forward step.
         * - W = ( nlayer x (clayer + 1) ), A(i) = ( clayer x bsize ) , A(i+1) = (nlayer x bsize)
         * A[jj] = A[j] * W[j]
         */
        for(int j = 0;j < this->layers - 1;j++){
            dim3 agrid((batch[j+1].bsize - 1)/TTILE + 1, (batch[j+1].clayer - 1)/TTILE + 1);
            dim3 ablock(TTILE,TTILE);
            hipLaunchKernelGGL(( gnn_kernels::mmul<DATA_T,ACT_F,TTILE>), dim3(agrid),dim3(ablock), 0, 0,
                batch[j+1].A_j,
                network[j].W_j,
                batch[j].A_j,
                F,
                network[j].nlayer,
                network[j].clayer - 1,// Ignore bias vector from the multiplication//
                batch[j].bsize
            );
            handleDeviceErrors(hipDeviceSynchronize(),"Error executing batch activation");
        }
        // Expected outputs live in the columns after the input features
        // (horizontal offset batch[0].clayer) of the test matrix.
        unsigned int bRow = i * this->bsize;
        dim3 lgrid((batch[layers-1].clayer-1)/TTILE + 1, (batch[layers-1].bsize-1)/TTILE + 1);
        dim3 lblock(TTILE,TTILE);
        hipLaunchKernelGGL(( gnn_kernels::loadT<DATA_T,TTILE>), dim3(lgrid),dim3(lblock), 0, 0,
            batch[layers-1].Y,dTest,
            batch[layers-1].clayer,batch[layers-1].bsize,
            dimT.first,dimT.second,
            bRow,batch[0].clayer
        );
        handleDeviceErrors(hipDeviceSynchronize(),"Error executing loadT for Y batch on classify");
        safeCpyToHost<DATA_T>(Y,batch[layers-1].Y,sizeof(DATA_T)*batch[layers-1].clayer*this->bsize,"Error transferring Y from GPU");
        safeCpyToHost<DATA_T>(A,batch[layers-1].A_j,sizeof(DATA_T)*batch[layers-1].clayer*this->bsize,"Error transferring A_j from GPU");
        // Arg-max over output units for both expected (Y) and predicted (A)
        // per example column. NOTE(review): maxY/maxA start at 0, so strictly
        // negative activations would leave index 0 selected — fine for
        // sigmoid-style outputs in [0,1]; confirm for other output activations.
        for(int x=0; x < bsize; x++){
            DATA_T maxY = 0, maxA=0;
            int indexY = 0, indexA=0;
            for(int y = 0; y < batch[layers-1].clayer; y++){
                if(Y[y * bsize + x] > maxY){ maxY = Y[y * bsize + x]; indexY = y;}
                if(A[y * bsize + x] > maxA){ maxA = A[y * bsize + x]; indexA = y;}
            }
            if(indexY == indexA ) count++;
        }
    }
    // fix: "%2.f" is field-width 2 with ZERO decimals; "%.2f" (two decimal
    // places) is the intended percentage format.
    // NOTE(review): the denominator dimT.first counts all test rows including
    // any skipped partial batch — confirm that is the intended accuracy base.
    printf("Accuracy: %.2f, %u, %lu\n",(((float)count)/dimT.first)*100,count, dimT.first);
    hipHostFree(Y); hipHostFree(A);
}
/*
* Testing methods
*/
/*
 * Time a single launch of the bench_test_activation kernel for the configured
 * activation functor F. An initial untimed launch warms up the device so the
 * measured run excludes one-time initialization cost.
 */
template<typename DATA_T, typename ACT_F>
void GNeuralNetwork<DATA_T,ACT_F>::bench_act(){
    hipSetDevice(0);
    dim3 bblock(512,1,1);
    dim3 bgrid(128,1,1);
    // Warm-up launch (not timed).
    hipLaunchKernelGGL(( gnn_kernels::bench_test_activation<ACT_F>), dim3(bgrid),dim3(bblock), 0, 0, this->F);
    hipDeviceSynchronize();
    // Timed launch, labeled with the activation's tag.
    std::string label("Benchmark ");
    label.append(F.TAG);
    Time<millis> timer;
    timer.start();
    hipLaunchKernelGGL(( gnn_kernels::bench_test_activation<ACT_F>), dim3(bgrid),dim3(bblock), 0, 0, F);
    hipDeviceSynchronize();
    timer.lap(label);
}
/*
 * Dump every layer's weight matrix to stdout in Octave/Matlab syntax
 * (W0=[...], W1=[...], ...) followed by the activation recipe for each layer.
 * Matrices are staged through a temporary pinned host buffer.
 */
template<typename DATA_T, typename ACT_F>
void GNeuralNetwork<DATA_T,ACT_F>::print_weights(){
    DATA_T *cW_j;
    // NOTE(review): other methods select CUDA_DEVICE; confirm device 0 is intended here.
    hipSetDevice(0);
    for(int i = 0;i < layers-1;i++){
        unsigned int size = network[i].nlayer * network[i].clayer;
        allocHostMem<DATA_T>(&cW_j,sizeof(DATA_T)*size, "Error Allocating Host Weight Matrix");
        safeCpyToHost<DATA_T>(cW_j,network[i].W_j,sizeof(DATA_T)*size, "Error Allocating Copying Weight Matrix From Device");
        printf("W%d=[",i);
        for(unsigned int j = 0;j<size;j++){
            std::cout<<cW_j[j] << " ";
            if((j+1)%network[i].clayer == 0) std::cout<<std::endl;
        }
        printf("]");
        std::cout<<std::endl;
        // fix: the staging buffer was re-allocated every iteration and never
        // released, leaking one matrix per layer on every call.
        hipHostFree(cW_j);
    }
    for(int i = 0;i < layers-1;i++) printf("A%d=act(W%d,A%d,0)\n",i+1,i,i);
}
/*
 * Micro-benchmark / debug driver for the core GPU kernels (MMUL, TMMUL,
 * MHPROD, TVECPVEC). Sizes: m -> nlayer, n+1 -> clayer (the +1 is the bias
 * column), k -> bsize. Operand matrices are filled with random values on the
 * device; `debug` dumps operands/results for manual verification.
 */
template<typename DATA_T, typename ACT_F>
void GNeuralNetwork<DATA_T,ACT_F>::bench_test_kernels(UnitTest test,unsigned int m, unsigned int n, unsigned int k,
bool debug){
    unsigned int nlayer = m, clayer = n + 1, bsize = k;
    // fix: hostD is only allocated on some branches; start it at NULL so the
    // common cleanup at the end never frees an indeterminate pointer
    // (previously UB on the TVECPVEC path).
    DATA_T *hostA, *hostB, *hostC, *hostD = NULL;
    DATA_T *devA, *devB, *devC;
    allocDevMem<DATA_T>(&devA,sizeof(DATA_T) * nlayer * clayer, "Error allocating devA memory");
    allocDevMem<DATA_T>(&devB,sizeof(DATA_T) * clayer * bsize, "Error allocating devB memory");
    allocDevMem<DATA_T>(&devC,sizeof(DATA_T) * nlayer * bsize, "Error allocating devC memory");
    allocHostMem<DATA_T>(&hostA,sizeof(DATA_T) * nlayer * clayer, "Error allocating devA memory");
    allocHostMem<DATA_T>(&hostB,sizeof(DATA_T) * clayer * bsize, "Error allocating devB memory");
    allocHostMem<DATA_T>(&hostC,sizeof(DATA_T) * nlayer * bsize, "Error allocating devC memory");
    // Randomize all three device operands.
    dim3 rgrid;
    dim3 rblock = block_1D(256);
    rgrid = grid_1D(nlayer * clayer,256);
    hipLaunchKernelGGL(( gnn_kernels::randomWeights<DATA_T>), dim3(rgrid),dim3(rblock), 0, 0, devA,nlayer, clayer);
    rgrid = grid_1D(clayer * bsize,256);
    hipLaunchKernelGGL(( gnn_kernels::randomWeights<DATA_T>), dim3(rgrid),dim3(rblock), 0, 0, devB,clayer, bsize);
    rgrid = grid_1D(nlayer * bsize,256);
    hipLaunchKernelGGL(( gnn_kernels::randomWeights<DATA_T>), dim3(rgrid),dim3(rblock), 0, 0, devC,nlayer,bsize);
    if(test == MMUL){
        // Feed-forward multiply: devC = F(devA . devB + bias).
        dim3 agrid((bsize - 1)/TTILE + 1, (nlayer - 1)/TTILE + 1);
        dim3 ablock(TTILE,TTILE);
        Time<millis> t;
        t.start();
        hipLaunchKernelGGL(( gnn_kernels::mmul<DATA_T,ACT_F,TTILE>), dim3(agrid),dim3(ablock), 0, 0,
            devC,
            devA,
            devB,
            F,
            nlayer,
            clayer - 1,
            bsize
        );
        handleDeviceErrors(hipDeviceSynchronize(),"Error executing batch mmul");
        t.lap("GPU serial mmul elapsed time");
        allocHostMem<DATA_T>(&hostD,sizeof(DATA_T) * nlayer * bsize, "Error allocating devC memory");
        safeCpyToHost<DATA_T>(hostA,devA,sizeof(DATA_T)*nlayer*clayer,"Error copying devA to host");
        safeCpyToHost<DATA_T>(hostB,devB,sizeof(DATA_T)*clayer*bsize,"Error copying devB to host");
        safeCpyToHost<DATA_T>(hostC,devC,sizeof(DATA_T)*nlayer*bsize,"Error copying devC to host");
        // CPU reference check currently disabled.
        /*t.start();
        for(int x = 0; x < nlayer; x++){//3
        for (int y = 0; y < bsize; y++){//3
        hostD[x * bsize + y] = hostA[x * (clayer) + clayer - 1];
        for (int z = 0; z < clayer - 1; z++){//2
        hostD[x * bsize + y] += hostA[x * (clayer) + z] * hostB[z * bsize + y];
        }
        hostD[x * bsize + y] = F.f(hostD[x * bsize + y]);
        }
        }
        if(!debug) t.lap("CPU serial mmul elapsed time");
        if(debug){
        gnn_kernels::printGPU(devC,m,k);
        hipDeviceSynchronize(); printf("<----->\n");
        gnn_kernels::printGPU(devA,m,n);
        hipDeviceSynchronize(); printf("<----->\n");
        gnn_kernels::printGPU(devB,n,k);
        hipDeviceSynchronize(); printf("<----->\n");
        for(int x = 0; x<m * k;x++){
        printf("%.4f ", hostD[x]);
        if((x+1)%k==0) printf("\n");
        }
        }else{
        for(int x = 0; x<nlayer * bsize;x++){
        if(((hostD[x] - hostC[x]) > 0.001 )){
        printf("Result matrices do not match(%f,%f)!!!\n",hostD[x],hostC[x] );
        }
        }
        }
        hipHostFree(hostD);*/
    }else if(test == TMMUL){
        // devB = devA * devC
        // (n x k) = (m x n) (m x k) <=> (n x k) = (m x n)^T (m x k) <=> (n x k) = (n x m) (m x k)
        Time<millis> t;
        dim3 agrid((bsize - 1)/TTILE + 1, (clayer - 1)/TTILE + 1);
        dim3 ablock(TTILE,TTILE);
        t.start();
        hipLaunchKernelGGL(( gnn_kernels::tmmul<DATA_T,TTILE>), dim3(agrid),dim3(ablock), 0, 0,
            devB,//n
            devA,//
            devC,//
            clayer,
            nlayer,
            bsize
        );
        handleDeviceErrors(hipDeviceSynchronize(),"Error executing tmmul kernel");
        t.lap("GPU serial tmmul elapsed time");
        allocHostMem<DATA_T>(&hostD,sizeof(DATA_T) * clayer * bsize, "Error allocating devC memory");
        safeCpyToHost<DATA_T>(hostA,devA,sizeof(DATA_T)*nlayer*clayer,"Error copying devA to host");
        safeCpyToHost<DATA_T>(hostB,devB,sizeof(DATA_T)*clayer*bsize,"Error copying devB to host");
        safeCpyToHost<DATA_T>(hostC,devC,sizeof(DATA_T)*nlayer*bsize,"Error copying devC to host");
        // CPU reference check currently disabled.
        /*t.start();
        for(int x = 0; x < clayer; x++){//3
        for (int y = 0; y < bsize; y++){//3
        hostD[x * bsize + y] = 0.0;
        for (int z = 0; z < nlayer; z++){//2
        hostD[x * bsize + y] += hostA[z * clayer + x] * hostC[z * bsize + y];
        }
        }
        }
        if(!debug) t.lap("CPU serial mmul elapsed time");
        if(debug){
        //print_grid(agrid,ablock);
        //gnn_kernels::printGPU(devA,nlayer,clayer);
        //hipDeviceSynchronize(); printf("<----->\n");
        //gnn_kernels::printGPU(devC,nlayer,bsize);
        //hipDeviceSynchronize(); printf("<----->\n");
        gnn_kernels::printGPU(devB,clayer,bsize);
        hipDeviceSynchronize();
        for(int x = 0; x<clayer * bsize;x++){
        printf("%.4f ", hostD[x]);
        if((x+1)%k==0) printf("\n");
        }
        }else{
        for(int x = 0; x<clayer * bsize;x++){
        if(((hostD[x] - hostB[x]) > 0.001 )){
        printf("Result matrices do not match(%f,%f)!!!\n",hostD[x],hostB[x] );
        }
        }
        }*/
        hipHostFree(hostD);
        // fix: ownership released above — null the pointer so the common
        // cleanup below does not free it a second time (previous double free).
        hostD = NULL;
    }else if (test == MHPROD){
        dim3 dgrid((bsize - 1) / TTILE + 1, (clayer - 1) / TTILE + 1);
        dim3 dblock(TTILE, TTILE);
        if(debug){
            printf("D=");
            gnn_kernels::printGPU(devC,nlayer,bsize);
            hipDeviceSynchronize();
            printf("W=");
            gnn_kernels::printGPU(devA,nlayer,clayer);
            hipDeviceSynchronize();
            printf("A=");
            gnn_kernels::printGPU(devB,clayer-1,bsize);
            hipDeviceSynchronize();
        }
        Time<millis> t;
        t.start();
        hipLaunchKernelGGL(( gnn_kernels::hmprod_mmul<DATA_T,ACT_F,TTILE>), dim3(dgrid), dim3(dblock), 0, 0,
            devC,
            devA,
            devB,
            F,
            nlayer,
            clayer-1,
            bsize
        );
        handleDeviceErrors(hipDeviceSynchronize(),"Error executing hmprod_tmmul kernel");
        t.lap("GPU serial hmprod elapsed time");
        allocHostMem<DATA_T>(&hostD,sizeof(DATA_T) * nlayer * bsize, "Error allocating devC memory");
        safeCpyToHost<DATA_T>(hostA,devA,sizeof(DATA_T)*nlayer*clayer,"Error copying devA to host");
        safeCpyToHost<DATA_T>(hostB,devB,sizeof(DATA_T)*clayer*bsize,"Error copying devB to host");
        safeCpyToHost<DATA_T>(hostC,devC,sizeof(DATA_T)*nlayer*bsize,"Error copying devC to host");
        /*for(int x = 0; x < nlayer; x++){//3
        for (int y = 0; y < bsize; y++){//3
        hostD[x * bsize + y] = hostA[x * (clayer) + clayer - 1];
        for (int z = 0; z < clayer - 1; z++){//2
        hostD[x * bsize + y] += hostA[x * (clayer) + z] * hostB[z * bsize + y];
        }
        hostD[x * bsize + y] = F.f(hostD[x * bsize + y]);
        }
        }*/
        if(debug){
            printf("R=");
            gnn_kernels::printGPU(devC,nlayer,bsize);
            hipDeviceSynchronize();
            // NOTE(review): hostD is never filled here (the CPU reference above
            // is commented out), so this dump prints uninitialized values.
            for(int x = 0; x<nlayer * bsize;x++){
                printf("%.4f ", hostD[x]);
                if((x+1)%k==0) printf("\n");
            }
        }
    }else if( test == TVECPVEC ){
        dim3 grid((clayer - 1)/TTILE + 1, (nlayer - 1)/TTILE + 1 );
        dim3 block(TTILE,TTILE);
        if(false){
            //print_grid(grid,block);
            printf("W=");
            gnn_kernels::printGPU(devA,nlayer,clayer);
            hipDeviceSynchronize();
            printf("D=");
            gnn_kernels::printGPU(devC,nlayer,bsize);
            hipDeviceSynchronize();
            printf("A=");
            gnn_kernels::printGPU(devB,clayer-1,bsize);
            hipDeviceSynchronize();
            printf("E=tvecpvec(W,D,A,%f,%d)\n",0.3233,bsize);
        }
        Time<millis> t;
        t.start();
        hipLaunchKernelGGL(( gnn_kernels::tvecpvec<DATA_T,TTILE>), dim3(grid),dim3(block), 0, 0,
            devA,
            devC,
            devB,
            nlayer,
            bsize,
            clayer-1,
            0.0231
        );
        handleDeviceErrors(hipDeviceSynchronize(),"Error executing tvecpvec kernel");
        t.lap("GPU serial tvecpvec elapsed time");
        if(debug){
            //printf("R=");
            gnn_kernels::printGPU(devA,nlayer,clayer);
            hipDeviceSynchronize();
            //printf("round(R-E)\n");
        }
    }
    hipFree(devA); hipFree(devB); hipFree(devC);
    hipHostFree(hostA); hipHostFree(hostB); hipHostFree(hostC);
    if(hostD != NULL) hipHostFree(hostD);
    // NOTE(review): hipDeviceReset() tears down ALL device allocations
    // (including network weights) — acceptable for a standalone benchmark,
    // but confirm this method is never called mid-training.
    hipDeviceReset();
}
template class GNeuralNetwork<float,gnn_actf::Sigmoid>;
template class GNeuralNetwork<float,gnn_actf::FSigmoid>;
template class GNeuralNetwork<float,gnn_actf::Arctan>;
//template class GNeuralNetwork<double,gnn_actf::Sigmoid>;
//template class GNeuralNetwork<double,gnn_actf::FSigmoid>;
//template class GNeuralNetwork<double,gnn_actf::Arctan>;
}
| f90e7b9a1f5a5dbc49a35925fadd9a00781332d4.cu | #include "GNNConfig.h"
#include "../common/Time.h"
/*
* @author Vasileios Zois
* @email vzois@usc.edu
*
* CUDA neural network implementation
*/
#define MBATCH 128
#define TTILE 32
#define LTILE 32
#define DPT 4 //DATA PER THREADS
#define BSIZE 512
namespace gnn_kernels{
static __device__ curandState randDevStates[RAND_STATES];
/*
 * Draw a uniformly distributed float from the pooled curand states.
 * The caller's id is folded into the pool with a modulus, so any number of
 * threads can share the RAND_STATES preallocated generators.
 */
__device__ float cudaUniRand(unsigned int tid){
    curandState *slot = &randDevStates[tid % RAND_STATES];
    return curand_uniform(slot);
}
/*
 * Each thread initializes one entry of the device-wide curand state pool.
 * Intended to be launched with (at least) RAND_STATES total threads.
 */
__global__ void cudaSetupRandStatesKernel(unsigned int seed){
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    // fix: use the global thread index as the curand sequence number — the
    // previous code passed blockIdx.x, giving every thread within a block the
    // SAME sequence, i.e. identical random streams in randDevStates. Also
    // bounds-guard the write in case the grid over-covers RAND_STATES.
    if (i < RAND_STATES)
        curand_init(seed, i, 0, &randDevStates[i]);
}
/*
 * Host-side setup: seed the device curand pool (RAND_STATES generators) from
 * a host RNG, then block until initialization has completed.
 */
__host__ void cudaInitRandStates(){
    Utils<unsigned int> util;
    unsigned int seed = util.uni(UINT_MAX);
    dim3 g = grid_1D(RAND_STATES,RAND_BLOCK_THREADS);
    dim3 b = block_1D(RAND_BLOCK_THREADS);
    cudaSetupRandStatesKernel<<<g,b>>>(seed);
    handleDeviceErrors(cudaDeviceSynchronize(),"Error initializing random states");
}
/*
* Testing activation functions on kernels.
*/
/*
 * Synthetic workload for timing an activation functor: each thread evaluates
 * F.F over blockDim.x inputs. Used only by bench_act().
 */
template<typename ACT_F>
__global__ void bench_test_activation(ACT_F F){
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    float a = 0.0f;
    for(int j = 0; j<blockDim.x;j++){
        a+= F.F((float)i*j);
    }
    // fix: the accumulator was unused, so the compiler was free to dead-code
    // eliminate the entire benchmarked loop. Sinking it through a volatile
    // forces the computation without any observable side effect.
    volatile float sink = a;
    (void)sink;
}
/*
* Initialize matrices random weights
*/
/*
 * Fill a (rows x cols) weight matrix with uniform random values in (-1,1)
 * scaled by sqrt(6)/(rows+cols); the last column of each row (the bias
 * weight) is zeroed. One thread per element.
 * NOTE(review): classic Glorot/Xavier scaling is sqrt(6/(fan_in+fan_out)),
 * i.e. the sqrt over the whole fraction — confirm sqrtf(6.0f)/(rows+cols)
 * is intentional before changing the math.
 */
template<typename DATA_T>
__global__ void randomWeights(DATA_T *W_j,unsigned int rows, unsigned int cols){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if( i < rows * cols){
        if((i+1) % cols == 0){
            W_j[i] = 0.0; // bias weight starts at zero
        }else{
            // fix: single-precision literals keep the expression in float and
            // avoid implicit promotion to double-precision math on the device.
            W_j[i] = (2.0f * cudaUniRand(i) - 1.0f) * (sqrtf(6.0f) / (rows + cols));
        }
    }
}
/*
* Load current batch of train examples.
* 1: First layer batch array.
* 2: Training example matrix.
* 3: Input layer dimension
* 4: Batch size dimension
* 5: Offset indicating the batch being loaded.
* Notes:
* Transpose version assumes that the training examples matrix is stored
* in a row-wise manner.
*/
template<typename DATA_T, unsigned int TILE>
__global__ void loadT(
DATA_T *A_j,
DATA_T *tEx,
unsigned int clayer, unsigned int bsize,
unsigned int car, unsigned int dim,
unsigned int voffset, unsigned int hoffset
){
// Stage a TILE x TILE patch of the row-major example matrix tEx (car x dim),
// starting at (voffset, hoffset), then write it back transposed into the
// column-major batch buffer A_j (clayer x bsize). blockDim must be (TILE,TILE).
// NOTE(review): shared tile is not padded ([TILE][TILE+1]) — the transposed
// read below may incur shared-memory bank conflicts; confirm before tuning.
__shared__ DATA_T stEx[TILE * TILE];
// Phase 1: coalesced read of the source patch into shared memory.
int row = ( blockIdx.y * blockDim.y + threadIdx.y );
int col = ( blockIdx.x * blockDim.x + threadIdx.x );
if(voffset + row < car && col + hoffset < dim && row < bsize && col < clayer){
stEx[threadIdx.y * TILE + threadIdx.x] = tEx[(row + voffset) * dim + (col + hoffset)];
//stEx[threadIdx.y * TILE + threadIdx.x] = tEx[(voffset + row) * dim + (col + hoffset)];
}
__syncthreads();
//col * bsize + row
// Phase 2: re-derive indices with the x/y block roles swapped so the
// transposed write to A_j remains coalesced; each thread reads the shared
// cell its mirror thread wrote.
// NOTE(review): at ragged edges a thread can pass this guard while its mirror
// thread failed the phase-1 guard, reading an uninitialized shared cell —
// confirm callers only use dimensions where both guards align, or zero-fill
// stEx before the barrier.
row = (blockIdx.x * blockDim.x + threadIdx.y);
col = (blockIdx.y * blockDim.y + threadIdx.x);
if( row < clayer && col < bsize){
A_j[row * bsize + col] = stEx[threadIdx.x * TILE + threadIdx.y];
}
}
/*
* Compute matrix of activation values for a single layer of a given batch.
* 1: Current layer weight matrix.
* 2: Current layer matrix of activation vectors.
* 3: Next layer matrix of activation vectors.
* 4: W_j = nlayer x clayer , A_j = clayer x bsize, A_jj = nlayer x bsize.
* 5: Offset: 0 for hidden and output layer, corresponding row of training example matrix for input layer.
*/
/*
 * Tiled feed-forward multiply: A_jj = F(W_j . A_j + bias).
 * W_j is (nlayer x clayer+1) with the bias in its last column; A_j is
 * (clayer x bsize); A_jj is (nlayer x bsize). blockDim must be (TILE,TILE).
 */
template<typename DATA_T, typename ACT_F, unsigned int TILE>
__global__ void mmul(
    DATA_T *A_jj,
    DATA_T *W_j,
    DATA_T *A_j,
    ACT_F F,
    unsigned int nlayer,
    unsigned int clayer,
    unsigned int bsize
)
{
    __shared__ DATA_T sWj[TILE * TILE];
    __shared__ DATA_T sAj[TILE * TILE];
    __shared__ DATA_T bias[TILE];
    int row = ( blockIdx.y * blockDim.y + threadIdx.y );
    int col = ( blockIdx.x * blockDim.x + threadIdx.x );
    // fix: guard the bias fetch — at the bottom edge of the grid `row` can be
    // >= nlayer, and the unguarded load read past the end of W_j. Out-of-range
    // rows get a zero bias; their partial result is never written out anyway.
    if(threadIdx.x == 0) bias[threadIdx.y] = (row < nlayer) ? W_j[row * (clayer + 1) + clayer] : (DATA_T)0.0;
    __syncthreads();
    DATA_T Ajj = bias[threadIdx.y];
    int loadOffset = threadIdx.y*TILE + threadIdx.x;
    for(int i = 0;i < ((clayer - 1) / TILE) + 1; i++){
        // Stage one TILE-wide slice of W_j and A_j; zero-fill out-of-range cells
        // so they contribute nothing to the dot product.
        if( row < nlayer && (i * TILE + threadIdx.x ) < clayer)
            sWj[loadOffset] = W_j[ row * ( clayer + 1 ) + i * TILE + threadIdx.x];// clayer + 1 to avoid bias vector
        else sWj[loadOffset] = 0.0;
        if ( i*TILE + threadIdx.y < clayer && col < bsize )
            sAj[loadOffset] = A_j[(i * TILE + threadIdx.y) * bsize + col];
        else sAj[loadOffset] = 0.0;
        __syncthreads();
        for(int j = 0;j < TILE; j++){
            Ajj += sWj[threadIdx.y * TILE + j] * sAj[j * TILE + threadIdx.x];
        }
        __syncthreads();
    }
    if( row < nlayer && col < bsize )
        A_jj[row * bsize + col ] = F.F(Ajj);
}
/*
* Kernel that computes the last layer difference between the batch activation matrix and the expected output
* matrix.
*/
/*
 * Element-wise difference over a flat array: D_j[i] = ExA_j[i] - A_j[i]
 * for i in [0, size). Launched 1-D, one element per thread.
 */
template<typename DATA_T>
__global__ void outputD(
    DATA_T *D_j,
    DATA_T *ExA_j,
    DATA_T *A_j,
    unsigned int size
)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= size) return;
    D_j[tid] = ExA_j[tid] - A_j[tid];
}
/*
* Transpose matrix multiplication.
* D_j = (W_j)^T . D_jj
*/
template<typename DATA_T, unsigned int TILE>
__global__ void tmmul(
DATA_T *D_j,
DATA_T *W_j,
DATA_T *D_jj,
// Shapes: W_j (nlayer x clayer), D_jj (nlayer x bsize), output D_j (clayer x bsize).
// The grid must cover (clayer x bsize) and blockDim must be (TILE,TILE).
unsigned int clayer,
unsigned int nlayer,
unsigned int bsize
)
{
__shared__ DATA_T sWj[TILE * TILE];
__shared__ DATA_T sDjj[TILE * TILE];
DATA_T Dj = 0.0;
// Both tiles are walked along the shared k (nlayer) dimension; out-of-range
// cells are zero-filled so they add nothing to the dot product.
int colW = ( blockIdx.y * blockDim.y + threadIdx.x );// by * TILE + ty * clayer + threadIdx.x
int colD = ( blockIdx.x * blockDim.x + threadIdx.x );
int loadOffset = threadIdx.y*TILE + threadIdx.x;
for(int i = 0; i < (nlayer - 1) / TILE + 1 ; i++){
if( (i * TILE + threadIdx.y) < nlayer && colW < clayer)
sWj[loadOffset] = W_j[ (i * TILE + threadIdx.y) * clayer + colW ];
else
sWj[loadOffset] = 0.0;
if((i * TILE + threadIdx.y) < nlayer && colD < bsize)
sDjj[loadOffset] = D_jj[ (i * TILE + threadIdx.y) * bsize + colD ];
else
sDjj[loadOffset] = 0.0;
__syncthreads();
// sWj is indexed column-first here (j*TILE + ty), which realizes W_j^T
// without an explicit transpose pass.
for(int j=0;j<TILE;j++) Dj += sWj[j * TILE + threadIdx.y] * sDjj[j * TILE + threadIdx.x];
__syncthreads();
}
int row = ( blockIdx.y * blockDim.y + threadIdx.y );
if( row < clayer && colD < bsize) D_j[row * bsize + colD] = Dj;
}
/*
* Hadamard product with derivative activation values
*/
/*
 * Backprop Hadamard step: D_j .*= F.D(W_j . A_j + bias).
 * Same tiling and shapes as mmul — W_j is (nlayer x clayer+1) with the bias
 * in its last column, A_j is (clayer x bsize) — but the activated pre-sum is
 * passed through the derivative F.D and multiplied into D_j in place.
 * blockDim must be (TILE,TILE).
 */
template<typename DATA_T, typename ACT_F, unsigned int TILE>
__global__ void hmprod_mmul(
    DATA_T *D_j,
    DATA_T *W_j,
    DATA_T *A_j,
    ACT_F F,
    unsigned int nlayer,
    unsigned int clayer,
    unsigned int bsize
)
{
    __shared__ DATA_T sWj[TILE * TILE];
    __shared__ DATA_T sAj[TILE * TILE];
    __shared__ DATA_T bias[TILE];
    int row = ( blockIdx.y * blockDim.y + threadIdx.y );
    int col = ( blockIdx.x * blockDim.x + threadIdx.x );
    // fix: guard the bias fetch — at the grid's bottom edge `row` can be
    // >= nlayer and the unguarded load read past the end of W_j (same defect
    // as mmul). Out-of-range rows never store a result, so 0 is safe.
    if(threadIdx.x == 0) bias[threadIdx.y] = (row < nlayer) ? W_j[row * (clayer + 1) + clayer] : (DATA_T)0.0;
    __syncthreads();
    DATA_T Dj = bias[threadIdx.y];
    int loadOffset = threadIdx.y*TILE + threadIdx.x;
    for(int i = 0;i < ((clayer - 1) / TILE) + 1; i++){
        // Stage one TILE-wide slice of W_j and A_j; zero-fill out-of-range cells.
        if( row < nlayer && (i * TILE + threadIdx.x ) < clayer)
            sWj[loadOffset] = W_j[ row * ( clayer + 1 ) + i * TILE + threadIdx.x];// clayer + 1 to avoid bias vector
        else sWj[loadOffset] = 0.0;
        if ( i*TILE + threadIdx.y < clayer && col < bsize )
            sAj[loadOffset] = A_j[(i * TILE + threadIdx.y) * bsize + col];
        else sAj[loadOffset] = 0.0;
        __syncthreads();
        for(int j = 0;j < TILE; j++) Dj += sWj[threadIdx.y * TILE + j] * sAj[j * TILE + threadIdx.x];
        __syncthreads();
    }
    if( row < nlayer && col < bsize )
        D_j[row * bsize + col ] *= F.D(Dj);
}
/*
 * Sigmoid fast path of the Hadamard backprop step: D_j .*= A_j .* (1 - A_j),
 * exploiting sigma'(x) = sigma(x)(1 - sigma(x)) so no matrix product is
 * needed. A_j and D_j are (nlayer x bsize); one thread per element.
 */
template<typename DATA_T, typename ACT_F, unsigned int TILE>
__global__ void hmprod_mmul_sigmoid(
    DATA_T *D_j,
    DATA_T *A_j,
    unsigned int nlayer,
    unsigned int clayer,
    unsigned int bsize
)
{
    int row = ( blockIdx.y * blockDim.y + threadIdx.y );
    int col = ( blockIdx.x * blockDim.x + threadIdx.x );
    // fix: A_j was loaded BEFORE the bounds check, reading out of bounds for
    // edge blocks; both the read and the write now happen inside the guard.
    if( row < nlayer && col < bsize ){
        DATA_T Aj = A_j[row * bsize + col];
        D_j[row * bsize + col ] *= Aj * (1-Aj);
    }
}
/*
* Compute weight update matrices for the current batch.
* A = [ A ones(bsize) ]
* for i = 1 : dsz(2)
* W = W + D(:,i) * A(:,i)';
* end
* W ( nlayer x (clayer + 1))
*/
template<typename DATA_T, unsigned int TILE>
__global__ void tvecpvec(
DATA_T *W_j,
DATA_T *D_jj,
DATA_T *A_j,
// Shapes: W_j (nlayer x clayer+1, bias last), D_jj (nlayer x bsize),
// A_j (clayer x bsize). Accumulates W_j += (lrate/bsize) * D_jj . A_j^T,
// i.e. the summed outer products over the batch. blockDim must be (TILE,TILE).
unsigned int nlayer,
unsigned int bsize,
unsigned int clayer,
float lrate
){
__shared__ DATA_T sDjj[TILE * TILE];
__shared__ DATA_T sAj[TILE * TILE];
DATA_T Wj = 0.0;
int rowD = (blockIdx.y * blockDim.y + threadIdx.y);
int rowA = (blockIdx.x * blockDim.x + threadIdx.y);
// Walk the batch (k = bsize) dimension tile by tile. D tiles are zero-filled
// out of range; A tiles are filled with 1.0 so that the extra (clayer-th)
// column accumulates the plain sum of deltas — that is the bias update.
for(int i = 0;i < (bsize - 1) / TILE + 1;i++){
if(rowD < nlayer && (i*TILE + threadIdx.x) < bsize)
sDjj[threadIdx.y * TILE + threadIdx.x] = D_jj[rowD * bsize + i*TILE + threadIdx.x];
else
sDjj[threadIdx.y * TILE + threadIdx.x] = 0.0;
if(rowA < clayer && (i*TILE + threadIdx.x) < bsize)
sAj[threadIdx.x * TILE + threadIdx.y] = A_j[rowA * bsize + i*TILE + threadIdx.x];
else
sAj[threadIdx.x * TILE + threadIdx.y] = 1.0;//Required to update bias weights//
__syncthreads();
for(int j = 0 ; j < TILE; j++)
Wj += sDjj[threadIdx.y * TILE + j] * sAj[j * TILE + threadIdx.x];
__syncthreads();
}
// Padding past bsize contributes 1.0 * 0.0 = 0, so the average over the
// batch below stays correct.
int col = (blockIdx.x * blockDim.x + threadIdx.x);
Wj *= (lrate / bsize);
if( rowD < nlayer && col < clayer + 1)//clayer + 1 to update bias weights.
W_j[rowD * (clayer + 1) + col] += Wj;
}
/*
 * Grid-stride fill of a (rows x cols) matrix. The compile-time `init` tag
 * selects the fill: ZEROS, ONES, or RANDOM (via the curand pool).
 */
template<typename DATA_T,unsigned int init>
__global__ void initVector(DATA_T *M, unsigned int rows, unsigned int cols){
    int stride = gridDim.x * blockDim.x;
    for (int idx = threadIdx.x + blockDim.x * blockIdx.x; idx < rows * cols; idx += stride) {
        if (init == ZEROS) M[idx] = 0.0;
        else if (init == ONES) M[idx] = 1.0;
        else if (init == RANDOM) M[idx] = cudaUniRand(idx);
    }
}
template<typename DATA_T>
__global__ void printGPU2(DATA_T *A, unsigned int row, unsigned int col){
// Device-side matrix dump. There is no thread guard: EVERY thread running
// this kernel prints the entire (row x col) matrix, so it is only meaningful
// when launched as <<<1,1>>>. NOTE(review): no call site is visible in this
// file — the host-side printGPU (copy + print) is used instead; confirm this
// kernel is still needed.
for(int i =0;i<row;i++){
printf("[ ");
for(int j=0;j<col;j++){
printf("%.1f ", A[i*col + j]);
}
printf(" ]\n");
}
}
/*
 * Copy a (row x col) device matrix to the host and print it in Octave/Matlab
 * bracket syntax (one decimal place, rows terminated with ';').
 */
template<typename DATA_T>
__host__ void printGPU(DATA_T *A, unsigned int row, unsigned int col){
    DATA_T *AA;
    allocHostMem<DATA_T>(&AA,sizeof(DATA_T)*row*col,"Error allocating AA in printGPU");
    safeCpyToHost<DATA_T>(AA,A,sizeof(DATA_T)*row*col,"Error copying AA in printGPU");
    printf("[ ");
    for(unsigned int i =0;i<row;i++){
        for(unsigned int j=0;j<col;j++){
            printf("%.1f ", AA[i*col + j]);
        }
        printf(" ;\n");
    }
    printf(" ]\n");
    // fix: the staging buffer was leaked on every call; this helper runs inside
    // the DEBUG loops, so the leak grew with every printed matrix.
    cudaFreeHost(AA);
}
}
namespace gnn{
/*
 * Seed the device RNG pool and give every layer's weight matrix a fresh
 * random initialization (the kernel zeroes each bias column itself).
 */
template<typename DATA_T, typename ACT_F>
void GNeuralNetwork<DATA_T,ACT_F>::randomInit(){
    if(network == NULL) vz::error("Network architecture missing. Use createLayers first!");
    cudaSetDevice(CUDA_DEVICE);
    gnn_kernels::cudaInitRandStates();
    for(int layer = 0; layer < layers-1; layer++){
        unsigned int nWeights = network[layer].nlayer * network[layer].clayer;
        dim3 rgrid = grid_1D(nWeights,BSIZE);
        dim3 rblock = block_1D(BSIZE);
        gnn_kernels::randomWeights<DATA_T><<<rgrid,rblock>>>(network[layer].W_j,network[layer].nlayer,network[layer].clayer);
        handleDeviceErrors(cudaDeviceSynchronize(),"Error executing randomWeights kernel");
    }
}
template<typename DATA_T, typename ACT_F>
void GNeuralNetwork<DATA_T,ACT_F>::train(){
cudaSetDevice(CUDA_DEVICE);
if(network == NULL) vz::error("Network architecture missing. Use createLayers first!");
if(bsize == 0) vz::error("Batch size not set. Use setBatchSize first!");
unsigned int nbatch = this->transpose ? dimEx.first / this->bsize : dimEx.second / this->bsize;
for(int i = 0; i< nbatch; i++){
/*
* Load current batch of training examples.
*/
unsigned int bRow = i * this->bsize;
dim3 lgrid((batch[0].clayer-1)/LTILE + 1, (batch[0].bsize-1)/LTILE + 1);
dim3 lblock(LTILE,LTILE);
gnn_kernels::loadT<DATA_T,LTILE><<<lgrid,lblock>>>(
batch[0].A_j,dExamples,
batch[0].clayer,batch[0].bsize,
dimEx.first,dimEx.second,
bRow,0
);
handleDeviceErrors(cudaDeviceSynchronize(),"Error executing loadT for X batch");
if(DEBUG_T){
//printf("A=");
gnn_kernels::printGPU(batch[0].A_j,batch[0].clayer,batch[0].bsize);
cudaDeviceSynchronize();
printf("sum(sum(round(A - M(%d:%d,1:%d)')))\n",bRow + 1, (i+1)*batch[0].bsize,batch[0].clayer);
}
/*
* Neural network feed forward step.
* - W = ( nlayer x (clayer + 1) ), A(i) = ( clayer x bsize ) , A(i+1) = (nlayer x bsize)
* A[jj] = A[j] * W[j]
*/
for(int j = 0;j < this->layers - 1;j++){
dim3 agrid((batch[j+1].bsize - 1)/TTILE + 1, (batch[j+1].clayer - 1)/TTILE + 1);
dim3 ablock(TTILE,TTILE);
gnn_kernels::mmul<DATA_T,ACT_F,TTILE><<<agrid,ablock>>>
(
batch[j+1].A_j,
network[j].W_j,
batch[j].A_j,
F,
network[j].nlayer,
network[j].clayer - 1,// Ignore bias vector from the multiplication//
batch[j].bsize
);
handleDeviceErrors(cudaDeviceSynchronize(),"Error executing batch activation");
if(DEBUG_GNN){
printf("Ajj= ");
gnn_kernels::printGPU(batch[j+1].A_j,batch[j+1].clayer,batch[j+1].bsize);
cudaDeviceSynchronize(); //printf("------------------>\n");
printf(";W= ");
gnn_kernels::printGPU(network[j].W_j,network[j].nlayer,network[j].clayer);
cudaDeviceSynchronize(); //printf("------------------>\n");
printf(";Aj= ");
gnn_kernels::printGPU(batch[j].A_j,batch[j].clayer,batch[j].bsize);
cudaDeviceSynchronize();
printf("Ejj=act(W,Aj,0);\n");
printf("diff0%d = sum(sum(round(Ejj-Ajj)))\n",j);
}
}
/*
* Output layer Delta computation.
* Dl = (Y - Al)
*
*/
dim3 ogrid = grid_1D(batch[layers-1].clayer * batch[layers-1].bsize, BSIZE);
dim3 oblock = block_1D(BSIZE);
bRow = i * this->bsize;
dim3 lygrid((batch[layers-1].clayer-1)/LTILE + 1, (batch[layers-1].bsize-1)/LTILE + 1);
dim3 lyblock(LTILE,LTILE);
gnn_kernels::loadT<DATA_T,LTILE><<<lygrid,lyblock>>>(
batch[layers-1].Y,dExamples,
batch[layers-1].clayer,batch[layers-1].bsize,
dimEx.first,dimEx.second,
bRow,batch[0].clayer
);
if(DEBUG_T){
handleDeviceErrors(cudaDeviceSynchronize(),"Error executing loadT for Y batch");
printf("Y=");
gnn_kernels::printGPU(batch[layers-1].Y,batch[layers-1].clayer,batch[layers-1].bsize);
cudaDeviceSynchronize();
printf("sum(sum(round(Y - M(%d:%d,%d:%d)')))\n",bRow + 1, (i+1)*batch[0].bsize,batch[0].clayer+1,batch[0].clayer+batch[layers-1].clayer);
}
gnn_kernels::outputD<DATA_T><<<ogrid,oblock>>>(
batch[layers-1].D_j,
batch[layers-1].Y,
batch[layers-1].A_j,// Dj = Y - Aj
batch[layers-1].clayer * batch[layers-1].bsize
);
handleDeviceErrors(cudaDeviceSynchronize(),"Error executing outputD kernel");
if(DEBUG_GNN){
printf("Y=");
gnn_kernels::printGPU(batch[layers-1].Y,batch[layers-1].clayer,batch[layers-1].bsize);
cudaDeviceSynchronize();
//if(DEBUG_GNN){
printf(";Aj=");
gnn_kernels::printGPU(batch[layers-1].A_j,batch[layers-1].clayer,batch[layers-1].bsize);
cudaDeviceSynchronize();
printf(";Dl=");
gnn_kernels::printGPU(batch[layers-1].D_j,batch[layers-1].clayer,batch[layers-1].bsize);
cudaDeviceSynchronize();
printf(";El = (Y - Aj);\n");
printf("diffY = sum(sum(round(El - Dl)))\n");
}
/*
* Backpropagation transpose matrix multiplication.
* for i = layers-1 : > 1 : i--
* batch[i-1].D_j = network[i-1].W_j * batch[i].D_j
* grid = (batch[i-1].bsize / TILE + 1), batch[i-1].clayer / TILE + 1
* block = (TILE, TILE)
* D[j] = <W[j] * D[jj]> .* F.D(W[j] * A[j])
*/
for(int j = layers-1; j > 1 ; j--){
//printf("BP(%d)\n",j);
dim3 dgrid((batch[j-1].bsize - 1) / TTILE + 1, (batch[j-1].clayer - 1) / TTILE + 1);
dim3 dblock(TTILE, TTILE);
gnn_kernels::tmmul<DATA_T,TTILE><<<dgrid,dblock>>>(
batch[j-1].D_j,//(clayer x bsize)
network[j-1].W_j,//(nlayer x clayer)
batch[j].D_j,// (nlayer x bsize)
network[j-1].clayer,
network[j-1].nlayer,
batch[j].bsize
);
handleDeviceErrors(cudaDeviceSynchronize(),"Error executing tmmul kernel");
if(DEBUG_GNN){
printf("Djj=");
gnn_kernels::printGPU(batch[j-1].D_j,batch[j-1].clayer,batch[j-1].bsize);
cudaDeviceSynchronize(); //printf("------------------>\n");
printf(";W=");
gnn_kernels::printGPU(network[j-1].W_j,network[j-1].nlayer,network[j-1].clayer);
cudaDeviceSynchronize();//printf("------------------>\n");
printf(";Dj=");
gnn_kernels::printGPU(batch[j].D_j,batch[j].clayer,batch[j].bsize);
cudaDeviceSynchronize();//printf("------------------>\n");
printf("Ejj=W' * Dj; \n");
printf("diff1%d = sum(sum(round(Ejj(1:%d,:) - Djj)))\n",j-1,network[j-1].clayer-1);
}
}
/*
* Final step for delta computation.
* //D[jj] = D[jj] .* F.D(W[j] * A(j))
*/
for(int j = 1; j < layers-1; j++){
dim3 dgrid((batch[j].bsize - 1) / TTILE + 1, (batch[j].clayer - 1) / TTILE + 1);
dim3 dblock(TTILE, TTILE);
if(DEBUG_GNN){
printf("Djj=");
gnn_kernels::printGPU(batch[j].D_j,batch[j].clayer,batch[j].bsize);
cudaDeviceSynchronize(); //printf("------------------>\n");
printf(";Wj=");
gnn_kernels::printGPU(network[j-1].W_j,network[j-1].nlayer,network[j-1].clayer);
cudaDeviceSynchronize();//printf("------------------>\n");
printf(";Aj=");
gnn_kernels::printGPU(batch[j-1].A_j,batch[j-1].clayer,batch[j-1].bsize);
cudaDeviceSynchronize();//printf("------------------>\n");
}
if(strcmp(F.TAG,"Sigmoid")!=0){
gnn_kernels::hmprod_mmul<DATA_T,ACT_F,TTILE><<<dgrid, dblock>>>(
batch[j].D_j,
network[j-1].W_j,
batch[j-1].A_j,
F,
network[j-1].nlayer,
network[j-1].clayer-1,
batch[j-1].bsize
);
}else{
gnn_kernels::hmprod_mmul_sigmoid<DATA_T,ACT_F,TTILE><<<dgrid, dblock>>>(
batch[j].D_j,
batch[j].A_j,
network[j-1].nlayer,
network[j-1].clayer-1,
batch[j-1].bsize
);
}
handleDeviceErrors(cudaDeviceSynchronize(),"Error executing tmmul kernel");//TODO not necessary
if(DEBUG_GNN){
printf(";Ejj=");
gnn_kernels::printGPU(batch[j].D_j,batch[j].clayer,batch[j].bsize);
cudaDeviceSynchronize(); //printf("------------------>\n");
printf(";Djj = Djj .* act(Wj,Aj,1);\n");
printf("diff2%d=sum(sum(round(Ejj - Djj)))\n",j);
}
}
/*
* Weight and bias update
* W[j] = W[j] + (lrate/bsize) * Sum( D[jj] <> A[j] )
*/
for(int j = 0;j<layers-1; j++){
dim3 grid((network[j].clayer - 1)/TTILE + 1, (network[j].nlayer - 1)/TTILE + 1 );
dim3 block(TTILE,TTILE);
if(DEBUG_GNN){
printf("Wj=");
gnn_kernels::printGPU(network[j].W_j,network[j].nlayer,network[j].clayer);
cudaDeviceSynchronize();
printf(";Djj=");
gnn_kernels::printGPU(batch[j+1].D_j,batch[j+1].clayer,batch[j+1].bsize);
cudaDeviceSynchronize();
printf(";Aj=");
gnn_kernels::printGPU(batch[j].A_j,batch[j].clayer,batch[j].bsize);
cudaDeviceSynchronize();
}
gnn_kernels::tvecpvec<DATA_T,TTILE><<<grid,block>>>(
network[j].W_j,
batch[j+1].D_j,
batch[j].A_j,
network[j].nlayer,
batch[j].bsize,
network[j].clayer-1,
this->lrate
);
handleDeviceErrors(cudaDeviceSynchronize(),"Error executing tvecpvec kernel");//TODO not needed to wait
if(DEBUG_GNN){
printf("Ej=");
gnn_kernels::printGPU(network[j].W_j,network[j].nlayer,network[j].clayer);
cudaDeviceSynchronize();
printf("diff3%d=sum(sum(round(Ej-tvecpvec(Wj,Djj,Aj,%f,%d))))\n",j,this->lrate,batch[j].bsize);
}
}
}
}
/*
 * Runs the feed-forward pass over the test set and reports classification
 * accuracy. For each batch, the expected labels (Y) and the computed output
 * activations (A) of the last layer are copied to the host and compared via
 * a per-example (per-column) argmax.
 * Preconditions: createLayers() and setBatchSize() must have been called.
 * Fixes vs. previous version:
 *  - printf format "%2.f" (width 2, precision 0) corrected to "%.2f";
 *  - argmax is seeded from the first row instead of 0, so it is also correct
 *    when all activations are negative (possible with the Arctan activation);
 *  - batch loop counter made unsigned to match nbatch.
 */
template<typename DATA_T, typename ACT_F>
void GNeuralNetwork<DATA_T,ACT_F>::classify(){
	cudaSetDevice(CUDA_DEVICE);
	if(network == NULL) vz::error("Network architecture missing. Use createLayers first!");
	if(bsize == 0) vz::error("Batch size not set. Use setBatchSize first!");
	// Number of whole batches in the test set; trailing remainder examples are skipped.
	unsigned int nbatch = this->transpose ? dimT.first / this->bsize : dimT.second / this->bsize;
	unsigned int count = 0;	// correctly classified examples
	if(DEBUG_GNN) std::cout<<dimT.first << "," << dimT.second << std::endl;
	if(DEBUG_GNN) std::cout<< "nbatch: " << nbatch << std::endl;
	// Host staging buffers for the output layer: Y = expected, A = computed.
	DATA_T *Y,*A;
	allocHostMem<DATA_T>(&Y,sizeof(DATA_T)*this->bsize*batch[layers-1].clayer,"Error allocating mem for Y in classify");
	allocHostMem<DATA_T>(&A,sizeof(DATA_T)*this->bsize*batch[layers-1].clayer,"Error allocating mem for A in classify");
	for(unsigned int i = 0; i < nbatch; i++){
		/*
		 * Load current batch of test examples into the input layer.
		 * NOTE(review): only the transposed layout is loaded here; presumably
		 * the non-transposed layout is staged elsewhere -- confirm.
		 */
		if(this->transpose){
			unsigned int bRow = i * this->bsize;
			dim3 lgrid((batch[0].clayer-1)/TTILE + 1, (batch[0].bsize-1)/TTILE + 1);
			dim3 lblock(TTILE,TTILE);
			gnn_kernels::loadT<DATA_T,TTILE><<<lgrid,lblock>>>(
					batch[0].A_j,dTest,
					batch[0].clayer,batch[0].bsize,
					dimT.first,dimT.second,
					bRow,0);
			handleDeviceErrors(cudaDeviceSynchronize(),"Error executing loadT for X batch on classify");
		}
		/*
		 * Neural network feed forward step.
		 * - W = ( nlayer x (clayer + 1) ), A(i) = ( clayer x bsize ) , A(i+1) = (nlayer x bsize)
		 * A[jj] = A[j] * W[j]
		 */
		for(int j = 0;j < this->layers - 1;j++){
			dim3 agrid((batch[j+1].bsize - 1)/TTILE + 1, (batch[j+1].clayer - 1)/TTILE + 1);
			dim3 ablock(TTILE,TTILE);
			gnn_kernels::mmul<DATA_T,ACT_F,TTILE><<<agrid,ablock>>>
			(
					batch[j+1].A_j,
					network[j].W_j,
					batch[j].A_j,
					F,
					network[j].nlayer,
					network[j].clayer - 1,// Ignore bias vector from the multiplication//
					batch[j].bsize
			);
			handleDeviceErrors(cudaDeviceSynchronize(),"Error executing batch activation");
		}
		// Stage the expected labels for this batch (columns after the features).
		unsigned int bRow = i * this->bsize;
		dim3 lgrid((batch[layers-1].clayer-1)/TTILE + 1, (batch[layers-1].bsize-1)/TTILE + 1);
		dim3 lblock(TTILE,TTILE);
		gnn_kernels::loadT<DATA_T,TTILE><<<lgrid,lblock>>>(
				batch[layers-1].Y,dTest,
				batch[layers-1].clayer,batch[layers-1].bsize,
				dimT.first,dimT.second,
				bRow,batch[0].clayer
		);
		handleDeviceErrors(cudaDeviceSynchronize(),"Error executing loadT for Y batch on classify");
		safeCpyToHost<DATA_T>(Y,batch[layers-1].Y,sizeof(DATA_T)*batch[layers-1].clayer*this->bsize,"Error transferring Y from GPU");
		safeCpyToHost<DATA_T>(A,batch[layers-1].A_j,sizeof(DATA_T)*batch[layers-1].clayer*this->bsize,"Error transferring A_j from GPU");
		// Per-example argmax over the output rows; seed with row 0 so the
		// comparison also works when every activation is negative.
		for(int x=0; x < bsize; x++){
			DATA_T maxY = Y[x], maxA = A[x];
			int indexY = 0, indexA = 0;
			for(int y = 1; y < batch[layers-1].clayer; y++){
				if(Y[y * bsize + x] > maxY){ maxY = Y[y * bsize + x]; indexY = y;}
				if(A[y * bsize + x] > maxA){ maxA = A[y * bsize + x]; indexA = y;}
			}
			if(indexY == indexA ) count++;
		}
	}
	printf("Accuracy: %.2f, %u, %lu\n",(((float)count)/dimT.first)*100,count, dimT.first);
	cudaFreeHost(Y); cudaFreeHost(A);
}
/*
* Testing methods
*/
/*
 * Micro-benchmark for the activation functor ACT_F.
 * Launches one warm-up kernel (to absorb context creation / first-launch
 * overhead), then times a single 128x512-thread launch of
 * bench_test_activation and reports the elapsed time in milliseconds.
 */
template<typename DATA_T, typename ACT_F>
void GNeuralNetwork<DATA_T,ACT_F>::bench_act(){
	cudaSetDevice(CUDA_DEVICE);	// was hard-coded 0; keep consistent with classify()
	dim3 block(512,1,1);
	dim3 grid(128,1,1);
	/*
	 * Warm up device
	 */
	gnn_kernels::bench_test_activation<ACT_F><<<grid,block>>>(this->F);
	cudaDeviceSynchronize();
	/* <END> */
	std::string msg("Benchmark ");
	msg.append(F.TAG);
	Time<millis> t;
	t.start();
	gnn_kernels::bench_test_activation<ACT_F><<<grid,block>>>(F);
	cudaDeviceSynchronize();	// kernel launch is async; wait before stopping the timer
	t.lap(msg);
}
/*
 * Copies every weight matrix W_j back to the host and prints it in an
 * Octave/MATLAB-friendly form (W0=[...]; W1=[...]; ...), followed by the
 * commands that recompute each layer's activations from them.
 * Fix: the host staging buffer was re-allocated every iteration and never
 * released -- it is now freed at the end of each loop pass.
 */
template<typename DATA_T, typename ACT_F>
void GNeuralNetwork<DATA_T,ACT_F>::print_weights(){
	DATA_T *cW_j;
	cudaSetDevice(0);
	for(int i = 0;i < layers-1;i++){
		unsigned int size = network[i].nlayer * network[i].clayer;
		allocHostMem<DATA_T>(&cW_j,sizeof(DATA_T)*size, "Error Allocating Host Weight Matrix");
		safeCpyToHost<DATA_T>(cW_j,network[i].W_j,sizeof(DATA_T)*size, "Error Allocating Copying Weight Matrix From Device");
		printf("W%d=[",i);
		for(unsigned int j = 0;j<size;j++){
			std::cout<<cW_j[j] << " ";
			// Break the row after every clayer values (row-major layout).
			if((j+1)%network[i].clayer == 0) std::cout<<std::endl;
		}
		printf("]");
		std::cout<<std::endl;
		cudaFreeHost(cW_j);	// fix: was leaked on every iteration
	}
	for(int i = 0;i < layers-1;i++) printf("A%d=act(W%d,A%d,0)\n",i+1,i,i);
}
/*
 * Micro-benchmark / debug harness for the individual GPU kernels.
 * test selects the kernel (MMUL, TMMUL, MHPROD, TVECPVEC); m, n, k are the
 * logical matrix dimensions (internally nlayer=m, clayer=n+1 to account for
 * the bias column, bsize=k). With debug=true operands/results are printed;
 * the CPU reference checks are currently commented out.
 * Fixes vs. previous version:
 *  - hostD is initialized to NULL: it is only allocated on some branches and
 *    the unconditional cudaFreeHost at the end previously freed a garbage
 *    pointer on the TVECPVEC path;
 *  - the TMMUL branch no longer frees hostD itself (that caused a double
 *    cudaFreeHost together with the free at the end of the function).
 * NOTE: calls cudaDeviceReset() on exit, which destroys the CUDA context for
 * the whole process -- only invoke from standalone test drivers.
 */
template<typename DATA_T, typename ACT_F>
void GNeuralNetwork<DATA_T,ACT_F>::bench_test_kernels(UnitTest test,unsigned int m, unsigned int n, unsigned int k,
		bool debug){
	unsigned int nlayer = m, clayer = n + 1, bsize = k;
	DATA_T *hostA, *hostB, *hostC, *hostD = NULL;	// hostD only allocated by some branches
	DATA_T *devA, *devB, *devC;
	allocDevMem<DATA_T>(&devA,sizeof(DATA_T) * nlayer * clayer, "Error allocating devA memory");
	allocDevMem<DATA_T>(&devB,sizeof(DATA_T) * clayer * bsize, "Error allocating devB memory");
	allocDevMem<DATA_T>(&devC,sizeof(DATA_T) * nlayer * bsize, "Error allocating devC memory");
	allocHostMem<DATA_T>(&hostA,sizeof(DATA_T) * nlayer * clayer, "Error allocating devA memory");
	allocHostMem<DATA_T>(&hostB,sizeof(DATA_T) * clayer * bsize, "Error allocating devB memory");
	allocHostMem<DATA_T>(&hostC,sizeof(DATA_T) * nlayer * bsize, "Error allocating devC memory");
	// Fill the device operands with random values.
	dim3 rgrid;
	dim3 rblock = block_1D(256);
	rgrid = grid_1D(nlayer * clayer,256); gnn_kernels::randomWeights<DATA_T><<<rgrid,rblock>>>(devA,nlayer, clayer);
	rgrid = grid_1D(clayer * bsize,256); gnn_kernels::randomWeights<DATA_T><<<rgrid,rblock>>>(devB,clayer, bsize);
	rgrid = grid_1D(nlayer * bsize,256); gnn_kernels::randomWeights<DATA_T><<<rgrid,rblock>>>(devC,nlayer,bsize);
	if(test == MMUL){
		dim3 agrid((bsize - 1)/TTILE + 1, (nlayer - 1)/TTILE + 1);
		dim3 ablock(TTILE,TTILE);
		Time<millis> t;
		t.start();
		gnn_kernels::mmul<DATA_T,ACT_F,TTILE><<<agrid,ablock>>>
		(
				devC,
				devA,
				devB,
				F,
				nlayer,
				clayer - 1,
				bsize
		);
		handleDeviceErrors(cudaDeviceSynchronize(),"Error executing batch mmul");
		t.lap("GPU serial mmul elapsed time");
		hostD = NULL;	// CPU reference below is disabled; hostD stays unused
		allocHostMem<DATA_T>(&hostD,sizeof(DATA_T) * nlayer * bsize, "Error allocating devC memory");
		safeCpyToHost<DATA_T>(hostA,devA,sizeof(DATA_T)*nlayer*clayer,"Error copying devA to host");
		safeCpyToHost<DATA_T>(hostB,devB,sizeof(DATA_T)*clayer*bsize,"Error copying devB to host");
		safeCpyToHost<DATA_T>(hostC,devC,sizeof(DATA_T)*nlayer*bsize,"Error copying devC to host");
		/*t.start();
		for(int x = 0; x < nlayer; x++){//3
			for (int y = 0; y < bsize; y++){//3
				hostD[x * bsize + y] = hostA[x * (clayer) + clayer - 1];
				for (int z = 0; z < clayer - 1; z++){//2
					hostD[x * bsize + y] += hostA[x * (clayer) + z] * hostB[z * bsize + y];
				}
				hostD[x * bsize + y] = F.f(hostD[x * bsize + y]);
			}
		}
		if(!debug) t.lap("CPU serial mmul elapsed time");
		if(debug){
			gnn_kernels::printGPU(devC,m,k);
			cudaDeviceSynchronize(); printf("<----->\n");
			gnn_kernels::printGPU(devA,m,n);
			cudaDeviceSynchronize(); printf("<----->\n");
			gnn_kernels::printGPU(devB,n,k);
			cudaDeviceSynchronize(); printf("<----->\n");
			for(int x = 0; x<m * k;x++){
				printf("%.4f ", hostD[x]);
				if((x+1)%k==0) printf("\n");
			}
		}else{
			for(int x = 0; x<nlayer * bsize;x++){
				if(((hostD[x] - hostC[x]) > 0.001 )){
					printf("Result matrices do not match(%f,%f)!!!\n",hostD[x],hostC[x] );
				}
			}
		}*/
	}else if(test == TMMUL){
		// devB = devA * devC
		// (n x k) = (m x n) (m x k) <=> (n x k) = (m x n)^T (m x k) <=> (n x k) = (n x m) (m x k)
		Time<millis> t;
		dim3 agrid((bsize - 1)/TTILE + 1, (clayer - 1)/TTILE + 1);
		dim3 ablock(TTILE,TTILE);
		t.start();
		gnn_kernels::tmmul<DATA_T,TTILE><<<agrid,ablock>>>(
				devB,//n
				devA,//
				devC,//
				clayer,
				nlayer,
				bsize
		);
		handleDeviceErrors(cudaDeviceSynchronize(),"Error executing tmmul kernel");
		t.lap("GPU serial tmmul elapsed time");
		allocHostMem<DATA_T>(&hostD,sizeof(DATA_T) * clayer * bsize, "Error allocating devC memory");
		safeCpyToHost<DATA_T>(hostA,devA,sizeof(DATA_T)*nlayer*clayer,"Error copying devA to host");
		safeCpyToHost<DATA_T>(hostB,devB,sizeof(DATA_T)*clayer*bsize,"Error copying devB to host");
		safeCpyToHost<DATA_T>(hostC,devC,sizeof(DATA_T)*nlayer*bsize,"Error copying devC to host");
		/*t.start();
		for(int x = 0; x < clayer; x++){//3
			for (int y = 0; y < bsize; y++){//3
				hostD[x * bsize + y] = 0.0;
				for (int z = 0; z < nlayer; z++){//2
					hostD[x * bsize + y] += hostA[z * clayer + x] * hostC[z * bsize + y];
				}
			}
		}
		if(!debug) t.lap("CPU serial mmul elapsed time");
		if(debug){
			gnn_kernels::printGPU(devB,clayer,bsize);
			cudaDeviceSynchronize();
			for(int x = 0; x<clayer * bsize;x++){
				printf("%.4f ", hostD[x]);
				if((x+1)%k==0) printf("\n");
			}
		}else{
			for(int x = 0; x<clayer * bsize;x++){
				if(((hostD[x] - hostB[x]) > 0.001 )){
					printf("Result matrices do not match(%f,%f)!!!\n",hostD[x],hostB[x] );
				}
			}
		}*/
		// hostD is released once at the end of the function; the free that used
		// to live here caused a double cudaFreeHost on this branch.
	}else if (test == MHPROD){
		dim3 dgrid((bsize - 1) / TTILE + 1, (clayer - 1) / TTILE + 1);
		dim3 dblock(TTILE, TTILE);
		if(debug){
			printf("D=");
			gnn_kernels::printGPU(devC,nlayer,bsize);
			cudaDeviceSynchronize();
			printf("W=");
			gnn_kernels::printGPU(devA,nlayer,clayer);
			cudaDeviceSynchronize();
			printf("A=");
			gnn_kernels::printGPU(devB,clayer-1,bsize);
			cudaDeviceSynchronize();
		}
		Time<millis> t;
		t.start();
		gnn_kernels::hmprod_mmul<DATA_T,ACT_F,TTILE><<<dgrid, dblock>>>(
				devC,
				devA,
				devB,
				F,
				nlayer,
				clayer-1,
				bsize
		);
		handleDeviceErrors(cudaDeviceSynchronize(),"Error executing hmprod_tmmul kernel");
		t.lap("GPU serial hmprod elapsed time");
		allocHostMem<DATA_T>(&hostD,sizeof(DATA_T) * nlayer * bsize, "Error allocating devC memory");
		safeCpyToHost<DATA_T>(hostA,devA,sizeof(DATA_T)*nlayer*clayer,"Error copying devA to host");
		safeCpyToHost<DATA_T>(hostB,devB,sizeof(DATA_T)*clayer*bsize,"Error copying devB to host");
		safeCpyToHost<DATA_T>(hostC,devC,sizeof(DATA_T)*nlayer*bsize,"Error copying devC to host");
		/*for(int x = 0; x < nlayer; x++){//3
			for (int y = 0; y < bsize; y++){//3
				hostD[x * bsize + y] = hostA[x * (clayer) + clayer - 1];
				for (int z = 0; z < clayer - 1; z++){//2
					hostD[x * bsize + y] += hostA[x * (clayer) + z] * hostB[z * bsize + y];
				}
				hostD[x * bsize + y] = F.f(hostD[x * bsize + y]);
			}
		}*/
		if(debug){
			printf("R=");
			gnn_kernels::printGPU(devC,nlayer,bsize);
			cudaDeviceSynchronize();
			for(int x = 0; x<nlayer * bsize;x++){
				printf("%.4f ", hostD[x]);
				if((x+1)%k==0) printf("\n");
			}
		}
	}else if( test == TVECPVEC ){
		dim3 grid((clayer - 1)/TTILE + 1, (nlayer - 1)/TTILE + 1 );
		dim3 block(TTILE,TTILE);
		if(false){
			//print_grid(grid,block);
			printf("W=");
			gnn_kernels::printGPU(devA,nlayer,clayer);
			cudaDeviceSynchronize();
			printf("D=");
			gnn_kernels::printGPU(devC,nlayer,bsize);
			cudaDeviceSynchronize();
			printf("A=");
			gnn_kernels::printGPU(devB,clayer-1,bsize);
			cudaDeviceSynchronize();
			printf("E=tvecpvec(W,D,A,%f,%d)\n",0.3233,bsize);
		}
		Time<millis> t;
		t.start();
		gnn_kernels::tvecpvec<DATA_T,TTILE><<<grid,block>>>(
				devA,
				devC,
				devB,
				nlayer,
				bsize,
				clayer-1,
				0.0231
		);
		handleDeviceErrors(cudaDeviceSynchronize(),"Error executing tvecpvec kernel");
		t.lap("GPU serial tvecpvec elapsed time");
		if(debug){
			//printf("R=");
			gnn_kernels::printGPU(devA,nlayer,clayer);
			cudaDeviceSynchronize();
			//printf("round(R-E)\n");
		}
	}
	cudaFree(devA); cudaFree(devB); cudaFree(devC);
	cudaFreeHost(hostA); cudaFreeHost(hostB); cudaFreeHost(hostC);
	if(hostD != NULL) cudaFreeHost(hostD);	// fix: was freed unconditionally (uninitialized on the TVECPVEC path)
	cudaDeviceReset();
}
// Explicit template instantiations: the member definitions live in this .cu
// translation unit, so every <DATA_T, ACT_F> combination used by the rest of
// the project must be materialized here.
template class GNeuralNetwork<float,gnn_actf::Sigmoid>;
template class GNeuralNetwork<float,gnn_actf::FSigmoid>;
template class GNeuralNetwork<float,gnn_actf::Arctan>;
// Double-precision variants are currently disabled.
//template class GNeuralNetwork<double,gnn_actf::Sigmoid>;
//template class GNeuralNetwork<double,gnn_actf::FSigmoid>;
//template class GNeuralNetwork<double,gnn_actf::Arctan>;
}
|
5decb59f766684b0cbf6e842ebbe5a4bee07e5a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "header.h"
/*
 * Allocates and zero-initializes the device-global triples buffer t3_d with
 * h1d*h2d*h3d*p4d*p5d*p6d doubles.
 * Fix: the element count was accumulated in an int, which overflows for the
 * products of six dimensions that occur in real runs; use size_t instead.
 */
extern "C" void set_dev_mem_d(int h1d, int h2d, int h3d, int p4d, int p5d,int p6d)
{
size_t size_t3;
size_t3 = (size_t)h1d*h2d*h3d*p4d*p5d*p6d;
t3_d = (double *) getGpuMem(size_t3*sizeof(double));
hipMemset(t3_d,0,size_t3*sizeof(double));
}
/* Fortran-callable wrapper: converts the Integer (Fortran) arguments to int
 * and forwards to set_dev_mem_d. */
extern "C" void
dev_mem_d_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d)
{
set_dev_mem_d((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d);
}
/* Releases the device-resident triples buffers.
 * NOTE(review): t3_s_d is freed here but is not allocated by set_dev_mem_d in
 * this file; presumably it is allocated elsewhere -- confirm it is always
 * valid before dev_release is called. */
extern "C" void
dev_release()
{
freeGpuMem(t3_d);
freeGpuMem(t3_s_d);
}
/* Fortran-callable wrapper for dev_release (trailing-underscore name mangling). */
extern "C" void
dev_release_()
{
dev_release();
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,p6,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/*
 * Machine-generated TCE contraction kernel (CCSD(T) triples, d1_1 term):
 *     triplesx[h3,h1,p6,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
 * Launch contract: blockDim = (T2,T1); each thread accumulates a 4x4 register
 * tile (tlocal1..tlocal16), so one block covers 4*T2 x 4*T1 elements of the
 * flattened output space (x = h3*p6, y = h1*p5*p4), bounded by total_x/total_y.
 * The contraction index h7 is tiled in chunks of Tcomm through the two shared
 * memory buffers. The *_ld_* parameters are precomputed strides (leading
 * dimensions) for each tensor; unused_idx is ignored.
 */
__global__ void sd_t_d1_1_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int p6ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
/* Shared staging tiles for the two operands of the h7 contraction. */
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decode the four (x,y) sub-tile coordinates handled by this thread into the
 * tensor indices (h3,p6) from x and (h1,p5,p4) from y. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
/* Main contraction loop: stage a Tcomm-wide slab of each operand into shared
 * memory, synchronize, then accumulate the 4x4 outer product per thread. */
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
/* Barrier between staging and consuming the shared tiles (and again after
 * the inner loop, before the next slab overwrites them). */
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
/* Write-back: subtract each register-tile entry whose (x,y) coordinates fall
 * inside the valid output range (guards handle the partial edge tiles). */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p6_0*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p6_0*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p6_0*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+p6_0*p6ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p6_0*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p6_0*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p6_0*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p6_0*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p6_0*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p6_0*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p6_1*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p6_1*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p6_1*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+p6_1*p6ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p6_1*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p6_1*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p6_1*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p6_1*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p6_1*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p6_1*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p6_2*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p6_2*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p6_2*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+p6_2*p6ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p6_2*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p6_2*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p6_2*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p6_2*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p6_2*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p6_2*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p6_3*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p6_3*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p6_3*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+p6_3*p6ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p6_3*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p6_3*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p6_3*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p6_3*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p6_3*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p6_3*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
}
}
__syncthreads();
}
/*
 * Host driver for sd_t_d1_1_kernel:
 *     t3[h3,h1,p6,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
 * h2 is folded into h3 (both are contiguous in the output layout). Copies the
 * two input tensors to the device, launches the tiled contraction on one
 * stream, and accumulates the result into the device-global t3_d.
 * NOTE(review): the `triplesx` argument is not used here; the result stays in
 * t3_d (allocated by set_dev_mem_d) -- confirm callers read it back later.
 * Cleanup vs. previous version: removed the dead size_triplesx/
 * size_block_triplesx/size_el_block_triplesx locals and added an error check
 * on the final device synchronization.
 */
extern "C" void sd_t_d1_1_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
h3d=h3d*h2d;	/* fold h2 into h3 */
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p6ld_triplesx,p5ld_triplesx,p4ld_triplesx;
size_t size_t2sub,size_v2sub;
hipStream_t *streams;
size_t nstreams,i;
double *t2sub_d,*v2sub_d;
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*p6d*h7d*sizeof(double);
/* Prefer shared memory: the kernel stages both operands in shared tiles. */
hipFuncSetCacheConfig(sd_t_d1_1_kernel, hipFuncCachePreferShared);
nstreams=1;
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2sub_d,t2sub,size_t2sub,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2sub_d,v2sub,size_v2sub,hipMemcpyHostToDevice));
/* Leading dimensions (strides): t2sub is [h7,p4,p5,h1], v2sub is [h3,p6,h7],
 * the output is [h3,h1,p6,p5,p4], each fastest-varying index first. */
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
p6ld_v2sub=h3d;
h7ld_v2sub=p6d*h3d;
h3ld_triplesx=1;
h1ld_triplesx=h3d;
p6ld_triplesx=h1d*h3d;
p5ld_triplesx=p6d*h1d*h3d;
p4ld_triplesx=p5d*p6d*h1d*h3d;
int total_x = h3d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d1_1_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p6ld_triplesx,p5ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
CUDA_SAFE(hipDeviceSynchronize());
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable wrapper: converts Integer (Fortran) arguments to int and
 * forwards to sd_t_d1_1_cuda. */
extern "C" void sd_t_d1_1_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
sd_t_d1_1_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,h2,p5,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,h7]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/*
 * Machine-generated TCE contraction kernel (CCSD(T) triples, d1_2 term):
 *     triplesx[h3,h1,h2,p5,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,h7]
 * Same structure as sd_t_d1_1_kernel (4x4 register tile per thread, h7 tiled
 * by Tcomm through shared memory) but the second output index is h2 instead
 * of p6 and the contribution is ADDED rather than subtracted.
 * Launch contract: blockDim = (T2,T1); a block covers 4*T2 x 4*T1 elements of
 * the flattened (x = h3*h2, y = h1*p5*p4) output space bounded by
 * total_x/total_y; *_ld_* are precomputed strides; unused_idx is ignored.
 */
__global__ void sd_t_d1_2_kernel(int h1d,int h2d,int h3d,int h7d,int p4d,int p5d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int h2ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int h2ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
/* Shared staging tiles for the two operands of the h7 contraction. */
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decode the four (x,y) sub-tile coordinates handled by this thread into the
 * tensor indices (h3,h2) from x and (h1,p5,p4) from y. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
h2_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
h2_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
h2_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
h2_3=rest_x;
/* Main contraction loop: stage a Tcomm-wide slab of each operand into shared
 * memory, synchronize, then accumulate the 4x4 outer product per thread. */
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+h2_0*h2ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+h2_1*h2ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+h2_2*h2ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+h2_3*h2ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
/* Barrier between staging and consuming the shared tiles (and again after
 * the inner loop, before the next slab overwrites them). */
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
/* Write-back: add each register-tile entry whose (x,y) coordinates fall
 * inside the valid output range (guards handle the partial edge tiles). */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+h2_0*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+h2_1*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+h2_2*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+h2_3*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
}
}
__syncthreads();
}
/*
 * Host driver for sd_t_d1_2_kernel:
 *     t3[h3,h1,h2,p5,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,h7]
 * p6 is folded into h2 (both are contiguous in the output layout).
 * NOTE(review): the `triplesx` argument is not used here; the result is
 * accumulated into the device-global t3_d -- confirm callers read it back.
 * Fixes vs. previous version: the CUDA_SAFE checks around the two hipMemcpy
 * calls had been commented out (errors were silently ignored, unlike the
 * sibling sd_t_d1_1_cuda); they are restored. Also removed the dead
 * size_triplesx/size_block_triplesx/size_el_block_triplesx locals and added
 * an error check on the final device synchronization.
 */
extern "C" void sd_t_d1_2_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
h2d=h2d*p6d;	/* fold p6 into h2 */
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p4ld_triplesx;
size_t size_t2sub,size_v2sub;
hipStream_t *streams;
size_t nstreams,i;
double *t2sub_d,*v2sub_d;
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*h2d*h7d*sizeof(double);
/* Prefer shared memory: the kernel stages both operands in shared tiles. */
hipFuncSetCacheConfig(sd_t_d1_2_kernel, hipFuncCachePreferShared);
nstreams=1;
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2sub_d,t2sub,size_t2sub,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2sub_d,v2sub,size_v2sub,hipMemcpyHostToDevice));
/* Leading dimensions (strides): t2sub is [h7,p4,p5,h1], v2sub is [h3,h2,h7],
 * the output is [h3,h1,h2,p5,p4], each fastest-varying index first. */
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
h2ld_v2sub=h3d;
h7ld_v2sub=h2d*h3d;
h3ld_triplesx=1;
h1ld_triplesx=h3d;
h2ld_triplesx=h1d*h3d;
p5ld_triplesx=h2d*h1d*h3d;
p4ld_triplesx=p5d*h2d*h1d*h3d;
int total_x = h3d*h2d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d1_2_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,h7d,p4d,p5d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
CUDA_SAFE(hipDeviceSynchronize());
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable shim: dereference the Integer* dimension arguments,
 * narrow them to int, and forward to the C driver. */
extern "C" void sd_t_d1_2_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
int ih1 = (int)*h1d, ih2 = (int)*h2d, ih3 = (int)*h3d, ih7 = (int)*h7d;
int ip4 = (int)*p4d, ip5 = (int)*p5d, ip6 = (int)*p6d;
sd_t_d1_2_cuda(ih1, ih2, ih3, ih7, ip4, ip5, ip6, triplesx, t2sub, v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h1,h3,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h7]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Tiled contraction kernel (machine-generated, TCE style):
 *   triplesx[h1,h3,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h7]
 * Launch contract: blockDim = (T2,T1) = (16,16). Each thread accumulates a
 * 4x4 register tile, so one block covers 4*T2 output columns (x axis: h3)
 * by 4*T1 output rows (y axis: h1,p5,p4 flattened). The contracted index h7
 * is processed in slices of Tcomm, staged through shared memory.
 * NOTE(review): T1 == T2 == 16 here; several x-side expressions use T1 where
 * T2 would be the symmetric choice — harmless only while the macros stay
 * equal. unused_idx is the stream index from the host driver; it is unused. */
__global__ void sd_t_d1_3_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int h7ld_v2sub,int h1ld_triplesx,int h3ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
/* Shared staging tiles for the current h7 slice of each operand. */
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
/* Global coordinates of this thread's first (of 4) output column/row. */
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 register accumulator tile (tlocal1..16). */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decompose each of the 4 x-offsets / 4 y-offsets into tensor indices. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
h3_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
h3_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
h3_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
h3_3=rest_x;
/* March over the contracted index h7 in slices of Tcomm. */
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
/* Cooperative loads: rows of t2sub and columns of v2sub for this slice.
 * The per-offset bounds checks mirror the write-back checks below. */
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
/* Barrier: slice fully staged before anyone reads it. */
__syncthreads();
/* Rank-1 updates of the 4x4 register tile over this h7 slice. */
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier: finish reading before the next slice overwrites the tiles. */
__syncthreads();
}
/* Write-back: subtract the accumulated tile from triplesx, honoring the
 * ragged block edges via the same x/y bounds checks used when loading. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
triplesx_d[h1_3*h1ld_triplesx+h3_0*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
triplesx_d[h1_3*h1ld_triplesx+h3_1*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
triplesx_d[h1_3*h1ld_triplesx+h3_2*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
triplesx_d[h1_3*h1ld_triplesx+h3_3*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
}
}
__syncthreads();
}
/* Host-side launcher for sd_t_d1_3_kernel:
 *   triplesx[h1,h3,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h7]
 * The h2 and p6 extents are flattened into h3 before any size/stride math.
 * NOTE(review): the kernel accumulates into the global device buffer t3_d;
 * the `triplesx` argument is not copied here (same as the sibling drivers). */
extern "C" void sd_t_d1_3_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
/* Fold h2 and p6 into the h3 extent. */
h3d = h3d * h2d * p6d;
/* The kernel stages operands through shared memory; prefer it. */
hipFuncSetCacheConfig(sd_t_d1_3_kernel, hipFuncCachePreferShared);
/* Byte sizes of the participating arrays. */
size_t size_triplesx = h1d*h3d*p5d*p4d*sizeof(double);
size_t size_t2sub = h7d*p4d*p5d*h1d*sizeof(double);
size_t size_v2sub = h3d*h7d*sizeof(double);
size_t nstreams = 1;
/* Retained from the code generator for parity; not used below. */
size_t size_block_triplesx = size_triplesx/nstreams;
size_t size_el_block_triplesx = size_block_triplesx/sizeof(double);
(void)size_el_block_triplesx;
(void)triplesx;
/* Device buffers for the two input operands. */
double *t2sub_d = (double*)getGpuMem(size_t2sub);
double *v2sub_d = (double*)getGpuMem(size_v2sub);
hipStream_t *streams = (hipStream_t*)malloc(nstreams*sizeof(hipStream_t));
assert(streams != NULL);
for (size_t s = 0; s < nstreams; ++s) {
CUDA_SAFE(hipStreamCreate(&streams[s]));
}
CUDA_SAFE(hipMemcpy(t2sub_d, t2sub, size_t2sub, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2sub_d, v2sub, size_v2sub, hipMemcpyHostToDevice));
/* Leading dimensions (column-major-style strides) for each tensor. */
size_t h7ld_t2sub = 1;
size_t p4ld_t2sub = h7d;
size_t p5ld_t2sub = p4d*h7d;
size_t h1ld_t2sub = p5d*p4d*h7d;
size_t h3ld_v2sub = 1;
size_t h7ld_v2sub = h3d;
size_t h1ld_triplesx = 1;
size_t h3ld_triplesx = h1d;
size_t p5ld_triplesx = h3d*h1d;
size_t p4ld_triplesx = p5d*h3d*h1d;
/* Output extents consumed by the kernel's ragged-edge bounds checks. */
int total_x = h3d;
int total_y = p4d*p5d*h1d;
/* Each thread owns a 4x4 register tile, hence the 4*T factors. */
dim3 dimBlock(T2, T1);
dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for (size_t s = 0; s < nstreams; ++s) {
hipLaunchKernelGGL(( sd_t_d1_3_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[s], h1d,h3d,h7d,p4d,p5d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,s,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for (size_t s = 0; s < nstreams; ++s) {
hipStreamDestroy(streams[s]);
}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable shim: dereference the Integer* dimension arguments,
 * narrow them to int, and forward to the C driver. */
extern "C" void sd_t_d1_3_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
int ih1 = (int)*h1d, ih2 = (int)*h2d, ih3 = (int)*h3d, ih7 = (int)*h7d;
int ip4 = (int)*p4d, ip5 = (int)*p5d, ip6 = (int)*p6d;
sd_t_d1_3_cuda(ih1, ih2, ih3, ih7, ip4, ip5, ip6, triplesx, t2sub, v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Tiled contraction kernel (machine-generated, TCE style):
 *   triplesx[h3,h1,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
 * Launch contract: blockDim = (T2,T1) = (16,16). Each thread accumulates a
 * 4x4 register tile, so one block covers 4*T2 output columns (x axis:
 * h3,p6 flattened) by 4*T1 output rows (y axis: h1,p5,p4 flattened). The
 * contracted index h7 is processed in slices of Tcomm through shared memory.
 * NOTE(review): T1 == T2 == 16 here; several x-side expressions use T1 where
 * T2 would be the symmetric choice — harmless only while the macros stay
 * equal. unused_idx is the stream index from the host driver; it is unused. */
__global__ void sd_t_d1_4_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,int p6ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
/* Shared staging tiles for the current h7 slice of each operand. */
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
/* Global coordinates of this thread's first (of 4) output column/row. */
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 register accumulator tile (tlocal1..16). */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decompose each of the 4 x-offsets / 4 y-offsets into tensor indices. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
/* March over the contracted index h7 in slices of Tcomm. */
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
/* Cooperative loads: rows of t2sub and columns of v2sub for this slice.
 * The per-offset bounds checks mirror the write-back checks below. */
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
/* Barrier: slice fully staged before anyone reads it. */
__syncthreads();
/* Rank-1 updates of the 4x4 register tile over this h7 slice. */
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier: finish reading before the next slice overwrites the tiles. */
__syncthreads();
}
/* Write-back: subtract the accumulated tile from triplesx, honoring the
 * ragged block edges via the same x/y bounds checks used when loading. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
}
}
__syncthreads();
}
/* Host-side launcher for sd_t_d1_4_kernel:
 *   triplesx[h3,h1,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
 * The h2 extent is flattened into h3 before any size/stride math.
 * NOTE(review): the kernel accumulates into the global device buffer t3_d;
 * the `triplesx` argument is not copied here (same as the sibling drivers). */
extern "C" void sd_t_d1_4_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
/* Fold h2 into the h3 extent. */
h3d = h3d * h2d;
/* The kernel stages operands through shared memory; prefer it. */
hipFuncSetCacheConfig(sd_t_d1_4_kernel, hipFuncCachePreferShared);
/* Byte sizes of the participating arrays. */
size_t size_triplesx = h3d*h1d*p5d*p4d*p6d*sizeof(double);
size_t size_t2sub = h7d*p4d*p5d*h1d*sizeof(double);
size_t size_v2sub = h3d*p6d*h7d*sizeof(double);
size_t nstreams = 1;
/* Retained from the code generator for parity; not used below. */
size_t size_block_triplesx = size_triplesx/nstreams;
size_t size_el_block_triplesx = size_block_triplesx/sizeof(double);
(void)size_el_block_triplesx;
(void)triplesx;
/* Device buffers for the two input operands. */
double *t2sub_d = (double*)getGpuMem(size_t2sub);
double *v2sub_d = (double*)getGpuMem(size_v2sub);
hipStream_t *streams = (hipStream_t*)malloc(nstreams*sizeof(hipStream_t));
assert(streams != NULL);
for (size_t s = 0; s < nstreams; ++s) {
CUDA_SAFE(hipStreamCreate(&streams[s]));
}
CUDA_SAFE(hipMemcpy(t2sub_d, t2sub, size_t2sub, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2sub_d, v2sub, size_v2sub, hipMemcpyHostToDevice));
/* Leading dimensions (column-major-style strides) for each tensor. */
size_t h7ld_t2sub = 1;
size_t p4ld_t2sub = h7d;
size_t p5ld_t2sub = p4d*h7d;
size_t h1ld_t2sub = p5d*p4d*h7d;
size_t h3ld_v2sub = 1;
size_t p6ld_v2sub = h3d;
size_t h7ld_v2sub = p6d*h3d;
size_t h3ld_triplesx = 1;
size_t h1ld_triplesx = h3d;
size_t p5ld_triplesx = h1d*h3d;
size_t p4ld_triplesx = p5d*h1d*h3d;
size_t p6ld_triplesx = p4d*p5d*h1d*h3d;
/* Output extents consumed by the kernel's ragged-edge bounds checks. */
int total_x = h3d*p6d;
int total_y = p4d*p5d*h1d;
/* Each thread owns a 4x4 register tile, hence the 4*T factors. */
dim3 dimBlock(T2, T1);
dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for (size_t s = 0; s < nstreams; ++s) {
hipLaunchKernelGGL(( sd_t_d1_4_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[s], h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx,t3_d,t2sub_d,v2sub_d,s,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for (size_t s = 0; s < nstreams; ++s) {
hipStreamDestroy(streams[s]);
}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable shim: dereference the Integer* dimension arguments,
 * narrow them to int, and forward to the C driver. */
extern "C" void sd_t_d1_4_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
int ih1 = (int)*h1d, ih2 = (int)*h2d, ih3 = (int)*h3d, ih7 = (int)*h7d;
int ip4 = (int)*p4d, ip5 = (int)*p5d, ip6 = (int)*p6d;
sd_t_d1_4_cuda(ih1, ih2, ih3, ih7, ip4, ip5, ip6, triplesx, t2sub, v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,h2,p5,p4,p6] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d1_5_kernel(int h1d,int h2d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int h2ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int h2ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,int p6ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h2_0=rest_x%h2d;
rest_x=rest_x/h2d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h2_1=rest_x%h2d;
rest_x=rest_x/h2d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h2_2=rest_x%h2d;
rest_x=rest_x/h2d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h2_3=rest_x%h2d;
rest_x=rest_x/h2d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+h2_0*h2ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+h2_1*h2ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+h2_2*h2ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+h2_3*h2ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+h2_0*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+h2_1*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+h2_2*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+h2_3*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal13;
}
}
__syncthreads();
}
/* Host driver for sd_t_d1_5_kernel.
 * Contraction: triplesx[h3,h1,h2,p5,p4,p6] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
 * Inputs t2sub/v2sub are copied host->device; the kernel accumulates into the
 * file-scope device buffer t3_d, so the triplesx host pointer is not touched
 * here (NOTE(review): this follows the convention used throughout this file —
 * confirm t3_d holds the triples buffer before calling).
 */
extern "C" void sd_t_d1_5_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
double *t2sub_d, *v2sub_d;
hipStream_t *streams;
size_t nstreams, s;
/* byte sizes of the participating tensors */
size_t size_triplesx = h3d*h1d*h2d*p5d*p4d*p6d*sizeof(double);
size_t size_t2sub = h7d*p4d*p5d*h1d*sizeof(double);
size_t size_v2sub = h3d*h2d*p6d*h7d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d1_5_kernel, hipFuncCachePreferShared);
nstreams = 1;
/* retained from the generator output; not consumed below */
size_t size_block_triplesx = size_triplesx/nstreams;
size_t size_el_block_triplesx = size_block_triplesx/sizeof(double);
(void)size_el_block_triplesx;
/* device buffers for the two input tensors */
t2sub_d = (double*)getGpuMem(size_t2sub);
v2sub_d = (double*)getGpuMem(size_v2sub);
streams = (hipStream_t*)malloc(nstreams*sizeof(hipStream_t));
assert(streams != NULL);
for (s = 0; s < nstreams; ++s) {
CUDA_SAFE(hipStreamCreate(&streams[s]));
}
CUDA_SAFE(hipMemcpy(t2sub_d, t2sub, size_t2sub, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2sub_d, v2sub, size_v2sub, hipMemcpyHostToDevice));
/* leading dimensions: t2sub laid out as [h7,p4,p5,h1], h7 fastest */
size_t h7ld_t2sub = 1;
size_t p4ld_t2sub = h7d;
size_t p5ld_t2sub = p4d*h7d;
size_t h1ld_t2sub = p5d*p4d*h7d;
/* v2sub laid out as [h3,h2,p6,h7] */
size_t h3ld_v2sub = 1;
size_t h2ld_v2sub = h3d;
size_t p6ld_v2sub = h2d*h3d;
size_t h7ld_v2sub = p6d*h2d*h3d;
/* triplesx laid out as [h3,h1,h2,p5,p4,p6] */
size_t h3ld_triplesx = 1;
size_t h1ld_triplesx = h3d;
size_t h2ld_triplesx = h1d*h3d;
size_t p5ld_triplesx = h2d*h1d*h3d;
size_t p4ld_triplesx = p5d*h2d*h1d*h3d;
size_t p6ld_triplesx = p4d*p5d*h2d*h1d*h3d;
/* x covers the v2sub external indices, y the t2sub external indices */
int total_x = h3d*h2d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2, T1);
dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for (s = 0; s < nstreams; ++s) {
hipLaunchKernelGGL(( sd_t_d1_5_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[s], h1d,h2d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx,t3_d,t2sub_d,v2sub_d,s,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for (s = 0; s < nstreams; ++s) {
hipStreamDestroy(streams[s]);
}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable shim: arguments arrive by reference; dereference the
 * Integer dimension pointers, narrow to int, and forward to the C driver. */
extern "C" void sd_t_d1_5_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
int ih1 = (int)*h1d, ih2 = (int)*h2d, ih3 = (int)*h3d, ih7 = (int)*h7d;
int ip4 = (int)*p4d, ip5 = (int)*p5d, ip6 = (int)*p6d;
sd_t_d1_5_cuda(ih1, ih2, ih3, ih7, ip4, ip5, ip6, triplesx, t2sub, v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h1,h3,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Device kernel for sd_t_d1_6:
 *   triplesx[h1,h3,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
 * Launched with a (T2,T1) = (16,16) thread block; each thread computes a 4x4
 * tile of outputs (tlocal1..tlocal16), so a block covers 4*T2 x 4*T1 of the
 * flattened (total_x, total_y) output space. The contraction index h7 is
 * blocked by Tcomm through the two shared-memory tiles below.
 * The *ld_* arguments are precomputed leading dimensions (strides) for each
 * tensor index; h3d arrives pre-multiplied by h2d from the host wrapper.
 * NOTE(review): the x-direction guards and v2sub_shm column offsets use T1
 * where T2 appears intended — benign only because T1 == T2 == 16 here.
 */
__global__ void sd_t_d1_6_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h1ld_triplesx,int h3ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,int p6ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
/* Shared tiles: t2sub rows (y outputs) x h7 block, and h7 block x v2sub
 * columns (x outputs). Uninitialized; guarded loads below fill only the
 * in-range rows/columns, and the product loop reads the same guarded lanes. */
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
/* Global output coordinates of this thread's first (offset 0) tile element. */
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 register accumulator for this thread's output tile. */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decode the four (x,y) tile offsets into tensor indices:
 * y decomposes as h1, then p5, remainder p4; x decomposes as h3, remainder p6. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
/* March over the contraction index h7 in blocks of Tcomm. */
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
/* Cooperative, bounds-guarded loads of both tiles for this h7 block;
 * each of the four sub-tiles is loaded by striding over blockDim. */
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
/* Barrier: tiles fully written before any thread reads them. */
__syncthreads();
/* Rank-1 updates over the h7 block: 4 t2sub rows x 4 v2sub columns. */
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier before the tiles are overwritten by the next h7 block. */
__syncthreads();
}
/* Guarded write-back: subtract the accumulated tile from triplesx.
 * Each x sub-tile is handled separately; within it, the if/else ladder
 * writes only as many y sub-tiles as remain in range. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal3;
triplesx_d[h1_3*h1ld_triplesx+h3_0*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal7;
triplesx_d[h1_3*h1ld_triplesx+h3_1*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal11;
triplesx_d[h1_3*h1ld_triplesx+h3_2*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal15;
triplesx_d[h1_3*h1ld_triplesx+h3_3*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
}
}
__syncthreads();
}
/* Host driver for sd_t_d1_6_kernel.
 * Contraction: triplesx[h1,h3,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
 * h3 and h2 are fused into a single index (h3d *= h2d) before any sizes or
 * strides are computed. The kernel accumulates into the file-scope device
 * buffer t3_d; the triplesx host pointer is not touched here (file convention).
 */
extern "C" void sd_t_d1_6_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
h3d = h3d*h2d; /* fuse [h3,h2] -> h3 */
double *t2sub_d, *v2sub_d;
hipStream_t *streams;
size_t nstreams, s;
/* byte sizes of the participating tensors */
size_t size_triplesx = h1d*h3d*p5d*p4d*p6d*sizeof(double);
size_t size_t2sub = h7d*p4d*p5d*h1d*sizeof(double);
size_t size_v2sub = h3d*p6d*h7d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d1_6_kernel, hipFuncCachePreferShared);
nstreams = 1;
/* retained from the generator output; not consumed below */
size_t size_block_triplesx = size_triplesx/nstreams;
size_t size_el_block_triplesx = size_block_triplesx/sizeof(double);
(void)size_el_block_triplesx;
t2sub_d = (double*)getGpuMem(size_t2sub);
v2sub_d = (double*)getGpuMem(size_v2sub);
streams = (hipStream_t*)malloc(nstreams*sizeof(hipStream_t));
assert(streams != NULL);
for (s = 0; s < nstreams; ++s) {
CUDA_SAFE(hipStreamCreate(&streams[s]));
}
CUDA_SAFE(hipMemcpy(t2sub_d, t2sub, size_t2sub, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2sub_d, v2sub, size_v2sub, hipMemcpyHostToDevice));
/* leading dimensions: t2sub laid out as [h7,p4,p5,h1], h7 fastest */
size_t h7ld_t2sub = 1;
size_t p4ld_t2sub = h7d;
size_t p5ld_t2sub = p4d*h7d;
size_t h1ld_t2sub = p5d*p4d*h7d;
/* v2sub laid out as [h3,p6,h7] */
size_t h3ld_v2sub = 1;
size_t p6ld_v2sub = h3d;
size_t h7ld_v2sub = p6d*h3d;
/* triplesx laid out as [h1,h3,p5,p4,p6] */
size_t h1ld_triplesx = 1;
size_t h3ld_triplesx = h1d;
size_t p5ld_triplesx = h3d*h1d;
size_t p4ld_triplesx = p5d*h3d*h1d;
size_t p6ld_triplesx = p4d*p5d*h3d*h1d;
/* x covers the v2sub external indices, y the t2sub external indices */
int total_x = h3d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2, T1);
dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for (s = 0; s < nstreams; ++s) {
hipLaunchKernelGGL(( sd_t_d1_6_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[s], h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx,t3_d,t2sub_d,v2sub_d,s,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for (s = 0; s < nstreams; ++s) {
hipStreamDestroy(streams[s]);
}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable shim: arguments arrive by reference; dereference the
 * Integer dimension pointers, narrow to int, and forward to the C driver. */
extern "C" void sd_t_d1_6_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
int ih1 = (int)*h1d, ih2 = (int)*h2d, ih3 = (int)*h3d, ih7 = (int)*h7d;
int ip4 = (int)*p4d, ip5 = (int)*p5d, ip6 = (int)*p6d;
sd_t_d1_6_cuda(ih1, ih2, ih3, ih7, ip4, ip5, ip6, triplesx, t2sub, v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Device kernel for sd_t_d1_7:
 *   triplesx[h3,h1,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
 * Launched with a (T2,T1) = (16,16) thread block; each thread computes a 4x4
 * tile of outputs (tlocal1..tlocal16), so a block covers 4*T2 x 4*T1 of the
 * flattened (total_x, total_y) output space. The contraction index h7 is
 * blocked by Tcomm through the two shared-memory tiles below.
 * The *ld_* arguments are precomputed leading dimensions (strides) for each
 * tensor index; h3d arrives pre-multiplied by h2d from the host wrapper.
 * NOTE(review): the x-direction guards and v2sub_shm column offsets use T1
 * where T2 appears intended — benign only because T1 == T2 == 16 here.
 */
__global__ void sd_t_d1_7_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int p5ld_triplesx,int p6ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
/* Shared tiles: t2sub rows (y outputs) x h7 block, and h7 block x v2sub
 * columns (x outputs). Uninitialized; guarded loads below fill only the
 * in-range rows/columns, and the product loop reads the same guarded lanes. */
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
/* Global output coordinates of this thread's first (offset 0) tile element. */
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 register accumulator for this thread's output tile. */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decode the four (x,y) tile offsets into tensor indices:
 * x decomposes as h3, remainder p6; y decomposes as h1, then p5, remainder p4. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
/* March over the contraction index h7 in blocks of Tcomm. */
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
/* Cooperative, bounds-guarded loads of both tiles for this h7 block;
 * each of the four sub-tiles is loaded by striding over blockDim. */
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
/* Barrier: tiles fully written before any thread reads them. */
__syncthreads();
/* Rank-1 updates over the h7 block: 4 t2sub rows x 4 v2sub columns. */
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier before the tiles are overwritten by the next h7 block. */
__syncthreads();
}
/* Guarded write-back: add the accumulated tile into triplesx.
 * Each x sub-tile is handled separately; within it, the if/else ladder
 * writes only as many y sub-tiles as remain in range. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p6_0*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p6_1*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p6_2*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p6_3*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
}
}
__syncthreads();
}
/* Host driver for sd_t_d1_7_kernel.
 * Contraction: triplesx[h3,h1,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
 * h3 and h2 are fused into a single index (h3d *= h2d) before any sizes or
 * strides are computed. The kernel accumulates into the file-scope device
 * buffer t3_d; the triplesx host pointer is not touched here (file convention).
 */
extern "C" void sd_t_d1_7_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
h3d = h3d*h2d; /* fuse [h3,h2] -> h3 */
double *t2sub_d, *v2sub_d;
hipStream_t *streams;
size_t nstreams, s;
/* byte sizes of the participating tensors */
size_t size_triplesx = h3d*h1d*p5d*p6d*p4d*sizeof(double);
size_t size_t2sub = h7d*p4d*p5d*h1d*sizeof(double);
size_t size_v2sub = h3d*p6d*h7d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d1_7_kernel, hipFuncCachePreferShared);
nstreams = 1;
/* retained from the generator output; not consumed below */
size_t size_block_triplesx = size_triplesx/nstreams;
size_t size_el_block_triplesx = size_block_triplesx/sizeof(double);
(void)size_el_block_triplesx;
t2sub_d = (double*)getGpuMem(size_t2sub);
v2sub_d = (double*)getGpuMem(size_v2sub);
streams = (hipStream_t*)malloc(nstreams*sizeof(hipStream_t));
assert(streams != NULL);
for (s = 0; s < nstreams; ++s) {
CUDA_SAFE(hipStreamCreate(&streams[s]));
}
CUDA_SAFE(hipMemcpy(t2sub_d, t2sub, size_t2sub, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2sub_d, v2sub, size_v2sub, hipMemcpyHostToDevice));
/* leading dimensions: t2sub laid out as [h7,p4,p5,h1], h7 fastest */
size_t h7ld_t2sub = 1;
size_t p4ld_t2sub = h7d;
size_t p5ld_t2sub = p4d*h7d;
size_t h1ld_t2sub = p5d*p4d*h7d;
/* v2sub laid out as [h3,p6,h7] */
size_t h3ld_v2sub = 1;
size_t p6ld_v2sub = h3d;
size_t h7ld_v2sub = p6d*h3d;
/* triplesx laid out as [h3,h1,p5,p6,p4] */
size_t h3ld_triplesx = 1;
size_t h1ld_triplesx = h3d;
size_t p5ld_triplesx = h1d*h3d;
size_t p6ld_triplesx = p5d*h1d*h3d;
size_t p4ld_triplesx = p6d*p5d*h1d*h3d;
/* x covers the v2sub external indices, y the t2sub external indices */
int total_x = h3d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2, T1);
dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for (s = 0; s < nstreams; ++s) {
hipLaunchKernelGGL(( sd_t_d1_7_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[s], h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,s,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for (s = 0; s < nstreams; ++s) {
hipStreamDestroy(streams[s]);
}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable shim: arguments arrive by reference; dereference the
 * Integer dimension pointers, narrow to int, and forward to the C driver. */
extern "C" void sd_t_d1_7_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
int ih1 = (int)*h1d, ih2 = (int)*h2d, ih3 = (int)*h3d, ih7 = (int)*h7d;
int ip4 = (int)*p4d, ip5 = (int)*p5d, ip6 = (int)*p6d;
sd_t_d1_7_cuda(ih1, ih2, ih3, ih7, ip4, ip5, ip6, triplesx, t2sub, v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,h2,p5,p6,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Register-tiled contraction kernel (machine generated):
 *   triplesx[h3,h1,h2,p5,p6,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
 * Launched with block (T2,T1) and grid (DIV_UB(total_x,4*T2), DIV_UB(total_y,4*T1))
 * by sd_t_d1_8_cuda; each thread accumulates a 4x4 tile (tlocal1..tlocal16)
 * over the contracted index h7, staging t2sub/v2sub slices through shared memory.
 * total_x enumerates the flattened (h3,h2,p6) output axes, total_y the (h1,p5,p4)
 * axes.  unused_idx is the stream index and is not read. */
__global__ void sd_t_d1_8_kernel(int h1d,int h2d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int h2ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int h2ld_triplesx,int p5ld_triplesx,int p6ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
/* Shared staging tiles for the two operands along the contracted h7 axis. */
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decompose the four (x,y) tile positions handled by this thread into
 * the multi-index (h3,h2,p6) x (h1,p5,p4) for each of the 4 sub-tiles. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h2_0=rest_x%h2d;
rest_x=rest_x/h2d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h2_1=rest_x%h2d;
rest_x=rest_x/h2d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h2_2=rest_x%h2d;
rest_x=rest_x/h2d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h2_3=rest_x%h2d;
rest_x=rest_x/h2d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
/* March over the contracted index h7 in Tcomm-wide panels: load both
 * operand panels into shared memory, barrier, then accumulate. */
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+h2_0*h2ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+h2_1*h2ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+h2_2*h2ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+h2_3*h2ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
/* 4x4 outer-product accumulation over this shared-memory panel. */
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier before the next panel overwrites the shared tiles. */
__syncthreads();
}
/* Write back the surviving (in-bounds) entries of the 4x4 tile; the nested
 * if/else ladders clip the tile at the ragged right/bottom grid edges. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+h2_0*h2ld_triplesx+p5_3*p5ld_triplesx+p6_0*p6ld_triplesx+p4_3*p4ld_triplesx]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+h2_1*h2ld_triplesx+p5_3*p5ld_triplesx+p6_1*p6ld_triplesx+p4_3*p4ld_triplesx]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+h2_2*h2ld_triplesx+p5_3*p5ld_triplesx+p6_2*p6ld_triplesx+p4_3*p4ld_triplesx]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+h2_3*h2ld_triplesx+p5_3*p5ld_triplesx+p6_3*p6ld_triplesx+p4_3*p4ld_triplesx]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_8_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx;
size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub;
hipStream_t *streams;
size_t nstreams,i;
double *t2sub_d,*v2sub_d;
size_triplesx=h3d*h1d*h2d*p5d*p6d*p4d*sizeof(double);
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*h2d*p6d*h7d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d1_8_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_triplesx=size_triplesx/nstreams;
size_el_block_triplesx=size_block_triplesx/sizeof(double);
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2sub_d,t2sub,size_t2sub,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2sub_d,v2sub,size_v2sub,hipMemcpyHostToDevice));
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
h2ld_v2sub=h3d;
p6ld_v2sub=h2d*h3d;
h7ld_v2sub=p6d*h2d*h3d;
h3ld_triplesx=1;
h1ld_triplesx=h3d;
h2ld_triplesx=h1d*h3d;
p5ld_triplesx=h2d*h1d*h3d;
p6ld_triplesx=p5d*h2d*h1d*h3d;
p4ld_triplesx=p6d*p5d*h2d*h1d*h3d;
int total_x = h3d*h2d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d1_8_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_8_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
/* Fortran-callable shim: dereference the INTEGER dimension arguments,
 * narrow them to int, and forward everything to the C driver. */
int h1 = (int)*h1d;
int h2 = (int)*h2d;
int h3 = (int)*h3d;
int h7 = (int)*h7d;
int p4 = (int)*p4d;
int p5 = (int)*p5d;
int p6 = (int)*p6d;
sd_t_d1_8_cuda(h1, h2, h3, h7, p4, p5, p6, triplesx, t2sub, v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h1,h3,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Register-tiled contraction kernel (machine generated):
 *   triplesx[h1,h3,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
 * Launched with block (T2,T1) and grid (DIV_UB(total_x,4*T2), DIV_UB(total_y,4*T1))
 * by sd_t_d1_9_cuda (which folds h2 into h3 before launch); each thread
 * accumulates a 4x4 tile (tlocal1..tlocal16) over the contracted index h7,
 * staging operand slices through shared memory.  total_x enumerates the
 * flattened (h3,p6) output axes, total_y the (h1,p5,p4) axes.
 * unused_idx is the stream index and is not read. */
__global__ void sd_t_d1_9_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h1ld_triplesx,int h3ld_triplesx,int p5ld_triplesx,int p6ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
/* Shared staging tiles for the two operands along the contracted h7 axis. */
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decompose the four (x,y) tile positions handled by this thread into
 * the multi-index (h3,p6) x (h1,p5,p4) for each of the 4 sub-tiles. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
/* March over the contracted index h7 in Tcomm-wide panels: load both
 * operand panels into shared memory, barrier, then accumulate. */
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
/* 4x4 outer-product accumulation over this shared-memory panel. */
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier before the next panel overwrites the shared tiles. */
__syncthreads();
}
/* Write back the surviving (in-bounds) entries of the 4x4 tile; the nested
 * if/else ladders clip the tile at the ragged right/bottom grid edges. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
triplesx_d[h1_3*h1ld_triplesx+h3_0*h3ld_triplesx+p5_3*p5ld_triplesx+p6_0*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
triplesx_d[h1_3*h1ld_triplesx+h3_1*h3ld_triplesx+p5_3*p5ld_triplesx+p6_1*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
triplesx_d[h1_3*h1ld_triplesx+h3_2*h3ld_triplesx+p5_3*p5ld_triplesx+p6_2*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
triplesx_d[h1_3*h1ld_triplesx+h3_3*h3ld_triplesx+p5_3*p5ld_triplesx+p6_3*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_9_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
h3d=h3d*h2d;
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx;
size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub;
hipStream_t *streams;
size_t nstreams,i;
double *t2sub_d,*v2sub_d;
size_triplesx=h1d*h3d*p5d*p6d*p4d*sizeof(double);
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*p6d*h7d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d1_9_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_triplesx=size_triplesx/nstreams;
size_el_block_triplesx=size_block_triplesx/sizeof(double);
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2sub_d,t2sub,size_t2sub,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2sub_d,v2sub,size_v2sub,hipMemcpyHostToDevice));
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
p6ld_v2sub=h3d;
h7ld_v2sub=p6d*h3d;
h1ld_triplesx=1;
h3ld_triplesx=h1d;
p5ld_triplesx=h3d*h1d;
p6ld_triplesx=p5d*h3d*h1d;
p4ld_triplesx=p6d*p5d*h3d*h1d;
int total_x = h3d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d1_9_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_9_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
/* Fortran-callable shim: dereference the INTEGER dimension arguments,
 * narrow them to int, and forward everything to the C driver. */
int h1 = (int)*h1d;
int h2 = (int)*h2d;
int h3 = (int)*h3d;
int h7 = (int)*h7d;
int p4 = (int)*p4d;
int p5 = (int)*p5d;
int p6 = (int)*p6d;
sd_t_d1_9_cuda(h1, h2, h3, h7, p4, p5, p6, triplesx, t2sub, v2sub);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p6,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Register-tiled contraction kernel (machine generated):
 *   t3[h3,h2,h1,p6,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6]
 * Launched with block (T2,T1) and grid (DIV_UB(total_x,4*T2), DIV_UB(total_y,4*T1))
 * by sd_t_d2_1_cuda (which folds p5 into p6 before launch); each thread
 * accumulates a 4x4 tile (tlocal1..tlocal16) over the contracted index p7,
 * staging operand slices through shared memory.  total_x enumerates the
 * flattened (h3,p6) output axes, total_y the (h2,h1,p4) axes.
 * unused_idx is the stream index and is not read. */
__global__ void sd_t_d2_1_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
/* Shared staging tiles for the two operands along the contracted p7 axis. */
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decompose the four (x,y) tile positions handled by this thread into
 * the multi-index (h3,p6) x (h2,h1,p4) for each of the 4 sub-tiles. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p4_3=rest_y;
p6_3=rest_x;
/* March over the contracted index p7 in Tcomm-wide panels: load both
 * operand panels into shared memory, barrier, then accumulate. */
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
__syncthreads();
/* 4x4 outer-product accumulation over this shared-memory panel. */
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier before the next panel overwrites the shared tiles. */
__syncthreads();
}
/* Write back the surviving (in-bounds) entries of the 4x4 tile; the nested
 * if/else ladders clip the tile at the ragged right/bottom grid edges. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]-=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3]-=tlocal3;
t3d[h3_0*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_0*p6ld_t3+p4_3*p4ld_t3]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]-=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]-=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3]-=tlocal7;
t3d[h3_1*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_1*p6ld_t3+p4_3*p4ld_t3]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]-=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]-=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3]-=tlocal11;
t3d[h3_2*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_2*p6ld_t3+p4_3*p4ld_t3]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]-=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]-=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3]-=tlocal15;
t3d[h3_3*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_3*p6ld_t3+p4_3*p4ld_t3]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]-=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_1_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
/* Host driver for sd_t_d2_1_kernel:
 *   t3[h3,h2,h1,p6,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6]
 * (contraction over p7).  p5 is folded into p6 since the kernel does not
 * distinguish them.  Only t2 and v2 are copied host->device here; the t3
 * output is assumed to already reside on the device as the global t3_d
 * (see the commented-out allocation in the original sources).
 * Parameters h1d..p7d are the extents of the corresponding tensor modes;
 * t3/t2/v2 are host pointers (t3 is unused here — kept for interface
 * compatibility with the other sd_t_* drivers).
 */
p6d=p6d*p5d;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3;
size_t size_t2,size_v2;
hipStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*sizeof(double);
/* Kernel is shared-memory heavy (two Tcomm-wide staging tiles). */
CUDA_SAFE(hipFuncSetCacheConfig(sd_t_d2_1_kernel, hipFuncCachePreferShared));
nstreams=1;
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2_d,t2,size_t2,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d,v2,size_v2,hipMemcpyHostToDevice));
/* Leading dimensions (flat strides) of each tensor mode; first listed
 * mode is fastest-varying. */
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
h3ld_t3=1;
h2ld_t3=h3d;
h1ld_t3=h2d*h3d;
p6ld_t3=h1d*h2d*h3d;
p4ld_t3=p6d*h1d*h2d*h3d;
int total_x = h3d*p6d;
int total_y = p4d*h1d*h2d;
/* Each thread produces a 4x4 register tile of outputs, hence 4*T. */
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d2_1_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
/* Check the sync/destroy return codes too — an async kernel fault
 * surfaces here, not at launch. */
CUDA_SAFE(hipDeviceSynchronize());
for(i=0;i<nstreams;++i){
CUDA_SAFE(hipStreamDestroy(streams[i]));}
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_1_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
  /* Fortran-callable shim: dereference the by-reference Integer extents,
   * narrow them to int, and forward to the C entry point unchanged. */
  const int h1 = (int)*h1d, h2 = (int)*h2d, h3 = (int)*h3d;
  const int p4 = (int)*p4d, p5 = (int)*p5d, p6 = (int)*p6d, p7 = (int)*p7d;
  sd_t_d2_1_cuda(h1, h2, h3, p4, p5, p6, p7, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h1,h3,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Device kernel for t3[h2,h1,h3,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3]
 * (contraction over p7).  Launch config: blockDim=(T2,T1); each thread
 * accumulates a 4x4 register tile of outputs (tlocal1..tlocal16), so the
 * grid covers total_x/total_y in steps of 4*T2 / 4*T1.  t2 slices are
 * staged through t2_shm and v2 slices through v2_shm.
 * NOTE(review): the guards mix T1 and T2 (e.g. thread_x+T1*k<total_x);
 * harmless only because T1==T2==16 here — confirm before changing tiles.
 */
__global__ void sd_t_d2_2_kernel(int h1d,int h2d,int h3d,int p4d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int h2ld_t3,int h1ld_t3,int h3ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 output accumulators: tlocal[1+i+4*j] pairs y-offset i with x-offset j. */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decompose the four flat x/y tile positions into tensor indices:
 * y -> (h2,h1,p4), x -> h3, for offsets 0..3. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p4_0=rest_y;
h3_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p4_1=rest_y;
h3_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p4_2=rest_y;
h3_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p4_3=rest_y;
h3_3=rest_x;
/* Tile the contracted p7 dimension in chunks of Tcomm; each chunk is
 * cooperatively staged into shared memory, then multiplied. */
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
/* Barrier: staging must complete before any thread reads the tiles. */
__syncthreads();
/* Rank-1-update style inner product over the staged p7 chunk. */
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier before the next chunk overwrites the shared tiles. */
__syncthreads();
}
/* Write back the 4x4 tile with a -= accumulation; the cascaded else-if
 * chains clip the partial tiles at the grid edge. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3]-=tlocal3;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_0*h3ld_t3+p4_3*p4ld_t3]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3]-=tlocal7;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_1*h3ld_t3+p4_3*p4ld_t3]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3]-=tlocal11;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_2*h3ld_t3+p4_3*p4ld_t3]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3]-=tlocal15;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_3*h3ld_t3+p4_3*p4ld_t3]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_2_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
/* Host driver for sd_t_d2_2_kernel:
 *   t3[h2,h1,h3,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3]
 * (contraction over p7).  p5 and p6 are folded into h3 since the kernel
 * treats them as one fused mode.  Only t2 and v2 are copied
 * host->device; the t3 output is assumed resident on the device as the
 * global t3_d (t3 is kept in the signature for interface compatibility).
 */
h3d=h3d*p6d;
h3d=h3d*p5d;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3;
size_t size_t2,size_v2;
hipStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*sizeof(double);
/* Kernel is shared-memory heavy (two Tcomm-wide staging tiles). */
CUDA_SAFE(hipFuncSetCacheConfig(sd_t_d2_2_kernel, hipFuncCachePreferShared));
nstreams=1;
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2_d,t2,size_t2,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d,v2,size_v2,hipMemcpyHostToDevice));
/* Leading dimensions (flat strides) of each tensor mode; first listed
 * mode is fastest-varying. */
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
h2ld_t3=1;
h1ld_t3=h2d;
h3ld_t3=h1d*h2d;
p4ld_t3=h3d*h1d*h2d;
int total_x = h3d;
int total_y = p4d*h1d*h2d;
/* Each thread produces a 4x4 register tile of outputs, hence 4*T. */
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d2_2_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
/* Check the sync/destroy return codes too — an async kernel fault
 * surfaces here, not at launch. */
CUDA_SAFE(hipDeviceSynchronize());
for(i=0;i<nstreams;++i){
CUDA_SAFE(hipStreamDestroy(streams[i]));}
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_2_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
  /* Fortran-callable shim: dereference the by-reference Integer extents,
   * narrow them to int, and forward to the C entry point unchanged. */
  const int h1 = (int)*h1d, h2 = (int)*h2d, h3 = (int)*h3d;
  const int p4 = (int)*p4d, p5 = (int)*p5d, p6 = (int)*p6d, p7 = (int)*p7d;
  sd_t_d2_2_cuda(h1, h2, h3, p4, p5, p6, p7, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h3,h1,p6,p4] += t2[p7,p4,h1,h2] * v2[p7,h3,p6]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Device kernel for t3[h2,h3,h1,p6,p4] += t2[p7,p4,h1,h2] * v2[p7,h3,p6]
 * (contraction over p7).  Launch config: blockDim=(T2,T1); each thread
 * accumulates a 4x4 register tile of outputs (tlocal1..tlocal16), so the
 * grid covers total_x/total_y in steps of 4*T2 / 4*T1.  t2 slices are
 * staged through t2_shm and v2 slices through v2_shm.
 * NOTE(review): the guards mix T1 and T2 (e.g. thread_x+T1*k<total_x);
 * harmless only because T1==T2==16 here — confirm before changing tiles.
 */
__global__ void sd_t_d2_3_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int h2ld_t3,int h3ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 output accumulators: tlocal[1+i+4*j] pairs y-offset i with x-offset j. */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decompose the four flat x/y tile positions into tensor indices:
 * y -> (h2,h1,p4), x -> (h3,p6), for offsets 0..3. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p4_3=rest_y;
p6_3=rest_x;
/* Tile the contracted p7 dimension in chunks of Tcomm; each chunk is
 * cooperatively staged into shared memory, then multiplied. */
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
/* Barrier: staging must complete before any thread reads the tiles. */
__syncthreads();
/* Rank-1-update style inner product over the staged p7 chunk. */
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier before the next chunk overwrites the shared tiles. */
__syncthreads();
}
/* Write back the 4x4 tile with a += accumulation; the cascaded else-if
 * chains clip the partial tiles at the grid edge. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3]+=tlocal3;
t3d[h2_3*h2ld_t3+h3_0*h3ld_t3+h1_3*h1ld_t3+p6_0*p6ld_t3+p4_3*p4ld_t3]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3]+=tlocal7;
t3d[h2_3*h2ld_t3+h3_1*h3ld_t3+h1_3*h1ld_t3+p6_1*p6ld_t3+p4_3*p4ld_t3]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3]+=tlocal11;
t3d[h2_3*h2ld_t3+h3_2*h3ld_t3+h1_3*h1ld_t3+p6_2*p6ld_t3+p4_3*p4ld_t3]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3]+=tlocal15;
t3d[h2_3*h2ld_t3+h3_3*h3ld_t3+h1_3*h1ld_t3+p6_3*p6ld_t3+p4_3*p4ld_t3]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]+=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_3_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
/* Host driver for sd_t_d2_3_kernel:
 *   t3[h2,h3,h1,p6,p4] += t2[p7,p4,h1,h2] * v2[p7,h3,p6]
 * (contraction over p7).  p5 is folded into p6 since the kernel does not
 * distinguish them.  Only t2 and v2 are copied host->device; the t3
 * output is assumed resident on the device as the global t3_d (t3 is
 * kept in the signature for interface compatibility).
 */
p6d=p6d*p5d;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p6ld_t3,p4ld_t3;
size_t size_t2,size_v2;
hipStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*sizeof(double);
/* Kernel is shared-memory heavy (two Tcomm-wide staging tiles). */
CUDA_SAFE(hipFuncSetCacheConfig(sd_t_d2_3_kernel, hipFuncCachePreferShared));
nstreams=1;
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2_d,t2,size_t2,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d,v2,size_v2,hipMemcpyHostToDevice));
/* Leading dimensions (flat strides) of each tensor mode; first listed
 * mode is fastest-varying. */
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
h2ld_t3=1;
h3ld_t3=h2d;
h1ld_t3=h3d*h2d;
p6ld_t3=h1d*h3d*h2d;
p4ld_t3=p6d*h1d*h3d*h2d;
int total_x = h3d*p6d;
int total_y = p4d*h1d*h2d;
/* Each thread produces a 4x4 register tile of outputs, hence 4*T. */
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d2_3_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
/* Check the sync/destroy return codes too — an async kernel fault
 * surfaces here, not at launch. */
CUDA_SAFE(hipDeviceSynchronize());
for(i=0;i<nstreams;++i){
CUDA_SAFE(hipStreamDestroy(streams[i]));}
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_3_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
  /* Fortran-callable shim: dereference the by-reference Integer extents,
   * narrow them to int, and forward to the C entry point unchanged. */
  const int h1 = (int)*h1d, h2 = (int)*h2d, h3 = (int)*h3d;
  const int p4 = (int)*p4d, p5 = (int)*p5d, p6 = (int)*p6d, p7 = (int)*p7d;
  sd_t_d2_3_cuda(h1, h2, h3, p4, p5, p6, p7, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p6,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Device kernel for
 *   t3[h3,h2,h1,p6,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
 * (contraction over p7).  Launch config: blockDim=(T2,T1); each thread
 * accumulates a 4x4 register tile of outputs (tlocal1..tlocal16), so the
 * grid covers total_x/total_y in steps of 4*T2 / 4*T1.  t2 slices are
 * staged through t2_shm and v2 slices through v2_shm.
 * NOTE(review): the guards mix T1 and T2 (e.g. thread_x+T1*k<total_x);
 * harmless only because T1==T2==16 here — confirm before changing tiles.
 */
__global__ void sd_t_d2_4_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 output accumulators: tlocal[1+i+4*j] pairs y-offset i with x-offset j. */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decompose the four flat x/y tile positions into tensor indices:
 * y -> (h2,h1,p4), x -> (h3,p6,p5), for offsets 0..3. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p6_0=rest_x%p6d;
rest_x=rest_x/p6d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p6_1=rest_x%p6d;
rest_x=rest_x/p6d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p6_2=rest_x%p6d;
rest_x=rest_x/p6d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p6_3=rest_x%p6d;
rest_x=rest_x/p6d;
p4_3=rest_y;
p5_3=rest_x;
/* Tile the contracted p7 dimension in chunks of Tcomm; each chunk is
 * cooperatively staged into shared memory, then multiplied. */
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
/* Barrier: staging must complete before any thread reads the tiles. */
__syncthreads();
/* Rank-1-update style inner product over the staged p7 chunk. */
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier before the next chunk overwrites the shared tiles. */
__syncthreads();
}
/* Write back the 4x4 tile with a += accumulation; the cascaded else-if
 * chains clip the partial tiles at the grid edge. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]+=tlocal3;
t3d[h3_0*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_0*p6ld_t3+p4_3*p4ld_t3+p5_0*p5ld_t3]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]+=tlocal7;
t3d[h3_1*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_1*p6ld_t3+p4_3*p4ld_t3+p5_1*p5ld_t3]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]+=tlocal11;
t3d[h3_2*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_2*p6ld_t3+p4_3*p4ld_t3+p5_2*p5ld_t3]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]+=tlocal15;
t3d[h3_3*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_3*p6ld_t3+p4_3*p4ld_t3+p5_3*p5ld_t3]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_4_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
/* Host driver for sd_t_d2_4_kernel:
 *   t3[h3,h2,h1,p6,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
 * (contraction over p7).  Unlike the d2_1..d2_3 drivers, no index modes
 * are folded here.  Only t2 and v2 are copied host->device; the t3
 * output is assumed resident on the device as the global t3_d (t3 is
 * kept in the signature for interface compatibility).
 */
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,p5ld_t3;
size_t size_t2,size_v2;
hipStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*p5d*sizeof(double);
/* Kernel is shared-memory heavy (two Tcomm-wide staging tiles). */
CUDA_SAFE(hipFuncSetCacheConfig(sd_t_d2_4_kernel, hipFuncCachePreferShared));
nstreams=1;
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2_d,t2,size_t2,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d,v2,size_v2,hipMemcpyHostToDevice));
/* Leading dimensions (flat strides) of each tensor mode; first listed
 * mode is fastest-varying. */
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
p5ld_v2=p6d*h3d*p7d;
h3ld_t3=1;
h2ld_t3=h3d;
h1ld_t3=h2d*h3d;
p6ld_t3=h1d*h2d*h3d;
p4ld_t3=p6d*h1d*h2d*h3d;
p5ld_t3=p4d*p6d*h1d*h2d*h3d;
int total_x = h3d*p6d*p5d;
int total_y = p4d*h1d*h2d;
/* Each thread produces a 4x4 register tile of outputs, hence 4*T. */
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d2_4_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
/* Check the sync/destroy return codes too — an async kernel fault
 * surfaces here, not at launch. */
CUDA_SAFE(hipDeviceSynchronize());
for(i=0;i<nstreams;++i){
CUDA_SAFE(hipStreamDestroy(streams[i]));}
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_4_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
/* Fortran-callable shim: dereference the Integer dimension arguments,
 * narrow them to int, and forward to the C driver unchanged. */
int ih1 = (int)*h1d, ih2 = (int)*h2d, ih3 = (int)*h3d;
int ip4 = (int)*p4d, ip5 = (int)*p5d, ip6 = (int)*p6d, ip7 = (int)*p7d;
sd_t_d2_4_cuda(ih1, ih2, ih3, ip4, ip5, ip6, ip7, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h1,h3,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Device kernel for t3[h2,h1,h3,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p5].
 * Launched with a T2 x T1 thread block; each thread accumulates a 4x4
 * register patch of outputs (tlocal1..tlocal16), so one block covers a
 * (4*T2) x (4*T1) tile of the flattened output space
 * (x = h3*p5 -- the caller folds p6 into h3d -- and y = p4*h1*h2).
 * The contraction index p7 is tiled in chunks of Tcomm through the
 * shared-memory staging buffers t2_shm/v2_shm.
 * unused_idx is accepted but never read (kept for a uniform signature). */
__global__ void sd_t_d2_5_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p5ld_v2,int h2ld_t3,int h1ld_t3,int h3ld_t3,int p4ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
/* Shared staging tiles: t2 rows by contraction index, v2 transposed. */
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 register tile of partial sums for this thread. */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decode the four (x,y) output coordinates handled by this thread into
 * tensor indices (h2,h1,p4 from y; h3,p5 from x). */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
p4_3=rest_y;
p5_3=rest_x;
/* Tile the contraction index p7 in chunks of Tcomm: stage guarded slices of
 * t2 and v2 into shared memory, then accumulate the 4x4 outer product. */
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
/* Barrier: staging must complete before any thread reads the tiles. */
__syncthreads();
/* Inner product over this p7 chunk: 4 t2 values x 4 v2 values per step. */
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier before the next chunk overwrites the shared tiles. */
__syncthreads();
}
/* Writeback: accumulate the 4x4 register tile into t3d, guarding each
 * x-column and y-row against the ragged edge of the output space. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]+=tlocal3;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_0*h3ld_t3+p4_3*p4ld_t3+p5_0*p5ld_t3]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]+=tlocal7;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_1*h3ld_t3+p4_3*p4ld_t3+p5_1*p5ld_t3]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]+=tlocal11;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_2*h3ld_t3+p4_3*p4ld_t3+p5_2*p5ld_t3]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]+=tlocal15;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_3*h3ld_t3+p4_3*p4ld_t3+p5_3*p5ld_t3]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_5_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
/* Host driver for t3[h2,h1,h3,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p5].
 * p6 is folded into the h3 dimension up front (h3d *= p6d), so the kernel
 * only sees five output indices.  Copies t2/v2 to the device, launches
 * sd_t_d2_5_kernel, and accumulates into the device-resident buffer t3_d
 * owned elsewhere (see the commented-out getGpuMem/freeGpuMem calls).
 * NOTE(review): the host t3 pointer is never touched here; results stay in
 * t3_d — confirm the caller copies them back.
 */
h3d=h3d*p6d;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p5ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,p5ld_t3;
size_t size_t2,size_v2; /* dead size_t3/size_block_t3/size_el_block_t3 bookkeeping removed */
hipStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p5d*sizeof(double);
/* Kernel stages tiles through __shared__ arrays; prefer the shared carveout. */
hipFuncSetCacheConfig(sd_t_d2_5_kernel, hipFuncCachePreferShared);
nstreams=1;
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2_d,t2,size_t2,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d,v2,size_v2,hipMemcpyHostToDevice));
/* Leading dimensions (element strides) for the flattened tensors. */
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p5ld_v2=h3d*p7d;
h2ld_t3=1;
h1ld_t3=h2d;
h3ld_t3=h1d*h2d;
p4ld_t3=h3d*h1d*h2d;
p5ld_t3=p4d*h3d*h1d*h2d;
int total_x = h3d*p5d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d2_5_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p5ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
/* Fix: synchronize/destroy return codes were previously ignored. */
CUDA_SAFE(hipDeviceSynchronize());
for(i=0;i<nstreams;++i){
CUDA_SAFE(hipStreamDestroy(streams[i]));}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_5_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
/* Fortran-callable shim: dereference the Integer dimension arguments,
 * narrow them to int, and forward to the C driver unchanged. */
int ih1 = (int)*h1d, ih2 = (int)*h2d, ih3 = (int)*h3d;
int ip4 = (int)*p4d, ip5 = (int)*p5d, ip6 = (int)*p6d, ip7 = (int)*p7d;
sd_t_d2_5_cuda(ih1, ih2, ih3, ip4, ip5, ip6, ip7, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h3,h1,p6,p4,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Device kernel for t3[h2,h3,h1,p6,p4,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5].
 * Launched with a T2 x T1 thread block; each thread accumulates a 4x4
 * register patch of outputs (tlocal1..tlocal16), so one block covers a
 * (4*T2) x (4*T1) tile of the flattened output space
 * (x = h3*p6*p5, y = p4*h1*h2).  The contraction index p7 is tiled in
 * chunks of Tcomm through the shared-memory staging buffers t2_shm/v2_shm.
 * unused_idx is accepted but never read (kept for a uniform signature). */
__global__ void sd_t_d2_6_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int p5ld_v2,int h2ld_t3,int h3ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
/* Shared staging tiles: t2 rows by contraction index, v2 transposed. */
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 register tile of partial sums for this thread. */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decode the four (x,y) output coordinates handled by this thread into
 * tensor indices (h2,h1,p4 from y; h3,p6,p5 from x). */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p6_0=rest_x%p6d;
rest_x=rest_x/p6d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p6_1=rest_x%p6d;
rest_x=rest_x/p6d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p6_2=rest_x%p6d;
rest_x=rest_x/p6d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p6_3=rest_x%p6d;
rest_x=rest_x/p6d;
p4_3=rest_y;
p5_3=rest_x;
/* Tile the contraction index p7 in chunks of Tcomm: stage guarded slices of
 * t2 and v2 into shared memory, then accumulate the 4x4 outer product. */
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
/* Barrier: staging must complete before any thread reads the tiles. */
__syncthreads();
/* Inner product over this p7 chunk: 4 t2 values x 4 v2 values per step. */
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier before the next chunk overwrites the shared tiles. */
__syncthreads();
}
/* Writeback: subtract the 4x4 register tile from t3d (note -=), guarding
 * each x-column and y-row against the ragged edge of the output space. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]-=tlocal3;
t3d[h2_3*h2ld_t3+h3_0*h3ld_t3+h1_3*h1ld_t3+p6_0*p6ld_t3+p4_3*p4ld_t3+p5_0*p5ld_t3]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]-=tlocal7;
t3d[h2_3*h2ld_t3+h3_1*h3ld_t3+h1_3*h1ld_t3+p6_1*p6ld_t3+p4_3*p4ld_t3+p5_1*p5ld_t3]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]-=tlocal11;
t3d[h2_3*h2ld_t3+h3_2*h3ld_t3+h1_3*h1ld_t3+p6_2*p6ld_t3+p4_3*p4ld_t3+p5_2*p5ld_t3]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]-=tlocal15;
t3d[h2_3*h2ld_t3+h3_3*h3ld_t3+h1_3*h1ld_t3+p6_3*p6ld_t3+p4_3*p4ld_t3+p5_3*p5ld_t3]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_6_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
/* Host driver for t3[h2,h3,h1,p6,p4,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5].
 * Copies t2/v2 to the device, launches sd_t_d2_6_kernel, and accumulates
 * into the device-resident buffer t3_d owned elsewhere (see the
 * commented-out getGpuMem/freeGpuMem calls).
 * NOTE(review): the host t3 pointer is never touched here; results stay in
 * t3_d — confirm the caller copies them back.
 */
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,p5ld_t3;
size_t size_t2,size_v2; /* dead size_t3/size_block_t3/size_el_block_t3 bookkeeping removed */
hipStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*p5d*sizeof(double);
/* Kernel stages tiles through __shared__ arrays; prefer the shared carveout. */
hipFuncSetCacheConfig(sd_t_d2_6_kernel, hipFuncCachePreferShared);
nstreams=1;
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2_d,t2,size_t2,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d,v2,size_v2,hipMemcpyHostToDevice));
/* Leading dimensions (element strides) for the flattened tensors. */
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
p5ld_v2=p6d*h3d*p7d;
h2ld_t3=1;
h3ld_t3=h2d;
h1ld_t3=h3d*h2d;
p6ld_t3=h1d*h3d*h2d;
p4ld_t3=p6d*h1d*h3d*h2d;
p5ld_t3=p4d*p6d*h1d*h3d*h2d;
int total_x = h3d*p6d*p5d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d2_6_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
/* Fix: message restored for consistency with the sibling drivers, which all
 * pass a diagnostic string to CHECK_ERR. */
CHECK_ERR("Kernel execution failed");
}
/* Fix: synchronize/destroy return codes were previously ignored. */
CUDA_SAFE(hipDeviceSynchronize());
for(i=0;i<nstreams;++i){
CUDA_SAFE(hipStreamDestroy(streams[i]));}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_6_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
/* Fortran-callable shim: dereference the Integer dimension arguments,
 * narrow them to int, and forward to the C driver unchanged. */
int ih1 = (int)*h1d, ih2 = (int)*h2d, ih3 = (int)*h3d;
int ip4 = (int)*p4d, ip5 = (int)*p5d, ip6 = (int)*p6d, ip7 = (int)*p7d;
sd_t_d2_6_cuda(ih1, ih2, ih3, ip4, ip5, ip6, ip7, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Device kernel for t3[h3,h2,h1,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5].
 * Launched with a T2 x T1 thread block; each thread accumulates a 4x4
 * register patch of outputs (tlocal1..tlocal16), so one block covers a
 * (4*T2) x (4*T1) tile of the flattened output space
 * (x = h3*p6*p5, y = p4*h1*h2).  The contraction index p7 is tiled in
 * chunks of Tcomm through the shared-memory staging buffers t2_shm/v2_shm.
 * unused_idx is accepted but never read (kept for a uniform signature). */
__global__ void sd_t_d2_7_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p4ld_t3,int p6ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
/* Shared staging tiles: t2 rows by contraction index, v2 transposed. */
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 register tile of partial sums for this thread. */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decode the four (x,y) output coordinates handled by this thread into
 * tensor indices (h2,h1,p4 from y; h3,p6,p5 from x). */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p6_0=rest_x%p6d;
rest_x=rest_x/p6d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p6_1=rest_x%p6d;
rest_x=rest_x/p6d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p6_2=rest_x%p6d;
rest_x=rest_x/p6d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p6_3=rest_x%p6d;
rest_x=rest_x/p6d;
p4_3=rest_y;
p5_3=rest_x;
/* Tile the contraction index p7 in chunks of Tcomm: stage guarded slices of
 * t2 and v2 into shared memory, then accumulate the 4x4 outer product. */
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
/* Barrier: staging must complete before any thread reads the tiles. */
__syncthreads();
/* Inner product over this p7 chunk: 4 t2 values x 4 v2 values per step. */
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier before the next chunk overwrites the shared tiles. */
__syncthreads();
}
/* Writeback: subtract the 4x4 register tile from t3d (note -=), guarding
 * each x-column and y-row against the ragged edge of the output space. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal3;
t3d[h3_0*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal7;
t3d[h3_1*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal11;
t3d[h3_2*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal15;
t3d[h3_3*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_7_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
/* Host driver for t3[h3,h2,h1,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5].
 * Copies t2/v2 to the device, launches sd_t_d2_7_kernel, and accumulates
 * into the device-resident buffer t3_d owned elsewhere (see the
 * commented-out getGpuMem/freeGpuMem calls).
 * NOTE(review): the host t3 pointer is never touched here; results stay in
 * t3_d — confirm the caller copies them back.
 */
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p4ld_t3,p6ld_t3,p5ld_t3;
size_t size_t2,size_v2; /* dead size_t3/size_block_t3/size_el_block_t3 bookkeeping removed */
hipStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*p5d*sizeof(double);
/* Kernel stages tiles through __shared__ arrays; prefer the shared carveout. */
hipFuncSetCacheConfig(sd_t_d2_7_kernel, hipFuncCachePreferShared);
nstreams=1;
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2_d,t2,size_t2,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d,v2,size_v2,hipMemcpyHostToDevice));
/* Leading dimensions (element strides) for the flattened tensors. */
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
p5ld_v2=p6d*h3d*p7d;
h3ld_t3=1;
h2ld_t3=h3d;
h1ld_t3=h2d*h3d;
p4ld_t3=h1d*h2d*h3d;
p6ld_t3=p4d*h1d*h2d*h3d;
p5ld_t3=p6d*p4d*h1d*h2d*h3d;
int total_x = h3d*p6d*p5d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d2_7_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p4ld_t3,p6ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
/* Fix: synchronize/destroy return codes were previously ignored. */
CUDA_SAFE(hipDeviceSynchronize());
for(i=0;i<nstreams;++i){
CUDA_SAFE(hipStreamDestroy(streams[i]));}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable shim: dereference the Integer dimension arguments,
 * narrow them to int, and forward everything to sd_t_d2_7_cuda(). */
extern "C" void sd_t_d2_7_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
  const int h1 = (int)*h1d, h2 = (int)*h2d, h3 = (int)*h3d;
  const int p4 = (int)*p4d, p5 = (int)*p5d, p6 = (int)*p6d, p7 = (int)*p7d;
  sd_t_d2_7_cuda(h1, h2, h3, p4, p5, p6, p7, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h1,h3,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Device kernel for t3[h2,h1,h3,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5].
 * Launched with a T2 x T1 thread block; every thread owns a 4x4 register tile
 * of outputs (tlocal1..tlocal16): 4 offsets along the flattened x axis
 * (h3,p6,p5 -- the v2 side) and 4 along the flattened y axis (h2,h1,p4 --
 * the t2 side).  The contracted index p7 is streamed through shared memory
 * in chunks of Tcomm.  The *ld_* arguments are linearized strides supplied
 * by the host wrapper; unused_idx is ignored. */
__global__ void sd_t_d2_8_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int p5ld_v2,int h2ld_t3,int h1ld_t3,int h3ld_t3,int p4ld_t3,int p6ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
/* Shared tiles of the two operands along the contracted p7 chunk. */
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* Per-thread 4x4 output accumulator tile. */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Unflatten the 4 x offsets (h3,p6,p5) and 4 y offsets (h2,h1,p4) of this
 * thread's output tile into tensor indices. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
p6_0=rest_x%p6d;
rest_x=rest_x/p6d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
p6_1=rest_x%p6d;
rest_x=rest_x/p6d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
p6_2=rest_x%p6d;
rest_x=rest_x/p6d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
p6_3=rest_x%p6d;
rest_x=rest_x/p6d;
p4_3=rest_y;
p5_3=rest_x;
/* March over the contracted index p7 in shared-memory chunks of Tcomm. */
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
/* Cooperative load of the four t2 rows / v2 columns for this chunk;
 * each guard skips rows/columns that fall off the ragged tile edge. */
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
/* Barrier: shared tiles fully written before any thread reads them. */
__syncthreads();
/* Rank-1 updates of the 4x4 register tile from the shared tiles. */
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier before the shared tiles are overwritten by the next chunk. */
__syncthreads();
}
/* Guarded write-back (subtracting accumulation): the else-if ladder handles
 * partially filled tile edges along y, the outer ifs the x edge. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal3;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_0*h3ld_t3+p4_3*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal7;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_1*h3ld_t3+p4_3*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal11;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_2*h3ld_t3+p4_3*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal15;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_3*h3ld_t3+p4_3*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
}
}
__syncthreads();
}
/* Host wrapper for sd_t_d2_8_kernel: stages t2[p7,p4,h1,h2] and
 * v2[p7,h3,p6,p5] on the device and launches the contraction that
 * accumulates into t3[h2,h1,h3,p4,p6,p5].
 * NOTE(review): the t3 argument is never copied here; the launch writes the
 * file-scope device buffer t3_d -- confirm against the callers. */
extern "C" void sd_t_d2_8_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
/* Linearized strides for each tensor index (innermost stride is 1). */
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,p6ld_t3,p5ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
hipStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h2d*h1d*h3d*p4d*p6d*p5d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*p5d*sizeof(double);
/* The kernel is shared-memory bound; prefer shared over L1. */
hipFuncSetCacheConfig(sd_t_d2_8_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);  /* computed but unused here */
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2_d,t2,size_t2,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d,v2,size_v2,hipMemcpyHostToDevice));
/* Strides: t2 is [p7,p4,h1,h2], v2 is [p7,h3,p6,p5], t3 is [h2,h1,h3,p4,p6,p5],
 * all with the leftmost index fastest. */
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
p5ld_v2=p6d*h3d*p7d;
h2ld_t3=1;
h1ld_t3=h2d;
h3ld_t3=h1d*h2d;
p4ld_t3=h3d*h1d*h2d;
p6ld_t3=p4d*h3d*h1d*h2d;
p5ld_t3=p6d*p4d*h3d*h1d*h2d;
/* Flattened extents of the kernel's x (h3,p6,p5) and y (h2,h1,p4) axes;
 * each thread covers a 4x4 tile, hence the 4* factors in the grid size. */
int total_x = h3d*p6d*p5d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d2_8_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,p6ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable shim: dereference the Integer dimension arguments,
 * narrow them to int, and forward everything to sd_t_d2_8_cuda(). */
extern "C" void sd_t_d2_8_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
  const int h1 = (int)*h1d, h2 = (int)*h2d, h3 = (int)*h3d;
  const int p4 = (int)*p4d, p5 = (int)*p5d, p6 = (int)*p6d, p7 = (int)*p7d;
  sd_t_d2_8_cuda(h1, h2, h3, p4, p5, p6, p7, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h3,h1,p4,p6,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Device kernel for t3[h2,h3,h1,p4,p6,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5].
 * Launched with a T2 x T1 thread block; every thread owns a 4x4 register tile
 * of outputs (tlocal1..tlocal16): 4 offsets along the flattened x axis
 * (h3,p6,p5 -- the v2 side) and 4 along the flattened y axis (h2,h1,p4 --
 * the t2 side).  The contracted index p7 is streamed through shared memory
 * in chunks of Tcomm.  The *ld_* arguments are linearized strides supplied
 * by the host wrapper; unused_idx is ignored. */
__global__ void sd_t_d2_9_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int p5ld_v2,int h2ld_t3,int h3ld_t3,int h1ld_t3,int p4ld_t3,int p6ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
/* Shared tiles of the two operands along the contracted p7 chunk. */
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* Per-thread 4x4 output accumulator tile. */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Unflatten the 4 x offsets (h3,p6,p5) and 4 y offsets (h2,h1,p4) of this
 * thread's output tile into tensor indices. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p6_0=rest_x%p6d;
rest_x=rest_x/p6d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p6_1=rest_x%p6d;
rest_x=rest_x/p6d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p6_2=rest_x%p6d;
rest_x=rest_x/p6d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p6_3=rest_x%p6d;
rest_x=rest_x/p6d;
p4_3=rest_y;
p5_3=rest_x;
/* March over the contracted index p7 in shared-memory chunks of Tcomm. */
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
/* Cooperative load of the four t2 rows / v2 columns for this chunk;
 * each guard skips rows/columns that fall off the ragged tile edge. */
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
/* Barrier: shared tiles fully written before any thread reads them. */
__syncthreads();
/* Rank-1 updates of the 4x4 register tile from the shared tiles. */
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier before the shared tiles are overwritten by the next chunk. */
__syncthreads();
}
/* Guarded write-back (adding accumulation): the else-if ladder handles
 * partially filled tile edges along y, the outer ifs the x edge. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal3;
t3d[h2_3*h2ld_t3+h3_0*h3ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal7;
t3d[h2_3*h2ld_t3+h3_1*h3ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal11;
t3d[h2_3*h2ld_t3+h3_2*h3ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal15;
t3d[h2_3*h2ld_t3+h3_3*h3ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal13;
}
}
__syncthreads();
}
/* Host wrapper for sd_t_d2_9_kernel: stages t2[p7,p4,h1,h2] and
 * v2[p7,h3,p6,p5] on the device and launches the contraction that
 * accumulates into t3[h2,h3,h1,p4,p6,p5].
 * NOTE(review): the t3 argument is never copied here; the launch writes the
 * file-scope device buffer t3_d -- confirm against the callers. */
extern "C" void sd_t_d2_9_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
/* Linearized strides for each tensor index (innermost stride is 1). */
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p4ld_t3,p6ld_t3,p5ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
hipStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h2d*h3d*h1d*p4d*p6d*p5d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*p5d*sizeof(double);
/* The kernel is shared-memory bound; prefer shared over L1. */
hipFuncSetCacheConfig(sd_t_d2_9_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);  /* computed but unused here */
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2_d,t2,size_t2,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d,v2,size_v2,hipMemcpyHostToDevice));
/* Strides: t2 is [p7,p4,h1,h2], v2 is [p7,h3,p6,p5], t3 is [h2,h3,h1,p4,p6,p5],
 * all with the leftmost index fastest. */
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
p5ld_v2=p6d*h3d*p7d;
h2ld_t3=1;
h3ld_t3=h2d;
h1ld_t3=h3d*h2d;
p4ld_t3=h1d*h3d*h2d;
p6ld_t3=p4d*h1d*h3d*h2d;
p5ld_t3=p6d*p4d*h1d*h3d*h2d;
/* Flattened extents of the kernel's x (h3,p6,p5) and y (h2,h1,p4) axes;
 * each thread covers a 4x4 tile, hence the 4* factors in the grid size. */
int total_x = h3d*p6d*p5d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d2_9_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p4ld_t3,p6ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
/* Fortran-callable shim: dereference the Integer dimension arguments,
 * narrow them to int, and forward everything to sd_t_d2_9_cuda(). */
extern "C" void sd_t_d2_9_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
  const int h1 = (int)*h1d, h2 = (int)*h2d, h3 = (int)*h3d;
  const int p4 = (int)*p4d, p5 = (int)*p5d, p6 = (int)*p6d, p7 = (int)*p7d;
  sd_t_d2_9_cuda(h1, h2, h3, p4, p5, p6, p7, t3, t2, v2);
}
#define MAX_h3 64
/* IMPORTANT!!!!
t3_d must be passed as a parameter to the kernel function: a __global__
function cannot access this file-scope host variable directly. */
/* Reduction kernel for the perturbative energy corrections.  For each
 * flattened (h2,h1,p6,p5,p4) index handled by this block it sums, over h3,
 *   factor * t3d^2 / D        into energy[blockIdx.x]        and
 *   factor * t3d*(t3_sd+t3d)/D into energy[blockIdx.x+gridDim.x],
 * where D = eval1[h1]+eval2[h2]+eval3[h3]-eval4[p4]-eval5[p5]-eval6[p6].
 * Each block serially walks T2*T1 consecutive flattened indices (the j loop).
 * NOTE(review): only energy_s[0]/energy2_s[0] are read at the end, and the
 * accumulation writes energy_s[threadIdx.x]; this is consistent only because
 * the host launches with a 1-thread block (dimBlock(1)) -- confirm before
 * changing the launch configuration. */
__global__ void compute_energy_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,double* eval1,double* eval2,double* eval3,double* eval4,double* eval5,double* eval6, double* energy, double factor, int total_size, double* t3d, double* t3_sd)
{
int h1,h2,p6,p4,p5, h3,i=0;
double e1,e2,e4,e5,e6;
// __shared__ double t2_shm[MAX_h3];
/* Per-thread partial sums for the two energy expressions. */
__shared__ double energy_s[T1];
__shared__ double energy2_s[T1];
double inner_fac;
int limit;
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
/* Zero this block's two output slots and its partial sums. */
if(threadIdx.x==0)
{
energy[blockIdx.x]=0;
energy[blockIdx.x+gridDim.x]=0;
energy_s[threadIdx.x] = 0.0;
energy2_s[threadIdx.x] = 0.0;
}
/* Walk the T2*T1 flattened indices assigned to this block. */
for(int j =0; j<T2*T1;j++) {
thread_x = T2*T1*blockIdx.x + j;
rest_x = thread_x;
__syncthreads();
/* Unflatten thread_x into (h2,h1,p6,p5,p4). */
h2=rest_x%h2d;
rest_x=rest_x/h2d;
h1=rest_x%h1d;
rest_x=rest_x/h1d;
p6=rest_x%p6d;
rest_x=rest_x/p6d;
p5=rest_x%p5d;
rest_x=rest_x/p5d;
p4=rest_x%p4d;
e1 = eval1[h1];
e2 = eval2[h2];
e4 = eval4[p4];
e5 = eval5[p5];
e6 = eval6[p6];
/*
for(p4=0;p4<p4d;p4++)
for(p5 = 0;p5<p5d;p5++)
for(p6=0;p6<p6d;p6++)
for(h1= 0;h1<h1d;h1++)
for(h2=0;h2<h2d;h2++)
for(h3=0;h3<h3d;h3++) {
inner_fac = -eval4[p4]-eval5[p5]-eval6[p6]+eval1[h1]
+eval2[h2]+eval3[h3];
energy_s[0]+=factor*t3d[i]*t3d[i]/inner_fac;
energy2_s[0]+=factor*t3d[i]*(t3_sd[i]+t3d[i])/inner_fac;
i++;
}
*/
/* Innermost h3 sweep: h3 is the fastest-varying index of t3d/t3_sd. */
if(thread_x<total_size)
for(int i=0;i<h3d;i++)
{
inner_fac = -e4-e5-e6+e1+e2+eval3[i]; //t2_shm[i];
//ckbn avoid e1 in case we need just (T)
energy_s[threadIdx.x] += factor* t3d[thread_x*h3d+i]*t3d[thread_x*h3d+i]/inner_fac;
energy2_s[threadIdx.x] += factor* t3d[thread_x*h3d+i]*(t3_sd[thread_x*h3d+i]+t3d[thread_x*h3d+i])/inner_fac;
}
__syncthreads();
}
/* Publish this block's partial sums (index 0 only; see NOTE above). */
if(threadIdx.x==0)
{
/* limit = blockDim.x;
if (blockIdx.x == (gridDim.x-1)) limit = total_size%blockDim.x;
for(int i=0;i<limit;i++)
{
energy[blockIdx.x]+=energy_s[i];
energy[blockIdx.x+gridDim.x]+=energy2_s[i];
}
*/
energy[blockIdx.x] = energy_s[0];
energy[blockIdx.x+gridDim.x] = energy2_s[0];
}
__syncthreads();
}
/* Reduces the doubles (t3_d) and singles+doubles (t3_s_d) perturbative
 * energy corrections on the device and returns them as energy[0] and
 * energy[1].  eval1..eval6 hold the orbital energies for the h1,h2,h3,
 * p4,p5,p6 indices; factor is the symmetry prefactor.  host1/host2 are
 * accepted for interface compatibility and unused here.
 * Fix vs. original: the t3/ts3 host staging buffers (used only by the
 * commented-out verification path) were allocated on every call and never
 * freed -- a per-call host memory leak; they are now released before
 * returning.  Unused locals (i, in) were removed. */
extern "C" void compute_energy(double factor, double* energy, double* eval1, double* eval2,double* eval3,double* eval4,double* eval5,double* eval6,int h1d, int h2d, int h3d, int p4d, int p5d,int p6d, double* host1, double* host2)
//ckbn en_comment, double* total_d, double* total_s)
{
double* energy_d, *energy_h;
double* eval_d1,*eval_d2,*eval_d3,*eval_d4,*eval_d5,*eval_d6;
int size_energy = 2*sizeof(double);
/* One pair of partial sums per block of T2*T1 flattened indices. */
int total_block = DIV_UB((h1d*h2d*p4d*p5d*p6d), (T2*T1));
int total_elements = h1d*h2d*p4d*p5d*p6d;
energy_d = (double*)getGpuMem(size_energy*total_block*2);
/* Host staging buffers kept only for the commented-out verification path
 * below; freed before returning. */
double* t3 = (double*)malloc(sizeof(double)*h3d*total_elements);
double* ts3 = (double*)malloc(sizeof(double)*h3d*total_elements);
energy_h = (double*)getHostMem(size_energy*2*total_block);
eval_d1 = (double*)getGpuMem(h1d*sizeof(double));
eval_d2 = (double*)getGpuMem(h2d*sizeof(double));
eval_d3 = (double*)getGpuMem(h3d*sizeof(double));
eval_d4 = (double*)getGpuMem(p4d*sizeof(double));
eval_d5 = (double*)getGpuMem(p5d*sizeof(double));
eval_d6 = (double*)getGpuMem(p6d*sizeof(double));
CUDA_SAFE(hipMemcpy(eval_d1, eval1, h1d*sizeof(double), hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(eval_d2, eval2, h2d*sizeof(double), hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(eval_d3, eval3, h3d*sizeof(double), hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(eval_d4, eval4, p4d*sizeof(double), hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(eval_d5, eval5, p5d*sizeof(double), hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(eval_d6, eval6, p6d*sizeof(double), hipMemcpyHostToDevice));
/* One thread per block: compute_energy_kernel only reads energy_s[0]. */
dim3 dimBlock(1); //T2*T1);
dim3 dimGrid(total_block);
hipLaunchKernelGGL(( compute_energy_kernel), dim3(dimGrid),dim3(dimBlock),0, 0, h1d,h2d,h3d,p4d,p5d,p6d, eval_d1,eval_d2,eval_d3,eval_d4,eval_d5,eval_d6,energy_d, factor, h1d*h2d*p4d*p5d*p6d, t3_d, t3_s_d);
hipDeviceSynchronize();
//CHECK_ERR("Kernel execution failed");
CUDA_SAFE(hipMemcpy(((char *) energy_h) , ((char *) energy_d) ,
size_energy*total_block*2, hipMemcpyDeviceToHost));
/* Accumulate the per-block partial sums on the host: first total_block
 * entries are the doubles energy, the next total_block the s+d energy. */
for(int i=1;i<dimGrid.x;i++)
{
energy_h[0]+=energy_h[i];
energy_h[dimGrid.x]+=energy_h[i+dimGrid.x];
}
/*
CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_d) , sizeof(double)*h3d*total_elements, hipMemcpyDeviceToHost));
CUDA_SAFE(hipMemcpy(((char *) ts3) , ((char *) t3_s_d) , sizeof(double)*h3d*total_elements, hipMemcpyDeviceToHost));
total_s[0]=0.0, total_d[0]=0.0;
for(int i=0;i<h3d*total_elements;i++) {
total_s[0] += ts3[i];
total_d[0] += t3[i];
}
*/
energy[0] = energy_h[0];
energy[1] = energy_h[dimGrid.x];
/* Release everything acquired above (t3/ts3 leaked in the original). */
free(t3);
free(ts3);
freeGpuMem(energy_d);
freeGpuMem(eval_d1);
freeGpuMem(eval_d2);
freeGpuMem(eval_d3);
freeGpuMem(eval_d4);
freeGpuMem(eval_d5);
freeGpuMem(eval_d6);
freeHostMem(energy_h);
}
/* Fortran-callable shim: dereference the Integer dimension arguments,
 * narrow them to int, and forward everything to compute_energy(). */
extern "C" void
compute_en_(double * factor, double * energy, double * eval1,double* eval2,double* eval3,double* eval4,double* eval5,double* eval6, Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double* host1, double* host2)
//ckbn en_comment,double* total_d, double* total_s)
{
  const int h1 = (int)*h1d, h2 = (int)*h2d, h3 = (int)*h3d;
  const int p4 = (int)*p4d, p5 = (int)*p5d, p6 = (int)*p6d;
  compute_energy((double)*factor, energy, eval1, eval2, eval3, eval4, eval5, eval6, h1, h2, h3, p4, p5, p6, host1, host2);
  //ckbn en_comment ,total_d, total_s);
}
//__device__ double* t3_d;
/* Allocates and zero-fills the device buffer t3_s_d holding the singles
 * t3 tile of h1d*h2d*h3d*p4d*p5d*p6d doubles.
 * Fix vs. original: the element count was accumulated in a 32-bit int, so
 * the product of six extents could overflow before being widened to the
 * byte count; it is now computed in size_t. */
extern "C" void set_dev_mem_s(int h1d, int h2d, int h3d, int p4d, int p5d,int p6d)
{
size_t size_t3;
size_t3 = (size_t)h1d*h2d*h3d*p4d*p5d*p6d;
t3_s_d = (double *) getGpuMem(size_t3*sizeof(double));
hipMemset(t3_s_d,0,size_t3*sizeof(double));
}
/* Fortran-callable shim: dereference the Integer dimension arguments,
 * narrow them to int, and forward them to set_dev_mem_s(). */
extern "C" void
dev_mem_s_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d)
{
  const int h1 = (int)*h1d, h2 = (int)*h2d, h3 = (int)*h3d;
  const int p4 = (int)*p4d, p5 = (int)*p5d, p6 = (int)*p6d;
  set_dev_mem_s(h1, h2, h3, p4, p5, p6);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p6,p5,p4] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Singles contribution: t3[h3,h2,h1,p6,p4] += t2[p4,h1] * v2[h3,h2,p6]
 * (p5 is folded into p6 by the caller).  The whole t2 factor is cached in
 * shared memory; each thread then handles one flattened (h3,h2,p6) index
 * and loops over all (h1,p4) pairs.
 * NOTE(review): the p4 parameter is shadowed by the inner loop variable and
 * never used as an input -- presumably a leftover of an earlier per-p4
 * launch; confirm intent.
 * NOTE(review): t2_shm has T1*2*Tcomm (=512) slots but the load loop runs
 * to h1d*p4d -- assumes h1d*p4d <= 512; verify against the tile sizes.
 * NOTE(review): `rest_x += i` only decomposes correctly on the first pass
 * of the grid-stride loop (rest_x is consumed by the divisions); the host
 * launches enough blocks that the loop runs once -- confirm. */
__global__ void sd_t_s1_1_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3, double *t2_d, double *v2_d,int p4, int total_x, double* t3d) {
int h1,h2,h3,p6;
/* Entire t2[p4,h1] factor staged in shared memory. */
__shared__ double t2_shm[T1*2*Tcomm];
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
/* Barrier: t2_shm fully loaded before any thread reads it below. */
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
/* Unflatten into (h3,h2,p6). */
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
/* Accumulate the outer product of t2 over all (h1,p4) into t3. */
if((thread_x+i)<total_x)
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2];
}
}
__syncthreads();
}
extern "C" void
sd_t_s1_1_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
hipStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
//CUDA_SAFE(hipMalloc((void**) &t3_d, size_t3));
//CUDA_SAFE(hipMalloc((void**) &t2_d, size_t2));
//CUDA_SAFE(hipMalloc((void**) &v2_d, size_v2));
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipStreamCreate(&streams[i]));
}
CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
// p5ld_v2 = p6d * h3d * p7d;
h3ld_t3 = 1;
h2ld_t3 = h3d;
h1ld_t3 = h2d * h3d;
p6ld_t3 = h1d * h2d * h3d;
// p5ld_t3 = p6d * h1d * h2d * h3d;
p4ld_t3 = p5d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_s1_1_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,i,total_x, t3_s_d);
CHECK_ERR("Kernel execution failed");
}
/*
st = timer();
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (hipStreamQuery(streams[stream]) != hipSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}
*/
hipDeviceSynchronize();
// CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost));
for (i = 0; i < nstreams; ++i) {
hipStreamDestroy(streams[i]);
}
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
// hipFree(t2_d);
// hipFree(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void
sd_t_s1_1_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_1_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h1,h2,p6,p5,p4] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Kernel: t3[h3,h1,h2,p6,p5,p4] -= t2[p4,h1] * v2[h3,h2,p6,p5].
 * Callers pass p5d*p6d as p6d, so the p6 index covers the merged (p6,p5)
 * pair.  All *ld_* parameters are precomputed element strides; t3d is the
 * persistent device buffer (callers pass t3_s_d).  Each logical thread
 * owns one (h3,h2,p6) tuple and loops over all (h1,p4) pairs.
 * NOTE(review): t2_shm holds T1*2*Tcomm = 512 doubles and the staging
 * loop has no bound against that size; assumes h1d*p4d <= 512. */
__global__ void sd_t_s1_2_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t2_d, double *v2_d,int p4, int total_x, double* t3d) {
int h1,h2,h3,p6;
__shared__ double t2_shm[T1*2*Tcomm];
/* stage the whole t2[p4,h1] tile into shared memory, strided across the block */
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x; /* flat global thread index */
rest_x = thread_x;
/* barrier: all threads must finish writing t2_shm before any thread reads it */
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
/* NOTE(review): rest_x is reused across iterations, so this decode is only
 * correct on the first pass; callers size the grid so the body runs once
 * per thread -- confirm before launching with smaller grids. */
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
if((thread_x+i)<total_x) /* guard the grid tail */
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2];
}
}
/* trailing barrier before exit; no shared-memory reuse follows (presumably defensive) */
__syncthreads();
}
extern "C" void
sd_t_s1_2_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
hipStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
hipMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}*/
//CUDA_SAFE(hipMalloc((void**) &t2_d, size_t2));
//CUDA_SAFE(hipMalloc((void**) &v2_d, size_v2));
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
/* assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipStreamCreate(&streams[i]));
}*/
CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d ;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
// p5ld_v2 = p6d * h3d * p7d;
h3ld_t3 = 1;
h1ld_t3 = h3d;
h2ld_t3 = h1d * h3d;
p6ld_t3 = h1d * h2d * h3d;
// p5ld_t3 = p6d * h1d * h2d * h3d;
p4ld_t3 = p5d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
// for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_s1_2_kernel), dim3(dimGrid),dim3(dimBlock),0, 0, h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,i,total_x, t3_s_d);
CHECK_ERR("Kernel execution failed");
// }
/*
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (hipStreamQuery(streams[stream]) != hipSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}*/
hipDeviceSynchronize();
// CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost));
/*
for (i = 0; i < nstreams; ++i) {
hipStreamDestroy(streams[i]);
}*/
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
extern "C" void
sd_t_s1_2_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_2_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
extern "C" void
sd_t_s1_3_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
hipStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
hipMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipStreamCreate(&streams[i]));
}
CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d ;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
// p5ld_v2 = p6d * h3d * p7d;
h1ld_t3 = 1;
h3ld_t3 = h1d;
h2ld_t3 = h1d * h3d;
p6ld_t3 = h1d * h2d * h3d;
// p5ld_t3 = p6d * h1d * h2d * h3d;
p4ld_t3 = p5d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_s1_1_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,i,total_x, t3_s_d);
CHECK_ERR("Kernel execution failed");
}
/*
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (hipStreamQuery(streams[stream]) != hipSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}
*/ hipDeviceSynchronize();
//CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost));
for (i = 0; i < nstreams; ++i) {
hipStreamDestroy(streams[i]);
}
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void
sd_t_s1_3_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_3_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Kernel: t3[h3,h2,h1,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5].
 * Unlike the merged variants above, p5 and p6 are decoded separately here.
 * All *ld_* parameters are precomputed element strides; t3d is the
 * persistent device buffer (callers pass t3_s_d).  Each logical thread
 * owns one (h3,h2,p6,p5) tuple and loops over all (h1,p4) pairs.
 * NOTE(review): t2_shm holds T1*2*Tcomm = 512 doubles and the staging
 * loop has no bound against that size; assumes h1d*p4d <= 512. */
__global__ void sd_t_s1_4_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6,p5;
__shared__ double t2_shm[T1*2*Tcomm];
/* stage the whole t2[p4,h1] tile into shared memory, strided across the block */
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x; /* flat global thread index */
rest_x = thread_x;
/* barrier: all threads must finish writing t2_shm before any thread reads it */
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
/* NOTE(review): rest_x is reused across iterations, so this decode is only
 * correct on the first pass; callers size the grid so the body runs once
 * per thread -- confirm before launching with smaller grids. */
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
rest_x=rest_x/p6d;
p5=rest_x%p5d;
if((thread_x+i)<total_x) /* guard the grid tail */
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2];
}
}
/* trailing barrier before exit; no shared-memory reuse follows (presumably defensive) */
__syncthreads();
}
extern "C" void
sd_t_s1_4_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
hipStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
hipMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
/* assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipStreamCreate(&streams[i]));
}*/
CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
p5ld_v2 = p6d * h3d * h2d;
h3ld_t3 = 1;
h2ld_t3 = h3d;
h1ld_t3 = h2d * h3d;
p6ld_t3 = h1d * h2d * h3d;
p4ld_t3 = p6d * h1d * h2d * h3d;
p5ld_t3 = p4d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
i=0;
// for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_s1_4_kernel), dim3(dimGrid),dim3(dimBlock),0, 0, h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
//sd_t_s1_4_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
// }
hipDeviceSynchronize();
/* CUDA_SAFE(hipMemcpy(((char *) t3_p) , ((char *) t3_d) , size_block_t3, hipMemcpyDeviceToHost));
printf("Time for Async DeviceToHost %f\n", et-st);
stream = 0;
// while (stream < nstreams) {
// while (hipStreamQuery(streams[stream]) != hipSuccess);
double *src = t3_p; //[stream * size_el_block_t3];
double *dst = t3; //[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] -= src[i];
}
// stream++;
// }
*/
// hipDeviceSynchronize();
/*
for (i = 0; i < nstreams; ++i) {
hipStreamDestroy(streams[i]);
}*/
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void
sd_t_s1_4_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_4_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h1,h2,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Kernel: t3[h3,h1,h2,p6,p4,p5] += t2[p4,h1] * v2[h3,h2,p6,p5].
 * p5 and p6 are decoded separately.  All *ld_* parameters are precomputed
 * element strides; t3d is the persistent device buffer (callers pass
 * t3_s_d).  Each logical thread owns one (h3,h2,p6,p5) tuple and loops
 * over all (h1,p4) pairs.
 * NOTE(review): the section comment above says "-=" but this kernel
 * accumulates with "+=" -- confirm the intended sign.
 * NOTE(review): t2_shm holds T1*2*Tcomm = 512 doubles and the staging
 * loop has no bound against that size; assumes h1d*p4d <= 512. */
__global__ void sd_t_s1_5_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6,p5;
__shared__ double t2_shm[T1*2*Tcomm];
/* stage the whole t2[p4,h1] tile into shared memory, strided across the block */
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x; /* flat global thread index */
rest_x = thread_x;
/* barrier: all threads must finish writing t2_shm before any thread reads it */
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
/* NOTE(review): rest_x is reused across iterations, so this decode is only
 * correct on the first pass; callers size the grid so the body runs once
 * per thread -- confirm before launching with smaller grids. */
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
rest_x=rest_x/p6d;
p5=rest_x%p5d;
if((thread_x+i)<total_x) /* guard the grid tail */
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2];
}
}
/* trailing barrier before exit; no shared-memory reuse follows (presumably defensive) */
__syncthreads();
}
extern "C" void
sd_t_s1_5_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
hipStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
hipMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipStreamCreate(&streams[i]));
}
CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d ;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
p5ld_v2 = p6d * h3d * h2d;
h3ld_t3 = 1;
h1ld_t3 = h3d;
h2ld_t3 = h1d * h3d;
p6ld_t3 = h1d * h2d * h3d;
p4ld_t3 = p6d * h1d * h2d * h3d;
p5ld_t3 = p4d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_s1_5_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
}
/*
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (hipStreamQuery(streams[stream]) != hipSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}
*/
hipDeviceSynchronize();
//CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost));
for (i = 0; i < nstreams; ++i) {
hipStreamDestroy(streams[i]);
}
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void
sd_t_s1_5_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_5_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h1,h3,h2,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Kernel: t3[h1,h3,h2,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5].
 * p5 and p6 are decoded separately.  All *ld_* parameters are precomputed
 * element strides; t3d is the persistent device buffer (callers pass
 * t3_s_d).  Each logical thread owns one (h3,h2,p6,p5) tuple and loops
 * over all (h1,p4) pairs.
 * NOTE(review): t2_shm holds T1*2*Tcomm = 512 doubles and the staging
 * loop has no bound against that size; assumes h1d*p4d <= 512. */
__global__ void sd_t_s1_6_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6,p5;
__shared__ double t2_shm[T1*2*Tcomm];
/* stage the whole t2[p4,h1] tile into shared memory, strided across the block */
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x; /* flat global thread index */
rest_x = thread_x;
/* barrier: all threads must finish writing t2_shm before any thread reads it */
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
/* NOTE(review): rest_x is reused across iterations, so this decode is only
 * correct on the first pass; callers size the grid so the body runs once
 * per thread -- confirm before launching with smaller grids. */
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
rest_x=rest_x/p6d;
p5=rest_x%p5d;
if((thread_x+i)<total_x) /* guard the grid tail */
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2];
}
}
/* trailing barrier before exit; no shared-memory reuse follows (presumably defensive) */
__syncthreads();
}
extern "C" void
sd_t_s1_6_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
hipStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
hipMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipStreamCreate(&streams[i]));
}
CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
p5ld_v2 = p6d * h3d * h2d;
h1ld_t3 = 1;
h3ld_t3 = h1d;
h2ld_t3 = h1d * h3d;
p6ld_t3 = h1d * h2d * h3d;
p4ld_t3 = p6d * h1d * h2d * h3d;
p5ld_t3 = p4d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_s1_6_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
}
/* for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (hipStreamQuery(streams[stream]) != hipSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}*/
hipDeviceSynchronize();
//CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost));
for (i = 0; i < nstreams; ++i) {
hipStreamDestroy(streams[i]);
}
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void
sd_t_s1_6_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_6_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Kernel: t3[h3,h2,h1,p4,p6,p5] += t2[p4,h1] * v2[h3,h2,p6,p5].
 * Callers pass p5d*p6d as p6d, so the p6 index covers the merged (p6,p5)
 * pair.  All *ld_* parameters are precomputed element strides; t3d is the
 * persistent device buffer (callers pass t3_s_d).  Each logical thread
 * owns one (h3,h2,p6) tuple and loops over all (h1,p4) pairs.
 * NOTE(review): the section comment above says "-=" but this kernel
 * accumulates with "+=" -- confirm the intended sign.
 * NOTE(review): t2_shm holds T1*2*Tcomm = 512 doubles and the staging
 * loop has no bound against that size; assumes h1d*p4d <= 512. */
__global__ void sd_t_s1_7_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6;
__shared__ double t2_shm[T1*2*Tcomm];
/* stage the whole t2[p4,h1] tile into shared memory, strided across the block */
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x; /* flat global thread index */
rest_x = thread_x;
/* barrier: all threads must finish writing t2_shm before any thread reads it */
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
/* NOTE(review): rest_x is reused across iterations, so this decode is only
 * correct on the first pass; callers size the grid so the body runs once
 * per thread -- confirm before launching with smaller grids. */
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
if((thread_x+i)<total_x) /* guard the grid tail */
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2];
}
}
/* trailing barrier before exit; no shared-memory reuse follows (presumably defensive) */
__syncthreads();
}
extern "C" void
sd_t_s1_7_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
hipStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
hipMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipStreamCreate(&streams[i]));
}
CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
// p5ld_v2 = p6d * h3d * p7d;
h3ld_t3 = 1;
h2ld_t3 = h3d;
h1ld_t3 = h2d * h3d;
p4ld_t3 = h1d * h2d * h3d;
// p5ld_t3 = p6d * h1d * h2d * h3d;
p6ld_t3 = p4d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_s1_7_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
}
/*
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (hipStreamQuery(streams[stream]) != hipSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}*/
hipDeviceSynchronize();
//CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost));
for (i = 0; i < nstreams; ++i) {
hipStreamDestroy(streams[i]);
}
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void
sd_t_s1_7_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_7_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
#define T1 16
#define T2 16
#define Tcomm 16
/* Kernel: t3[h3,h1,h2,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
 * (layout per the stride assignments in sd_t_s1_8_cuda below).  Callers
 * pass p5d*p6d as p6d, so the p6 index covers the merged (p6,p5) pair.
 * All *ld_* parameters are precomputed element strides; t3d is the
 * persistent device buffer (callers pass t3_s_d).  Each logical thread
 * owns one (h3,h2,p6) tuple and loops over all (h1,p4) pairs.
 * NOTE(review): t2_shm holds T1*2*Tcomm = 512 doubles and the staging
 * loop has no bound against that size; assumes h1d*p4d <= 512. */
__global__ void sd_t_s1_8_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6;
__shared__ double t2_shm[T1*2*Tcomm];
/* stage the whole t2[p4,h1] tile into shared memory, strided across the block */
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x; /* flat global thread index */
rest_x = thread_x;
/* barrier: all threads must finish writing t2_shm before any thread reads it */
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
/* NOTE(review): rest_x is reused across iterations, so this decode is only
 * correct on the first pass; callers size the grid so the body runs once
 * per thread -- confirm before launching with smaller grids. */
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
if((thread_x+i)<total_x) /* guard the grid tail */
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2];
}
}
/* trailing barrier before exit; no shared-memory reuse follows (presumably defensive) */
__syncthreads();
}
/*----------------------------------------------------------------------*
*t3[h3,h1,h2,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
extern "C" void
sd_t_s1_8_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
hipStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
hipMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipStreamCreate(&streams[i]));
}
CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
// p5ld_v2 = p6d * h3d * p7d;
h3ld_t3 = 1;
h1ld_t3 = h3d;
h2ld_t3 = h1d * h3d;
p4ld_t3 = h1d * h2d * h3d;
// p5ld_t3 = p6d * h1d * h2d * h3d;
p6ld_t3 = p4d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_s1_8_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
}
/*
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (hipStreamQuery(streams[stream]) != hipSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}*/
hipDeviceSynchronize();
// CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost));
for (i = 0; i < nstreams; ++i) {
hipStreamDestroy(streams[i]);
}
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
extern "C" void
sd_t_s1_8_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_8_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h1,h3,h2,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
extern "C" void
sd_t_s1_9_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
/* Host driver for the singles term
 *   t3[h1,h3,h2,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
 * Uploads t2 and v2, then launches sd_t_s1_7_kernel, which accumulates
 * into the persistent device buffer t3_s_d.
 * NOTE(review): the s1_7/8/9 entry points appear to share one kernel
 * body and differ only in the leading-dimension arguments passed below
 * -- confirm against the s1_7/s1_8 drivers.
 * NOTE(review): the t3 argument is NOT written here -- the device-to-host
 * copy-back is commented out, so t3_s_d is presumably drained by a
 * separate routine; verify against the callers. */
double st, et;	/* timing scratch; the timer call below is disabled */
//ckbn st = timer();
/* Leading dimensions (element strides) of t2, v2 and t3.  p5ld_v2 and
 * p5ld_t3 are declared but never assigned: p5 and p6 are fused into a
 * single extent (p5d*p6d) at the kernel launch. */
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
hipStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
/* Byte sizes of the three tensors. */
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
hipMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
/* Pinned host staging buffer; only used by the disabled copy-back path. */
t3_p = (double *) getHostMem(size_t3);
streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipStreamCreate(&streams[i]));
}
/* Upload the two input tensors. */
CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
// p5ld_v2 = p6d * h3d * p7d;
h1ld_t3 = 1;
h3ld_t3 = h1d;
h2ld_t3 = h1d * h3d;
p4ld_t3 = h1d * h2d * h3d;
// p5ld_t3 = p6d * h1d * h2d * h3d;
p6ld_t3 = p4d * h1d * h2d * h3d;
/* One thread per element of the flattened v2 index space. */
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_s1_7_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
}
/*
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (hipStreamQuery(streams[stream]) != hipSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}*/
/* Block until the kernel has finished before releasing its inputs. */
hipDeviceSynchronize();
//CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost));
// printf("out is %lf\n", t3_p[0]);
for (i = 0; i < nstreams; ++i) {
hipStreamDestroy(streams[i]);
}
//freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
extern "C" void
sd_t_s1_9_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
	/* Fortran-callable shim (trailing underscore): dereference each
	 * Integer extent, narrow it to int, and forward to the C driver. */
	int n_h1 = (int) *h1d;
	int n_h2 = (int) *h2d;
	int n_h3 = (int) *h3d;
	int n_p4 = (int) *p4d;
	int n_p5 = (int) *p5d;
	int n_p6 = (int) *p6d;
	sd_t_s1_9_cuda(n_h1, n_h2, n_h3, n_p4, n_p5, n_p6, t3, t2, v2);
}
#include "header.h"
extern "C" void set_dev_mem_d(int h1d, int h2d, int h3d, int p4d, int p5d,int p6d)
{
	/* Allocate the persistent device accumulator t3_d, holding
	 * h1d*h2d*h3d*p4d*p5d*p6d doubles, and zero-initialize it.
	 * Released later by dev_release().
	 *
	 * The element count is computed in size_t: the original `int`
	 * product overflows 32 bits already for modest per-index extents,
	 * which silently corrupted the allocation size. */
	size_t size_t3;
	size_t3 = (size_t) h1d * h2d * h3d * p4d * p5d * p6d;
	t3_d = (double *) getGpuMem(size_t3 * sizeof(double));
	/* Check the memset like every other runtime call in this file. */
	CUDA_SAFE(cudaMemset(t3_d, 0, size_t3 * sizeof(double)));
}
extern "C" void
dev_mem_d_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d)
{
	/* Fortran-callable shim: dereference and narrow each Integer
	 * extent, then forward to the C entry point. */
	int n1 = (int) *h1d;
	int n2 = (int) *h2d;
	int n3 = (int) *h3d;
	int n4 = (int) *p4d;
	int n5 = (int) *p5d;
	int n6 = (int) *p6d;
	set_dev_mem_d(n1, n2, n3, n4, n5, n6);
}
extern "C" void
dev_release()
{
/* Release the persistent device buffers: t3_d (allocated by
 * set_dev_mem_d) and t3_s_d (presumably allocated by a sibling setup
 * routine not visible here -- confirm before reuse). */
freeGpuMem(t3_d);
freeGpuMem(t3_s_d);
}
extern "C" void
dev_release_()
{
/* Fortran-callable alias (trailing underscore) for dev_release(). */
dev_release();
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,p6,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/*
 * sd_t_d1_1_kernel: triplesx[h3,h1,p6,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
 *
 * Shared-memory tiled matrix-multiply over the contracted index h7.
 * Launch config (see sd_t_d1_1_cuda): block = (T2,T1) threads,
 * grid = (DIV_UB(total_x,4*T2), DIV_UB(total_y,4*T1)).  Each thread
 * accumulates a 4x4 register tile, so a block covers a 64x64 patch of
 * the flattened output plane: x enumerates (h3,p6), y enumerates
 * (h1,p5,p4).  The *ld_* arguments are element strides of the
 * corresponding indices; unused_idx is ignored (kept for a uniform
 * launch signature across the d1_* kernels).
 */
__global__ void sd_t_d1_1_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int p6ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
/* Output coordinates; suffix _k is the k-th of this thread's 4 replicas
 * along each grid direction. */
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
/* Shared staging tiles for one h7 slab: t2sub_shm is [y-lane][h7],
 * v2sub_shm is [h7][x-lane]. */
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 16 register accumulators: tlocal(4*j+i+1) belongs to x-replica j and
 * y-replica i. */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decode each replica's flat x/y coordinate into tensor indices:
 * x -> (h3,p6), y -> (h1,p5,p4).  Replica 0: */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
/* Replica 1: */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
/* Replica 2: */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
/* Replica 3: */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
/* Tile loop over the contracted index h7, Tcomm elements per pass. */
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
/* Cooperative loads of this h7 slab into shared memory; the bounds
 * guards skip replicas lying outside (total_x, total_y). */
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
/* Barrier: both tiles must be fully populated before use. */
__syncthreads();
/* 4x4 outer-product accumulation over the loaded h7 slab. */
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier before the next pass overwrites the shared tiles. */
__syncthreads();
}
/* Guarded writeback: subtract each accumulator from triplesx; the
 * else-if ladders handle partially covered tiles at the domain edge. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p6_0*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p6_0*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p6_0*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+p6_0*p6ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p6_0*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p6_0*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p6_0*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p6_0*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p6_0*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p6_0*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p6_1*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p6_1*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p6_1*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+p6_1*p6ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p6_1*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p6_1*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p6_1*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p6_1*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p6_1*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p6_1*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p6_2*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p6_2*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p6_2*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+p6_2*p6ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p6_2*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p6_2*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p6_2*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p6_2*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p6_2*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p6_2*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p6_3*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p6_3*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p6_3*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+p6_3*p6ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p6_3*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p6_3*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p6_3*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p6_3*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p6_3*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p6_3*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_1_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
/* Host driver for sd_t_d1_1_kernel:
 *   triplesx[h3,h1,p6,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
 * Results accumulate into the persistent device buffer t3_d (set up by
 * set_dev_mem_d); the triplesx host pointer is intentionally not
 * touched here (NOTE(review): drained elsewhere -- confirm).
 */
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p6ld_triplesx,p5ld_triplesx,p4ld_triplesx;
size_t size_t2sub,size_v2sub;
cudaStream_t *streams;
size_t nstreams,i;
double *t2sub_d,*v2sub_d;
/* h2 and h3 are fused into a single index for this contraction. */
h3d=h3d*h2d;
/* Widen to size_t BEFORE multiplying to avoid 32-bit overflow of the
 * byte counts for large tiles. */
size_t2sub=(size_t)h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=(size_t)h3d*p6d*h7d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d1_1_kernel, cudaFuncCachePreferShared);
nstreams=1;
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
/* Upload the two input tensors. */
CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice));
/* Leading dimensions (element strides) of each index in each tensor. */
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
p6ld_v2sub=h3d;
h7ld_v2sub=p6d*h3d;
h3ld_triplesx=1;
h1ld_triplesx=h3d;
p6ld_triplesx=h1d*h3d;
p5ld_triplesx=p6d*h1d*h3d;
p4ld_triplesx=p5d*p6d*h1d*h3d;
/* Flattened output plane: x = (h3,p6), y = (p4,p5,h1).  The kernel
 * covers a 64x64 patch per block (4x4 register tile per thread). */
int total_x = h3d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d1_1_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p6ld_triplesx,p5ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
/* cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
 * supported equivalent.  Block until the kernel finishes before freeing
 * its inputs. */
CUDA_SAFE(cudaDeviceSynchronize());
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_1_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
  /* Fortran binding: dereference the Integer extents, narrow to int,
   * and delegate to the C driver. */
  int n_h1 = (int) *h1d, n_h2 = (int) *h2d, n_h3 = (int) *h3d;
  int n_h7 = (int) *h7d;
  int n_p4 = (int) *p4d, n_p5 = (int) *p5d, n_p6 = (int) *p6d;
  sd_t_d1_1_cuda(n_h1, n_h2, n_h3, n_h7, n_p4, n_p5, n_p6, triplesx, t2sub, v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,h2,p5,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,h7]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/*
 * sd_t_d1_2_kernel: triplesx[h3,h1,h2,p5,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,h7]
 *
 * Same shared-memory tiled scheme as sd_t_d1_1_kernel, with h2 taking
 * p6's role and the contribution ADDED rather than subtracted.
 * Launch config (see sd_t_d1_2_cuda): block = (T2,T1), grid =
 * (DIV_UB(total_x,4*T2), DIV_UB(total_y,4*T1)); each thread owns a 4x4
 * register tile.  x enumerates (h3,h2), y enumerates (h1,p5,p4).
 * The *ld_* arguments are element strides; unused_idx is ignored.
 */
__global__ void sd_t_d1_2_kernel(int h1d,int h2d,int h3d,int h7d,int p4d,int p5d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int h2ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int h2ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
/* Output coordinates; suffix _k is the k-th of this thread's 4 replicas
 * along each grid direction. */
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
/* Shared staging tiles for one h7 slab: t2sub_shm is [y-lane][h7],
 * v2sub_shm is [h7][x-lane]. */
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 16 register accumulators: tlocal(4*j+i+1) belongs to x-replica j and
 * y-replica i. */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decode each replica's flat x/y coordinate into tensor indices:
 * x -> (h3,h2), y -> (h1,p5,p4).  Replica 0: */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
h2_0=rest_x;
/* Replica 1: */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
h2_1=rest_x;
/* Replica 2: */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
h2_2=rest_x;
/* Replica 3: */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
h2_3=rest_x;
/* Tile loop over the contracted index h7, Tcomm elements per pass. */
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
/* Cooperative loads of this h7 slab into shared memory; the bounds
 * guards skip replicas lying outside (total_x, total_y). */
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+h2_0*h2ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+h2_1*h2ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+h2_2*h2ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+h2_3*h2ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
/* Barrier: both tiles must be fully populated before use. */
__syncthreads();
/* 4x4 outer-product accumulation over the loaded h7 slab. */
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier before the next pass overwrites the shared tiles. */
__syncthreads();
}
/* Guarded writeback: add each accumulator into triplesx; the else-if
 * ladders handle partially covered tiles at the domain edge. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+h2_0*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+h2_1*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+h2_2*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+h2_3*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_2_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
/* Host driver for sd_t_d1_2_kernel:
 *   triplesx[h3,h1,h2,p5,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,h7]
 * Results accumulate into the persistent device buffer t3_d (set up by
 * set_dev_mem_d); the triplesx host pointer is intentionally not
 * touched here (NOTE(review): drained elsewhere -- confirm).
 */
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p4ld_triplesx;
size_t size_t2sub,size_v2sub;
cudaStream_t *streams;
size_t nstreams,i;
double *t2sub_d,*v2sub_d;
/* h2 and p6 are fused into a single index for this contraction. */
h2d=h2d*p6d;
/* Widen to size_t BEFORE multiplying to avoid 32-bit overflow of the
 * byte counts for large tiles. */
size_t2sub=(size_t)h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=(size_t)h3d*h2d*h7d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d1_2_kernel, cudaFuncCachePreferShared);
nstreams=1;
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
/* Upload the two input tensors.  The error checks had been commented
 * out here; restored for consistency with sd_t_d1_1_cuda -- a failed
 * copy would otherwise feed garbage to the kernel silently. */
CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice));
/* Leading dimensions (element strides) of each index in each tensor. */
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
h2ld_v2sub=h3d;
h7ld_v2sub=h2d*h3d;
h3ld_triplesx=1;
h1ld_triplesx=h3d;
h2ld_triplesx=h1d*h3d;
p5ld_triplesx=h2d*h1d*h3d;
p4ld_triplesx=p5d*h2d*h1d*h3d;
/* Flattened output plane: x = (h3,h2), y = (p4,p5,h1).  The kernel
 * covers a 64x64 patch per block (4x4 register tile per thread). */
int total_x = h3d*h2d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d1_2_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,h7d,p4d,p5d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
/* cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
 * supported equivalent.  Block until the kernel finishes before freeing
 * its inputs. */
CUDA_SAFE(cudaDeviceSynchronize());
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_2_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
  /* Fortran binding: dereference the Integer extents, narrow to int,
   * and delegate to the C driver. */
  int n_h1 = (int) *h1d, n_h2 = (int) *h2d, n_h3 = (int) *h3d;
  int n_h7 = (int) *h7d;
  int n_p4 = (int) *p4d, n_p5 = (int) *p5d, n_p6 = (int) *p6d;
  sd_t_d1_2_cuda(n_h1, n_h2, n_h3, n_h7, n_p4, n_p5, n_p6, triplesx, t2sub, v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h1,h3,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h7]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d1_3_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int h7ld_v2sub,int h1ld_triplesx,int h3ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
h3_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
h3_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
h3_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
h3_3=rest_x;
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
triplesx_d[h1_3*h1ld_triplesx+h3_0*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
triplesx_d[h1_3*h1ld_triplesx+h3_1*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
triplesx_d[h1_3*h1ld_triplesx+h3_2*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
triplesx_d[h1_3*h1ld_triplesx+h3_3*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
}
}
__syncthreads();
}
/* Host driver for sd_t_d1_3_kernel:
 *   triplesx[h1,h3,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h7]
 * where the h2 and p6 extents are folded into h3 below, so the kernel sees
 * a 4-index problem.  Copies t2sub/v2sub to freshly obtained device buffers,
 * launches the kernel over a (total_x = h3, total_y = p4*p5*h1) tile grid,
 * then synchronizes and releases the buffers.
 *
 * NOTE(review): the result is accumulated into the file-global device buffer
 * t3_d; the `triplesx` host pointer is never touched here -- confirm callers
 * read the output from t3_d.
 */
extern "C" void sd_t_d1_3_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
  /* fold h2 and p6 into the h3 index */
  h3d=h3d*h2d;
  h3d=h3d*p6d;
  size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p4ld_triplesx;
  size_t size_t2sub,size_v2sub;
  cudaStream_t *streams;
  size_t nstreams,i;
  double *t2sub_d,*v2sub_d;
  size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
  size_v2sub=h3d*h7d*sizeof(double);
  /* the kernel stages two large double tiles in shared memory */
  CUDA_SAFE(cudaFuncSetCacheConfig(sd_t_d1_3_kernel, cudaFuncCachePreferShared));
  nstreams=1;
  t2sub_d=(double*)getGpuMem(size_t2sub);
  v2sub_d=(double*)getGpuMem(size_v2sub);
  streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
  assert(streams!= NULL);
  for(i=0;i<nstreams;++i) {
    CUDA_SAFE(cudaStreamCreate(&streams[i]));
  }
  CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice));
  CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice));
  /* leading dimensions (element strides) of the flattened arrays */
  h7ld_t2sub=1;
  p4ld_t2sub=h7d;
  p5ld_t2sub=p4d*h7d;
  h1ld_t2sub=p5d*p4d*h7d;
  h3ld_v2sub=1;
  h7ld_v2sub=h3d;
  h1ld_triplesx=1;
  h3ld_triplesx=h1d;
  p5ld_triplesx=h3d*h1d;
  p4ld_triplesx=p5d*h3d*h1d;
  int total_x = h3d;
  int total_y = p4d*p5d*h1d;
  /* each (T2,T1) block covers a 4*T2 x 4*T1 output patch */
  dim3 dimBlock(T2,T1);
  dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
  for(i=0;i<nstreams;++i){
    sd_t_d1_3_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h3d,h7d,p4d,p5d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
    CHECK_ERR("Kernel execution failed");
  }
  /* cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
   * supported equivalent and also surfaces asynchronous kernel errors */
  CUDA_SAFE(cudaDeviceSynchronize());
  for(i=0;i<nstreams;++i){
    CUDA_SAFE(cudaStreamDestroy(streams[i]));
  }
  freeGpuMem(t2sub_d);
  freeGpuMem(v2sub_d);
  free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable shim: dereferences the by-reference Integer dimension
 * arguments, narrows them to int, and forwards to sd_t_d1_3_cuda. */
extern "C" void sd_t_d1_3_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
  int h1 = (int)*h1d;
  int h2 = (int)*h2d;
  int h3 = (int)*h3d;
  int h7 = (int)*h7d;
  int p4 = (int)*p4d;
  int p5 = (int)*p5d;
  int p6 = (int)*p6d;
  sd_t_d1_3_cuda(h1, h2, h3, h7, p4, p5, p6, triplesx, t2sub, v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* sd_t_d1_4_kernel -- one tensor-contraction term of the CCSD(T) triples:
 *
 *   triplesx[h3,h1,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
 *
 * Launch contract (set up by sd_t_d1_4_cuda): blockDim = (T2, T1).
 * Each thread owns a 4x4 register tile (tlocal1..tlocal16), so one block
 * covers a 4*T2 x 4*T1 patch of the (x = h3*p6, y = p4*p5*h1) output space.
 * The contracted index h7 is marched in Tcomm-deep slabs through the two
 * shared-memory staging buffers.  All *ld_* parameters are element strides
 * of the flattened arrays; unused_idx only keeps the launch signature
 * uniform across this kernel family.
 */
__global__ void sd_t_d1_4_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,int p6ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
/* staging tiles: t2sub rows keyed by the output-row (y) offset, v2sub
 * columns keyed by the output-column (x) offset; Tcomm is the h7 depth */
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 register accumulator tile, zero-initialized */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* decompose the four x-positions (h3,p6) and four y-positions (h1,p5,p4)
 * this thread owns; suffix _k corresponds to offset T1*k in each direction */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
/* march over the contracted h7 dimension in Tcomm-deep slabs */
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
/* cooperative loads of the four t2sub rows and four v2sub columns for this
 * slab; the thread_x/thread_y guards keep edge tiles from reading OOB */
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
/* barrier: the tiles must be fully populated before any thread reads them */
__syncthreads();
/* 4x4 outer-product accumulation over this h7 slab */
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* barrier before the tiles are overwritten by the next slab's loads */
__syncthreads();
}
/* scatter the 4x4 accumulator back to global memory, bounds-checked against
 * the ragged grid edge; the subtraction realizes the "-=" contraction sign */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
}
}
__syncthreads();
}
/* Host driver for sd_t_d1_4_kernel:
 *   triplesx[h3,h1,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
 * where the h2 extent is folded into h3 below.  Copies the two input tensors
 * to the device, launches the kernel over a (total_x = h3*p6,
 * total_y = p4*p5*h1) tile grid, synchronizes and releases the buffers.
 *
 * NOTE(review): the result is accumulated into the file-global device buffer
 * t3_d; the `triplesx` host pointer is never touched here -- confirm callers
 * read the output from t3_d.
 */
extern "C" void sd_t_d1_4_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
  /* fold h2 into the h3 index */
  h3d=h3d*h2d;
  size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx;
  size_t size_t2sub,size_v2sub;
  cudaStream_t *streams;
  size_t nstreams,i;
  double *t2sub_d,*v2sub_d;
  size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
  size_v2sub=h3d*p6d*h7d*sizeof(double);
  /* the kernel stages two large double tiles in shared memory */
  CUDA_SAFE(cudaFuncSetCacheConfig(sd_t_d1_4_kernel, cudaFuncCachePreferShared));
  nstreams=1;
  t2sub_d=(double*)getGpuMem(size_t2sub);
  v2sub_d=(double*)getGpuMem(size_v2sub);
  streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
  assert(streams!= NULL);
  for(i=0;i<nstreams;++i) {
    CUDA_SAFE(cudaStreamCreate(&streams[i]));
  }
  CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice));
  CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice));
  /* leading dimensions (element strides) of the flattened arrays */
  h7ld_t2sub=1;
  p4ld_t2sub=h7d;
  p5ld_t2sub=p4d*h7d;
  h1ld_t2sub=p5d*p4d*h7d;
  h3ld_v2sub=1;
  p6ld_v2sub=h3d;
  h7ld_v2sub=p6d*h3d;
  h3ld_triplesx=1;
  h1ld_triplesx=h3d;
  p5ld_triplesx=h1d*h3d;
  p4ld_triplesx=p5d*h1d*h3d;
  p6ld_triplesx=p4d*p5d*h1d*h3d;
  int total_x = h3d*p6d;
  int total_y = p4d*p5d*h1d;
  /* each (T2,T1) block covers a 4*T2 x 4*T1 output patch */
  dim3 dimBlock(T2,T1);
  dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
  for(i=0;i<nstreams;++i){
    sd_t_d1_4_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
    CHECK_ERR("Kernel execution failed");
  }
  /* cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
   * supported equivalent and also surfaces asynchronous kernel errors */
  CUDA_SAFE(cudaDeviceSynchronize());
  for(i=0;i<nstreams;++i){
    CUDA_SAFE(cudaStreamDestroy(streams[i]));
  }
  freeGpuMem(t2sub_d);
  freeGpuMem(v2sub_d);
  free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable shim: dereferences the by-reference Integer dimension
 * arguments, narrows them to int, and forwards to sd_t_d1_4_cuda. */
extern "C" void sd_t_d1_4_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
  int h1 = (int)*h1d;
  int h2 = (int)*h2d;
  int h3 = (int)*h3d;
  int h7 = (int)*h7d;
  int p4 = (int)*p4d;
  int p5 = (int)*p5d;
  int p6 = (int)*p6d;
  sd_t_d1_4_cuda(h1, h2, h3, h7, p4, p5, p6, triplesx, t2sub, v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,h2,p5,p4,p6] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* sd_t_d1_5_kernel -- one tensor-contraction term of the CCSD(T) triples:
 *
 *   triplesx[h3,h1,h2,p5,p4,p6] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
 *
 * Launch contract (set up by sd_t_d1_5_cuda): blockDim = (T2, T1).
 * Each thread owns a 4x4 register tile (tlocal1..tlocal16), so one block
 * covers a 4*T2 x 4*T1 patch of the (x = h3*h2*p6, y = p4*p5*h1) output
 * space.  The contracted index h7 is marched in Tcomm-deep slabs through
 * the two shared-memory staging buffers.  All *ld_* parameters are element
 * strides of the flattened arrays; unused_idx only keeps the launch
 * signature uniform across this kernel family.
 */
__global__ void sd_t_d1_5_kernel(int h1d,int h2d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int h2ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int h2ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,int p6ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
/* staging tiles: t2sub rows keyed by the output-row (y) offset, v2sub
 * columns keyed by the output-column (x) offset; Tcomm is the h7 depth */
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 register accumulator tile, zero-initialized */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* decompose the four x-positions (h3,h2,p6) and four y-positions (h1,p5,p4)
 * this thread owns; suffix _k corresponds to offset T1*k in each direction */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h2_0=rest_x%h2d;
rest_x=rest_x/h2d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h2_1=rest_x%h2d;
rest_x=rest_x/h2d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h2_2=rest_x%h2d;
rest_x=rest_x/h2d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h2_3=rest_x%h2d;
rest_x=rest_x/h2d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
/* march over the contracted h7 dimension in Tcomm-deep slabs */
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
/* cooperative loads of the four t2sub rows and four v2sub columns for this
 * slab; the thread_x/thread_y guards keep edge tiles from reading OOB */
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+h2_0*h2ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+h2_1*h2ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+h2_2*h2ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+h2_3*h2ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
/* barrier: the tiles must be fully populated before any thread reads them */
__syncthreads();
/* 4x4 outer-product accumulation over this h7 slab */
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* barrier before the tiles are overwritten by the next slab's loads */
__syncthreads();
}
/* scatter the 4x4 accumulator back to global memory, bounds-checked against
 * the ragged grid edge; the addition realizes the "+=" contraction sign */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+h2_0*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+h2_1*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+h2_2*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+h2_3*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal13;
}
}
__syncthreads();
}
/* Host driver for sd_t_d1_5_kernel:
 *   triplesx[h3,h1,h2,p5,p4,p6] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
 * Copies the two input tensors to the device, launches the kernel over a
 * (total_x = h3*h2*p6, total_y = p4*p5*h1) tile grid, synchronizes and
 * releases the buffers.
 *
 * NOTE(review): the result is accumulated into the file-global device buffer
 * t3_d; the `triplesx` host pointer is never touched here -- confirm callers
 * read the output from t3_d.
 */
extern "C" void sd_t_d1_5_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
  size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx;
  size_t size_t2sub,size_v2sub;
  cudaStream_t *streams;
  size_t nstreams,i;
  double *t2sub_d,*v2sub_d;
  size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
  size_v2sub=h3d*h2d*p6d*h7d*sizeof(double);
  /* the kernel stages two large double tiles in shared memory */
  CUDA_SAFE(cudaFuncSetCacheConfig(sd_t_d1_5_kernel, cudaFuncCachePreferShared));
  nstreams=1;
  t2sub_d=(double*)getGpuMem(size_t2sub);
  v2sub_d=(double*)getGpuMem(size_v2sub);
  streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
  assert(streams!= NULL);
  for(i=0;i<nstreams;++i) {
    CUDA_SAFE(cudaStreamCreate(&streams[i]));
  }
  CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice));
  CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice));
  /* leading dimensions (element strides) of the flattened arrays */
  h7ld_t2sub=1;
  p4ld_t2sub=h7d;
  p5ld_t2sub=p4d*h7d;
  h1ld_t2sub=p5d*p4d*h7d;
  h3ld_v2sub=1;
  h2ld_v2sub=h3d;
  p6ld_v2sub=h2d*h3d;
  h7ld_v2sub=p6d*h2d*h3d;
  h3ld_triplesx=1;
  h1ld_triplesx=h3d;
  h2ld_triplesx=h1d*h3d;
  p5ld_triplesx=h2d*h1d*h3d;
  p4ld_triplesx=p5d*h2d*h1d*h3d;
  p6ld_triplesx=p4d*p5d*h2d*h1d*h3d;
  int total_x = h3d*h2d*p6d;
  int total_y = p4d*p5d*h1d;
  /* each (T2,T1) block covers a 4*T2 x 4*T1 output patch */
  dim3 dimBlock(T2,T1);
  dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
  for(i=0;i<nstreams;++i){
    sd_t_d1_5_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
    CHECK_ERR("Kernel execution failed");
  }
  /* cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
   * supported equivalent and also surfaces asynchronous kernel errors */
  CUDA_SAFE(cudaDeviceSynchronize());
  for(i=0;i<nstreams;++i){
    CUDA_SAFE(cudaStreamDestroy(streams[i]));
  }
  freeGpuMem(t2sub_d);
  freeGpuMem(v2sub_d);
  free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable shim: dereferences the by-reference Integer dimension
 * arguments, narrows them to int, and forwards to sd_t_d1_5_cuda. */
extern "C" void sd_t_d1_5_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
  int h1 = (int)*h1d;
  int h2 = (int)*h2d;
  int h3 = (int)*h3d;
  int h7 = (int)*h7d;
  int p4 = (int)*p4d;
  int p5 = (int)*p5d;
  int p6 = (int)*p6d;
  sd_t_d1_5_cuda(h1, h2, h3, h7, p4, p5, p6, triplesx, t2sub, v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h1,h3,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Device kernel for the d1_6 contraction:
 *   triplesx[h1,h3,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
 * Launched with a (T2,T1) thread block; every thread owns a 4x4 register tile,
 * so one block covers 4*T2 columns (x: flattened h3,p6 -> total_x) by 4*T1
 * rows (y: flattened h1,p5,p4 -> total_y). The h7 contraction dimension is
 * marched over in Tcomm-sized slabs staged through shared memory.
 * h3d is the fused (h3*h2) extent prepared by the host wrapper.
 * unused_idx is accepted only for signature uniformity and is never read.
 */
__global__ void sd_t_d1_6_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h1ld_triplesx,int h3ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,int p6ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
/* Staging tiles for one Tcomm-wide slab of each operand. */
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 register accumulators: tlocal(1..4)=b1 column, (5..8)=b2, etc. */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decode the four (x,y) output coordinates handled by this thread; each
 * sub-tile i is offset by T1*i within the block's 4x footprint. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
/* March over the contracted h7 dimension in Tcomm-sized slabs; each pass
 * stages both operand tiles, then performs the rank-Tcomm update. */
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
/* Barrier: both tiles must be fully staged before any thread reads them. */
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier: keep faster threads from restaging tiles still being read. */
__syncthreads();
}
/* Subtract the register tiles into global memory; the nested guards handle
 * partial tiles at the x/y edges of the iteration space. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal3;
triplesx_d[h1_3*h1ld_triplesx+h3_0*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal7;
triplesx_d[h1_3*h1ld_triplesx+h3_1*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal11;
triplesx_d[h1_3*h1ld_triplesx+h3_2*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal15;
triplesx_d[h1_3*h1ld_triplesx+h3_3*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_6_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
/* Host driver for sd_t_d1_6_kernel:
 *   triplesx[h1,h3,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
 * Copies t2sub/v2sub host->device and launches the kernel, which accumulates
 * into the device-resident t3_d buffer (the triplesx argument is part of the
 * public interface but is not dereferenced here — NOTE(review): confirm that
 * is intentional, it matches the sibling sd_t_d1_* drivers in this file).
 * Fixes vs. original: deprecated cudaThreadSynchronize() replaced by a
 * checked cudaDeviceSynchronize(); cudaStreamDestroy now error-checked;
 * dead locals (size_triplesx / size_block_triplesx / size_el_block_triplesx)
 * removed.
 */
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx;
size_t size_t2sub,size_v2sub;
cudaStream_t *streams;
size_t nstreams,i;
double *t2sub_d,*v2sub_d;
/* h2 is fused into h3: the kernel treats the (h3,h2) pair as one index. */
h3d=h3d*h2d;
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*p6d*h7d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d1_6_kernel, cudaFuncCachePreferShared);
nstreams=1;
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!=NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i]));
}
CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice));
/* Leading dimensions (in elements) of the flattened operand/output tensors. */
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
p6ld_v2sub=h3d;
h7ld_v2sub=p6d*h3d;
h1ld_triplesx=1;
h3ld_triplesx=h1d;
p5ld_triplesx=h3d*h1d;
p4ld_triplesx=p5d*h3d*h1d;
p6ld_triplesx=p4d*p5d*h3d*h1d;
/* Each thread computes a 4x4 register tile, so a block covers 4*T2 x 4*T1
   output elements; round the grid up accordingly. */
int total_x = h3d*p6d;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);
dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d1_6_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
/* cudaThreadSynchronize() is deprecated; use the checked replacement so
   asynchronous kernel failures are not silently dropped. */
CUDA_SAFE(cudaDeviceSynchronize());
for(i=0;i<nstreams;++i){
CUDA_SAFE(cudaStreamDestroy(streams[i]));
}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_6_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
/* Fortran-callable shim: dereference the Integer dimension pointers, narrow
 * them to int, and forward everything to the C entry point unchanged. */
int ih1 = (int)*h1d, ih2 = (int)*h2d, ih3 = (int)*h3d, ih7 = (int)*h7d;
int ip4 = (int)*p4d, ip5 = (int)*p5d, ip6 = (int)*p6d;
sd_t_d1_6_cuda(ih1, ih2, ih3, ih7, ip4, ip5, ip6, triplesx, t2sub, v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Device kernel for the d1_7 contraction:
 *   triplesx[h3,h1,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
 * Launched with a (T2,T1) thread block; every thread owns a 4x4 register tile,
 * so one block covers 4*T2 columns (x: flattened h3,p6 -> total_x) by 4*T1
 * rows (y: flattened h1,p5,p4 -> total_y). The h7 contraction dimension is
 * marched over in Tcomm-sized slabs staged through shared memory.
 * h3d is the fused (h3*h2) extent prepared by the host wrapper.
 * unused_idx is accepted only for signature uniformity and is never read.
 */
__global__ void sd_t_d1_7_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int p5ld_triplesx,int p6ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
/* Staging tiles for one Tcomm-wide slab of each operand. */
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 register accumulators: tlocal(1..4)=b1 column, (5..8)=b2, etc. */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decode the four (x,y) output coordinates handled by this thread; each
 * sub-tile i is offset by T1*i within the block's 4x footprint. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
/* March over the contracted h7 dimension in Tcomm-sized slabs; each pass
 * stages both operand tiles, then performs the rank-Tcomm update. */
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
/* Barrier: both tiles must be fully staged before any thread reads them. */
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier: keep faster threads from restaging tiles still being read. */
__syncthreads();
}
/* Add the register tiles into global memory; the nested guards handle
 * partial tiles at the x/y edges of the iteration space. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p6_0*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p6_1*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p6_2*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p6_3*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_7_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
/* Host driver for sd_t_d1_7_kernel:
 *   triplesx[h3,h1,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
 * Copies t2sub/v2sub host->device and launches the kernel, which accumulates
 * into the device-resident t3_d buffer (the triplesx argument is part of the
 * public interface but is not dereferenced here — NOTE(review): confirm that
 * is intentional, it matches the sibling sd_t_d1_* drivers in this file).
 * Fixes vs. original: deprecated cudaThreadSynchronize() replaced by a
 * checked cudaDeviceSynchronize(); cudaStreamDestroy now error-checked;
 * dead locals (size_triplesx / size_block_triplesx / size_el_block_triplesx)
 * removed.
 */
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx;
size_t size_t2sub,size_v2sub;
cudaStream_t *streams;
size_t nstreams,i;
double *t2sub_d,*v2sub_d;
/* h2 is fused into h3: the kernel treats the (h3,h2) pair as one index. */
h3d=h3d*h2d;
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*p6d*h7d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d1_7_kernel, cudaFuncCachePreferShared);
nstreams=1;
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!=NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i]));
}
CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice));
/* Leading dimensions (in elements) of the flattened operand/output tensors. */
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
p6ld_v2sub=h3d;
h7ld_v2sub=p6d*h3d;
h3ld_triplesx=1;
h1ld_triplesx=h3d;
p5ld_triplesx=h1d*h3d;
p6ld_triplesx=p5d*h1d*h3d;
p4ld_triplesx=p6d*p5d*h1d*h3d;
/* Each thread computes a 4x4 register tile, so a block covers 4*T2 x 4*T1
   output elements; round the grid up accordingly. */
int total_x = h3d*p6d;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);
dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d1_7_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
/* cudaThreadSynchronize() is deprecated; use the checked replacement so
   asynchronous kernel failures are not silently dropped. */
CUDA_SAFE(cudaDeviceSynchronize());
for(i=0;i<nstreams;++i){
CUDA_SAFE(cudaStreamDestroy(streams[i]));
}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_7_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
/* Fortran-callable shim: dereference the Integer dimension pointers, narrow
 * them to int, and forward everything to the C entry point unchanged. */
int ih1 = (int)*h1d, ih2 = (int)*h2d, ih3 = (int)*h3d, ih7 = (int)*h7d;
int ip4 = (int)*p4d, ip5 = (int)*p5d, ip6 = (int)*p6d;
sd_t_d1_7_cuda(ih1, ih2, ih3, ih7, ip4, ip5, ip6, triplesx, t2sub, v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,h2,p5,p6,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d1_8_kernel(int h1d,int h2d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int h2ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int h2ld_triplesx,int p5ld_triplesx,int p6ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h2_0=rest_x%h2d;
rest_x=rest_x/h2d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h2_1=rest_x%h2d;
rest_x=rest_x/h2d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h2_2=rest_x%h2d;
rest_x=rest_x/h2d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h2_3=rest_x%h2d;
rest_x=rest_x/h2d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+h2_0*h2ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+h2_1*h2ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+h2_2*h2ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+h2_3*h2ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+h2_0*h2ld_triplesx+p5_3*p5ld_triplesx+p6_0*p6ld_triplesx+p4_3*p4ld_triplesx]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+h2_1*h2ld_triplesx+p5_3*p5ld_triplesx+p6_1*p6ld_triplesx+p4_3*p4ld_triplesx]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+h2_2*h2ld_triplesx+p5_3*p5ld_triplesx+p6_2*p6ld_triplesx+p4_3*p4ld_triplesx]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+h2_3*h2ld_triplesx+p5_3*p5ld_triplesx+p6_3*p6ld_triplesx+p4_3*p4ld_triplesx]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
}
}
__syncthreads();
}
/*----------------------------------------------------------------------*
 * Host driver for sd_t_d1_8_kernel.
 *
 * Copies t2sub (h7d*p4d*p5d*h1d doubles) and v2sub (h3d*h2d*p6d*h7d
 * doubles) to the device, launches the contraction kernel on a 2D grid
 * of (T2,T1) thread blocks, and frees the device inputs.  The result is
 * accumulated into the module-global device buffer t3_d; the `triplesx`
 * host pointer is part of the generated interface but is not read or
 * written here.
 *
 * Fixes vs. the generated original:
 *  - cudaThreadSynchronize() (deprecated) -> cudaDeviceSynchronize()
 *  - stream destruction and the final sync are now error-checked with
 *    CUDA_SAFE, consistent with the other runtime calls in this file
 *  - removed dead locals (size_triplesx / size_block_triplesx /
 *    size_el_block_triplesx were computed but never used)
 *----------------------------------------------------------------------*/
extern "C" void sd_t_d1_8_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
  /* leading-dimension strides for the flattened input/output tensors */
  size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx;
  size_t size_t2sub,size_v2sub;
  cudaStream_t *streams;
  size_t nstreams,i;
  double *t2sub_d,*v2sub_d;
  size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
  size_v2sub=h3d*h2d*p6d*h7d*sizeof(double);
  /* the kernel tiles both operands through shared memory */
  CUDA_SAFE(cudaFuncSetCacheConfig(sd_t_d1_8_kernel, cudaFuncCachePreferShared));
  nstreams=1;
  t2sub_d=(double*)getGpuMem(size_t2sub);
  v2sub_d=(double*)getGpuMem(size_v2sub);
  streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
  assert(streams!= NULL);
  for(i=0;i<nstreams;++i) {
    CUDA_SAFE(cudaStreamCreate(&streams[i]));
  }
  CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice));
  CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice));
  /* column-major strides: fastest-varying index has stride 1 */
  h7ld_t2sub=1;
  p4ld_t2sub=h7d;
  p5ld_t2sub=p4d*h7d;
  h1ld_t2sub=p5d*p4d*h7d;
  h3ld_v2sub=1;
  h2ld_v2sub=h3d;
  p6ld_v2sub=h2d*h3d;
  h7ld_v2sub=p6d*h2d*h3d;
  h3ld_triplesx=1;
  h1ld_triplesx=h3d;
  h2ld_triplesx=h1d*h3d;
  p5ld_triplesx=h2d*h1d*h3d;
  p6ld_triplesx=p5d*h2d*h1d*h3d;
  p4ld_triplesx=p6d*p5d*h2d*h1d*h3d;
  /* kernel processes a 4x4 register tile per thread, hence 4*T2 / 4*T1 */
  int total_x = h3d*h2d*p6d*1;
  int total_y = p4d*p5d*h1d;
  dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
  for(i=0;i<nstreams;++i){
    sd_t_d1_8_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
    CHECK_ERR("Kernel execution failed");
  }
  /* cudaThreadSynchronize() is deprecated; use the device-wide sync */
  CUDA_SAFE(cudaDeviceSynchronize());
  for(i=0;i<nstreams;++i){
    CUDA_SAFE(cudaStreamDestroy(streams[i]));
  }
  freeGpuMem(t2sub_d);
  freeGpuMem(v2sub_d);
  free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable shim: dereference the by-reference Fortran Integer
 * arguments, narrow them to int, and forward to the C driver. */
extern "C" void sd_t_d1_8_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
  int h1 = (int)*h1d, h2 = (int)*h2d, h3 = (int)*h3d, h7 = (int)*h7d;
  int p4 = (int)*p4d, p5 = (int)*p5d, p6 = (int)*p6d;
  sd_t_d1_8_cuda(h1, h2, h3, h7, p4, p5, p6, triplesx, t2sub, v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h1,h3,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/*----------------------------------------------------------------------*
 * Device kernel for the d1_9 contraction
 *   triplesx[h1,h3,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
 * summed over the contraction index h7.
 *
 * Tiling scheme: each thread owns a 4x4 register tile (tlocal1..16);
 * the x direction covers the (h3,p6) output indices (total_x items)
 * and the y direction covers (h1,p5,p4) (total_y items).  The launch
 * uses dimBlock(T2,T1) with a grid of ceil(total_x/(4*T2)) by
 * ceil(total_y/(4*T1)) blocks (see the host driver).  Both operands
 * are staged through shared memory in chunks of Tcomm h7 values.
 *
 * NOTE(review): T1 and T2 are used interchangeably in several offsets
 * below (e.g. "+T1*1" on x-direction loads, "thread_y+T2*3" on stores);
 * this is only correct because T1 == T2 == 16 here.
 *
 * All *ld_* parameters are precomputed column-major strides; unused_idx
 * is a stream index carried by the generated interface but never read.
 *----------------------------------------------------------------------*/
__global__ void sd_t_d1_9_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h1ld_triplesx,int h3ld_triplesx,int p5ld_triplesx,int p6ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
/* _0.._3 suffixes: the four tile replicas this thread handles in each
 * direction; h7 is the contraction index. */
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
/* shared-memory staging tiles: t2sub rows by h7-chunk, h7-chunk by
 * v2sub columns */
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
/* global position of this thread's first tile element */
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 register accumulator tile */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decode the flat x/y positions of all four tile replicas into tensor
 * indices: y decomposes as h1 (fastest), then p5, then p4; x decomposes
 * as h3 (fastest), then p6. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
/* March over the contraction index h7 in chunks of Tcomm. */
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
/* Stage one Tcomm-wide strip of each operand for every tile replica,
 * guarded so out-of-range rows/columns are not loaded. */
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
/* barrier: all staging writes must land before anyone reads the tiles */
__syncthreads();
/* Rank-1 updates of the 4x4 register tile over this h7 chunk. */
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* barrier: finish reading before the next chunk overwrites the tiles */
__syncthreads();
}
/* Write-back: accumulate (+=) the register tile into triplesx, with the
 * else-if ladders clipping partial tiles at the y boundary and the outer
 * ifs clipping at the x boundary. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
triplesx_d[h1_3*h1ld_triplesx+h3_0*h3ld_triplesx+p5_3*p5ld_triplesx+p6_0*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
triplesx_d[h1_3*h1ld_triplesx+h3_1*h3ld_triplesx+p5_3*p5ld_triplesx+p6_1*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
triplesx_d[h1_3*h1ld_triplesx+h3_2*h3ld_triplesx+p5_3*p5ld_triplesx+p6_2*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
triplesx_d[h1_3*h1ld_triplesx+h3_3*h3ld_triplesx+p5_3*p5ld_triplesx+p6_3*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
}
}
__syncthreads();
}
/*----------------------------------------------------------------------*
 * Host driver for sd_t_d1_9_kernel.
 *
 * Folds h2 into h3 (h3d *= h2d), copies t2sub and v2sub to the device,
 * launches the contraction kernel on a 2D grid of (T2,T1) thread
 * blocks, and frees the device inputs.  The result is accumulated into
 * the module-global device buffer t3_d; the `triplesx` host pointer is
 * part of the generated interface but is not read or written here.
 *
 * Fixes vs. the generated original:
 *  - cudaThreadSynchronize() (deprecated) -> cudaDeviceSynchronize()
 *  - stream destruction and the final sync are now error-checked with
 *    CUDA_SAFE, consistent with the other runtime calls in this file
 *  - removed dead locals (size_triplesx / size_block_triplesx /
 *    size_el_block_triplesx were computed but never used)
 *----------------------------------------------------------------------*/
extern "C" void sd_t_d1_9_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
  /* h2 and h3 are adjacent in both operands, so treat them as one index */
  h3d=h3d*h2d;
  /* leading-dimension strides for the flattened input/output tensors */
  size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx;
  size_t size_t2sub,size_v2sub;
  cudaStream_t *streams;
  size_t nstreams,i;
  double *t2sub_d,*v2sub_d;
  size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
  size_v2sub=h3d*p6d*h7d*sizeof(double);
  /* the kernel tiles both operands through shared memory */
  CUDA_SAFE(cudaFuncSetCacheConfig(sd_t_d1_9_kernel, cudaFuncCachePreferShared));
  nstreams=1;
  t2sub_d=(double*)getGpuMem(size_t2sub);
  v2sub_d=(double*)getGpuMem(size_v2sub);
  streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
  assert(streams!= NULL);
  for(i=0;i<nstreams;++i) {
    CUDA_SAFE(cudaStreamCreate(&streams[i]));
  }
  CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice));
  CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice));
  /* column-major strides: fastest-varying index has stride 1 */
  h7ld_t2sub=1;
  p4ld_t2sub=h7d;
  p5ld_t2sub=p4d*h7d;
  h1ld_t2sub=p5d*p4d*h7d;
  h3ld_v2sub=1;
  p6ld_v2sub=h3d;
  h7ld_v2sub=p6d*h3d;
  h1ld_triplesx=1;
  h3ld_triplesx=h1d;
  p5ld_triplesx=h3d*h1d;
  p6ld_triplesx=p5d*h3d*h1d;
  p4ld_triplesx=p6d*p5d*h3d*h1d;
  /* kernel processes a 4x4 register tile per thread, hence 4*T2 / 4*T1 */
  int total_x = h3d*p6d*1;
  int total_y = p4d*p5d*h1d;
  dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
  for(i=0;i<nstreams;++i){
    sd_t_d1_9_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
    CHECK_ERR("Kernel execution failed");
  }
  /* cudaThreadSynchronize() is deprecated; use the device-wide sync */
  CUDA_SAFE(cudaDeviceSynchronize());
  for(i=0;i<nstreams;++i){
    CUDA_SAFE(cudaStreamDestroy(streams[i]));
  }
  freeGpuMem(t2sub_d);
  freeGpuMem(v2sub_d);
  free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable shim: dereference the by-reference Fortran Integer
 * arguments, narrow them to int, and forward to the C driver. */
extern "C" void sd_t_d1_9_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
  int h1 = (int)*h1d, h2 = (int)*h2d, h3 = (int)*h3d, h7 = (int)*h7d;
  int p4 = (int)*p4d, p5 = (int)*p5d, p6 = (int)*p6d;
  sd_t_d1_9_cuda(h1, h2, h3, h7, p4, p5, p6, triplesx, t2sub, v2sub);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p6,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/*----------------------------------------------------------------------*
 * Device kernel for the d2_1 contraction
 *   t3[h3,h2,h1,p6,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6]
 * summed over the contraction index p7.
 *
 * Tiling scheme: each thread owns a 4x4 register tile (tlocal1..16);
 * the x direction covers the (h3,p6) output indices (total_x items)
 * and the y direction covers (h2,h1,p4) (total_y items).  The launch
 * uses dimBlock(T2,T1) with a grid of ceil(total_x/(4*T2)) by
 * ceil(total_y/(4*T1)) blocks (see the host driver).  Both operands
 * are staged through shared memory in chunks of Tcomm p7 values.
 *
 * NOTE(review): T1 and T2 are used interchangeably in several offsets
 * below (e.g. "+T1*1" on x-direction loads, "thread_y+T2*3" on stores);
 * this is only correct because T1 == T2 == 16 here.
 *
 * All *ld_* parameters are precomputed column-major strides; unused_idx
 * is a stream index carried by the generated interface but never read.
 *----------------------------------------------------------------------*/
__global__ void sd_t_d2_1_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
/* _0.._3 suffixes: the four tile replicas this thread handles in each
 * direction; p7 is the contraction index. */
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
/* shared-memory staging tiles: t2 rows by p7-chunk, p7-chunk by v2
 * columns */
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
/* global position of this thread's first tile element */
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 register accumulator tile */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decode the flat x/y positions of all four tile replicas into tensor
 * indices: x decomposes as h3 (fastest), then p6; y decomposes as h2
 * (fastest), then h1, then p4. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p4_3=rest_y;
p6_3=rest_x;
/* March over the contraction index p7 in chunks of Tcomm. */
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
/* Stage one Tcomm-wide strip of each operand for every tile replica,
 * guarded so out-of-range rows/columns are not loaded. */
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
/* barrier: all staging writes must land before anyone reads the tiles */
__syncthreads();
/* Rank-1 updates of the 4x4 register tile over this p7 chunk. */
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* barrier: finish reading before the next chunk overwrites the tiles */
__syncthreads();
}
/* Write-back: subtract (-=) the register tile from t3, with the else-if
 * ladders clipping partial tiles at the y boundary and the outer ifs
 * clipping at the x boundary. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]-=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3]-=tlocal3;
t3d[h3_0*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_0*p6ld_t3+p4_3*p4ld_t3]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]-=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]-=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3]-=tlocal7;
t3d[h3_1*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_1*p6ld_t3+p4_3*p4ld_t3]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]-=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]-=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3]-=tlocal11;
t3d[h3_2*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_2*p6ld_t3+p4_3*p4ld_t3]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]-=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]-=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3]-=tlocal15;
t3d[h3_3*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_3*p6ld_t3+p4_3*p4ld_t3]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]-=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]-=tlocal13;
}
}
__syncthreads();
}
/*----------------------------------------------------------------------*
 * Host driver for sd_t_d2_1_kernel.
 *
 * Folds p5 into p6 (p6d *= p5d), copies t2 and v2 to the device,
 * launches the contraction kernel on a 2D grid of (T2,T1) thread
 * blocks, and frees the device inputs.  The result is accumulated into
 * the module-global device buffer t3_d; the `t3` host pointer is part
 * of the generated interface but is not read or written here.
 *
 * Fixes vs. the generated original:
 *  - cudaThreadSynchronize() (deprecated) -> cudaDeviceSynchronize()
 *  - stream destruction and the final sync are now error-checked with
 *    CUDA_SAFE, consistent with the other runtime calls in this file
 *  - removed dead locals (size_t3 / size_block_t3 / size_el_block_t3
 *    were computed but never used; the t3 device allocation they served
 *    was already commented out)
 *----------------------------------------------------------------------*/
extern "C" void sd_t_d2_1_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
  /* p5 and p6 are adjacent in both operands, so treat them as one index */
  p6d=p6d*p5d;
  /* leading-dimension strides for the flattened input/output tensors */
  size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3;
  size_t size_t2,size_v2;
  cudaStream_t *streams;
  size_t nstreams,i;
  double *t2_d,*v2_d;
  size_t2=p7d*p4d*h1d*h2d*sizeof(double);
  size_v2=p7d*h3d*p6d*sizeof(double);
  /* the kernel tiles both operands through shared memory */
  CUDA_SAFE(cudaFuncSetCacheConfig(sd_t_d2_1_kernel, cudaFuncCachePreferShared));
  nstreams=1;
  /* t3 output lives in the already-allocated module-global t3_d */
  t2_d=(double*)getGpuMem(size_t2);
  v2_d=(double*)getGpuMem(size_v2);
  streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
  assert(streams!= NULL);
  for(i=0;i<nstreams;++i) {
    CUDA_SAFE(cudaStreamCreate(&streams[i]));
  }
  CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice));
  CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice));
  /* column-major strides: fastest-varying index has stride 1 */
  p7ld_t2=1;
  p4ld_t2=p7d;
  h1ld_t2=p4d*p7d;
  h2ld_t2=h1d*p4d*p7d;
  p7ld_v2=1;
  h3ld_v2=p7d;
  p6ld_v2=h3d*p7d;
  h3ld_t3=1;
  h2ld_t3=h3d;
  h1ld_t3=h2d*h3d;
  p6ld_t3=h1d*h2d*h3d;
  p4ld_t3=p6d*h1d*h2d*h3d;
  /* kernel processes a 4x4 register tile per thread, hence 4*T2 / 4*T1 */
  int total_x = h3d*p6d;
  int total_y = p4d*h1d*h2d;
  dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
  for(i=0;i<nstreams;++i){
    sd_t_d2_1_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
    CHECK_ERR("Kernel execution failed");
  }
  /* cudaThreadSynchronize() is deprecated; use the device-wide sync */
  CUDA_SAFE(cudaDeviceSynchronize());
  for(i=0;i<nstreams;++i){
    CUDA_SAFE(cudaStreamDestroy(streams[i]));
  }
  freeGpuMem(t2_d);
  freeGpuMem(v2_d);
  free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable shim: dereference the by-reference Fortran Integer
 * arguments, narrow them to int, and forward to the C driver. */
extern "C" void sd_t_d2_1_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
  int h1 = (int)*h1d, h2 = (int)*h2d, h3 = (int)*h3d;
  int p4 = (int)*p4d, p5 = (int)*p5d, p6 = (int)*p6d, p7 = (int)*p7d;
  sd_t_d2_1_cuda(h1, h2, h3, p4, p5, p6, p7, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h1,h3,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Device kernel: t3[h2,h1,h3,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3]
 *
 * Launch contract (set up by sd_t_d2_2_cuda): blockDim = (T2,T1) = (16,16);
 * grid.x covers total_x = h3d output columns, grid.y covers
 * total_y = p4d*h1d*h2d output rows.  Each thread accumulates a 4x4
 * register tile (tlocal1..tlocal16), so one block covers a 4*T2 x 4*T1
 * patch of the output.  The contraction index p7 is tiled in chunks of
 * Tcomm and staged through shared memory (t2_shm rows / v2_shm columns).
 * The *ld_* arguments are flat-array strides for each tensor index.
 * unused_idx is ignored (kept for a uniform kernel signature).
 */
__global__ void sd_t_d2_2_kernel(int h1d,int h2d,int h3d,int p4d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int h2ld_t3,int h1ld_t3,int h3ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
/* Staging tiles for the p7 contraction chunk: t2_shm holds 4*T1 output
 * rows x Tcomm p7 values; v2_shm holds Tcomm p7 values x 4*T2 columns. */
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
/* Global output coordinates of this thread's first (offset 0) tile slot. */
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 register accumulator: tlocal[1+row+4*col] pairs a-row with b-column. */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decode output indices for each of the four y-tile offsets (h2,h1,p4 from
 * the flattened y coordinate) and four x-tile offsets (h3 from x). */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p4_0=rest_y;
h3_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p4_1=rest_y;
h3_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p4_2=rest_y;
h3_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p4_3=rest_y;
h3_3=rest_x;
/* Tile the contraction index p7 in chunks of Tcomm. */
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
/* Stage this p7 chunk into shared memory, one (guard-checked) row/column
 * per tile offset; threads cooperate along the orthogonal block dim. */
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
/* Barrier before reading the freshly staged chunk. */
__syncthreads();
/* Inner product over the chunk: 4 t2 rows x 4 v2 columns -> 16 FMAs.
 * (T1 == T2 here, so the T1-offset stores above line up with these
 * T2-offset reads for v2_shm.) */
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier before the next iteration overwrites the staging tiles. */
__syncthreads();
}
/* Write-back: for each in-bounds x-tile column, the descending if/else
 * chain writes however many of the four y-tile rows are in bounds
 * (4, 3, 2, or 1).  Accumulation is subtractive per the contraction. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3]-=tlocal3;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_0*h3ld_t3+p4_3*p4ld_t3]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3]-=tlocal7;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_1*h3ld_t3+p4_3*p4ld_t3]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3]-=tlocal11;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_2*h3ld_t3+p4_3*p4ld_t3]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3]-=tlocal15;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_3*h3ld_t3+p4_3*p4ld_t3]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3]-=tlocal13;
}
}
__syncthreads();
}
/* Host driver for t3[h2,h1,h3,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3].
 *
 * The p5/p6 output extents are folded into h3 (h3d *= p6d*p5d) so the
 * kernel sees a 4-index t3.  t2 and v2 are copied host->device for the
 * duration of the call; the result is accumulated into the file-global
 * device buffer t3_d (the host pointer t3 is unused here — kept for a
 * uniform interface with the sibling sd_t_* drivers).
 *
 * Fixes vs. previous revision: deprecated cudaThreadSynchronize() replaced
 * by cudaDeviceSynchronize(); cudaStreamDestroy() now error-checked like
 * cudaStreamCreate(); dead size_t3/size_block_t3/size_el_block_t3 locals
 * removed.
 */
extern "C" void sd_t_d2_2_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
h3d=h3d*p6d;
h3d=h3d*p5d;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3;
size_t size_t2,size_v2;
cudaStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*sizeof(double);
/* The kernel is shared-memory bound (two staging tiles per block). */
cudaFuncSetCacheConfig(sd_t_d2_2_kernel, cudaFuncCachePreferShared);
nstreams=1;
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice));
/* Flat-array strides: t2 is [p7,p4,h1,h2] fastest-first, v2 is [p7,h3],
 * t3 is [h2,h1,h3,p4]. */
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
h2ld_t3=1;
h1ld_t3=h2d;
h3ld_t3=h1d*h2d;
p4ld_t3=h3d*h1d*h2d;
int total_x = h3d;          /* output extent tiled along block x */
int total_y = p4d*h1d*h2d;  /* output extent tiled along block y */
/* Each (T2,T1) block produces a 4*T2 x 4*T1 output patch. */
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d2_2_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
/* cudaThreadSynchronize() is deprecated (removed in CUDA 12). */
CUDA_SAFE(cudaDeviceSynchronize());
for(i=0;i<nstreams;++i){
CUDA_SAFE(cudaStreamDestroy(streams[i]));}
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable shim: dereference the Fortran-style Integer pointers,
 * narrow them to int, and forward to the C driver sd_t_d2_2_cuda. */
extern "C" void sd_t_d2_2_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
  const int h1 = (int)*h1d;
  const int h2 = (int)*h2d;
  const int h3 = (int)*h3d;
  const int p4 = (int)*p4d;
  const int p5 = (int)*p5d;
  const int p6 = (int)*p6d;
  const int p7 = (int)*p7d;
  sd_t_d2_2_cuda(h1, h2, h3, p4, p5, p6, p7, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h3,h1,p6,p4] += t2[p7,p4,h1,h2] * v2[p7,h3,p6]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Device kernel: t3[h2,h3,h1,p6,p4] += t2[p7,p4,h1,h2] * v2[p7,h3,p6]
 *
 * Launch contract (set up by sd_t_d2_3_cuda): blockDim = (T2,T1) = (16,16);
 * grid.x covers total_x = h3d*p6d, grid.y covers total_y = p4d*h1d*h2d.
 * Each thread accumulates a 4x4 register tile (tlocal1..tlocal16); the
 * contraction index p7 is tiled in Tcomm-sized chunks through shared
 * memory.  The *ld_* arguments are flat-array strides; unused_idx is
 * ignored.
 */
__global__ void sd_t_d2_3_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int h2ld_t3,int h3ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
/* Shared staging tiles for one p7 chunk (t2 rows / v2 columns). */
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
/* Global output coordinates of this thread's first tile slot. */
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 register accumulator for the thread's output patch. */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decode (h2,h1,p4) from the flattened y coordinate and (h3,p6) from the
 * flattened x coordinate, for each of the four tile offsets. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p4_3=rest_y;
p6_3=rest_x;
/* Tile the contraction index p7 in chunks of Tcomm. */
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
/* Stage this p7 chunk into shared memory (guard-checked per tile). */
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
/* Barrier before reading the freshly staged chunk. */
__syncthreads();
/* Inner product over the chunk: 16 FMAs per p7 value. */
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier before the next iteration overwrites the staging tiles. */
__syncthreads();
}
/* Write-back: additive accumulation; the descending if/else chain writes
 * 4, 3, 2 or 1 of the y-tile rows depending on how many are in bounds. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3]+=tlocal3;
t3d[h2_3*h2ld_t3+h3_0*h3ld_t3+h1_3*h1ld_t3+p6_0*p6ld_t3+p4_3*p4ld_t3]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3]+=tlocal7;
t3d[h2_3*h2ld_t3+h3_1*h3ld_t3+h1_3*h1ld_t3+p6_1*p6ld_t3+p4_3*p4ld_t3]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3]+=tlocal11;
t3d[h2_3*h2ld_t3+h3_2*h3ld_t3+h1_3*h1ld_t3+p6_2*p6ld_t3+p4_3*p4ld_t3]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3]+=tlocal15;
t3d[h2_3*h2ld_t3+h3_3*h3ld_t3+h1_3*h1ld_t3+p6_3*p6ld_t3+p4_3*p4ld_t3]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]+=tlocal13;
}
}
__syncthreads();
}
/* Host driver for t3[h2,h3,h1,p6,p4] += t2[p7,p4,h1,h2] * v2[p7,h3,p6].
 *
 * The p5 extent is folded into p6 (p6d *= p5d).  t2 and v2 are copied
 * host->device for the duration of the call; the result is accumulated
 * into the file-global device buffer t3_d (the host pointer t3 is unused
 * here — kept for a uniform interface with the sibling sd_t_* drivers).
 *
 * Fixes vs. previous revision: deprecated cudaThreadSynchronize() replaced
 * by cudaDeviceSynchronize(); cudaStreamDestroy() now error-checked like
 * cudaStreamCreate(); dead size_t3/size_block_t3/size_el_block_t3 locals
 * removed.
 */
extern "C" void sd_t_d2_3_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
p6d=p6d*p5d;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p6ld_t3,p4ld_t3;
size_t size_t2,size_v2;
cudaStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*sizeof(double);
/* The kernel is shared-memory bound (two staging tiles per block). */
cudaFuncSetCacheConfig(sd_t_d2_3_kernel, cudaFuncCachePreferShared);
nstreams=1;
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice));
/* Flat-array strides: t2 is [p7,p4,h1,h2] fastest-first, v2 is
 * [p7,h3,p6], t3 is [h2,h3,h1,p6,p4]. */
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
h2ld_t3=1;
h3ld_t3=h2d;
h1ld_t3=h3d*h2d;
p6ld_t3=h1d*h3d*h2d;
p4ld_t3=p6d*h1d*h3d*h2d;
int total_x = h3d*p6d;      /* output extent tiled along block x */
int total_y = p4d*h1d*h2d;  /* output extent tiled along block y */
/* Each (T2,T1) block produces a 4*T2 x 4*T1 output patch. */
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d2_3_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
/* cudaThreadSynchronize() is deprecated (removed in CUDA 12). */
CUDA_SAFE(cudaDeviceSynchronize());
for(i=0;i<nstreams;++i){
CUDA_SAFE(cudaStreamDestroy(streams[i]));}
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable shim: dereference the Fortran-style Integer pointers,
 * narrow them to int, and forward to the C driver sd_t_d2_3_cuda. */
extern "C" void sd_t_d2_3_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
  const int h1 = (int)*h1d;
  const int h2 = (int)*h2d;
  const int h3 = (int)*h3d;
  const int p4 = (int)*p4d;
  const int p5 = (int)*p5d;
  const int p6 = (int)*p6d;
  const int p7 = (int)*p7d;
  sd_t_d2_3_cuda(h1, h2, h3, p4, p5, p6, p7, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p6,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Device kernel: t3[h3,h2,h1,p6,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
 *
 * Launch contract (set up by sd_t_d2_4_cuda): blockDim = (T2,T1) = (16,16);
 * grid.x covers total_x = h3d*p6d*p5d, grid.y covers total_y = p4d*h1d*h2d.
 * Each thread accumulates a 4x4 register tile (tlocal1..tlocal16); the
 * contraction index p7 is tiled in Tcomm-sized chunks through shared
 * memory.  The *ld_* arguments are flat-array strides; unused_idx is
 * ignored.
 */
__global__ void sd_t_d2_4_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
/* Shared staging tiles for one p7 chunk (t2 rows / v2 columns). */
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
/* Global output coordinates of this thread's first tile slot. */
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 register accumulator for the thread's output patch. */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decode (h2,h1,p4) from the flattened y coordinate and (h3,p6,p5) from
 * the flattened x coordinate, for each of the four tile offsets. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p6_0=rest_x%p6d;
rest_x=rest_x/p6d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p6_1=rest_x%p6d;
rest_x=rest_x/p6d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p6_2=rest_x%p6d;
rest_x=rest_x/p6d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p6_3=rest_x%p6d;
rest_x=rest_x/p6d;
p4_3=rest_y;
p5_3=rest_x;
/* Tile the contraction index p7 in chunks of Tcomm. */
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
/* Stage this p7 chunk into shared memory (guard-checked per tile). */
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
/* Barrier before reading the freshly staged chunk. */
__syncthreads();
/* Inner product over the chunk: 16 FMAs per p7 value. */
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier before the next iteration overwrites the staging tiles. */
__syncthreads();
}
/* Write-back: additive accumulation; the descending if/else chain writes
 * 4, 3, 2 or 1 of the y-tile rows depending on how many are in bounds. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]+=tlocal3;
t3d[h3_0*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_0*p6ld_t3+p4_3*p4ld_t3+p5_0*p5ld_t3]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]+=tlocal7;
t3d[h3_1*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_1*p6ld_t3+p4_3*p4ld_t3+p5_1*p5ld_t3]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]+=tlocal11;
t3d[h3_2*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_2*p6ld_t3+p4_3*p4ld_t3+p5_2*p5ld_t3]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]+=tlocal15;
t3d[h3_3*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_3*p6ld_t3+p4_3*p4ld_t3+p5_3*p5ld_t3]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
}
}
__syncthreads();
}
/* Host driver for t3[h3,h2,h1,p6,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5].
 *
 * All six output indices are kept distinct (no folding).  t2 and v2 are
 * copied host->device for the duration of the call; the result is
 * accumulated into the file-global device buffer t3_d (the host pointer
 * t3 is unused here — kept for a uniform interface with the sibling
 * sd_t_* drivers).
 *
 * Fixes vs. previous revision: deprecated cudaThreadSynchronize() replaced
 * by cudaDeviceSynchronize(); cudaStreamDestroy() now error-checked like
 * cudaStreamCreate(); dead size_t3/size_block_t3/size_el_block_t3 locals
 * removed.
 */
extern "C" void sd_t_d2_4_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,p5ld_t3;
size_t size_t2,size_v2;
cudaStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*p5d*sizeof(double);
/* The kernel is shared-memory bound (two staging tiles per block). */
cudaFuncSetCacheConfig(sd_t_d2_4_kernel, cudaFuncCachePreferShared);
nstreams=1;
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice));
/* Flat-array strides: t2 is [p7,p4,h1,h2] fastest-first, v2 is
 * [p7,h3,p6,p5], t3 is [h3,h2,h1,p6,p4,p5]. */
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
p5ld_v2=p6d*h3d*p7d;
h3ld_t3=1;
h2ld_t3=h3d;
h1ld_t3=h2d*h3d;
p6ld_t3=h1d*h2d*h3d;
p4ld_t3=p6d*h1d*h2d*h3d;
p5ld_t3=p4d*p6d*h1d*h2d*h3d;
int total_x = h3d*p6d*p5d;  /* output extent tiled along block x */
int total_y = p4d*h1d*h2d;  /* output extent tiled along block y */
/* Each (T2,T1) block produces a 4*T2 x 4*T1 output patch. */
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d2_4_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
/* cudaThreadSynchronize() is deprecated (removed in CUDA 12). */
CUDA_SAFE(cudaDeviceSynchronize());
for(i=0;i<nstreams;++i){
CUDA_SAFE(cudaStreamDestroy(streams[i]));}
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable shim: dereference the Fortran-style Integer pointers,
 * narrow them to int, and forward to the C driver sd_t_d2_4_cuda. */
extern "C" void sd_t_d2_4_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
  const int h1 = (int)*h1d;
  const int h2 = (int)*h2d;
  const int h3 = (int)*h3d;
  const int p4 = (int)*p4d;
  const int p5 = (int)*p5d;
  const int p6 = (int)*p6d;
  const int p7 = (int)*p7d;
  sd_t_d2_4_cuda(h1, h2, h3, p4, p5, p6, p7, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h1,h3,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Tiled tensor contraction over p7:
 *   t3[h2,h1,h3,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p5]
 * (in the host driver p6 has already been folded into h3, so h3d here is
 * really h3d*p6d — TODO confirm against the caller).
 * Launch layout: 2-D thread block of (T2, T1) threads; each thread owns a
 * 4x4 register tile (tlocal1..16), so one block covers a (4*T2)x(4*T1)
 * patch of the output plane, where total_x = h3d*p5d columns and
 * total_y = p4d*h1d*h2d rows.  The *ld_* arguments are precomputed strides
 * ("leading dimensions") for each tensor index; unused_idx is ignored.
 * Slices of t2 and v2 are staged through shared memory Tcomm p7-values at a
 * time, then contracted as a 4x4 outer product per thread. */
__global__ void sd_t_d2_5_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p5ld_v2,int h2ld_t3,int h1ld_t3,int h3ld_t3,int p4ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
/* Staging tiles: t2_shm holds 4*T1 output rows x Tcomm p7-values,
 * v2_shm holds Tcomm p7-values x 4*T2 output columns. */
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 register accumulator tile for this thread. */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decode the four (row, column) coordinate sets handled by this thread,
 * one per sub-tile offset k*T1 / k*T2 (k = 0..3): rows decompose into
 * (h2,h1,p4), columns into (h3,p5). */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
p4_3=rest_y;
p5_3=rest_x;
/* March over the contraction index p7 in chunks of Tcomm. */
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
/* Cooperative load of the four t2 rows / v2 columns owned by this thread;
 * the thread_y/thread_x guards skip loads for out-of-range tile overhang. */
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
/* Barrier: tiles fully staged before any thread reads them. */
__syncthreads();
/* 4x4 outer-product accumulation over this chunk of p7. */
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier: all reads done before the next chunk overwrites the tiles. */
__syncthreads();
}
/* Accumulate the register tile into t3; each column block is guarded on
 * thread_x, and the descending else-if chain stores only the rows that fall
 * inside total_y (tile overhang at the grid edge). */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]+=tlocal3;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_0*h3ld_t3+p4_3*p4ld_t3+p5_0*p5ld_t3]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]+=tlocal7;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_1*h3ld_t3+p4_3*p4ld_t3+p5_1*p5ld_t3]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]+=tlocal11;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_2*h3ld_t3+p4_3*p4ld_t3+p5_2*p5ld_t3]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]+=tlocal15;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_3*h3ld_t3+p4_3*p4ld_t3+p5_3*p5ld_t3]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
}
}
__syncthreads();
}
/* Host driver for sd_t_d2_5_kernel:
 *   t3[h2,h1,h3*p6,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3*p6,p5]
 * Copies t2 and v2 to device buffers, launches the kernel on nstreams
 * streams, and frees the temporaries.  The destination buffer t3_d is a
 * device pointer assumed to be a file-scope buffer managed by the caller
 * (its allocation here is intentionally commented out, and t3 is not copied
 * back by this routine) — NOTE(review): confirm t3_d lifecycle upstream. */
extern "C" void sd_t_d2_5_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
/* Fold p6 into h3: the kernel has no separate p6 index, so h3d becomes the
 * combined h3*p6 extent (v2's h3 stride covers both). */
h3d=h3d*p6d;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p5ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,p5ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
cudaStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
/* Element counts (in bytes) for each tensor; size_t3 feeds the disabled
 * t3 allocation below and is kept for consistency with sibling drivers. */
size_t3=h2d*h1d*h3d*p4d*p5d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p5d*sizeof(double);
/* The kernel is shared-memory bound (two large staging tiles). */
cudaFuncSetCacheConfig(sd_t_d2_5_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice));
/* Strides (leading dimensions): t2 is p7-major [p7,p4,h1,h2],
 * v2 is [p7,h3,p5], t3 is [h2,h1,h3,p4,p5]. */
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p5ld_v2=h3d*p7d;
h2ld_t3=1;
h1ld_t3=h2d;
h3ld_t3=h1d*h2d;
p4ld_t3=h3d*h1d*h2d;
p5ld_t3=p4d*h3d*h1d*h2d;
/* Output plane: total_x columns (h3*p5), total_y rows (p4*h1*h2);
 * each block covers a (4*T2)x(4*T1) patch. */
int total_x = h3d*p5d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d2_5_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p5ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
/* cudaThreadSynchronize() is deprecated/removed; cudaDeviceSynchronize()
 * is the drop-in replacement. */
cudaDeviceSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable shim: dereference the by-reference Integer dimensions,
 * narrow them to int, and forward everything to the C driver. */
extern "C" void sd_t_d2_5_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
  const int h1 = (int)*h1d, h2 = (int)*h2d, h3 = (int)*h3d;
  const int p4 = (int)*p4d, p5 = (int)*p5d, p6 = (int)*p6d, p7 = (int)*p7d;
  sd_t_d2_5_cuda(h1, h2, h3, p4, p5, p6, p7, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h3,h1,p6,p4,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Tiled tensor contraction over p7:
 *   t3[h2,h3,h1,p6,p4,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
 * Launch layout: 2-D thread block of (T2, T1) threads; each thread owns a
 * 4x4 register tile (tlocal1..16), so one block covers a (4*T2)x(4*T1)
 * patch of the output plane, where total_x = h3d*p6d*p5d columns and
 * total_y = p4d*h1d*h2d rows.  The *ld_* arguments are precomputed strides
 * for each tensor index; unused_idx is ignored.  Slices of t2 and v2 are
 * staged through shared memory Tcomm p7-values at a time, then contracted
 * as a 4x4 outer product per thread and SUBTRACTED from t3. */
__global__ void sd_t_d2_6_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int p5ld_v2,int h2ld_t3,int h3ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
/* Staging tiles: t2_shm holds 4*T1 output rows x Tcomm p7-values,
 * v2_shm holds Tcomm p7-values x 4*T2 output columns. */
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 register accumulator tile for this thread. */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decode the four (row, column) coordinate sets handled by this thread,
 * one per sub-tile offset k*T1 / k*T2 (k = 0..3): rows decompose into
 * (h2,h1,p4), columns into (h3,p6,p5). */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p6_0=rest_x%p6d;
rest_x=rest_x/p6d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p6_1=rest_x%p6d;
rest_x=rest_x/p6d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p6_2=rest_x%p6d;
rest_x=rest_x/p6d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p6_3=rest_x%p6d;
rest_x=rest_x/p6d;
p4_3=rest_y;
p5_3=rest_x;
/* March over the contraction index p7 in chunks of Tcomm. */
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
/* Cooperative load of the four t2 rows / v2 columns owned by this thread;
 * the thread_y/thread_x guards skip loads for out-of-range tile overhang. */
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
/* Barrier: tiles fully staged before any thread reads them. */
__syncthreads();
/* 4x4 outer-product accumulation over this chunk of p7. */
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier: all reads done before the next chunk overwrites the tiles. */
__syncthreads();
}
/* Subtract the register tile from t3; each column block is guarded on
 * thread_x, and the descending else-if chain stores only the rows that fall
 * inside total_y (tile overhang at the grid edge). */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]-=tlocal3;
t3d[h2_3*h2ld_t3+h3_0*h3ld_t3+h1_3*h1ld_t3+p6_0*p6ld_t3+p4_3*p4ld_t3+p5_0*p5ld_t3]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]-=tlocal7;
t3d[h2_3*h2ld_t3+h3_1*h3ld_t3+h1_3*h1ld_t3+p6_1*p6ld_t3+p4_3*p4ld_t3+p5_1*p5ld_t3]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]-=tlocal11;
t3d[h2_3*h2ld_t3+h3_2*h3ld_t3+h1_3*h1ld_t3+p6_2*p6ld_t3+p4_3*p4ld_t3+p5_2*p5ld_t3]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]-=tlocal15;
t3d[h2_3*h2ld_t3+h3_3*h3ld_t3+h1_3*h1ld_t3+p6_3*p6ld_t3+p4_3*p4ld_t3+p5_3*p5ld_t3]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]-=tlocal13;
}
}
__syncthreads();
}
/* Host driver for sd_t_d2_6_kernel:
 *   t3[h2,h3,h1,p6,p4,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
 * Copies t2 and v2 to device buffers, launches the kernel on nstreams
 * streams, and frees the temporaries.  The destination buffer t3_d is a
 * device pointer assumed to be a file-scope buffer managed by the caller
 * (its allocation here is intentionally commented out, and t3 is not copied
 * back by this routine) — NOTE(review): confirm t3_d lifecycle upstream. */
extern "C" void sd_t_d2_6_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,p5ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
cudaStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
/* Buffer sizes in bytes; size_t3 feeds the disabled t3 allocation below
 * and is kept for consistency with sibling drivers. */
size_t3=h2d*h3d*h1d*p6d*p4d*p5d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*p5d*sizeof(double);
/* The kernel is shared-memory bound (two large staging tiles). */
cudaFuncSetCacheConfig(sd_t_d2_6_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice));
/* Strides (leading dimensions): t2 is p7-major [p7,p4,h1,h2],
 * v2 is [p7,h3,p6,p5], t3 is [h2,h3,h1,p6,p4,p5]. */
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
p5ld_v2=p6d*h3d*p7d;
h2ld_t3=1;
h3ld_t3=h2d;
h1ld_t3=h3d*h2d;
p6ld_t3=h1d*h3d*h2d;
p4ld_t3=p6d*h1d*h3d*h2d;
p5ld_t3=p4d*p6d*h1d*h3d*h2d;
/* Output plane: total_x columns (h3*p6*p5), total_y rows (p4*h1*h2);
 * each block covers a (4*T2)x(4*T1) patch. */
int total_x = h3d*p6d*p5d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d2_6_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
/* Pass the message argument like the sibling drivers do (the bare
 * CHECK_ERR() call was inconsistent with every other call site). */
CHECK_ERR("Kernel execution failed");
}
/* cudaThreadSynchronize() is deprecated/removed; cudaDeviceSynchronize()
 * is the drop-in replacement. */
cudaDeviceSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable shim: dereference the by-reference Integer dimensions,
 * narrow them to int, and forward everything to the C driver. */
extern "C" void sd_t_d2_6_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
  const int h1 = (int)*h1d, h2 = (int)*h2d, h3 = (int)*h3d;
  const int p4 = (int)*p4d, p5 = (int)*p5d, p6 = (int)*p6d, p7 = (int)*p7d;
  sd_t_d2_6_cuda(h1, h2, h3, p4, p5, p6, p7, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Tiled tensor contraction over p7:
 *   t3[h3,h2,h1,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
 * Launch layout: 2-D thread block of (T2, T1) threads; each thread owns a
 * 4x4 register tile (tlocal1..16), so one block covers a (4*T2)x(4*T1)
 * patch of the output plane, where total_x = h3d*p6d*p5d columns and
 * total_y = p4d*h1d*h2d rows.  The *ld_* arguments are precomputed strides
 * for each tensor index; unused_idx is ignored.  Slices of t2 and v2 are
 * staged through shared memory Tcomm p7-values at a time, then contracted
 * as a 4x4 outer product per thread and SUBTRACTED from t3. */
__global__ void sd_t_d2_7_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p4ld_t3,int p6ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
/* Staging tiles: t2_shm holds 4*T1 output rows x Tcomm p7-values,
 * v2_shm holds Tcomm p7-values x 4*T2 output columns. */
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 4x4 register accumulator tile for this thread. */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decode the four (row, column) coordinate sets handled by this thread,
 * one per sub-tile offset k*T1 / k*T2 (k = 0..3): rows decompose into
 * (h2,h1,p4), columns into (h3,p6,p5). */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p6_0=rest_x%p6d;
rest_x=rest_x/p6d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p6_1=rest_x%p6d;
rest_x=rest_x/p6d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p6_2=rest_x%p6d;
rest_x=rest_x/p6d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p6_3=rest_x%p6d;
rest_x=rest_x/p6d;
p4_3=rest_y;
p5_3=rest_x;
/* March over the contraction index p7 in chunks of Tcomm. */
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
/* Cooperative load of the four t2 rows / v2 columns owned by this thread;
 * the thread_y/thread_x guards skip loads for out-of-range tile overhang. */
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
/* Barrier: tiles fully staged before any thread reads them. */
__syncthreads();
/* 4x4 outer-product accumulation over this chunk of p7. */
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* Barrier: all reads done before the next chunk overwrites the tiles. */
__syncthreads();
}
/* Subtract the register tile from t3; each column block is guarded on
 * thread_x, and the descending else-if chain stores only the rows that fall
 * inside total_y (tile overhang at the grid edge). */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal3;
t3d[h3_0*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal7;
t3d[h3_1*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal11;
t3d[h3_2*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal15;
t3d[h3_3*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
}
}
__syncthreads();
}
/* Host driver for sd_t_d2_7_kernel:
 *   t3[h3,h2,h1,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
 * Copies t2 and v2 to device buffers, launches the kernel on nstreams
 * streams, and frees the temporaries.  The destination buffer t3_d is a
 * device pointer assumed to be a file-scope buffer managed by the caller
 * (its allocation here is intentionally commented out, and t3 is not copied
 * back by this routine) — NOTE(review): confirm t3_d lifecycle upstream. */
extern "C" void sd_t_d2_7_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p4ld_t3,p6ld_t3,p5ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
cudaStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
/* Buffer sizes in bytes; size_t3 feeds the disabled t3 allocation below
 * and is kept for consistency with sibling drivers. */
size_t3=h3d*h2d*h1d*p4d*p6d*p5d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*p5d*sizeof(double);
/* The kernel is shared-memory bound (two large staging tiles). */
cudaFuncSetCacheConfig(sd_t_d2_7_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice));
/* Strides (leading dimensions): t2 is p7-major [p7,p4,h1,h2],
 * v2 is [p7,h3,p6,p5], t3 is [h3,h2,h1,p4,p6,p5]. */
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
p5ld_v2=p6d*h3d*p7d;
h3ld_t3=1;
h2ld_t3=h3d;
h1ld_t3=h2d*h3d;
p4ld_t3=h1d*h2d*h3d;
p6ld_t3=p4d*h1d*h2d*h3d;
p5ld_t3=p6d*p4d*h1d*h2d*h3d;
/* Output plane: total_x columns (h3*p6*p5), total_y rows (p4*h1*h2);
 * each block covers a (4*T2)x(4*T1) patch. */
int total_x = h3d*p6d*p5d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d2_7_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p4ld_t3,p6ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
/* cudaThreadSynchronize() is deprecated/removed; cudaDeviceSynchronize()
 * is the drop-in replacement. */
cudaDeviceSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable shim: dereference the by-reference Integer dimensions,
 * narrow them to int, and forward everything to the C driver. */
extern "C" void sd_t_d2_7_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
  const int h1 = (int)*h1d, h2 = (int)*h2d, h3 = (int)*h3d;
  const int p4 = (int)*p4d, p5 = (int)*p5d, p6 = (int)*p6d, p7 = (int)*p7d;
  sd_t_d2_7_cuda(h1, h2, h3, p4, p5, p6, p7, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h1,h3,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Device kernel for sd_t_d2_8:
 *   t3[h2,h1,h3,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
 * Each thread computes a 4x4 register tile of the output: the fused
 * y index (h2,h1,p4) and fused x index (h3,p6,p5) are each covered at four
 * offsets (0..3 times T1/T2).  The contraction index p7 is blocked in
 * chunks of Tcomm and staged through the two shared-memory tiles below.
 * Expects a T2 x T1 thread block; total_x/total_y are the fused extents.
 * NOTE(review): the output update uses "-=" on t3d, so t3d is assumed to
 * be pre-initialized and accumulated across kernel calls — confirm against
 * the host driver, which passes the persistent global t3_d. */
__global__ void sd_t_d2_8_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int p5ld_v2,int h2ld_t3,int h1ld_t3,int h3ld_t3,int p4ld_t3,int p6ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
/* shared staging tiles: t2 rows (4*T1 y-offsets x Tcomm p7) and
 * v2 columns (Tcomm p7 x 4*T2 x-offsets) */
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 16 register accumulators: tlocal[1+4j+k] pairs y-offset k with x-offset j */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decompose the four fused (x,y) offsets into tensor indices.
 * y order: h2 (fastest), h1, p4; x order: h3 (fastest), p6, p5. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
p6_0=rest_x%p6d;
rest_x=rest_x/p6d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
p6_1=rest_x%p6d;
rest_x=rest_x/p6d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
p6_2=rest_x%p6d;
rest_x=rest_x/p6d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
p6_3=rest_x%p6d;
rest_x=rest_x/p6d;
p4_3=rest_y;
p5_3=rest_x;
/* Loop over p7 in Tcomm-sized blocks; each block is staged into shared
 * memory (guarded so out-of-range tiles are not loaded), then contracted. */
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
/* barrier: shared tiles fully written before any thread reads them */
__syncthreads();
/* rank-1 updates over this p7 block: 4 t2 values x 4 v2 values -> 16 FMAs */
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* barrier: tiles fully consumed before the next p7 block overwrites them */
__syncthreads();
}
/* Write back the 16 accumulators.  For each in-range x offset, the
 * descending thread_y tests select how many of the 4 y offsets are in
 * range (4, 3, 2, or 1) and subtract the corresponding partials. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal3;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_0*h3ld_t3+p4_3*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal7;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_1*h3ld_t3+p4_3*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal11;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_2*h3ld_t3+p4_3*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal15;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_3*h3ld_t3+p4_3*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
}
}
__syncthreads();
}
/* Host driver for sd_t_d2_8:
 *   t3[h2,h1,h3,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
 * Uploads t2 and v2 to device buffers, computes the column-major strides
 * of all three tensors, and launches the tiled contraction kernel on
 * nstreams streams (currently 1).  The result is accumulated into the
 * persistent global device tensor t3_d (allocated elsewhere); the host
 * t3 argument is NOT read or written here — presumably copied back by a
 * separate routine (TODO confirm against callers).
 * size_block_t3/size_el_block_t3 are retained only for the commented-out
 * per-stream t3 chunking. */
extern "C" void sd_t_d2_8_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,p6ld_t3,p5ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
cudaStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h2d*h1d*h3d*p4d*p6d*p5d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*p5d*sizeof(double);
/* kernel is shared-memory bound: prefer shared over L1 */
cudaFuncSetCacheConfig(sd_t_d2_8_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice));
/* strides: first listed index is fastest-varying (Fortran order) */
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
p5ld_v2=p6d*h3d*p7d;
h2ld_t3=1;
h1ld_t3=h2d;
h3ld_t3=h1d*h2d;
p4ld_t3=h3d*h1d*h2d;
p6ld_t3=p4d*h3d*h1d*h2d;
p5ld_t3=p6d*p4d*h3d*h1d*h2d;
int total_x = h3d*p6d*p5d;
int total_y = p4d*h1d*h2d;
/* each thread covers a 4x4 tile, hence the 4*T factors in the grid */
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d2_8_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,p6ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
/* FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
 * is the supported equivalent. */
cudaDeviceSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable binding for sd_t_d2_8_cuda: dereferences the Integer
 * dimension pointers, narrows them to int, and forwards to the C driver. */
extern "C" void sd_t_d2_8_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
  const int h1 = (int)*h1d, h2 = (int)*h2d, h3 = (int)*h3d;
  const int p4 = (int)*p4d, p5 = (int)*p5d, p6 = (int)*p6d, p7 = (int)*p7d;
  sd_t_d2_8_cuda(h1, h2, h3, p4, p5, p6, p7, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h3,h1,p4,p6,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Device kernel for sd_t_d2_9:
 *   t3[h2,h3,h1,p4,p6,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
 * Same register-tiled structure as sd_t_d2_8_kernel, with two differences:
 * the t3 index order is (h2,h3,h1,...) and the update sign is "+=".
 * Each thread computes a 4x4 tile of the fused (x,y) output space, with
 * the p7 contraction blocked by Tcomm through shared memory.
 * Expects a T2 x T1 thread block. */
__global__ void sd_t_d2_9_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int p5ld_v2,int h2ld_t3,int h3ld_t3,int h1ld_t3,int p4ld_t3,int p6ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
/* shared staging tiles for the current p7 block */
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
/* 16 register accumulators for the 4x4 output tile */
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
/* Decompose the four fused (x,y) offsets into tensor indices.
 * y order: h2 (fastest), h1, p4; x order: h3 (fastest), p6, p5. */
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p6_0=rest_x%p6d;
rest_x=rest_x/p6d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p6_1=rest_x%p6d;
rest_x=rest_x/p6d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p6_2=rest_x%p6d;
rest_x=rest_x/p6d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p6_3=rest_x%p6d;
rest_x=rest_x/p6d;
p4_3=rest_y;
p5_3=rest_x;
/* Loop over p7 in Tcomm-sized blocks; stage each block into shared
 * memory (guarded against out-of-range tiles), then contract. */
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
/* barrier: shared tiles fully written before any thread reads them */
__syncthreads();
/* rank-1 updates over this p7 block: 4 t2 values x 4 v2 values -> 16 FMAs */
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
/* barrier: tiles fully consumed before the next p7 block overwrites them */
__syncthreads();
}
/* Write back the 16 accumulators; the descending thread_y tests select
 * how many of the 4 y offsets are in range for each in-range x offset. */
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal3;
t3d[h2_3*h2ld_t3+h3_0*h3ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal7;
t3d[h2_3*h2ld_t3+h3_1*h3ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal11;
t3d[h2_3*h2ld_t3+h3_2*h3ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal15;
t3d[h2_3*h2ld_t3+h3_3*h3ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal13;
}
}
__syncthreads();
}
/* Host driver for sd_t_d2_9:
 *   t3[h2,h3,h1,p4,p6,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
 * Mirrors sd_t_d2_8_cuda with the t3 stride order swapped to (h2,h3,h1).
 * Accumulates into the persistent global device tensor t3_d; the host t3
 * argument is not touched here (copied back elsewhere — TODO confirm).
 * size_block_t3/size_el_block_t3 are retained only for the commented-out
 * per-stream t3 chunking. */
extern "C" void sd_t_d2_9_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p4ld_t3,p6ld_t3,p5ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
cudaStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h2d*h3d*h1d*p4d*p6d*p5d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*p5d*sizeof(double);
/* kernel is shared-memory bound: prefer shared over L1 */
cudaFuncSetCacheConfig(sd_t_d2_9_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice));
/* strides: first listed index is fastest-varying (Fortran order) */
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
p5ld_v2=p6d*h3d*p7d;
h2ld_t3=1;
h3ld_t3=h2d;
h1ld_t3=h3d*h2d;
p4ld_t3=h1d*h3d*h2d;
p6ld_t3=p4d*h1d*h3d*h2d;
p5ld_t3=p6d*p4d*h1d*h3d*h2d;
int total_x = h3d*p6d*p5d;
int total_y = p4d*h1d*h2d;
/* each thread covers a 4x4 tile, hence the 4*T factors in the grid */
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d2_9_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p4ld_t3,p6ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
/* FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
 * is the supported equivalent. */
cudaDeviceSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
/* Fortran-callable binding for sd_t_d2_9_cuda: dereferences the Integer
 * dimension pointers, narrows them to int, and forwards to the C driver. */
extern "C" void sd_t_d2_9_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
  const int h1 = (int)*h1d, h2 = (int)*h2d, h3 = (int)*h3d;
  const int p4 = (int)*p4d, p5 = (int)*p5d, p6 = (int)*p6d, p7 = (int)*p7d;
  sd_t_d2_9_cuda(h1, h2, h3, p4, p5, p6, p7, t3, t2, v2);
}
#define MAX_h3 64
/* IMPORTANT!!!!
t3_d must be passed as parameter to kernel function. A __global__ function can't access the global variable directly*/
/* Reduces the doubles tensor t3d and singles tensor t3_sd into two
 * perturbative-energy partial sums, one pair per block:
 *   energy[blockIdx.x]            = sum of factor * t3d^2 / denom
 *   energy[blockIdx.x+gridDim.x]  = sum of factor * t3d*(t3_sd+t3d) / denom
 * where denom = e(h1)+e(h2)+e(h3)-e(p4)-e(p5)-e(p6).
 * Each block serially walks T2*T1 fused (h2,h1,p6,p5,p4) indices and the
 * inner h3 dimension.  NOTE(review): only energy_s[0]/energy2_s[0] are
 * written back, and the shared accumulators are indexed by threadIdx.x, so
 * this kernel is only correct for blockDim.x == 1 — which is exactly how
 * the host launches it (dimBlock(1)).  Confirm before changing the launch. */
__global__ void compute_energy_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,double* eval1,double* eval2,double* eval3,double* eval4,double* eval5,double* eval6, double* energy, double factor, int total_size, double* t3d, double* t3_sd)
{
int h1,h2,p6,p4,p5, h3,i=0;
double e1,e2,e4,e5,e6;
// __shared__ double t2_shm[MAX_h3];
__shared__ double energy_s[T1];
__shared__ double energy2_s[T1];
double inner_fac;
int limit;
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
/* zero this block's output slots and shared accumulators */
if(threadIdx.x==0)
{
energy[blockIdx.x]=0;
energy[blockIdx.x+gridDim.x]=0;
energy_s[threadIdx.x] = 0.0;
energy2_s[threadIdx.x] = 0.0;
}
/* walk the T2*T1 fused output indices assigned to this block */
for(int j =0; j<T2*T1;j++) {
thread_x = T2*T1*blockIdx.x + j;
rest_x = thread_x;
__syncthreads();
/* decompose fused index: h2 fastest, then h1, p6, p5, p4 */
h2=rest_x%h2d;
rest_x=rest_x/h2d;
h1=rest_x%h1d;
rest_x=rest_x/h1d;
p6=rest_x%p6d;
rest_x=rest_x/p6d;
p5=rest_x%p5d;
rest_x=rest_x/p5d;
p4=rest_x%p4d;
e1 = eval1[h1];
e2 = eval2[h2];
e4 = eval4[p4];
e5 = eval5[p5];
e6 = eval6[p6];
/*
for(p4=0;p4<p4d;p4++)
for(p5 = 0;p5<p5d;p5++)
for(p6=0;p6<p6d;p6++)
for(h1= 0;h1<h1d;h1++)
for(h2=0;h2<h2d;h2++)
for(h3=0;h3<h3d;h3++) {
inner_fac = -eval4[p4]-eval5[p5]-eval6[p6]+eval1[h1]
+eval2[h2]+eval3[h3];
energy_s[0]+=factor*t3d[i]*t3d[i]/inner_fac;
energy2_s[0]+=factor*t3d[i]*(t3_sd[i]+t3d[i])/inner_fac;
i++;
}
*/
/* accumulate over the innermost h3 dimension (contiguous in t3d) */
if(thread_x<total_size)
for(int i=0;i<h3d;i++)
{
inner_fac = -e4-e5-e6+e1+e2+eval3[i]; //t2_shm[i];
//ckbn avoid e1 in case we need just (T)
energy_s[threadIdx.x] += factor* t3d[thread_x*h3d+i]*t3d[thread_x*h3d+i]/inner_fac;
energy2_s[threadIdx.x] += factor* t3d[thread_x*h3d+i]*(t3_sd[thread_x*h3d+i]+t3d[thread_x*h3d+i])/inner_fac;
}
__syncthreads();
}
/* publish this block's two partial sums (thread 0's accumulators only) */
if(threadIdx.x==0)
{
/* limit = blockDim.x;
if (blockIdx.x == (gridDim.x-1)) limit = total_size%blockDim.x;
for(int i=0;i<limit;i++)
{
energy[blockIdx.x]+=energy_s[i];
energy[blockIdx.x+gridDim.x]+=energy2_s[i];
}
*/
energy[blockIdx.x] = energy_s[0];
energy[blockIdx.x+gridDim.x] = energy2_s[0];
}
__syncthreads();
}
/* Host driver for the (T) energy reduction: uploads the six eigenvalue
 * arrays, launches compute_energy_kernel over the persistent device
 * tensors t3_d (doubles) and t3_s_d (singles), sums the per-block partial
 * results on the host, and returns energy[0] (doubles) and energy[1]
 * (doubles+singles).  host1/host2 are currently unused (kept for the
 * commented-out debug path).
 * FIXES vs. original:
 *  - removed the h3d*total_elements-sized t3/ts3 host buffers, which were
 *    malloc'd, never used (only by commented-out debug code), and never
 *    freed — a memory leak on every call;
 *  - deprecated cudaThreadSynchronize() replaced by cudaDeviceSynchronize(). */
extern "C" void compute_energy(double factor, double* energy, double* eval1, double* eval2,double* eval3,double* eval4,double* eval5,double* eval6,int h1d, int h2d, int h3d, int p4d, int p5d,int p6d, double* host1, double* host2)
//ckbn en_comment, double* total_d, double* total_s)
{
double* energy_d, *energy_h;
double* eval_d1,*eval_d2,*eval_d3,*eval_d4,*eval_d5,*eval_d6;
int size_energy = 2*sizeof(double);
/* one (energy, energy2) pair per block; each block handles T2*T1 fused indices */
int total_block = DIV_UB((h1d*h2d*p4d*p5d*p6d), (T2*T1));
energy_d = (double*)getGpuMem(size_energy*total_block*2);
energy_h = (double*)getHostMem(size_energy*2*total_block);
eval_d1 = (double*)getGpuMem(h1d*sizeof(double));
eval_d2 = (double*)getGpuMem(h2d*sizeof(double));
eval_d3 = (double*)getGpuMem(h3d*sizeof(double));
eval_d4 = (double*)getGpuMem(p4d*sizeof(double));
eval_d5 = (double*)getGpuMem(p5d*sizeof(double));
eval_d6 = (double*)getGpuMem(p6d*sizeof(double));
CUDA_SAFE(cudaMemcpy(eval_d1, eval1, h1d*sizeof(double), cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(eval_d2, eval2, h2d*sizeof(double), cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(eval_d3, eval3, h3d*sizeof(double), cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(eval_d4, eval4, p4d*sizeof(double), cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(eval_d5, eval5, p5d*sizeof(double), cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(eval_d6, eval6, p6d*sizeof(double), cudaMemcpyHostToDevice));
/* the kernel's shared-memory reduction assumes one thread per block */
dim3 dimBlock(1); //T2*T1);
dim3 dimGrid(total_block);
compute_energy_kernel<<<dimGrid,dimBlock,0>>>(h1d,h2d,h3d,p4d,p5d,p6d, eval_d1,eval_d2,eval_d3,eval_d4,eval_d5,eval_d6,energy_d, factor, h1d*h2d*p4d*p5d*p6d, t3_d, t3_s_d);
cudaDeviceSynchronize();
//CHECK_ERR("Kernel execution failed");
CUDA_SAFE(cudaMemcpy(((char *) energy_h) , ((char *) energy_d) ,
size_energy*total_block*2, cudaMemcpyDeviceToHost));
/* fold all per-block partials into slot 0 of each half */
for(int i=1;i<dimGrid.x;i++)
{
energy_h[0]+=energy_h[i];
energy_h[dimGrid.x]+=energy_h[i+dimGrid.x];
}
energy[0] = energy_h[0];
energy[1] = energy_h[dimGrid.x];
freeGpuMem(energy_d);
freeGpuMem(eval_d1);
freeGpuMem(eval_d2);
freeGpuMem(eval_d3);
freeGpuMem(eval_d4);
freeGpuMem(eval_d5);
freeGpuMem(eval_d6);
freeHostMem(energy_h);
}
/* Fortran-callable binding for compute_energy: dereferences the scalar
 * factor and the Integer dimension pointers and forwards to the C driver. */
extern "C" void
compute_en_(double * factor, double * energy, double * eval1,double* eval2,double* eval3,double* eval4,double* eval5,double* eval6, Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double* host1, double* host2)
//ckbn en_comment,double* total_d, double* total_s)
{
  const double f = (double)*factor;
  const int h1 = (int)*h1d, h2 = (int)*h2d, h3 = (int)*h3d;
  const int p4 = (int)*p4d, p5 = (int)*p5d, p6 = (int)*p6d;
  compute_energy(f, energy, eval1, eval2, eval3, eval4, eval5, eval6,
                 h1, h2, h3, p4, p5, p6, host1, host2);
}
//__device__ double* t3_d;
/* Allocates the persistent singles tensor t3_s_d on the device, sized
 * h1d*h2d*h3d*p4d*p5d*p6d doubles, and zero-fills it.  t3_s_d is a
 * file-scope device pointer consumed by the sd_t_s1_* kernels and the
 * energy reduction; it is not freed here. */
extern "C" void set_dev_mem_s(int h1d, int h2d, int h3d, int p4d, int p5d,int p6d)
{
/* FIX: use size_t for the element count — the six-dimension product can
 * overflow a 32-bit int for realistic tile sizes. */
size_t size_t3;
size_t3 = (size_t)h1d*h2d*h3d*p4d*p5d*p6d;
t3_s_d = (double *) getGpuMem(size_t3*sizeof(double));
/* FIX: check the memset result instead of ignoring it */
CUDA_SAFE(cudaMemset(t3_s_d,0,size_t3*sizeof(double)));
}
/* Fortran-callable binding for set_dev_mem_s: dereferences the Integer
 * dimension pointers, narrows them to int, and forwards. */
extern "C" void
dev_mem_s_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d)
{
  const int h1 = (int)*h1d, h2 = (int)*h2d, h3 = (int)*h3d;
  const int p4 = (int)*p4d, p5 = (int)*p5d, p6 = (int)*p6d;
  set_dev_mem_s(h1, h2, h3, p4, p5, p6);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p6,p5,p4] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Device kernel for sd_t_s1_1:
 *   t3[h3,h2,h1,p6,p5,p4] += t2[p4,h1] * v2[h3,h2,p6,p5]
 * (p5 and p6 arrive fused: the host passes p5d*p6d as p6d.)
 * The whole t2[p4,h1] block is cached in shared memory; each thread owns
 * one fused (h3,h2,p6) output index and loops over h1 and p4.
 * Requires h1d*p4d <= T1*2*Tcomm (shared-memory capacity of t2_shm).
 * BUG FIX: the original advanced the grid-stride loop by doing
 * "rest_x += i" on a variable that had already been destroyed by the
 * %/ decomposition, so any iteration after the first used garbage
 * indices.  The index is now recomputed from thread_x + i each pass;
 * the first-iteration behavior (the only path exercised by the current
 * launch configuration, where the grid covers total_x) is unchanged. */
__global__ void sd_t_s1_1_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3, double *t2_d, double *v2_d,int p4, int total_x, double* t3d) {
  int h1,h2,h3,p6;
  __shared__ double t2_shm[T1*2*Tcomm];
  /* cooperatively stage all of t2[p4,h1] into shared memory */
  for(int j=threadIdx.x;j<h1d*p4d;j+=blockDim.x)
    t2_shm[j] = t2_d[j];
  int thread_x = T2*T1 * blockIdx.x + threadIdx.x;
  __syncthreads();
  /* grid-stride over the fused h3*h2*(p6*p5) output space */
  for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
  {
    int idx = thread_x + i;
    if(idx < total_x)
    {
      /* decompose fused index: h3 fastest, then h2, then fused p6 */
      h3=idx%h3d;
      idx=idx/h3d;
      h2=idx%h2d;
      idx=idx/h2d;
      p6=idx%p6d;
      for(h1=0;h1<h1d;h1++)
        for(p4=0;p4<p4d;p4++)
        {
          t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2];
        }
    }
  }
}
/* Host driver for sd_t_s1_1:
 *   t3[h3,h2,h1,p6,p5,p4] += t2[p4,h1] * v2[h3,h2,p6,p5]
 * Uploads t2 and v2, launches the kernel with p5 and p6 fused into one
 * dimension (p5d*p6d passed as p6d), and accumulates into the persistent
 * singles device tensor t3_s_d set up by set_dev_mem_s; the host t3
 * argument is not touched here (read back elsewhere — TODO confirm).
 * FIXES vs. original: deprecated cudaThreadSynchronize() replaced by
 * cudaDeviceSynchronize(); removed the unused st/et timers and the
 * full-t3-sized pinned host buffer t3_p that was allocated and freed
 * without ever being used (it served only commented-out copy-back code). */
extern "C" void
sd_t_s1_1_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
size_t p4ld_t2, h1ld_t2, h2ld_v2, h3ld_v2, p6ld_v2,
h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p4ld_t3;
size_t size_t2, size_v2;
cudaStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d;
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i]));
}
CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice));
/* strides: first listed index is fastest-varying (Fortran order);
 * the v2/t3 p5 dimension is folded into p6 for the kernel */
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
h3ld_t3 = 1;
h2ld_t3 = h3d;
h1ld_t3 = h2d * h3d;
p6ld_t3 = h1d * h2d * h3d;
p4ld_t3 = p5d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
sd_t_s1_1_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,i,total_x, t3_s_d);
CHECK_ERR("Kernel execution failed");
}
/* FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
 * is the supported equivalent. */
cudaDeviceSynchronize();
for (i = 0; i < nstreams; ++i) {
cudaStreamDestroy(streams[i]);
}
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable binding for sd_t_s1_1_cuda: dereferences the Integer
 * dimension pointers, narrows them to int, and forwards to the C driver. */
extern "C" void
sd_t_s1_1_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
  const int h1 = (int)*h1d, h2 = (int)*h2d, h3 = (int)*h3d;
  const int p4 = (int)*p4d, p5 = (int)*p5d, p6 = (int)*p6d;
  sd_t_s1_1_cuda(h1, h2, h3, p4, p5, p6, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h1,h2,p6,p5,p4] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Device kernel for sd_t_s1_2:
 *   t3[h3,h1,h2,p6,p5,p4] -= t2[p4,h1] * v2[h3,h2,p6,p5]
 * Identical structure to sd_t_s1_1_kernel except the update sign is "-="
 * (the h1/h2 permutation is expressed through the stride arguments).
 * Requires h1d*p4d <= T1*2*Tcomm (shared-memory capacity of t2_shm).
 * BUG FIX: same grid-stride defect as sd_t_s1_1_kernel — the original
 * accumulated the stride into the already-decomposed residue
 * ("rest_x += i" after the %/ divisions), yielding wrong indices on any
 * iteration after the first.  The index is now recomputed from
 * thread_x + i; first-iteration behavior is unchanged. */
__global__ void sd_t_s1_2_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t2_d, double *v2_d,int p4, int total_x, double* t3d) {
  int h1,h2,h3,p6;
  __shared__ double t2_shm[T1*2*Tcomm];
  /* cooperatively stage all of t2[p4,h1] into shared memory */
  for(int j=threadIdx.x;j<h1d*p4d;j+=blockDim.x)
    t2_shm[j] = t2_d[j];
  int thread_x = T2*T1 * blockIdx.x + threadIdx.x;
  __syncthreads();
  /* grid-stride over the fused h3*h2*(p6*p5) output space */
  for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
  {
    int idx = thread_x + i;
    if(idx < total_x)
    {
      /* decompose fused index: h3 fastest, then h2, then fused p6 */
      h3=idx%h3d;
      idx=idx/h3d;
      h2=idx%h2d;
      idx=idx/h2d;
      p6=idx%p6d;
      for(h1=0;h1<h1d;h1++)
        for(p4=0;p4<p4d;p4++)
        {
          t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2];
        }
    }
  }
}
/*----------------------------------------------------------------------*
 * t3[h3,h1,h2,p6,p5,p4] -= t2[p4,h1] * v2[h3,h2,p6,p5]
 * Host driver: copies t2/v2 to the GPU, launches sd_t_s1_2_kernel against
 * the file-scope device accumulator t3_s_d, and synchronizes.
 * Fixes vs. previous revision: `i` was passed to the kernel uninitialized
 * (the stream loop that used to set it is gone), and the deprecated
 * cudaThreadSynchronize() is replaced by cudaDeviceSynchronize().
 *----------------------------------------------------------------------*/
extern "C" void
sd_t_s1_2_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
size_t p4ld_t2, h1ld_t2, h3ld_v2, h2ld_v2, p6ld_v2,
h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p4ld_t3;
size_t size_t3, size_t2, size_v2;
cudaStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
/* buffer sizes in bytes */
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);   /* kept for the (disabled) async copy-back path */
streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice));
/* leading dimensions (strides) of each tensor index */
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
h3ld_t3 = 1;
h1ld_t3 = h3d;
h2ld_t3 = h1d * h3d;
p6ld_t3 = h1d * h2d * h3d;
p4ld_t3 = p5d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;   /* one thread per v2 element; p5/p6 merged */
dim3 dimBlock(T2*T1);
dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
i = 0;   /* bug fix: was passed to the kernel uninitialized */
sd_t_s1_2_kernel<<<dimGrid,dimBlock,0>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,i,total_x, t3_s_d);
CHECK_ERR("Kernel execution failed");
/* cudaThreadSynchronize() is deprecated (removed in CUDA 12) */
CUDA_SAFE(cudaDeviceSynchronize());
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
/* Fortran-callable wrapper: dereferences the Integer* scalar arguments and
 * forwards them as plain ints to sd_t_s1_2_cuda(). */
extern "C" void
sd_t_s1_2_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
int ih1 = (int) *h1d;
int ih2 = (int) *h2d;
int ih3 = (int) *h3d;
int ip4 = (int) *p4d;
int ip5 = (int) *p5d;
int ip6 = (int) *p6d;
sd_t_s1_2_cuda(ih1, ih2, ih3, ip4, ip5, ip6, t3, t2, v2);
}
/*----------------------------------------------------------------------*
 * t3[h1,h3,h2,p6,p5,p4] -= t2[p4,h1] * v2[h3,h2,p6,p5]
 * (permutation follows from the leading-dimension assignments below;
 * reuses sd_t_s1_1_kernel, which only differs in the strides it is given).
 * Fix vs. previous revision: deprecated cudaThreadSynchronize() replaced
 * by cudaDeviceSynchronize().
 *----------------------------------------------------------------------*/
extern "C" void
sd_t_s1_3_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
size_t p4ld_t2, h1ld_t2, h3ld_v2, h2ld_v2, p6ld_v2,
h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p4ld_t3;
size_t size_t3, size_t2, size_v2;
cudaStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
/* buffer sizes in bytes */
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);   /* kept for the (disabled) async copy-back path */
streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i]));
}
CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice));
/* leading dimensions (strides) of each tensor index */
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
h1ld_t3 = 1;
h3ld_t3 = h1d;
h2ld_t3 = h1d * h3d;
p6ld_t3 = h1d * h2d * h3d;
p4ld_t3 = p5d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;   /* one thread per v2 element; p5/p6 merged */
dim3 dimBlock(T2*T1);
dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
sd_t_s1_1_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,i,total_x, t3_s_d);
CHECK_ERR("Kernel execution failed");
}
/* cudaThreadSynchronize() is deprecated (removed in CUDA 12) */
CUDA_SAFE(cudaDeviceSynchronize());
for (i = 0; i < nstreams; ++i) {
cudaStreamDestroy(streams[i]);
}
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable wrapper: dereferences the Integer* scalar arguments and
 * forwards them as plain ints to sd_t_s1_3_cuda(). */
extern "C" void
sd_t_s1_3_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
int ih1 = (int) *h1d;
int ih2 = (int) *h2d;
int ih3 = (int) *h3d;
int ip4 = (int) *p4d;
int ip5 = (int) *p5d;
int ip6 = (int) *p6d;
sd_t_s1_3_cuda(ih1, ih2, ih3, ip4, ip5, ip6, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Singles contribution t3[h3,h2,h1,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5].
 * One thread per flat (h3,h2,p6,p5) element of v2; each thread loops over
 * h1 and p4.  The *ld_* arguments are the strides of each index; t3d is the
 * device-resident accumulator.
 * NOTE(review): t2_shm has T1*2*Tcomm = 512 doubles but h1d*p4d elements
 * are staged into it -- assumes h1d*p4d <= 512; confirm at call sites.
 * NOTE(review): parameter p4 is clobbered by the inner loop. */
__global__ void sd_t_s1_4_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6,p5;
__shared__ double t2_shm[T1*2*Tcomm];
/* Cooperatively stage all of t2 (h1d*p4d doubles) into shared memory. */
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;  /* flat global thread id */
rest_x = thread_x;
__syncthreads();  /* t2_shm must be fully written before reads */
/* NOTE(review): rest_x is consumed by the divisions below, so the decode is
 * only correct on the first pass; the launch grid covers total_x in one
 * iteration -- confirm before reusing with a smaller grid. */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;   /* decode flat index: h3 fastest, then h2, p6, p5 */
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
rest_x=rest_x/p6d;
p5=rest_x%p5d;
if((thread_x+i)<total_x)  /* guard the ragged tail of the last block */
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2];
}
}
__syncthreads();
}
/*----------------------------------------------------------------------*
 * t3[h3,h2,h1,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
 * Host driver: copies t2/v2 to the GPU, launches sd_t_s1_4_kernel against
 * the file-scope device accumulator t3_s_d, and synchronizes.
 * Fix vs. previous revision: deprecated cudaThreadSynchronize() replaced
 * by cudaDeviceSynchronize().
 *----------------------------------------------------------------------*/
extern "C" void
sd_t_s1_4_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
size_t p4ld_t2, h1ld_t2, h3ld_v2, h2ld_v2, p6ld_v2, p5ld_v2,
h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3;
size_t size_t3, size_t2, size_v2;
cudaStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
/* buffer sizes in bytes */
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);   /* kept for the (disabled) copy-back path */
streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice));
/* leading dimensions (strides) of each tensor index */
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
p5ld_v2 = p6d * h3d * h2d;
h3ld_t3 = 1;
h2ld_t3 = h3d;
h1ld_t3 = h2d * h3d;
p6ld_t3 = h1d * h2d * h3d;
p4ld_t3 = p6d * h1d * h2d * h3d;
p5ld_t3 = p4d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;   /* one thread per v2 element */
dim3 dimBlock(T2*T1);
dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
i = 0;   /* forwarded as the kernel's (unused) p4 argument */
sd_t_s1_4_kernel<<<dimGrid,dimBlock,0>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
/* cudaThreadSynchronize() is deprecated (removed in CUDA 12) */
CUDA_SAFE(cudaDeviceSynchronize());
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable wrapper: dereferences the Integer* scalar arguments and
 * forwards them as plain ints to sd_t_s1_4_cuda(). */
extern "C" void
sd_t_s1_4_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
int ih1 = (int) *h1d;
int ih2 = (int) *h2d;
int ih3 = (int) *h3d;
int ip4 = (int) *p4d;
int ip5 = (int) *p5d;
int ip6 = (int) *p6d;
sd_t_s1_4_cuda(ih1, ih2, ih3, ip4, ip5, ip6, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h1,h2,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Singles contribution for the t3[h3,h1,h2,p6,p4,p5] permutation with
 * v2[h3,h2,p6,p5].  One thread per flat (h3,h2,p6,p5) element; loops over
 * h1 and p4.  The *ld_* arguments are the strides of each index.
 * NOTE(review): the section comment above says "-=", but this kernel
 * accumulates with "+=" -- the sign may be folded in by the caller; verify
 * against the TCE equations before changing.
 * NOTE(review): t2_shm has 512 doubles but h1d*p4d elements are staged --
 * assumes h1d*p4d <= 512.  Parameter p4 is clobbered by the inner loop. */
__global__ void sd_t_s1_5_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6,p5;
__shared__ double t2_shm[T1*2*Tcomm];
/* Cooperatively stage all of t2 (h1d*p4d doubles) into shared memory. */
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;  /* flat global thread id */
rest_x = thread_x;
__syncthreads();  /* t2_shm must be fully written before reads */
/* NOTE(review): rest_x decode is only valid on the first pass; the launch
 * grid covers total_x in one iteration -- confirm before shrinking it. */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;   /* decode flat index: h3 fastest, then h2, p6, p5 */
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
rest_x=rest_x/p6d;
p5=rest_x%p5d;
if((thread_x+i)<total_x)  /* guard the ragged tail of the last block */
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2];
}
}
__syncthreads();
}
/*----------------------------------------------------------------------*
 * t3[h3,h1,h2,p6,p4,p5] update from t2[p4,h1] * v2[h3,h2,p6,p5]
 * (sd_t_s1_5_kernel accumulates with "+=").
 * Host driver: copies t2/v2 to the GPU, launches the kernel per stream
 * against the file-scope accumulator t3_s_d, and synchronizes.
 * Fix vs. previous revision: deprecated cudaThreadSynchronize() replaced
 * by cudaDeviceSynchronize().
 *----------------------------------------------------------------------*/
extern "C" void
sd_t_s1_5_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
size_t p4ld_t2, h1ld_t2, h3ld_v2, h2ld_v2, p6ld_v2, p5ld_v2,
h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3;
size_t size_t3, size_t2, size_v2;
cudaStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
/* buffer sizes in bytes */
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);   /* kept for the (disabled) copy-back path */
streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i]));
}
CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice));
/* leading dimensions (strides) of each tensor index */
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
p5ld_v2 = p6d * h3d * h2d;
h3ld_t3 = 1;
h1ld_t3 = h3d;
h2ld_t3 = h1d * h3d;
p6ld_t3 = h1d * h2d * h3d;
p4ld_t3 = p6d * h1d * h2d * h3d;
p5ld_t3 = p4d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;   /* one thread per v2 element */
dim3 dimBlock(T2*T1);
dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
sd_t_s1_5_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
}
/* cudaThreadSynchronize() is deprecated (removed in CUDA 12) */
CUDA_SAFE(cudaDeviceSynchronize());
for (i = 0; i < nstreams; ++i) {
cudaStreamDestroy(streams[i]);
}
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable wrapper: dereferences the Integer* scalar arguments and
 * forwards them as plain ints to sd_t_s1_5_cuda(). */
extern "C" void
sd_t_s1_5_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
int ih1 = (int) *h1d;
int ih2 = (int) *h2d;
int ih3 = (int) *h3d;
int ip4 = (int) *p4d;
int ip5 = (int) *p5d;
int ip6 = (int) *p6d;
sd_t_s1_5_cuda(ih1, ih2, ih3, ip4, ip5, ip6, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h1,h3,h2,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Singles contribution t3[h1,h3,h2,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5].
 * One thread per flat (h3,h2,p6,p5) element of v2; loops over h1 and p4.
 * The *ld_* arguments are the strides of each index; t3d is the
 * device-resident accumulator.
 * NOTE(review): t2_shm has 512 doubles but h1d*p4d elements are staged --
 * assumes h1d*p4d <= 512.  Parameter p4 is clobbered by the inner loop. */
__global__ void sd_t_s1_6_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6,p5;
__shared__ double t2_shm[T1*2*Tcomm];
/* Cooperatively stage all of t2 (h1d*p4d doubles) into shared memory. */
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;  /* flat global thread id */
rest_x = thread_x;
__syncthreads();  /* t2_shm must be fully written before reads */
/* NOTE(review): rest_x decode is only valid on the first pass; the launch
 * grid covers total_x in one iteration -- confirm before shrinking it. */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;   /* decode flat index: h3 fastest, then h2, p6, p5 */
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
rest_x=rest_x/p6d;
p5=rest_x%p5d;
if((thread_x+i)<total_x)  /* guard the ragged tail of the last block */
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2];
}
}
__syncthreads();
}
/*----------------------------------------------------------------------*
 * t3[h1,h3,h2,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
 * Host driver: copies t2/v2 to the GPU, launches sd_t_s1_6_kernel per
 * stream against the file-scope accumulator t3_s_d, and synchronizes.
 * Fix vs. previous revision: deprecated cudaThreadSynchronize() replaced
 * by cudaDeviceSynchronize().
 *----------------------------------------------------------------------*/
extern "C" void
sd_t_s1_6_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
size_t p4ld_t2, h1ld_t2, h3ld_v2, h2ld_v2, p6ld_v2, p5ld_v2,
h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3;
size_t size_t3, size_t2, size_v2;
cudaStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
/* buffer sizes in bytes */
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);   /* kept for the (disabled) copy-back path */
streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i]));
}
CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice));
/* leading dimensions (strides) of each tensor index */
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
p5ld_v2 = p6d * h3d * h2d;
h1ld_t3 = 1;
h3ld_t3 = h1d;
h2ld_t3 = h1d * h3d;
p6ld_t3 = h1d * h2d * h3d;
p4ld_t3 = p6d * h1d * h2d * h3d;
p5ld_t3 = p4d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;   /* one thread per v2 element */
dim3 dimBlock(T2*T1);
dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
sd_t_s1_6_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
}
/* cudaThreadSynchronize() is deprecated (removed in CUDA 12) */
CUDA_SAFE(cudaDeviceSynchronize());
for (i = 0; i < nstreams; ++i) {
cudaStreamDestroy(streams[i]);
}
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable wrapper: dereferences the Integer* scalar arguments and
 * forwards them as plain ints to sd_t_s1_6_cuda(). */
extern "C" void
sd_t_s1_6_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
int ih1 = (int) *h1d;
int ih2 = (int) *h2d;
int ih3 = (int) *h3d;
int ip4 = (int) *p4d;
int ip5 = (int) *p5d;
int ip6 = (int) *p6d;
sd_t_s1_6_cuda(ih1, ih2, ih3, ip4, ip5, ip6, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/* Singles contribution for the t3[h3,h2,h1,p4,p6,p5] permutation with
 * v2[h3,h2,p6,p5].  The host passes p5d*p6d merged as p6d; one thread per
 * flat (h3,h2,p6) element, looping over h1 and p4.
 * NOTE(review): the section comment above says "-=", but this kernel
 * accumulates with "+=" -- the sign may be folded in by the caller; verify
 * against the TCE equations before changing.
 * NOTE(review): t2_shm has 512 doubles but h1d*p4d elements are staged --
 * assumes h1d*p4d <= 512.  Parameter p4 is clobbered by the inner loop. */
__global__ void sd_t_s1_7_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6;
__shared__ double t2_shm[T1*2*Tcomm];
/* Cooperatively stage all of t2 (h1d*p4d doubles) into shared memory. */
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;  /* flat global thread id */
rest_x = thread_x;
__syncthreads();  /* t2_shm must be fully written before reads */
/* NOTE(review): rest_x decode is only valid on the first pass; the launch
 * grid covers total_x in one iteration -- confirm before shrinking it. */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;   /* decode flat index: h3 fastest, then h2, then p6 */
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
if((thread_x+i)<total_x)  /* guard the ragged tail of the last block */
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2];
}
}
__syncthreads();
}
/*----------------------------------------------------------------------*
 * t3[h3,h2,h1,p4,p6,p5] update from t2[p4,h1] * v2[h3,h2,p6,p5]
 * (sd_t_s1_7_kernel accumulates with "+=").
 * Host driver: copies t2/v2 to the GPU, launches the kernel per stream
 * against the file-scope accumulator t3_s_d, and synchronizes.
 * Fix vs. previous revision: deprecated cudaThreadSynchronize() replaced
 * by cudaDeviceSynchronize().
 *----------------------------------------------------------------------*/
extern "C" void
sd_t_s1_7_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
size_t p4ld_t2, h1ld_t2, h3ld_v2, h2ld_v2, p6ld_v2,
h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p4ld_t3;
size_t size_t3, size_t2, size_v2;
cudaStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
/* buffer sizes in bytes */
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);   /* kept for the (disabled) copy-back path */
streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i]));
}
CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice));
/* leading dimensions (strides) of each tensor index */
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
h3ld_t3 = 1;
h2ld_t3 = h3d;
h1ld_t3 = h2d * h3d;
p4ld_t3 = h1d * h2d * h3d;
p6ld_t3 = p4d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;   /* one thread per v2 element; p5/p6 merged */
dim3 dimBlock(T2*T1);
dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
sd_t_s1_7_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
}
/* cudaThreadSynchronize() is deprecated (removed in CUDA 12) */
CUDA_SAFE(cudaDeviceSynchronize());
for (i = 0; i < nstreams; ++i) {
cudaStreamDestroy(streams[i]);
}
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
/* Fortran-callable wrapper: dereferences the Integer* scalar arguments and
 * forwards them as plain ints to sd_t_s1_7_cuda(). */
extern "C" void
sd_t_s1_7_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
int ih1 = (int) *h1d;
int ih2 = (int) *h2d;
int ih3 = (int) *h3d;
int ip4 = (int) *p4d;
int ip5 = (int) *p5d;
int ip6 = (int) *p6d;
sd_t_s1_7_cuda(ih1, ih2, ih3, ip4, ip5, ip6, t3, t2, v2);
}
#define T1 16
#define T2 16
#define Tcomm 16
/* Singles contribution for the t3[h3,h1,h2,p4,p6,p5] permutation with
 * v2[h3,h2,p6,p5] (the comment block below documents the permutation).
 * The host passes p5d*p6d merged as p6d; one thread per flat (h3,h2,p6)
 * element, looping over h1 and p4, accumulating with "-=".
 * NOTE(review): t2_shm has 512 doubles but h1d*p4d elements are staged --
 * assumes h1d*p4d <= 512.  Parameter p4 is clobbered by the inner loop. */
__global__ void sd_t_s1_8_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6;
__shared__ double t2_shm[T1*2*Tcomm];
/* Cooperatively stage all of t2 (h1d*p4d doubles) into shared memory. */
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;  /* flat global thread id */
rest_x = thread_x;
__syncthreads();  /* t2_shm must be fully written before reads */
/* NOTE(review): rest_x decode is only valid on the first pass; the launch
 * grid covers total_x in one iteration -- confirm before shrinking it. */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;   /* decode flat index: h3 fastest, then h2, then p6 */
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
if((thread_x+i)<total_x)  /* guard the ragged tail of the last block */
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2];
}
}
__syncthreads();
}
/*----------------------------------------------------------------------*
*t3[h3,h1,h2,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
/*----------------------------------------------------------------------*
 * t3[h3,h1,h2,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
 * Host driver: copies t2/v2 to the GPU, launches sd_t_s1_8_kernel per
 * stream against the file-scope accumulator t3_s_d, and synchronizes.
 * Fix vs. previous revision: deprecated cudaThreadSynchronize() replaced
 * by cudaDeviceSynchronize().
 *----------------------------------------------------------------------*/
extern "C" void
sd_t_s1_8_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
size_t p4ld_t2, h1ld_t2, h3ld_v2, h2ld_v2, p6ld_v2,
h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p4ld_t3;
size_t size_t3, size_t2, size_v2;
cudaStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
/* buffer sizes in bytes */
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);   /* kept for the (disabled) copy-back path */
streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i]));
}
CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice));
/* leading dimensions (strides) of each tensor index */
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
h3ld_t3 = 1;
h1ld_t3 = h3d;
h2ld_t3 = h1d * h3d;
p4ld_t3 = h1d * h2d * h3d;
p6ld_t3 = p4d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;   /* one thread per v2 element; p5/p6 merged */
dim3 dimBlock(T2*T1);
dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
sd_t_s1_8_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
}
/* cudaThreadSynchronize() is deprecated (removed in CUDA 12) */
CUDA_SAFE(cudaDeviceSynchronize());
for (i = 0; i < nstreams; ++i) {
cudaStreamDestroy(streams[i]);
}
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
/* Fortran-callable wrapper: dereferences the Integer* scalar arguments and
 * forwards them as plain ints to sd_t_s1_8_cuda(). */
extern "C" void
sd_t_s1_8_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
int ih1 = (int) *h1d;
int ih2 = (int) *h2d;
int ih3 = (int) *h3d;
int ip4 = (int) *p4d;
int ip5 = (int) *p5d;
int ip6 = (int) *p6d;
sd_t_s1_8_cuda(ih1, ih2, ih3, ip4, ip5, ip6, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h1,h3,h2,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
/*----------------------------------------------------------------------*
 * t3[h1,h3,h2,p4,p6,p5] update from t2[p4,h1] * v2[h3,h2,p6,p5]
 * (reuses sd_t_s1_7_kernel, which only differs in the strides it is
 * given and accumulates with "+=").
 * Fix vs. previous revision: deprecated cudaThreadSynchronize() replaced
 * by cudaDeviceSynchronize().
 *----------------------------------------------------------------------*/
extern "C" void
sd_t_s1_9_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
size_t p4ld_t2, h1ld_t2, h3ld_v2, h2ld_v2, p6ld_v2,
h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p4ld_t3;
size_t size_t3, size_t2, size_v2;
cudaStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
/* buffer sizes in bytes */
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);   /* kept for the (disabled) copy-back path */
streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i]));
}
CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice));
/* leading dimensions (strides) of each tensor index */
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
h1ld_t3 = 1;
h3ld_t3 = h1d;
h2ld_t3 = h1d * h3d;
p4ld_t3 = h1d * h2d * h3d;
p6ld_t3 = p4d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;   /* one thread per v2 element; p5/p6 merged */
dim3 dimBlock(T2*T1);
dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
sd_t_s1_7_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
}
/* cudaThreadSynchronize() is deprecated (removed in CUDA 12) */
CUDA_SAFE(cudaDeviceSynchronize());
for (i = 0; i < nstreams; ++i) {
cudaStreamDestroy(streams[i]);
}
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
/* Fortran-callable wrapper: dereferences the Integer* scalar arguments and
 * forwards them as plain ints to sd_t_s1_9_cuda(). */
extern "C" void
sd_t_s1_9_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
int ih1 = (int) *h1d;
int ih2 = (int) *h2d;
int ih3 = (int) *h3d;
int ip4 = (int) *p4d;
int ip5 = (int) *p5d;
int ip6 = (int) *p6d;
sd_t_s1_9_cuda(ih1, ih2, ih3, ip4, ip5, ip6, t3, t2, v2);
}
|
ca3aa302d18f3536b4900a121f65969646be1cf8.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
MIT License
Copyright (c) 2016 Antti-Pekka Hynninen
Copyright (c) 2016 Oak Ridge National Laboratory (UT-Batelle)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*******************************************************************************/
#include <hip/hip_runtime.h>
#include "CudaUtils.h"
#include "LRUCache.h"
#include "cuttkernel.h"
#define RESTRICT __restrict__
//
// Transpose when Mm and Mk don't overlap and contain only single rank
//
// dim3 numthread(TILEDIM, TILEROWS, 1);
// dim3 numblock( ((plan.volMm-1)/TILEDIM+1)*((plan.volMk-1)/TILEDIM+1), 1, plan.volMbar);
//
// Tiled single-rank transpose: each block moves TILEDIM x TILEDIM tiles of the
// (Mm, Mk) plane through shared memory so that both the global load and the
// global store are contiguous in threadIdx.x; the untouched Mbar ranks are
// iterated over blockIdx.z.
template <typename T>
__global__ void transposeTiled(
const int numMm, const int volMbar, const int sizeMbar,
const int2 tiledVol, const int cuDimMk, const int cuDimMm,
const TensorConvInOut* RESTRICT glMbar,
const T* RESTRICT dataIn, T* RESTRICT dataOut) {
// Shared memory tile; the +1 column padding avoids shared-memory bank
// conflicts on the transposed (column-wise) read in the write loop below.
__shared__ T shTile[TILEDIM][TILEDIM+1];
const int warpLane = threadIdx.x & (warpSize - 1);
// Lane i of each warp holds the conversion entry for Mbar rank i; lanes
// >= sizeMbar keep the neutral values c=d=1 so their term ((p/1)%1)*ct == 0.
TensorConvInOut Mbar;
Mbar.c_in = 1;
Mbar.d_in = 1;
Mbar.c_out = 1;
Mbar.d_out = 1;
if (warpLane < sizeMbar) {
Mbar = glMbar[warpLane];
}
// Tile origin (bx, by) and this thread's coordinates for the read (xin, yin)
// and the transposed write (xout, yout).
const int bx = (blockIdx.x % numMm)*TILEDIM;
const int by = (blockIdx.x / numMm)*TILEDIM;
const int xin = bx + threadIdx.x;
const int yin = by + threadIdx.y;
const int xout = bx + threadIdx.y;
const int yout = by + threadIdx.x;
// Bit j of the ballot records whether row (yin + j) is inside tiledVol; the
// multiply clears the whole mask when this thread's column is out of range.
// NOTE(review): relies on threadIdx.y being warp-uniform, i.e. TILEDIM ==
// warpSize — confirm this still holds on 64-wide-warp AMD hardware.
const unsigned int maskIny = __ballot_sync(0xffffffff,(yin + warpLane < tiledVol.y))*(xin < tiledVol.x);
const unsigned int maskOutx = __ballot_sync(0xffffffff,(xout + warpLane < tiledVol.x))*(yout < tiledVol.y);
const int posMinorIn = xin + yin*cuDimMk;
const int posMinorOut = yout + xout*cuDimMm;
const int posInAdd = TILEROWS*cuDimMk;
const int posOutAdd = TILEROWS*cuDimMm;
// Grid-stride loop (in z) over the volume of the untouched Mbar ranks.
for (int posMbar=blockIdx.z;posMbar < volMbar;posMbar += gridDim.z)
{
// Compute global memory positions: each lane computes the term of its own
// rank, then a warp-wide XOR (butterfly) reduction sums the terms so every
// lane holds the complete Mbar offset.
int posMajorIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in;
int posMajorOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out;
#pragma unroll
for (int i=16;i >= 1;i/=2) {
posMajorIn += __shfl_xor_sync(0xffffffff,posMajorIn,i);
posMajorOut += __shfl_xor_sync(0xffffffff,posMajorOut,i);
}
int posIn = posMajorIn + posMinorIn;
int posOut = posMajorOut + posMinorOut;
// Read from global memory. The barrier protects shTile from the previous
// iteration's readers before it is overwritten.
__syncthreads();
// Read data into shared memory tile
#pragma unroll
for (int j=0;j < TILEDIM;j += TILEROWS) {
// int pos = posIn + j*cuDimMk;
// if (xin < readVol.x && yin + j < readVol.y) {
if ((maskIny & (1 << j)) != 0) {
shTile[threadIdx.y + j][threadIdx.x] = dataIn[posIn];
}
posIn += posInAdd;
}
// Write to global memory, reading the tile transposed.
__syncthreads();
#pragma unroll
for (int j=0;j < TILEDIM;j += TILEROWS) {
// int pos = posOut + j*cuDimMm;
// if (xout + j < readVol.x && yout < readVol.y) {
if ((maskOutx & (1 << j)) != 0 ) {
dataOut[posOut] = shTile[threadIdx.x][threadIdx.y + j];
}
posOut += posOutAdd;
}
}
}
//
// Packed transpose. Thread block loads plan.volMmk number of elements
//
// Packed transpose: each thread block stages the whole volMmk volume in
// dynamic shared memory in input order, then writes it out in output order.
// Each thread is responsible for numRegStorage elements (index j below).
template <typename T, int numRegStorage>
__global__ void transposePacked(
const int volMmk, const int volMbar,
const int sizeMmk, const int sizeMbar,
const TensorConvInOut* RESTRICT gl_Mmk,
const TensorConvInOut* RESTRICT gl_Mbar,
const TensorConv* RESTRICT gl_Msh,
const T* RESTRICT dataIn, T* RESTRICT dataOut) {
// Shared memory. volMmk elements
extern __shared__ char shBuffer_char[];
T* shBuffer = (T *)shBuffer_char;
const int warpLane = threadIdx.x & (warpSize - 1);
// Lane i of each warp holds the conversion entry for Mmk rank i; lanes
// beyond sizeMmk keep neutral c=d=1 values so their term evaluates to 0.
TensorConvInOut Mmk;
Mmk.c_in = 1;
Mmk.d_in = 1;
Mmk.c_out = 1;
Mmk.d_out = 1;
if (warpLane < sizeMmk) {
Mmk = gl_Mmk[warpLane];
}
TensorConv Msh;
Msh.c = 1;
Msh.d = 1;
if (warpLane < sizeMmk) {
Msh = gl_Msh[warpLane];
}
// Pre-compute tensor positions in Mmk
// 3*numRegStorage registers
int posMmkIn[numRegStorage];
int posMmkOut[numRegStorage];
int posSh[numRegStorage];
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
posMmkIn[j] = 0;
posMmkOut[j] = 0;
posSh[j] = 0;
}
// Accumulate the per-rank terms; __shfl_sync(..., i) broadcasts rank i's
// conversion constants from lane i to every lane of the warp.
for (int i=0;i < sizeMmk;i++) {
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
int posMmk = threadIdx.x + j*blockDim.x;
posMmkIn[j] += ((posMmk / __shfl_sync(0xffffffff,Mmk.c_in,i)) % __shfl_sync(0xffffffff,Mmk.d_in,i))*__shfl_sync(0xffffffff,Mmk.ct_in,i);
posMmkOut[j] += ((posMmk / __shfl_sync(0xffffffff,Mmk.c_out,i)) % __shfl_sync(0xffffffff,Mmk.d_out,i))*__shfl_sync(0xffffffff,Mmk.ct_out,i);
posSh[j] += ((posMmk / __shfl_sync(0xffffffff,Msh.c,i)) % __shfl_sync(0xffffffff,Msh.d,i))*__shfl_sync(0xffffffff,Msh.ct,i);
}
}
// 6 registers
TensorConvInOut Mbar;
Mbar.c_in = 1;
Mbar.d_in = 1;
Mbar.c_out = 1;
Mbar.d_out = 1;
if (warpLane < sizeMbar) {
Mbar = gl_Mbar[warpLane];
}
// Grid-stride loop over the volume of the untouched Mbar ranks.
for (int posMbar=blockIdx.x;posMbar < volMbar;posMbar += gridDim.x)
{
// Warp-wide XOR reduction sums the per-lane rank terms so every lane
// ends up with the complete Mbar output/input offsets.
int posMbarOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out;
#pragma unroll
for (int i=16;i >= 1;i/=2) {
posMbarOut += __shfl_xor_sync(0xffffffff,posMbarOut,i);
}
int posMbarIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in;
#pragma unroll
for (int i=16;i >= 1;i/=2) {
posMbarIn += __shfl_xor_sync(0xffffffff,posMbarIn,i);
}
// Protect shBuffer from the previous iteration's readers.
__syncthreads();
// Read from global memory
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
int posMmk = threadIdx.x + j*blockDim.x;
int posIn = posMbarIn + posMmkIn[j];
if (posMmk < volMmk) shBuffer[posMmk] = dataIn[posIn];
}
__syncthreads();
// Write to global memory, gathering from shared memory via posSh[].
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
int posMmk = threadIdx.x + j*blockDim.x;
int posOut = posMbarOut + posMmkOut[j];
if (posMmk < volMmk) dataOut[posOut] = shBuffer[posSh[j]];
}
}
}
//
// Packed method with a split rank
//
// dim nthread(((volMmkWithSplit - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize, 1, 1)
// dim nblock(ts.numSplit, min(256, max(1, ts.volMbar)), 1)
//
// Packed transpose with one rank split across gridDim.x thread blocks.
// Each block handles its slice of the split rank times the unsplit Mmk volume,
// staging it through dynamic shared memory like transposePacked.
template <typename T, int numRegStorage>
__global__ void transposePackedSplit(
const int splitDim, const int volMmkUnsplit, const int volMbar,
const int sizeMmk, const int sizeMbar,
const int cMmSplit, const int cMkSplit,
const TensorConvInOut* RESTRICT glMmk,
const TensorConvInOut* RESTRICT glMbar,
const TensorConv* RESTRICT glMsh,
const T* RESTRICT dataIn, T* RESTRICT dataOut) {
// Shared memory. max(volSplit)*volMmkUnsplit T elements
extern __shared__ char shBuffer_char[];
T* shBuffer = (T *)shBuffer_char;
const int warpLane = threadIdx.x & (warpSize - 1);
// const int plusone = (blockIdx.x < (splitDim % gridDim.x));
// Even partition of splitDim over gridDim.x splits: p0 is the first element
// of this block's slice, volSplit its size, and plusone is 1 when the slice
// got one extra element (selects the matching conversion-table variant).
const int p0 = blockIdx.x*splitDim/gridDim.x;
const int volSplit = (blockIdx.x + 1)*splitDim/gridDim.x - p0;
const int plusone = volSplit - splitDim/gridDim.x;
// Lane i holds the conversion entry of Mmk rank i; glMmk/glMsh store two
// variants (plusone = 0 and 1) back to back, sizeMmk entries each.
TensorConvInOut Mmk;
Mmk.c_in = 1;
Mmk.d_in = 1;
Mmk.c_out = 1;
Mmk.d_out = 1;
if (warpLane < sizeMmk) {
Mmk = glMmk[warpLane + plusone*sizeMmk];
}
TensorConv Msh;
Msh.c = 1;
Msh.d = 1;
if (warpLane < sizeMmk) {
Msh = glMsh[warpLane + plusone*sizeMmk];
}
// gridDim.x = number of splits
// blockIdx.x = {0 ... gridDim.x - 1} is the split-index
// Volume of this split
// const int volSplit = (splitDim/gridDim.x) + plusone;
// Start position in this split
// const int p0 = (splitDim/gridDim.x)*blockIdx.x + min(blockIdx.x, (splitDim % gridDim.x));
// Base offsets of this slice in the input and output linearizations.
const int posMmkIn0 = p0*cMmSplit;
const int posMmkOut0 = p0*cMkSplit;
// Volume of split Mmk
const int volMmkSplit = volSplit*volMmkUnsplit;
// Pre-compute tensor positions in Mmk
// 3*numRegStorage registers
int posMmkIn[numRegStorage];
int posMmkOut[numRegStorage];
int posSh[numRegStorage];
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
posMmkIn[j] = posMmkIn0;
posMmkOut[j] = posMmkOut0;
posSh[j] = 0;
}
// Accumulate per-rank terms; __shfl_sync(..., i) broadcasts rank i's
// conversion constants from lane i to the whole warp.
for (int i=0;i < sizeMmk;i++) {
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
int t = threadIdx.x + j*blockDim.x;
posMmkIn[j] += ((t/__shfl_sync(0xffffffff,Mmk.c_in,i)) % __shfl_sync(0xffffffff,Mmk.d_in,i))*__shfl_sync(0xffffffff,Mmk.ct_in,i);
posMmkOut[j] += ((t/__shfl_sync(0xffffffff,Mmk.c_out,i)) % __shfl_sync(0xffffffff,Mmk.d_out,i))*__shfl_sync(0xffffffff,Mmk.ct_out,i);
posSh[j] += ((t/__shfl_sync(0xffffffff,Msh.c,i)) % __shfl_sync(0xffffffff,Msh.d,i))*__shfl_sync(0xffffffff,Msh.ct,i);
}
}
// Mbar conversion entry per lane; inactive lanes keep neutral c=d=1 values.
TensorConvInOut Mbar;
Mbar.c_in = 1;
Mbar.d_in = 1;
Mbar.c_out = 1;
Mbar.d_out = 1;
if (warpLane < sizeMbar) {
Mbar = glMbar[warpLane];
}
// Even partition of the Mbar volume over gridDim.y blocks.
const int posMbar0 = blockIdx.y*volMbar/gridDim.y;
const int posMbar1 = (blockIdx.y + 1)*volMbar/gridDim.y;
for (int posMbar=posMbar0;posMbar < posMbar1;posMbar++)
// for (int posMbar=blockIdx.y;posMbar < volMbar;posMbar+=gridDim.y)
{
// Warp-wide XOR reduction sums the per-lane rank terms so every lane
// holds the complete Mbar offsets.
int posMbarOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out;
#pragma unroll
for (int i=16;i >= 1;i/=2) {
posMbarOut += __shfl_xor_sync(0xffffffff,posMbarOut,i);
}
int posMbarIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in;
#pragma unroll
for (int i=16;i >= 1;i/=2) {
posMbarIn += __shfl_xor_sync(0xffffffff,posMbarIn,i);
}
// Read from global memory. The barrier protects shBuffer from the
// previous iteration's readers.
__syncthreads();
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
int posMmk = threadIdx.x + j*blockDim.x;
int posIn = posMbarIn + posMmkIn[j];
if (posMmk < volMmkSplit) shBuffer[posMmk] = dataIn[posIn];
}
// Write to global memory, gathering from shared memory via posSh[].
__syncthreads();
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
int posMmk = threadIdx.x + j*blockDim.x;
int posOut = posMbarOut + posMmkOut[j];
if (posMmk < volMmkSplit) dataOut[posOut] = shBuffer[posSh[j]];
}
}
}
#if 1
//
// Transpose when the lead dimension is the same, e.g. (1, 2, 3) -> (1, 3, 2)
//
// dim3 numthread(TILEDIM, TILEROWS, 1);
// dim3 numblock( ((plan.volMm-1)/TILEDIM+1)*((plan.volMkBar-1)/TILEDIM+1), 1, plan.volMbar);
//
// TiledCopy: the lead dimension is unchanged, so each tile can be copied
// row by row through registers (val[]) — no shared-memory transpose needed.
template <typename T>
__global__ void transposeTiledCopy(
const int numMm, const int volMbar, const int sizeMbar,
const int cuDimMk, const int cuDimMm,
const int2 tiledVol,
const TensorConvInOut* RESTRICT gl_Mbar,
const T* RESTRICT dataIn, T* RESTRICT dataOut) {
const int warpLane = threadIdx.x & (warpSize - 1);
// Lane i of each warp holds the conversion entry for Mbar rank i; lanes
// >= sizeMbar keep the neutral values c=d=1 so their term evaluates to 0.
TensorConvInOut Mbar;
Mbar.c_in = 1;
Mbar.d_in = 1;
Mbar.c_out = 1;
Mbar.d_out = 1;
if (warpLane < sizeMbar) {
Mbar = gl_Mbar[warpLane];
}
const int bx = (blockIdx.x % numMm)*TILEDIM;
const int by = (blockIdx.x / numMm)*TILEDIM;
const int x = bx + threadIdx.x;
const int y = by + threadIdx.y;
// Bit j of the ballot records whether row (y + j) is inside tiledVol; the
// multiply clears the whole mask when column x is out of range.
// NOTE(review): relies on threadIdx.y being warp-uniform (TILEDIM ==
// warpSize) — confirm on 64-wide-warp hardware.
const unsigned int mask = __ballot_sync(0xffffffff,(y + warpLane < tiledVol.y))*(x < tiledVol.x);
const int posMinorIn = x + y*cuDimMk;
const int posMinorOut = x + y*cuDimMm;
const int posInAdd = TILEROWS*cuDimMk;
const int posOutAdd = TILEROWS*cuDimMm;
// Grid-stride loop (in z) over the volume of the untouched Mbar ranks.
for (int posMbar=blockIdx.z;posMbar < volMbar;posMbar += gridDim.z)
{
// Compute global memory positions: per-lane rank terms summed by a
// warp-wide XOR reduction so every lane holds the full Mbar offsets.
int posMajorIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in;
int posMajorOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out;
#pragma unroll
for (int i=16;i >= 1;i/=2) {
posMajorIn += __shfl_xor_sync(0xffffffff,posMajorIn,i);
posMajorOut += __shfl_xor_sync(0xffffffff,posMajorOut,i);
}
int posIn = posMajorIn + posMinorIn;
int posOut = posMajorOut + posMinorOut;
// Variables where values are stored
T val[TILEDIM/TILEROWS];
// Read global memory
#pragma unroll
for (int j=0;j < TILEDIM;j += TILEROWS) {
// if ((x < tiledVol.x) && (y + j < tiledVol.y)) {
if ((mask & (1 << j)) != 0) {
val[j/TILEROWS] = dataIn[posIn];
}
posIn += posInAdd;
}
// Write global memory
#pragma unroll
for (int j=0;j < TILEDIM;j += TILEROWS) {
// if ((x < tiledVol.x) && (y + j < tiledVol.y)) {
if ((mask & (1 << j)) != 0) {
dataOut[posOut] = val[j/TILEROWS];
}
posOut += posOutAdd;
}
}
}
#else
//
// Returns scalar tensor position. Each lane has the same p
// NOTE: c and d on inactive warps must be 1 !!
//
__device__ __forceinline__
int tensorPos(
  const int p, const int rank, const int c, const int d, const int ct,
  const int numLane=warpSize
) {
  // This lane's contribution: the term of its own rank in the linearized
  // position, ((p/c) % d) * ct. Lanes holding no rank must have c = d = 1
  // so their contribution is 0 (see NOTE above).
  int pos = ((p / c) % d) * ct;
  // Butterfly (XOR) reduction over numLane lanes: after the loop every lane
  // holds the sum of all contributions, i.e. the full scalar position.
#pragma unroll
  for (int laneOffset = numLane / 2; laneOffset > 0; laneOffset /= 2) {
    pos += __shfl_xor_sync(0xffffffff, pos, laneOffset);
  }
  return pos;
}
//
// Transpose when the lead dimension is the same, e.g. (1, 2, 3) -> (1, 3, 2)
//
// dim3 numthread(TILEDIM, TILEROWS, 1);
// dim3 numblock( ((plan.volMm-1)/TILEDIM+1)*((plan.volMkBar-1)/TILEDIM+1), 1, plan.volMbar);
//
// Reference implementation of TiledCopy (disabled #else branch): same copy
// with the lead dimension unchanged, but written with explicit per-element
// bounds checks and the tensorPos() helper instead of precomputed ballot
// masks and incremental position updates.
template <typename T>
__global__ void transposeTiledCopy(
const int numMm, const int volMbar, const int sizeMbar,
const int cuDimMk, const int cuDimMm,
const int2 tiledVol,
const TensorConvInOut* RESTRICT gl_Mbar,
const T* RESTRICT dataIn, T* RESTRICT dataOut) {
const int warpLane = threadIdx.x & (warpSize - 1);
// Lane i of each warp holds the conversion entry for Mbar rank i; lanes
// >= sizeMbar keep the neutral values c=d=1 required by tensorPos().
TensorConvInOut Mbar;
Mbar.c_in = 1;
Mbar.d_in = 1;
Mbar.c_out = 1;
Mbar.d_out = 1;
if (warpLane < sizeMbar) {
Mbar = gl_Mbar[warpLane];
}
const int bx = (blockIdx.x % numMm)*TILEDIM;
const int by = (blockIdx.x / numMm)*TILEDIM;
const int x = bx + threadIdx.x;
const int y = by + threadIdx.y;
// Grid-stride loop (in z) over the volume of the untouched Mbar ranks.
for (int posMbar=blockIdx.z;posMbar < volMbar;posMbar += gridDim.z)
{
// Variables where values are stored
T val[TILEDIM/TILEROWS];
// Read global memory
{
int pos0 = tensorPos(posMbar, sizeMbar, Mbar.c_in, Mbar.d_in, Mbar.ct_in);
pos0 += x + y*cuDimMk;
#pragma unroll
for (int j=0;j < TILEDIM;j += TILEROWS) {
int pos = pos0 + j*cuDimMk;
if ((x < tiledVol.x) && (y + j < tiledVol.y)) {
val[j/TILEROWS] = dataIn[pos];
}
}
}
// Write global memory
{
int pos0 = tensorPos(posMbar, sizeMbar, Mbar.c_out, Mbar.d_out, Mbar.ct_out);
pos0 += x + y*cuDimMm;
#pragma unroll
for (int j=0;j < TILEDIM;j += TILEROWS) {
int pos = pos0 + j*cuDimMm;
if ((x < tiledVol.x) && (y + j < tiledVol.y)) {
dataOut[pos] = val[j/TILEROWS];
}
}
}
}
}
#endif
//######################################################################################
//######################################################################################
//######################################################################################
//
// Sets shared memory bank configuration for all kernels. Needs to be called once per device.
//
void cuttKernelSetSharedMemConfig() {
// calls.h contains one CALL(NREG) line per supported numRegStorage value,
// so each #include instantiates the kernel template for every NREG and sets
// its shared-memory bank size (4 bytes for float, 8 bytes for double).
#define CALL(NREG) cudaCheck(hipFuncSetSharedMemConfig(transposePacked<float, NREG>, hipSharedMemBankSizeFourByte ))
#include "calls.h"
#undef CALL
#define CALL(NREG) cudaCheck(hipFuncSetSharedMemConfig(transposePacked<double, NREG>, hipSharedMemBankSizeEightByte ))
#include "calls.h"
#undef CALL
#define CALL(NREG) cudaCheck(hipFuncSetSharedMemConfig(transposePackedSplit<float, NREG>, hipSharedMemBankSizeFourByte ))
#include "calls.h"
#undef CALL
#define CALL(NREG) cudaCheck(hipFuncSetSharedMemConfig(transposePackedSplit<double, NREG>, hipSharedMemBankSizeEightByte ))
#include "calls.h"
#undef CALL
// The tiled kernels have no numRegStorage parameter; configure them directly.
cudaCheck(hipFuncSetSharedMemConfig(transposeTiled<float>, hipSharedMemBankSizeFourByte));
cudaCheck(hipFuncSetSharedMemConfig(transposeTiledCopy<float>, hipSharedMemBankSizeFourByte));
cudaCheck(hipFuncSetSharedMemConfig(transposeTiled<double>, hipSharedMemBankSizeEightByte));
cudaCheck(hipFuncSetSharedMemConfig(transposeTiledCopy<double>, hipSharedMemBankSizeEightByte));
}
// Caches for PackedSplit kernels. One cache for all devices
// NOTE: Not thread safe
const int CACHE_SIZE = 100000;
// Upper bound on warps per block used in the cache key (1024/32).
const int MAX_NUMWARP = (1024/32);
// Two element types are cached: sizeof 4 (float) and sizeof 8 (double).
const int MAX_NUMTYPE = 2;
// Lazily initialized by getNumActiveBlock(); -1 means "not queried yet".
static int numDevices = -1;
// Maps a packed launch-configuration key to numActiveBlock; the default
// value -1 doubles as the cache-miss sentinel checked in getNumActiveBlock().
LRUCache<unsigned long long int, int> nabCache(CACHE_SIZE, -1);
//
// Returns the maximum number of active blocks per SM
//
// Returns the occupancy (active blocks per SM) for the kernel selected by
// `method`, `sizeofType` (4 = float, 8 = double) and the launch config `lc`.
// PackedSplit results are memoized in nabCache keyed on the full config.
int getNumActiveBlock(const int method, const int sizeofType, const LaunchConfig& lc,
const int deviceID, const hipDeviceProp_t& prop) {
int numActiveBlock;
int numthread = lc.numthread.x * lc.numthread.y * lc.numthread.z;
switch(method) {
case Trivial:
{
// This value does not matter, but should be > 0
numActiveBlock = 1;
}
break;
case Packed:
{
// Dispatch on the numRegStorage template parameter; calls.h expands the
// CALL(ICASE) macro once per supported value.
#define CALL0(TYPE, NREG) \
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock, \
transposePacked<TYPE, NREG>, numthread, lc.shmemsize)
switch(lc.numRegStorage) {
#define CALL(ICASE) case ICASE: if (sizeofType == 4) CALL0(float, ICASE); if (sizeofType == 8) CALL0(double, ICASE); break
#include "calls.h"
}
#undef CALL
#undef CALL0
}
break;
case PackedSplit:
{
// Allocate cache structure if needed
if (numDevices == -1) {
cudaCheck(hipGetDeviceCount(&numDevices));
}
// Build unique key for cache: a mixed-radix encoding of
// (shmem elements, device, element type, numRegStorage, warps per block).
int key_warp = (numthread/prop.warpSize - 1);
if (key_warp >= MAX_NUMWARP) {
printf("getNumActiveBlock maximum number of warps exceeded\n");
exit(1);
}
int key_reg = (lc.numRegStorage - 1);
int key_type = (sizeofType == 4);
unsigned long long int key =
(unsigned long long int)(lc.shmemsize/sizeofType)*MAX_NUMWARP*MAX_REG_STORAGE*MAX_NUMTYPE*numDevices +
(unsigned long long int)deviceID*MAX_NUMWARP*MAX_REG_STORAGE*MAX_NUMTYPE +
(unsigned long long int)key_type*MAX_NUMWARP*MAX_REG_STORAGE +
(unsigned long long int)key_reg*MAX_NUMWARP +
(unsigned long long int)key_warp;
// -1 is the cache-miss sentinel (nabCache's default value).
numActiveBlock = nabCache.get(key);
if (numActiveBlock == -1) {
// key not found in cache, determine value and add it to cache
#define CALL0(TYPE, NREG) \
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock, \
transposePackedSplit<TYPE, NREG>, numthread, lc.shmemsize)
switch(lc.numRegStorage) {
#define CALL(ICASE) case ICASE: if (sizeofType == 4) CALL0(float, ICASE); if (sizeofType == 8) CALL0(double, ICASE); break
#include "calls.h"
}
#undef CALL
#undef CALL0
nabCache.set(key, numActiveBlock);
}
}
break;
case Tiled:
{
if (sizeofType == 4) {
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
transposeTiled<float>, numthread, lc.shmemsize);
} else {
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
transposeTiled<double>, numthread, lc.shmemsize);
}
}
break;
case TiledCopy:
{
if (sizeofType == 4) {
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
transposeTiledCopy<float>, numthread, lc.shmemsize);
} else {
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
transposeTiledCopy<double>, numthread, lc.shmemsize);
}
}
break;
}
return numActiveBlock;
}
//
// Sets up kernel launch configuration
//
// Returns the number of active blocks per SM that can be achieved on the Packed kernel
// NOTE: Returns 0 when kernel execution is not possible
//
// Sets:
// lc.numthread
// lc.numblock
// lc.shmemsize
// lc.numRegStorage (for Packed method)
//
int cuttKernelLaunchConfiguration(const int sizeofType, const TensorSplit& ts,
  const int deviceID, const hipDeviceProp_t& prop, LaunchConfig& lc) {

  // Number of active blocks per SM to return; -1 = "not computed yet", in
  // which case getNumActiveBlock() is queried once at the end.
  int numActiveBlockReturn = -1;

  switch(ts.method) {

    case Trivial:
    {
      // Trivial is implemented as a plain device memcpy (see cuttKernel()),
      // so the launch configuration is irrelevant; use a minimal 1x1x1 setup.
      // (The original code redundantly assigned lc.numblock.z three times.)
      lc.numthread.x = 1;
      lc.numthread.y = 1;
      lc.numthread.z = 1;
      lc.numblock.x = 1;
      lc.numblock.y = 1;
      lc.numblock.z = 1;
      lc.shmemsize = 0;
      lc.numRegStorage = 0;
    }
    break;

    case Packed:
    {
      // Amount of shared memory required
      lc.shmemsize = ts.shmemAlloc(sizeofType); //ts.volMmk*sizeofType;
      // Check that we're not using too much shared memory per block
      if (lc.shmemsize > prop.sharedMemPerBlock) {
        return 0;
      }
      // Min and max number of threads we can use (each thread may own up to
      // MAX_REG_STORAGE elements, so fewer threads require more registers).
      int minNumthread = ((ts.volMmk - 1)/(prop.warpSize*MAX_REG_STORAGE) + 1)*prop.warpSize;
      int maxNumthread = ((ts.volMmk - 1)/(prop.warpSize) + 1)*prop.warpSize;
      if (minNumthread > prop.maxThreadsPerBlock) return 0;
      maxNumthread = min(prop.maxThreadsPerBlock, maxNumthread);
      // Min and max number of register storage we can use
      int minNumRegStorage = (ts.volMmk - 1)/maxNumthread + 1;
      int maxNumRegStorage = (ts.volMmk - 1)/minNumthread + 1;
      // Scan the register-storage range and keep the configuration that
      // maximizes (used Mmk volume) * (active blocks per SM).
      int bestVal = 0;
      int bestNumRegStorage = 0;
      int bestNumActiveBlock = 0;
      lc.numthread.y = 1;
      lc.numthread.z = 1;
      lc.numblock.x = max(1, ts.volMbar);
      lc.numblock.x = min(prop.multiProcessorCount*18, lc.numblock.x);
      lc.numblock.y = 1;
      lc.numblock.z = 1;
      for (lc.numRegStorage=minNumRegStorage;lc.numRegStorage <= maxNumRegStorage;lc.numRegStorage++) {
        lc.numthread.x = ((ts.volMmk - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize;
        int numActiveBlock = getNumActiveBlock(ts.method, sizeofType, lc, deviceID, prop);
        int val = ts.volMmkUsed()*numActiveBlock;
        if (val > bestVal) {
          bestVal = val;
          bestNumRegStorage = lc.numRegStorage;
          bestNumActiveBlock = numActiveBlock;
        }
      }
      // No feasible configuration found
      if (bestNumRegStorage == 0) return 0;
      lc.numRegStorage = bestNumRegStorage;
      lc.numthread.x = ((ts.volMmk - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize;
      numActiveBlockReturn = bestNumActiveBlock;
    }
    break;

    case PackedSplit:
    {
      // Amount of shared memory required
      lc.shmemsize = ts.shmemAlloc(sizeofType);
      // Check that we're not using too much shared memory per block
      if (lc.shmemsize > prop.sharedMemPerBlock) {
        return 0;
      }
      // Volume handled by a block that received an extra split element
      // (the worst case; ceil(splitDim/numSplit) * volMmkUnsplit).
      int volMmkWithSplit = (ts.splitDim/ts.numSplit + ((ts.splitDim % ts.numSplit) > 0))*ts.volMmkUnsplit;
      // Min and max number of threads we can use
      int minNumthread = ((volMmkWithSplit - 1)/(prop.warpSize*MAX_REG_STORAGE) + 1)*prop.warpSize;
      int maxNumthread = ((volMmkWithSplit - 1)/(prop.warpSize) + 1)*prop.warpSize;
      if (minNumthread > prop.maxThreadsPerBlock) return 0;
      maxNumthread = min(prop.maxThreadsPerBlock, maxNumthread);
      // Min and max number of register storage we can use
      int minNumRegStorage = (volMmkWithSplit - 1)/maxNumthread + 1;
      int maxNumRegStorage = (volMmkWithSplit - 1)/minNumthread + 1;
      // Scan the register-storage range, keeping the best configuration
      // (same criterion as the Packed case).
      int bestVal = 0;
      int bestNumRegStorage = 0;
      int bestNumActiveBlock = 0;
      lc.numthread.y = 1;
      lc.numthread.z = 1;
      lc.numblock.x = ts.numSplit;
      lc.numblock.y = max(1, min((prop.multiProcessorCount*18)/lc.numblock.x, ts.volMbar));
      lc.numblock.z = 1;
      for (lc.numRegStorage=minNumRegStorage;lc.numRegStorage <= maxNumRegStorage;lc.numRegStorage++) {
        lc.numthread.x = ((volMmkWithSplit - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize;
        int numActiveBlock = getNumActiveBlock(ts.method, sizeofType, lc, deviceID, prop);
        int val = ts.volMmkUsed()*numActiveBlock;
        if (val > bestVal) {
          bestVal = val;
          bestNumRegStorage = lc.numRegStorage;
          bestNumActiveBlock = numActiveBlock;
        }
      }
      // No feasible configuration found
      if (bestNumRegStorage == 0) return 0;
      lc.numRegStorage = bestNumRegStorage;
      lc.numthread.x = ((volMmkWithSplit - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize;
      numActiveBlockReturn = bestNumActiveBlock;
    }
    break;

    case Tiled:
    {
      lc.numthread.x = TILEDIM;
      lc.numthread.y = TILEROWS;
      lc.numthread.z = 1;
      // One block per (Mm-tile, Mk-tile) pair; Mbar volume over z, capped.
      lc.numblock.x = ((ts.volMm - 1)/TILEDIM + 1)*((ts.volMk - 1)/TILEDIM + 1);
      lc.numblock.y = 1;
      lc.numblock.z = max(1, min((prop.multiProcessorCount*8)/(lc.numblock.x*lc.numblock.y), ts.volMbar));
      lc.shmemsize = 0;
      lc.numRegStorage = 0;
    }
    break;

    case TiledCopy:
    {
      lc.numthread.x = TILEDIM;
      lc.numthread.y = TILEROWS;
      lc.numthread.z = 1;
      // One block per (Mm-tile, MkBar-tile) pair; Mbar volume over z, capped
      // (written in the same clamped form as the Tiled case for consistency).
      lc.numblock.x = ((ts.volMm - 1)/TILEDIM + 1)*((ts.volMkBar - 1)/TILEDIM + 1);
      lc.numblock.y = 1;
      lc.numblock.z = max(1, min((prop.multiProcessorCount*8)/(lc.numblock.x*lc.numblock.y), ts.volMbar));
      lc.shmemsize = 0;
      lc.numRegStorage = 0;
    }
    break;

  }

  // Reject configurations that exceed the device's grid-size limits.
  if (lc.numblock.x > prop.maxGridSize[0] ||
      lc.numblock.y > prop.maxGridSize[1] ||
      lc.numblock.z > prop.maxGridSize[2]) return 0;

  // Return the number of active blocks with these settings
  if (numActiveBlockReturn == -1) {
    // Not set, get it
    numActiveBlockReturn = getNumActiveBlock(ts.method, sizeofType, lc, deviceID, prop);
  }
  return numActiveBlockReturn;
}
// Executes the transpose plan: selects the kernel variant for plan's method,
// launches it on plan.stream, and returns false only when no template
// instantiation exists for the requested numRegStorage.
bool cuttKernel(cuttPlan_t& plan, void* dataIn, void* dataOut) {
LaunchConfig& lc = plan.launchConfig;
TensorSplit& ts = plan.tensorSplit;
switch(ts.method) {
case Trivial:
{
// Identity permutation: a plain async device copy is sufficient.
cudaCheck(hipMemcpyAsync(dataOut, dataIn, ts.volMmk*ts.volMbar*plan.sizeofType,
hipMemcpyDefault, plan.stream));
}
break;
case Packed:
{
// Dispatch on numRegStorage; calls.h expands CALL(ICASE) into one case
// per supported template instantiation.
switch(lc.numRegStorage) {
#define CALL0(TYPE, NREG) \
hipLaunchKernelGGL(( transposePacked<TYPE, NREG>) , dim3(lc.numblock), dim3(lc.numthread), lc.shmemsize, plan.stream , \
ts.volMmk, ts.volMbar, ts.sizeMmk, ts.sizeMbar, \
plan.Mmk, plan.Mbar, plan.Msh, (TYPE *)dataIn, (TYPE *)dataOut)
#define CALL(ICASE) case ICASE: if (plan.sizeofType == 4) CALL0(float, ICASE); if (plan.sizeofType == 8) CALL0(double, ICASE); break
#include "calls.h"
default:
printf("cuttKernel no template implemented for numRegStorage %d\n", lc.numRegStorage);
return false;
#undef CALL
#undef CALL0
}
}
break;
case PackedSplit:
{
// Same numRegStorage dispatch as the Packed case.
switch(lc.numRegStorage) {
#define CALL0(TYPE, NREG) \
hipLaunchKernelGGL(( transposePackedSplit<TYPE, NREG>) , dim3(lc.numblock), dim3(lc.numthread), lc.shmemsize, plan.stream , \
ts.splitDim, ts.volMmkUnsplit, ts. volMbar, ts.sizeMmk, ts.sizeMbar, \
plan.cuDimMm, plan.cuDimMk, plan.Mmk, plan.Mbar, plan.Msh, (TYPE *)dataIn, (TYPE *)dataOut)
#define CALL(ICASE) case ICASE: if (plan.sizeofType == 4) CALL0(float, ICASE); if (plan.sizeofType == 8) CALL0(double, ICASE); break
#include "calls.h"
default:
printf("cuttKernel no template implemented for numRegStorage %d\n", lc.numRegStorage);
return false;
#undef CALL
#undef CALL0
}
}
break;
case Tiled:
{
// Static shared memory only (shTile inside the kernel), hence shmem arg 0.
#define CALL(TYPE) \
hipLaunchKernelGGL(( transposeTiled<TYPE>) , dim3(lc.numblock), dim3(lc.numthread), 0, plan.stream , \
((ts.volMm - 1)/TILEDIM + 1), ts.volMbar, ts.sizeMbar, plan.tiledVol, plan.cuDimMk, plan.cuDimMm, \
plan.Mbar, (TYPE *)dataIn, (TYPE *)dataOut)
if (plan.sizeofType == 4) CALL(float);
if (plan.sizeofType == 8) CALL(double);
#undef CALL
}
break;
case TiledCopy:
{
#define CALL(TYPE) \
hipLaunchKernelGGL(( transposeTiledCopy<TYPE>) , dim3(lc.numblock), dim3(lc.numthread), 0, plan.stream , \
((ts.volMm - 1)/TILEDIM + 1), ts.volMbar, ts.sizeMbar, plan.cuDimMk, plan.cuDimMm, plan.tiledVol, \
plan.Mbar, (TYPE *)dataIn, (TYPE *)dataOut)
if (plan.sizeofType == 4) CALL(float);
if (plan.sizeofType == 8) CALL(double);
#undef CALL
}
break;
}
// Surface launch-configuration errors from the asynchronous launch above.
cudaCheck(hipGetLastError());
return true;
}
| ca3aa302d18f3536b4900a121f65969646be1cf8.cu | /******************************************************************************
MIT License
Copyright (c) 2016 Antti-Pekka Hynninen
Copyright (c) 2016 Oak Ridge National Laboratory (UT-Batelle)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*******************************************************************************/
#include <cuda.h>
#include "CudaUtils.h"
#include "LRUCache.h"
#include "cuttkernel.h"
#define RESTRICT __restrict__
//
// Transpose when Mm and Mk don't overlap and contain only single rank
//
// dim3 numthread(TILEDIM, TILEROWS, 1);
// dim3 numblock( ((plan.volMm-1)/TILEDIM+1)*((plan.volMk-1)/TILEDIM+1), 1, plan.volMbar);
//
// Tiled single-rank transpose: each block moves TILEDIM x TILEDIM tiles of the
// (Mm, Mk) plane through shared memory so that both the global load and the
// global store are contiguous in threadIdx.x; the untouched Mbar ranks are
// iterated over blockIdx.z.
template <typename T>
__global__ void transposeTiled(
const int numMm, const int volMbar, const int sizeMbar,
const int2 tiledVol, const int cuDimMk, const int cuDimMm,
const TensorConvInOut* RESTRICT glMbar,
const T* RESTRICT dataIn, T* RESTRICT dataOut) {
// Shared memory tile; the +1 column padding avoids shared-memory bank
// conflicts on the transposed (column-wise) read in the write loop below.
__shared__ T shTile[TILEDIM][TILEDIM+1];
const int warpLane = threadIdx.x & (warpSize - 1);
// Lane i of each warp holds the conversion entry for Mbar rank i; lanes
// >= sizeMbar keep the neutral values c=d=1 so their term ((p/1)%1)*ct == 0.
TensorConvInOut Mbar;
Mbar.c_in = 1;
Mbar.d_in = 1;
Mbar.c_out = 1;
Mbar.d_out = 1;
if (warpLane < sizeMbar) {
Mbar = glMbar[warpLane];
}
// Tile origin (bx, by) and this thread's coordinates for the read (xin, yin)
// and the transposed write (xout, yout).
const int bx = (blockIdx.x % numMm)*TILEDIM;
const int by = (blockIdx.x / numMm)*TILEDIM;
const int xin = bx + threadIdx.x;
const int yin = by + threadIdx.y;
const int xout = bx + threadIdx.y;
const int yout = by + threadIdx.x;
// Bit j of the ballot records whether row (yin + j) is inside tiledVol; the
// multiply clears the whole mask when this thread's column is out of range.
// Assumes threadIdx.y is warp-uniform, i.e. TILEDIM == warpSize (32).
const unsigned int maskIny = __ballot_sync(0xffffffff,(yin + warpLane < tiledVol.y))*(xin < tiledVol.x);
const unsigned int maskOutx = __ballot_sync(0xffffffff,(xout + warpLane < tiledVol.x))*(yout < tiledVol.y);
const int posMinorIn = xin + yin*cuDimMk;
const int posMinorOut = yout + xout*cuDimMm;
const int posInAdd = TILEROWS*cuDimMk;
const int posOutAdd = TILEROWS*cuDimMm;
// Grid-stride loop (in z) over the volume of the untouched Mbar ranks.
for (int posMbar=blockIdx.z;posMbar < volMbar;posMbar += gridDim.z)
{
// Compute global memory positions: each lane computes the term of its own
// rank, then a warp-wide XOR (butterfly) reduction sums the terms so every
// lane holds the complete Mbar offset.
int posMajorIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in;
int posMajorOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out;
#pragma unroll
for (int i=16;i >= 1;i/=2) {
posMajorIn += __shfl_xor_sync(0xffffffff,posMajorIn,i);
posMajorOut += __shfl_xor_sync(0xffffffff,posMajorOut,i);
}
int posIn = posMajorIn + posMinorIn;
int posOut = posMajorOut + posMinorOut;
// Read from global memory. The barrier protects shTile from the previous
// iteration's readers before it is overwritten.
__syncthreads();
// Read data into shared memory tile
#pragma unroll
for (int j=0;j < TILEDIM;j += TILEROWS) {
// int pos = posIn + j*cuDimMk;
// if (xin < readVol.x && yin + j < readVol.y) {
if ((maskIny & (1 << j)) != 0) {
shTile[threadIdx.y + j][threadIdx.x] = dataIn[posIn];
}
posIn += posInAdd;
}
// Write to global memory, reading the tile transposed.
__syncthreads();
#pragma unroll
for (int j=0;j < TILEDIM;j += TILEROWS) {
// int pos = posOut + j*cuDimMm;
// if (xout + j < readVol.x && yout < readVol.y) {
if ((maskOutx & (1 << j)) != 0 ) {
dataOut[posOut] = shTile[threadIdx.x][threadIdx.y + j];
}
posOut += posOutAdd;
}
}
}
//
// Packed transpose. Thread block loads plan.volMmk number of elements
//
// Packed transpose: each thread block stages the whole volMmk volume in
// dynamic shared memory in input order, then writes it out in output order.
// Each thread is responsible for numRegStorage elements (index j below).
template <typename T, int numRegStorage>
__global__ void transposePacked(
const int volMmk, const int volMbar,
const int sizeMmk, const int sizeMbar,
const TensorConvInOut* RESTRICT gl_Mmk,
const TensorConvInOut* RESTRICT gl_Mbar,
const TensorConv* RESTRICT gl_Msh,
const T* RESTRICT dataIn, T* RESTRICT dataOut) {
// Shared memory. volMmk elements
extern __shared__ char shBuffer_char[];
T* shBuffer = (T *)shBuffer_char;
const int warpLane = threadIdx.x & (warpSize - 1);
// Lane i of each warp holds the conversion entry for Mmk rank i; lanes
// beyond sizeMmk keep neutral c=d=1 values so their term evaluates to 0.
TensorConvInOut Mmk;
Mmk.c_in = 1;
Mmk.d_in = 1;
Mmk.c_out = 1;
Mmk.d_out = 1;
if (warpLane < sizeMmk) {
Mmk = gl_Mmk[warpLane];
}
TensorConv Msh;
Msh.c = 1;
Msh.d = 1;
if (warpLane < sizeMmk) {
Msh = gl_Msh[warpLane];
}
// Pre-compute tensor positions in Mmk
// 3*numRegStorage registers
int posMmkIn[numRegStorage];
int posMmkOut[numRegStorage];
int posSh[numRegStorage];
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
posMmkIn[j] = 0;
posMmkOut[j] = 0;
posSh[j] = 0;
}
// Accumulate the per-rank terms; __shfl_sync(..., i) broadcasts rank i's
// conversion constants from lane i to every lane of the warp.
for (int i=0;i < sizeMmk;i++) {
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
int posMmk = threadIdx.x + j*blockDim.x;
posMmkIn[j] += ((posMmk / __shfl_sync(0xffffffff,Mmk.c_in,i)) % __shfl_sync(0xffffffff,Mmk.d_in,i))*__shfl_sync(0xffffffff,Mmk.ct_in,i);
posMmkOut[j] += ((posMmk / __shfl_sync(0xffffffff,Mmk.c_out,i)) % __shfl_sync(0xffffffff,Mmk.d_out,i))*__shfl_sync(0xffffffff,Mmk.ct_out,i);
posSh[j] += ((posMmk / __shfl_sync(0xffffffff,Msh.c,i)) % __shfl_sync(0xffffffff,Msh.d,i))*__shfl_sync(0xffffffff,Msh.ct,i);
}
}
// 6 registers
TensorConvInOut Mbar;
Mbar.c_in = 1;
Mbar.d_in = 1;
Mbar.c_out = 1;
Mbar.d_out = 1;
if (warpLane < sizeMbar) {
Mbar = gl_Mbar[warpLane];
}
// Grid-stride loop over the volume of the untouched Mbar ranks.
for (int posMbar=blockIdx.x;posMbar < volMbar;posMbar += gridDim.x)
{
// Warp-wide XOR reduction sums the per-lane rank terms so every lane
// ends up with the complete Mbar output/input offsets.
int posMbarOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out;
#pragma unroll
for (int i=16;i >= 1;i/=2) {
posMbarOut += __shfl_xor_sync(0xffffffff,posMbarOut,i);
}
int posMbarIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in;
#pragma unroll
for (int i=16;i >= 1;i/=2) {
posMbarIn += __shfl_xor_sync(0xffffffff,posMbarIn,i);
}
// Protect shBuffer from the previous iteration's readers.
__syncthreads();
// Read from global memory
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
int posMmk = threadIdx.x + j*blockDim.x;
int posIn = posMbarIn + posMmkIn[j];
if (posMmk < volMmk) shBuffer[posMmk] = dataIn[posIn];
}
__syncthreads();
// Write to global memory, gathering from shared memory via posSh[].
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
int posMmk = threadIdx.x + j*blockDim.x;
int posOut = posMbarOut + posMmkOut[j];
if (posMmk < volMmk) dataOut[posOut] = shBuffer[posSh[j]];
}
}
}
//
// Packed method with a split rank
//
// dim nthread(((volMmkWithSplit - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize, 1, 1)
// dim nblock(ts.numSplit, min(256, max(1, ts.volMbar)), 1)
//
// Packed transpose with one rank split across blockIdx.x: each x-block owns a
// contiguous slice ("split") of splitDim, each y-block a contiguous range of
// Mbar positions. Each thread stages numRegStorage elements through shared
// memory per Mbar position.
template <typename T, int numRegStorage>
__global__ void transposePackedSplit(
const int splitDim, const int volMmkUnsplit, const int volMbar,
const int sizeMmk, const int sizeMbar,
const int cMmSplit, const int cMkSplit,
const TensorConvInOut* RESTRICT glMmk,
const TensorConvInOut* RESTRICT glMbar,
const TensorConv* RESTRICT glMsh,
const T* RESTRICT dataIn, T* RESTRICT dataOut) {
// Shared memory. max(volSplit)*volMmkUnsplit T elements
extern __shared__ char shBuffer_char[];
T* shBuffer = (T *)shBuffer_char;
const int warpLane = threadIdx.x & (warpSize - 1);
// const int plusone = (blockIdx.x < (splitDim % gridDim.x));
// p0 = first splitDim index owned by this block; volSplit = its extent.
// volSplit is floor(splitDim/gridDim.x) or one larger (plusone == 1).
const int p0 = blockIdx.x*splitDim/gridDim.x;
const int volSplit = (blockIdx.x + 1)*splitDim/gridDim.x - p0;
const int plusone = volSplit - splitDim/gridDim.x;
// Lane i caches the rank-i conversion constants; lanes >= sizeMmk keep the
// identity (c = d = 1) so the broadcast shuffles below stay harmless.
TensorConvInOut Mmk;
Mmk.c_in = 1;
Mmk.d_in = 1;
Mmk.c_out = 1;
Mmk.d_out = 1;
if (warpLane < sizeMmk) {
Mmk = glMmk[warpLane + plusone*sizeMmk];
}
TensorConv Msh;
Msh.c = 1;
Msh.d = 1;
if (warpLane < sizeMmk) {
Msh = glMsh[warpLane + plusone*sizeMmk];
}
// gridDim.x = number of splits
// blockIdx.x = {0 ... gridDim.x - 1} is the split-index
// Volume of this split
// const int volSplit = (splitDim/gridDim.x) + plusone;
// Start position in this split
// const int p0 = (splitDim/gridDim.x)*blockIdx.x + min(blockIdx.x, (splitDim % gridDim.x));
const int posMmkIn0 = p0*cMmSplit;
const int posMmkOut0 = p0*cMkSplit;
// Volume of split Mmk
const int volMmkSplit = volSplit*volMmkUnsplit;
// Pre-compute tensor positions in Mmk
// 3*numRegStorage registers
int posMmkIn[numRegStorage];
int posMmkOut[numRegStorage];
int posSh[numRegStorage];
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
posMmkIn[j] = posMmkIn0;
posMmkOut[j] = posMmkOut0;
posSh[j] = 0;
}
// Accumulate per-rank index contributions; __shfl_sync(..., i) broadcasts
// the constants cached on lane i to the whole warp.
for (int i=0;i < sizeMmk;i++) {
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
int t = threadIdx.x + j*blockDim.x;
posMmkIn[j] += ((t/__shfl_sync(0xffffffff,Mmk.c_in,i)) % __shfl_sync(0xffffffff,Mmk.d_in,i))*__shfl_sync(0xffffffff,Mmk.ct_in,i);
posMmkOut[j] += ((t/__shfl_sync(0xffffffff,Mmk.c_out,i)) % __shfl_sync(0xffffffff,Mmk.d_out,i))*__shfl_sync(0xffffffff,Mmk.ct_out,i);
posSh[j] += ((t/__shfl_sync(0xffffffff,Msh.c,i)) % __shfl_sync(0xffffffff,Msh.d,i))*__shfl_sync(0xffffffff,Msh.ct,i);
}
}
TensorConvInOut Mbar;
Mbar.c_in = 1;
Mbar.d_in = 1;
Mbar.c_out = 1;
Mbar.d_out = 1;
if (warpLane < sizeMbar) {
Mbar = glMbar[warpLane];
}
// Contiguous Mbar range [posMbar0, posMbar1) owned by this y-block.
const int posMbar0 = blockIdx.y*volMbar/gridDim.y;
const int posMbar1 = (blockIdx.y + 1)*volMbar/gridDim.y;
for (int posMbar=posMbar0;posMbar < posMbar1;posMbar++)
// for (int posMbar=blockIdx.y;posMbar < volMbar;posMbar+=gridDim.y)
{
// Butterfly (XOR shuffle) sum over all 32 lanes: every lane ends up with
// the full scalar Mbar offset (inactive lanes contributed 0 via c=d=1).
int posMbarOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out;
#pragma unroll
for (int i=16;i >= 1;i/=2) {
posMbarOut += __shfl_xor_sync(0xffffffff,posMbarOut,i);
}
int posMbarIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in;
#pragma unroll
for (int i=16;i >= 1;i/=2) {
posMbarIn += __shfl_xor_sync(0xffffffff,posMbarIn,i);
}
// Read from global memory into shared memory (barrier guards against the
// previous iteration's readers).
__syncthreads();
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
int posMmk = threadIdx.x + j*blockDim.x;
int posIn = posMbarIn + posMmkIn[j];
if (posMmk < volMmkSplit) shBuffer[posMmk] = dataIn[posIn];
}
// Write to global memory, permuted via posSh[] (barrier makes all shared
// writes visible first).
__syncthreads();
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
int posMmk = threadIdx.x + j*blockDim.x;
int posOut = posMbarOut + posMmkOut[j];
if (posMmk < volMmkSplit) dataOut[posOut] = shBuffer[posSh[j]];
}
}
}
#if 1
//
// Transpose when the lead dimension is the same, e.g. (1, 2, 3) -> (1, 3, 2)
//
// dim3 numthread(TILEDIM, TILEROWS, 1);
// dim3 numblock( ((plan.volMm-1)/TILEDIM+1)*((plan.volMkBar-1)/TILEDIM+1), 1, plan.volMbar);
//
// Tiled copy transpose: used when the lead dimension is unchanged, so each
// TILEDIM x TILEDIM tile is copied straight through registers (no shared
// memory, no in-tile transpose).
template <typename T>
__global__ void transposeTiledCopy(
const int numMm, const int volMbar, const int sizeMbar,
const int cuDimMk, const int cuDimMm,
const int2 tiledVol,
const TensorConvInOut* RESTRICT gl_Mbar,
const T* RESTRICT dataIn, T* RESTRICT dataOut) {
const int warpLane = threadIdx.x & (warpSize - 1);
// Lane i caches the rank-i Mbar conversion; other lanes hold the identity.
TensorConvInOut Mbar;
Mbar.c_in = 1;
Mbar.d_in = 1;
Mbar.c_out = 1;
Mbar.d_out = 1;
if (warpLane < sizeMbar) {
Mbar = gl_Mbar[warpLane];
}
// Tile origin (bx, by) and this thread's in-tile coordinates.
const int bx = (blockIdx.x % numMm)*TILEDIM;
const int by = (blockIdx.x / numMm)*TILEDIM;
const int x = bx + threadIdx.x;
const int y = by + threadIdx.y;
// Bit j of the ballot holds lane j's row predicate (y + j < tiledVol.y);
// multiplying by the 0/1 value of (x < tiledVol.x) zeroes the whole mask
// when this thread's column is out of range. NOTE(review): this bit trick
// presumes TILEDIM <= warpSize and warps aligned with tile rows -- confirm
// against the TILEDIM/TILEROWS definitions.
const unsigned int mask = __ballot_sync(0xffffffff,(y + warpLane < tiledVol.y))*(x < tiledVol.x);
const int posMinorIn = x + y*cuDimMk;
const int posMinorOut = x + y*cuDimMm;
const int posInAdd = TILEROWS*cuDimMk;
const int posOutAdd = TILEROWS*cuDimMm;
for (int posMbar=blockIdx.z;posMbar < volMbar;posMbar += gridDim.z)
{
// Compute global memory positions (butterfly sum gives every lane the
// full scalar offset).
int posMajorIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in;
int posMajorOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out;
#pragma unroll
for (int i=16;i >= 1;i/=2) {
posMajorIn += __shfl_xor_sync(0xffffffff,posMajorIn,i);
posMajorOut += __shfl_xor_sync(0xffffffff,posMajorOut,i);
}
int posIn = posMajorIn + posMinorIn;
int posOut = posMajorOut + posMinorOut;
// Variables where values are stored
T val[TILEDIM/TILEROWS];
// Read global memory
#pragma unroll
for (int j=0;j < TILEDIM;j += TILEROWS) {
// if ((x < tiledVol.x) && (y + j < tiledVol.y)) {
if ((mask & (1 << j)) != 0) {
val[j/TILEROWS] = dataIn[posIn];
}
posIn += posInAdd;
}
// Write global memory
#pragma unroll
for (int j=0;j < TILEDIM;j += TILEROWS) {
// if ((x < tiledVol.x) && (y + j < tiledVol.y)) {
if ((mask & (1 << j)) != 0) {
dataOut[posOut] = val[j/TILEROWS];
}
posOut += posOutAdd;
}
}
}
#else
//
// Returns scalar tensor position. Each lane has the same p
// NOTE: c and d on inactive warps must be 1 !!
//
//
// Returns a scalar tensor position; after the reduction every lane of the
// warp holds the same value.
// NOTE: c and d on inactive (lane >= rank) warps must be 1 !!
//
__device__ __forceinline__
int tensorPos(
  const int p, const int rank, const int c, const int d, const int ct,
  const int numLane=warpSize
  ) {
  // This lane's contribution for its cached rank. (rank itself is unused
  // here; inactive lanes contribute 0 because c == d == 1.)
  int pos = ((p / c) % d) * ct;
  // Butterfly (XOR) reduction: sum contributions across all numLane lanes.
#pragma unroll
  for (int width = numLane / 2; width >= 1; width /= 2) {
    pos += __shfl_xor_sync(0xffffffff, pos, width);
  }
  return pos;
}
//
// Transpose when the lead dimension is the same, e.g. (1, 2, 3) -> (1, 3, 2)
//
// dim3 numthread(TILEDIM, TILEROWS, 1);
// dim3 numblock( ((plan.volMm-1)/TILEDIM+1)*((plan.volMkBar-1)/TILEDIM+1), 1, plan.volMbar);
//
// Alternate (currently disabled by the #if above) tiled-copy transpose: same
// contract as the active version, but recomputes scalar Mbar offsets via
// tensorPos() each iteration and uses plain bounds checks instead of the
// precomputed ballot mask.
template <typename T>
__global__ void transposeTiledCopy(
const int numMm, const int volMbar, const int sizeMbar,
const int cuDimMk, const int cuDimMm,
const int2 tiledVol,
const TensorConvInOut* RESTRICT gl_Mbar,
const T* RESTRICT dataIn, T* RESTRICT dataOut) {
const int warpLane = threadIdx.x & (warpSize - 1);
// Lane i caches the rank-i Mbar conversion; other lanes hold the identity
// (c = d = 1) as required by tensorPos().
TensorConvInOut Mbar;
Mbar.c_in = 1;
Mbar.d_in = 1;
Mbar.c_out = 1;
Mbar.d_out = 1;
if (warpLane < sizeMbar) {
Mbar = gl_Mbar[warpLane];
}
// Tile origin and this thread's in-tile coordinates.
const int bx = (blockIdx.x % numMm)*TILEDIM;
const int by = (blockIdx.x / numMm)*TILEDIM;
const int x = bx + threadIdx.x;
const int y = by + threadIdx.y;
for (int posMbar=blockIdx.z;posMbar < volMbar;posMbar += gridDim.z)
{
// Variables where values are stored
T val[TILEDIM/TILEROWS];
// Read global memory
{
int pos0 = tensorPos(posMbar, sizeMbar, Mbar.c_in, Mbar.d_in, Mbar.ct_in);
pos0 += x + y*cuDimMk;
#pragma unroll
for (int j=0;j < TILEDIM;j += TILEROWS) {
int pos = pos0 + j*cuDimMk;
if ((x < tiledVol.x) && (y + j < tiledVol.y)) {
val[j/TILEROWS] = dataIn[pos];
}
}
}
// Write global memory
{
int pos0 = tensorPos(posMbar, sizeMbar, Mbar.c_out, Mbar.d_out, Mbar.ct_out);
pos0 += x + y*cuDimMm;
#pragma unroll
for (int j=0;j < TILEDIM;j += TILEROWS) {
int pos = pos0 + j*cuDimMm;
if ((x < tiledVol.x) && (y + j < tiledVol.y)) {
dataOut[pos] = val[j/TILEROWS];
}
}
}
}
}
#endif
//######################################################################################
//######################################################################################
//######################################################################################
//
// Sets shared memory bank configuration for all kernels. Needs to be called once per device.
//
void cuttKernelSetSharedMemConfig() {
// Match the shared-memory bank width to the element size: 4-byte banks for
// float kernels, 8-byte banks for double kernels. calls.h expands CALL(NREG)
// once per supported numRegStorage template instantiation.
#define CALL(NREG) cudaCheck(cudaFuncSetSharedMemConfig(transposePacked<float, NREG>, cudaSharedMemBankSizeFourByte ))
#include "calls.h"
#undef CALL
#define CALL(NREG) cudaCheck(cudaFuncSetSharedMemConfig(transposePacked<double, NREG>, cudaSharedMemBankSizeEightByte ))
#include "calls.h"
#undef CALL
#define CALL(NREG) cudaCheck(cudaFuncSetSharedMemConfig(transposePackedSplit<float, NREG>, cudaSharedMemBankSizeFourByte ))
#include "calls.h"
#undef CALL
#define CALL(NREG) cudaCheck(cudaFuncSetSharedMemConfig(transposePackedSplit<double, NREG>, cudaSharedMemBankSizeEightByte ))
#include "calls.h"
#undef CALL
// The tiled kernels are not templated on numRegStorage; configure directly.
cudaCheck(cudaFuncSetSharedMemConfig(transposeTiled<float>, cudaSharedMemBankSizeFourByte));
cudaCheck(cudaFuncSetSharedMemConfig(transposeTiledCopy<float>, cudaSharedMemBankSizeFourByte));
cudaCheck(cudaFuncSetSharedMemConfig(transposeTiled<double>, cudaSharedMemBankSizeEightByte));
cudaCheck(cudaFuncSetSharedMemConfig(transposeTiledCopy<double>, cudaSharedMemBankSizeEightByte));
}
// Caches for PackedSplit kernels. One cache for all devices
// NOTE: Not thread safe
const int CACHE_SIZE = 100000; // max entries kept in nabCache
const int MAX_NUMWARP = (1024/32); // max warps per block (1024 threads / 32-lane warp)
const int MAX_NUMTYPE = 2; // float and double
static int numDevices = -1; // lazily set via cudaGetDeviceCount() in getNumActiveBlock()
// Maps the packed (shmem, device, type, nreg, nwarp) key to a cached
// numActiveBlock value; -1 is the cache-miss sentinel.
LRUCache<unsigned long long int, int> nabCache(CACHE_SIZE, -1);
//
// Returns the maximum number of active blocks per SM
//
// Returns the maximum number of active blocks per SM for the kernel selected
// by (method, sizeofType, lc). For PackedSplit the occupancy query is
// memoized in nabCache, keyed by shmem size, device, type, register storage,
// and warp count.
int getNumActiveBlock(const int method, const int sizeofType, const LaunchConfig& lc,
const int deviceID, const cudaDeviceProp& prop) {
int numActiveBlock;
int numthread = lc.numthread.x * lc.numthread.y * lc.numthread.z;
switch(method) {
case Trivial:
{
// This value does not matter, but should be > 0
numActiveBlock = 1;
}
break;
case Packed:
{
// calls.h expands CALL(ICASE) into one case per supported numRegStorage,
// each querying occupancy for the matching transposePacked instantiation.
#define CALL0(TYPE, NREG) \
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock, \
transposePacked<TYPE, NREG>, numthread, lc.shmemsize)
switch(lc.numRegStorage) {
#define CALL(ICASE) case ICASE: if (sizeofType == 4) CALL0(float, ICASE); if (sizeofType == 8) CALL0(double, ICASE); break
#include "calls.h"
}
#undef CALL
#undef CALL0
}
break;
case PackedSplit:
{
// Allocate cache structure if needed
if (numDevices == -1) {
cudaCheck(cudaGetDeviceCount(&numDevices));
}
// Build unique key for cache
int key_warp = (numthread/prop.warpSize - 1);
if (key_warp >= MAX_NUMWARP) {
printf("getNumActiveBlock maximum number of warps exceeded\n");
exit(1);
}
int key_reg = (lc.numRegStorage - 1);
int key_type = (sizeofType == 4);
// Mixed-radix packing of (shmem-elements, device, type, reg, warp) into a
// single 64-bit key.
unsigned long long int key =
(unsigned long long int)(lc.shmemsize/sizeofType)*MAX_NUMWARP*MAX_REG_STORAGE*MAX_NUMTYPE*numDevices +
(unsigned long long int)deviceID*MAX_NUMWARP*MAX_REG_STORAGE*MAX_NUMTYPE +
(unsigned long long int)key_type*MAX_NUMWARP*MAX_REG_STORAGE +
(unsigned long long int)key_reg*MAX_NUMWARP +
(unsigned long long int)key_warp;
numActiveBlock = nabCache.get(key);
if (numActiveBlock == -1) {
// key not found in cache, determine value and add it to cache
#define CALL0(TYPE, NREG) \
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock, \
transposePackedSplit<TYPE, NREG>, numthread, lc.shmemsize)
switch(lc.numRegStorage) {
#define CALL(ICASE) case ICASE: if (sizeofType == 4) CALL0(float, ICASE); if (sizeofType == 8) CALL0(double, ICASE); break
#include "calls.h"
}
#undef CALL
#undef CALL0
nabCache.set(key, numActiveBlock);
}
}
break;
case Tiled:
{
if (sizeofType == 4) {
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
transposeTiled<float>, numthread, lc.shmemsize);
} else {
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
transposeTiled<double>, numthread, lc.shmemsize);
}
}
break;
case TiledCopy:
{
if (sizeofType == 4) {
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
transposeTiledCopy<float>, numthread, lc.shmemsize);
} else {
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
transposeTiledCopy<double>, numthread, lc.shmemsize);
}
}
break;
}
return numActiveBlock;
}
//
// Sets up kernel launch configuration
//
// Returns the number of active blocks per SM that can be achieved on the Packed kernel
// NOTE: Returns 0 when kernel execution is not possible
//
// Sets:
// lc.numthread
// lc.numblock
// lc.shmemsize
// lc.numRegStorage (for Packed method)
//
int cuttKernelLaunchConfiguration(const int sizeofType, const TensorSplit& ts,
  const int deviceID, const cudaDeviceProp& prop, LaunchConfig& lc) {

  // Number of active blocks per SM; -1 = not determined in the switch below.
  int numActiveBlockReturn = -1;

  switch(ts.method) {
    case Trivial:
    {
      // Trivial method is a plain memcpy; these values don't matter.
      // (Fixed: numblock.z was redundantly assigned three times.)
      lc.numthread.x = 1;
      lc.numthread.y = 1;
      lc.numthread.z = 1;
      lc.numblock.x = 1;
      lc.numblock.y = 1;
      lc.numblock.z = 1;
      lc.shmemsize = 0;
      lc.numRegStorage = 0;
    }
    break;

    case Packed:
    {
      // Amount of shared memory required
      lc.shmemsize = ts.shmemAlloc(sizeofType); //ts.volMmk*sizeofType;
      // Check that we're not using too much shared memory per block
      if (lc.shmemsize > prop.sharedMemPerBlock) {
        return 0;
      }
      // Min and max number of threads we can use (register storage bounds
      // how many elements one thread may hold).
      int minNumthread = ((ts.volMmk - 1)/(prop.warpSize*MAX_REG_STORAGE) + 1)*prop.warpSize;
      int maxNumthread = ((ts.volMmk - 1)/(prop.warpSize) + 1)*prop.warpSize;
      if (minNumthread > prop.maxThreadsPerBlock) return 0;
      maxNumthread = min(prop.maxThreadsPerBlock, maxNumthread);
      // Min and max number of register storage we can use
      int minNumRegStorage = (ts.volMmk - 1)/maxNumthread + 1;
      int maxNumRegStorage = (ts.volMmk - 1)/minNumthread + 1;
      // Scan numRegStorage and keep the configuration that maximizes
      // occupancy-weighted useful volume.
      int bestVal = 0;
      int bestNumRegStorage = 0;
      int bestNumActiveBlock = 0;
      lc.numthread.y = 1;
      lc.numthread.z = 1;
      lc.numblock.x = max(1, ts.volMbar);
      lc.numblock.x = min(prop.multiProcessorCount*18, lc.numblock.x);
      lc.numblock.y = 1;
      lc.numblock.z = 1;
      for (lc.numRegStorage=minNumRegStorage;lc.numRegStorage <= maxNumRegStorage;lc.numRegStorage++) {
        lc.numthread.x = ((ts.volMmk - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize;
        int numActiveBlock = getNumActiveBlock(ts.method, sizeofType, lc, deviceID, prop);
        int val = ts.volMmkUsed()*numActiveBlock;
        if (val > bestVal) {
          bestVal = val;
          bestNumRegStorage = lc.numRegStorage;
          bestNumActiveBlock = numActiveBlock;
        }
      }
      if (bestNumRegStorage == 0) return 0;
      lc.numRegStorage = bestNumRegStorage;
      lc.numthread.x = ((ts.volMmk - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize;
      numActiveBlockReturn = bestNumActiveBlock;
    }
    break;

    case PackedSplit:
    {
      // Amount of shared memory required
      lc.shmemsize = ts.shmemAlloc(sizeofType);
      // Check that we're not using too much shared memory per block
      if (lc.shmemsize > prop.sharedMemPerBlock) {
        return 0;
      }
      // Largest split volume (ceil(splitDim/numSplit)) times unsplit volume.
      int volMmkWithSplit = (ts.splitDim/ts.numSplit + ((ts.splitDim % ts.numSplit) > 0))*ts.volMmkUnsplit;
      // Min and max number of threads we can use
      int minNumthread = ((volMmkWithSplit - 1)/(prop.warpSize*MAX_REG_STORAGE) + 1)*prop.warpSize;
      int maxNumthread = ((volMmkWithSplit - 1)/(prop.warpSize) + 1)*prop.warpSize;
      if (minNumthread > prop.maxThreadsPerBlock) return 0;
      maxNumthread = min(prop.maxThreadsPerBlock, maxNumthread);
      // Min and max number of register storage we can use
      int minNumRegStorage = (volMmkWithSplit - 1)/maxNumthread + 1;
      int maxNumRegStorage = (volMmkWithSplit - 1)/minNumthread + 1;
      // Same register-storage scan as the Packed case.
      int bestVal = 0;
      int bestNumRegStorage = 0;
      int bestNumActiveBlock = 0;
      lc.numthread.y = 1;
      lc.numthread.z = 1;
      lc.numblock.x = ts.numSplit;
      lc.numblock.y = max(1, min((prop.multiProcessorCount*18)/lc.numblock.x, ts.volMbar));
      lc.numblock.z = 1;
      for (lc.numRegStorage=minNumRegStorage;lc.numRegStorage <= maxNumRegStorage;lc.numRegStorage++) {
        lc.numthread.x = ((volMmkWithSplit - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize;
        int numActiveBlock = getNumActiveBlock(ts.method, sizeofType, lc, deviceID, prop);
        int val = ts.volMmkUsed()*numActiveBlock;
        if (val > bestVal) {
          bestVal = val;
          bestNumRegStorage = lc.numRegStorage;
          bestNumActiveBlock = numActiveBlock;
        }
      }
      if (bestNumRegStorage == 0) return 0;
      lc.numRegStorage = bestNumRegStorage;
      lc.numthread.x = ((volMmkWithSplit - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize;
      numActiveBlockReturn = bestNumActiveBlock;
    }
    break;

    case Tiled:
    {
      // One TILEDIM x TILEROWS thread block per tile; z covers Mbar.
      lc.numthread.x = TILEDIM;
      lc.numthread.y = TILEROWS;
      lc.numthread.z = 1;
      lc.numblock.x = ((ts.volMm - 1)/TILEDIM + 1)*((ts.volMk - 1)/TILEDIM + 1);
      lc.numblock.y = 1;
      lc.numblock.z = max(1, min((prop.multiProcessorCount*8)/(lc.numblock.x*lc.numblock.y), ts.volMbar));
      lc.shmemsize = 0;
      lc.numRegStorage = 0;
    }
    break;

    case TiledCopy:
    {
      lc.numthread.x = TILEDIM;
      lc.numthread.y = TILEROWS;
      lc.numthread.z = 1;
      lc.numblock.x = ((ts.volMm - 1)/TILEDIM + 1)*((ts.volMkBar - 1)/TILEDIM + 1);
      lc.numblock.y = 1;
      lc.numblock.z = ts.volMbar;
      lc.numblock.z = min((prop.multiProcessorCount*8)/(lc.numblock.x*lc.numblock.y), lc.numblock.z);
      lc.numblock.z = max(1, lc.numblock.z);
      lc.shmemsize = 0;
      lc.numRegStorage = 0;
    }
    break;
  }

  // Reject configurations that exceed the device grid-dimension limits.
  if (lc.numblock.x > prop.maxGridSize[0] ||
    lc.numblock.y > prop.maxGridSize[1] ||
    lc.numblock.z > prop.maxGridSize[2]) return 0;

  // Return the number of active blocks with these settings
  if (numActiveBlockReturn == -1) {
    // Not set in the switch above; query it now
    numActiveBlockReturn = getNumActiveBlock(ts.method, sizeofType, lc, deviceID, prop);
  }
  return numActiveBlockReturn;
}
// Launches the transpose kernel selected by plan.tensorSplit.method on
// plan.stream. Returns false only when no kernel template exists for the
// requested numRegStorage; true otherwise.
bool cuttKernel(cuttPlan_t& plan, void* dataIn, void* dataOut) {
LaunchConfig& lc = plan.launchConfig;
TensorSplit& ts = plan.tensorSplit;
switch(ts.method) {
case Trivial:
{
// No permutation needed: a single async device memcpy suffices.
cudaCheck(cudaMemcpyAsync(dataOut, dataIn, ts.volMmk*ts.volMbar*plan.sizeofType,
cudaMemcpyDefault, plan.stream));
}
break;
case Packed:
{
// calls.h expands CALL(ICASE) into one case per supported numRegStorage,
// launching the matching transposePacked<float|double, NREG> template.
switch(lc.numRegStorage) {
#define CALL0(TYPE, NREG) \
transposePacked<TYPE, NREG> <<< lc.numblock, lc.numthread, lc.shmemsize, plan.stream >>> \
(ts.volMmk, ts.volMbar, ts.sizeMmk, ts.sizeMbar, \
plan.Mmk, plan.Mbar, plan.Msh, (TYPE *)dataIn, (TYPE *)dataOut)
#define CALL(ICASE) case ICASE: if (plan.sizeofType == 4) CALL0(float, ICASE); if (plan.sizeofType == 8) CALL0(double, ICASE); break
#include "calls.h"
default:
printf("cuttKernel no template implemented for numRegStorage %d\n", lc.numRegStorage);
return false;
#undef CALL
#undef CALL0
}
}
break;
case PackedSplit:
{
// Same dispatch pattern as Packed, for the split-rank kernel.
switch(lc.numRegStorage) {
#define CALL0(TYPE, NREG) \
transposePackedSplit<TYPE, NREG> <<< lc.numblock, lc.numthread, lc.shmemsize, plan.stream >>> \
(ts.splitDim, ts.volMmkUnsplit, ts. volMbar, ts.sizeMmk, ts.sizeMbar, \
plan.cuDimMm, plan.cuDimMk, plan.Mmk, plan.Mbar, plan.Msh, (TYPE *)dataIn, (TYPE *)dataOut)
#define CALL(ICASE) case ICASE: if (plan.sizeofType == 4) CALL0(float, ICASE); if (plan.sizeofType == 8) CALL0(double, ICASE); break
#include "calls.h"
default:
printf("cuttKernel no template implemented for numRegStorage %d\n", lc.numRegStorage);
return false;
#undef CALL
#undef CALL0
}
}
break;
case Tiled:
{
#define CALL(TYPE) \
transposeTiled<TYPE> <<< lc.numblock, lc.numthread, 0, plan.stream >>> \
(((ts.volMm - 1)/TILEDIM + 1), ts.volMbar, ts.sizeMbar, plan.tiledVol, plan.cuDimMk, plan.cuDimMm, \
plan.Mbar, (TYPE *)dataIn, (TYPE *)dataOut)
if (plan.sizeofType == 4) CALL(float);
if (plan.sizeofType == 8) CALL(double);
#undef CALL
}
break;
case TiledCopy:
{
#define CALL(TYPE) \
transposeTiledCopy<TYPE> <<< lc.numblock, lc.numthread, 0, plan.stream >>> \
(((ts.volMm - 1)/TILEDIM + 1), ts.volMbar, ts.sizeMbar, plan.cuDimMk, plan.cuDimMm, plan.tiledVol, \
plan.Mbar, (TYPE *)dataIn, (TYPE *)dataOut)
if (plan.sizeofType == 4) CALL(float);
if (plan.sizeofType == 8) CALL(double);
#undef CALL
}
break;
}
// Surface any launch-configuration error from the kernel launches above.
cudaCheck(cudaGetLastError());
return true;
}
|
39287a9dc2096ba4f3aae96affd5469281212b91.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Tencent is pleased to support the open source community by making TNN available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "tnn/device/cuda/acc/cuda_layer_acc.h"
#include "tnn/utils/dims_utils.h"
namespace TNN_NS {
DECLARE_CUDA_ACC(Tile, LAYER_REPEAT);
// Tile/repeat kernel: maps each flat output index back to its source input
// element by decomposing the index along output_dims and wrapping every
// coordinate modulo the matching input dimension. One thread per output
// element (grid-stride via CUDA_KERNEL_LOOP).
__global__ void tile_kernel(int count, const float *input, float *output, const int* input_dims, const int* output_dims, int size) {
    CUDA_KERNEL_LOOP(index, count) {
        int src = 0;
        int stride = count;
        for (int d = 0; d < size; ++d) {
            stride /= output_dims[d];
            // Output coordinate along axis d, wrapped into the input extent.
            int coord = (index / stride) % input_dims[d];
            src = src * input_dims[d] + coord;
        }
        output[index] = input[src];
    }
}
// No Tile-specific initialization: delegate straight to the base class.
Status CudaTileLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource,
const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
return CudaLayerAcc::Init(context, param, resource, inputs, outputs);
}
// Marks the cached device-side dim buffers stale; Forward() re-uploads them
// on its next call.
Status CudaTileLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
this->is_reshaped = false;
return TNN_OK;
}
// Uploads (once per reshape) the left-padded input dims and the output dims
// to two persistent device buffers, then launches tile_kernel to fill the
// output blob.
Status CudaTileLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
// Lazily allocate the two device-side dim buffers on first call.
if (tempbufs_.size() == 0) {
auto output_dims = outputs[0]->GetBlobDesc().dims;
CreateTempBuf(output_dims.size() * sizeof(int));
CreateTempBuf(output_dims.size() * sizeof(int));
}
if (!this->is_reshaped) {
auto input_dims = inputs[0]->GetBlobDesc().dims;
auto output_dims = outputs[0]->GetBlobDesc().dims;
// Left-pad input dims with 1s so both rank vectors have equal length.
while (input_dims.size() < output_dims.size()) {
input_dims.insert(input_dims.begin(), 1);
}
// NOTE(review): return codes of these async copies are unchecked, and the
// sources are pageable local vectors -- verify error handling and copy
// completion semantics are acceptable here.
hipMemcpyAsync(tempbufs_[0].ptr, input_dims.data(), input_dims.size()*sizeof(int),
hipMemcpyHostToDevice, context_->GetStream());
hipMemcpyAsync(tempbufs_[1].ptr, output_dims.data(), output_dims.size()*sizeof(int),
hipMemcpyHostToDevice, context_->GetStream());
this->is_reshaped = true;
}
int count = DimsVectorUtils::Count(outputs[0]->GetBlobDesc().dims);
// Blob handles are treated as float data (tile_kernel is float-only).
float* input_data = static_cast<float*>(inputs[0]->GetHandle().base);
float* output_data = static_cast<float*>(outputs[0]->GetHandle().base);
hipLaunchKernelGGL(( tile_kernel), dim3(TNN_CUDA_GET_BLOCKS(count)), dim3(TNN_CUDA_NUM_THREADS), 0, context_->GetStream(),
count, input_data, output_data, (const int *)tempbufs_[0].ptr, (const int *)tempbufs_[1].ptr,
inputs[0]->GetBlobDesc().dims.size());
return TNN_OK;
}
REGISTER_CUDA_ACC(Tile, LAYER_REPEAT);
} // namespace TNN_NS
| 39287a9dc2096ba4f3aae96affd5469281212b91.cu | // Tencent is pleased to support the open source community by making TNN available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "tnn/device/cuda/acc/cuda_layer_acc.h"
#include "tnn/utils/dims_utils.h"
namespace TNN_NS {
DECLARE_CUDA_ACC(Tile, LAYER_REPEAT);
// Tile/repeat kernel: maps each flat output index back to its source input
// element by decomposing the index along output_dims and wrapping every
// coordinate modulo the matching input dimension. One thread per output
// element (grid-stride via CUDA_KERNEL_LOOP).
__global__ void tile_kernel(int count, const float *input, float *output, const int* input_dims, const int* output_dims, int size) {
    CUDA_KERNEL_LOOP(index, count) {
        int src = 0;
        int stride = count;
        for (int d = 0; d < size; ++d) {
            stride /= output_dims[d];
            // Output coordinate along axis d, wrapped into the input extent.
            int coord = (index / stride) % input_dims[d];
            src = src * input_dims[d] + coord;
        }
        output[index] = input[src];
    }
}
// No Tile-specific initialization: delegate straight to the base class.
Status CudaTileLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource,
const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
return CudaLayerAcc::Init(context, param, resource, inputs, outputs);
}
// Marks the cached device-side dim buffers stale; Forward() re-uploads them
// on its next call.
Status CudaTileLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
this->is_reshaped = false;
return TNN_OK;
}
// Uploads (once per reshape) the left-padded input dims and the output dims
// to two persistent device buffers, then launches tile_kernel to fill the
// output blob.
Status CudaTileLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
// Lazily allocate the two device-side dim buffers on first call.
if (tempbufs_.size() == 0) {
auto output_dims = outputs[0]->GetBlobDesc().dims;
CreateTempBuf(output_dims.size() * sizeof(int));
CreateTempBuf(output_dims.size() * sizeof(int));
}
if (!this->is_reshaped) {
auto input_dims = inputs[0]->GetBlobDesc().dims;
auto output_dims = outputs[0]->GetBlobDesc().dims;
// Left-pad input dims with 1s so both rank vectors have equal length.
while (input_dims.size() < output_dims.size()) {
input_dims.insert(input_dims.begin(), 1);
}
// NOTE(review): return codes of these async copies are unchecked, and the
// sources are pageable local vectors -- verify error handling and copy
// completion semantics are acceptable here.
cudaMemcpyAsync(tempbufs_[0].ptr, input_dims.data(), input_dims.size()*sizeof(int),
cudaMemcpyHostToDevice, context_->GetStream());
cudaMemcpyAsync(tempbufs_[1].ptr, output_dims.data(), output_dims.size()*sizeof(int),
cudaMemcpyHostToDevice, context_->GetStream());
this->is_reshaped = true;
}
int count = DimsVectorUtils::Count(outputs[0]->GetBlobDesc().dims);
// Blob handles are treated as float data (tile_kernel is float-only).
float* input_data = static_cast<float*>(inputs[0]->GetHandle().base);
float* output_data = static_cast<float*>(outputs[0]->GetHandle().base);
tile_kernel<<<TNN_CUDA_GET_BLOCKS(count), TNN_CUDA_NUM_THREADS, 0, context_->GetStream()>>>(
count, input_data, output_data, (const int *)tempbufs_[0].ptr, (const int *)tempbufs_[1].ptr,
inputs[0]->GetBlobDesc().dims.size());
return TNN_OK;
}
REGISTER_CUDA_ACC(Tile, LAYER_REPEAT);
} // namespace TNN_NS
|
943b44a0d40668a07c210fdc4c2c2400450f9729.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/pooling_layer.hpp"
namespace caffe {
// Max pooling forward: one thread per pooled output element (NCHW layout).
// The window is clamped to the unpadded input, so padding never wins the max.
// The argmax is written to mask (int) when non-NULL, else to top_mask (Dtype).
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data, int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat output index into (n, c, ph, pw).
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
// Start of this (n, c) channel plane.
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_slice[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_slice[maxidx];
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
// Average pooling forward: one thread per pooled output element (NCHW).
// pool_size is computed from the padded window BEFORE clamping to the real
// input bounds, so padded positions count as zeros in the average.
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat output index into (n, c, ph, pw).
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
// Divisor includes the padded area (see header comment).
const int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
// Stochastic pooling forward (training): samples one element of each window
// with probability proportional to its value, driven by the pre-generated
// uniform random number in rand_idx[index]. rand_idx[index] is overwritten
// with the chosen flat input index (presumably consumed by the backward
// pass -- confirm).
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const rand_idx, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
Dtype cumsum = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
}
}
// Scale the uniform sample into [0, cumsum) to pick a threshold.
const float thres = rand_idx[index] * cumsum;
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_slice[h * width + w];
return;
}
}
}
}
}
// Stochastic pooling forward (inference): deterministic expectation,
// sum(x^2)/sum(x) over each window.
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
// We set cumsum to be 0 to avoid divide-by-zero problems
Dtype cumsum = FLT_MIN;
Dtype cumvalues = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];
}
}
top_data[index] = cumvalues / cumsum;
}
}
// Dispatches the pooling method configured in layer_param_ to the matching
// CUDA kernel. Requires a 4-axis bottom blob; geometry comes from the
// *_shape_ member blobs (presumably filled during Reshape -- confirm).
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
CHECK_EQ(bottom[0]->num_axes(), 4);
const int channels = in_channel_shape_.cpu_data()[0];
const int height = in_channel_shape_.cpu_data()[1];
const int width = in_channel_shape_.cpu_data()[2];
const int kernel_h = kernel_shape_.cpu_data()[0];
const int kernel_w = kernel_shape_.cpu_data()[1];
const int pad_h = pad_shape_.cpu_data()[0];
const int pad_w = pad_shape_.cpu_data()[1];
const int stride_h = stride_shape_.cpu_data()[0];
const int stride_w = stride_shape_.cpu_data()[1];
const int pooled_height = out_shape_.cpu_data()[0];
const int pooled_width = out_shape_.cpu_data()[1];
// LOG(INFO)<<"the entry in forward";
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
}
else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (
count, bottom_data, bottom[0]->num(), channels,
height, width, pooled_height, pooled_width, kernel_h,
kernel_w, stride_h, stride_w, pad_h, pad_w, top_data,
mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (
count, bottom_data, bottom[0]->num(), channels,
height, width, pooled_height, pooled_width, kernel_h,
kernel_w, stride_h, stride_w, pad_h, pad_w, top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (this->phase_ == TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTrain<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> > (
count, bottom_data, bottom[0]->num(), channels,
height, width, pooled_height, pooled_width, kernel_h,
kernel_w, stride_h, stride_w,
rand_idx_.mutable_gpu_data(), top_data);
}
else {
// Inference uses the deterministic weighted-average variant.
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTest<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> > (
count, bottom_data, bottom[0]->num(), channels,
height, width, pooled_height, pooled_width, kernel_h,
kernel_w, stride_h, stride_w, top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
// Backward pass for MAX pooling.  One thread per bottom (input) element:
// each thread enumerates every pooled window that could contain its (h, w)
// position and accumulates top_diff wherever the forward pass recorded this
// element as the window's argmax (via the int `mask` or the Dtype `top_mask`;
// exactly one of the two is non-NULL).
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff,
    const int* const mask, const Dtype* const top_mask, const int num,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width, const int kernel_h,
    const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
    const int pad_w, Dtype* const bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local index
    // find out the local offset
    const int w = index % width;
    const int h = (index / width) % height;
    const int c = (index / width / height) % channels;
    const int n = index / width / height / channels;
    // [phstart, phend) x [pwstart, pwend): pooled outputs whose window
    // overlaps this input position.
    const int phstart =
        (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
    const int phend = min((h + pad_h) / stride_h + 1, pooled_height);
    const int pwstart =
        (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
    const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
    Dtype gradient = 0;
    const int offset = (n * channels + c) * pooled_height * pooled_width;
    const Dtype* const top_diff_slice = top_diff + offset;
    if (mask) {
      const int* const mask_slice = mask + offset;
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          // Argmax stored as flat h*width+w index within the channel plane.
          if (mask_slice[ph * pooled_width + pw] == h * width + w) {
            gradient += top_diff_slice[ph * pooled_width + pw];
          }
        }
      }
    } else {
      const Dtype* const top_mask_slice = top_mask + offset;
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {
            gradient += top_diff_slice[ph * pooled_width + pw];
          }
        }
      }
    }
    bottom_diff[index] = gradient;
  }
}
// Backward pass for AVE pooling.  One thread per bottom element: each window
// that covers this position contributes its top_diff divided by that window's
// pool size (computed against the padded extent, matching the forward pass).
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff,
    const int num, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, const int pad_h, const int pad_w,
    Dtype* const bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local index
    // find out the local offset
    // Work in padded coordinates so the window-range math below is uniform.
    const int w = index % width + pad_w;
    const int h = (index / width) % height + pad_h;
    const int c = (index / width / height) % channels;
    const int n = index / width / height / channels;
    const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
    const int phend = min(h / stride_h + 1, pooled_height);
    const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
    const int pwend = min(w / stride_w + 1, pooled_width);
    Dtype gradient = 0;
    const Dtype* const top_diff_slice =
        top_diff + (n * channels + c) * pooled_height * pooled_width;
    for (int ph = phstart; ph < phend; ++ph) {
      for (int pw = pwstart; pw < pwend; ++pw) {
        // figure out the pooling size
        int hstart = ph * stride_h - pad_h;
        int wstart = pw * stride_w - pad_w;
        int hend = min(hstart + kernel_h, height + pad_h);
        int wend = min(wstart + kernel_w, width + pad_w);
        int pool_size = (hend - hstart) * (wend - wstart);
        gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
      }
    }
    bottom_diff[index] = gradient;
  }
}
// Backward pass for STOCHASTIC pooling.  One thread per bottom element:
// top_diff is routed back only from windows whose sampled source (the flat
// bottom index stored in rand_idx during the TRAIN forward pass) is this
// element; the boolean product makes the selection branchless.
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
    const Dtype* const rand_idx, const Dtype* const top_diff,
    const int num, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, Dtype* const bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local index
    // find out the local offset
    const int w = index % width;
    const int h = (index / width) % height;
    const int c = (index / width / height) % channels;
    const int n = index / width / height / channels;
    // Pooled windows that may contain this position (unpadded pooling).
    const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
    const int phend = min(h / stride_h + 1, pooled_height);
    const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
    const int pwend = min(w / stride_w + 1, pooled_width);
    Dtype gradient = 0;
    const Dtype* const rand_idx_slice =
        rand_idx + (n * channels + c) * pooled_height * pooled_width;
    const Dtype* const top_diff_slice =
        top_diff + (n * channels + c) * pooled_height * pooled_width;
    for (int ph = phstart; ph < phend; ++ph) {
      for (int pw = pwstart; pw < pwend; ++pw) {
        gradient += top_diff_slice[ph * pooled_width + pw] *
            (index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw]));
      }
    }
    bottom_diff[index] = gradient;
  }
}
// Dispatches the GPU backward pass for pooling.
// Zeroes bottom_diff, then launches the backward kernel matching the
// configured pooling method with one thread per bottom element.  For MAX the
// argmax comes from top[1] (if provided by the net) or from the internal
// max_idx_ blob; for STOCHASTIC the sampled indices come from rand_idx_.
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  //LOG(INFO)<<"the entry in backward";
  if (!propagate_down[0]) {
    return;
  }
  CHECK_EQ(bottom[0]->num_axes(), 4);
  // Geometry parameters; stored as blobs, read back on the host.
  const int channels = in_channel_shape_.cpu_data()[0];
  const int height = in_channel_shape_.cpu_data()[1];
  const int width = in_channel_shape_.cpu_data()[2];
  const int kernel_h = kernel_shape_.cpu_data()[0];
  const int kernel_w = kernel_shape_.cpu_data()[1];
  const int pad_h = pad_shape_.cpu_data()[0];
  const int pad_w = pad_shape_.cpu_data()[1];
  const int stride_h = stride_shape_.cpu_data()[0];
  const int stride_w = stride_shape_.cpu_data()[1];
  const int pooled_height = out_shape_.cpu_data()[0];
  const int pooled_width = out_shape_.cpu_data()[1];
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  const int count = bottom[0]->count();
  // Clear the gradient buffer before accumulation.
  caffe_gpu_set(count, Dtype(0.), bottom_diff);
  // We'll output the mask to top[1] if it's of size >1.
  const bool use_top_mask = top.size() > 1;
  const int* mask = NULL;
  const Dtype* top_mask = NULL;
  switch (this->layer_param_.pooling_param().pool()) {
  case PoolingParameter_PoolMethod_MAX:
    if (use_top_mask) {
      top_mask = top[1]->gpu_data();
    }
    else {
      mask = max_idx_.gpu_data();
    }
    // NOLINT_NEXT_LINE(whitespace/operators)
    MaxPoolBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (
      count, top_diff, mask, top_mask, top[0]->num(), channels,
      height, width, pooled_height, pooled_width,
      kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w,
      bottom_diff);
    break;
  case PoolingParameter_PoolMethod_AVE:
    // NOLINT_NEXT_LINE(whitespace/operators)
    AvePoolBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (
      count, top_diff, top[0]->num(), channels,
      height, width, pooled_height, pooled_width, kernel_h,
      kernel_w, stride_h, stride_w, pad_h, pad_w, bottom_diff);
    break;
  case PoolingParameter_PoolMethod_STOCHASTIC:
    // NOLINT_NEXT_LINE(whitespace/operators)
    StoPoolBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (
      count, rand_idx_.gpu_data(), top_diff,
      top[0]->num(), channels, height, width, pooled_height,
      pooled_width, kernel_h, kernel_w, stride_h, stride_w,
      bottom_diff);
    break;
  default:
    LOG(FATAL) << "Unknown pooling method.";
  }
  CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer);
} // namespace caffe
| 943b44a0d40668a07c210fdc4c2c2400450f9729.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/pooling_layer.hpp"
namespace caffe {
// Forward MAX pooling.  One thread per pooled output: scans its window
// (clipped to the unpadded input) for the maximum value, writes it to
// top_data, and records the flat argmax (h*width+w) into either the int
// `mask` or, when mask is NULL, the Dtype `top_mask`.
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads,
    const Dtype* const bottom_data, const int num, const int channels,
    const int height, const int width, const int pooled_height,
    const int pooled_width, const int kernel_h, const int kernel_w,
    const int stride_h, const int stride_w, const int pad_h, const int pad_w,
    Dtype* const top_data, int* mask, Dtype* top_mask) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int pw = index % pooled_width;
    const int ph = (index / pooled_width) % pooled_height;
    const int c = (index / pooled_width / pooled_height) % channels;
    const int n = index / pooled_width / pooled_height / channels;
    int hstart = ph * stride_h - pad_h;
    int wstart = pw * stride_w - pad_w;
    const int hend = min(hstart + kernel_h, height);
    const int wend = min(wstart + kernel_w, width);
    // Clip the window start to the real (unpadded) input.
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    Dtype maxval = -FLT_MAX;
    int maxidx = -1;
    const Dtype* const bottom_slice =
        bottom_data + (n * channels + c) * height * width;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        if (bottom_slice[h * width + w] > maxval) {
          maxidx = h * width + w;
          maxval = bottom_slice[maxidx];
        }
      }
    }
    top_data[index] = maxval;
    if (mask) {
      mask[index] = maxidx;
    } else {
      top_mask[index] = maxidx;
    }
  }
}
// Forward AVE pooling.  One thread per pooled output: averages its window.
// Note the divisor (pool_size) is computed against the padded extent BEFORE
// the window is clipped to the real input, so border outputs divide by the
// full padded window size (Caffe's convention for padded average pooling).
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads,
    const Dtype* const bottom_data, const int num, const int channels,
    const int height, const int width, const int pooled_height,
    const int pooled_width, const int kernel_h, const int kernel_w,
    const int stride_h, const int stride_w, const int pad_h, const int pad_w,
    Dtype* const top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int pw = index % pooled_width;
    const int ph = (index / pooled_width) % pooled_height;
    const int c = (index / pooled_width / pooled_height) % channels;
    const int n = index / pooled_width / pooled_height / channels;
    int hstart = ph * stride_h - pad_h;
    int wstart = pw * stride_w - pad_w;
    int hend = min(hstart + kernel_h, height + pad_h);
    int wend = min(wstart + kernel_w, width + pad_w);
    const int pool_size = (hend - hstart) * (wend - wstart);
    // Clip to the real input only after the divisor is fixed.
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    hend = min(hend, height);
    wend = min(wend, width);
    Dtype aveval = 0;
    const Dtype* const bottom_slice =
        bottom_data + (n * channels + c) * height * width;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        aveval += bottom_slice[h * width + w];
      }
    }
    top_data[index] = aveval / pool_size;
  }
}
// Forward STOCHASTIC pooling, TRAIN phase.  One thread per pooled output.
// rand_idx arrives holding a uniform random number in [0, 1) per output;
// the thread sums its (unpadded) window, scales the random number into a
// threshold, then re-scans until the running sum crosses the threshold.
// The chosen element's value is written to top_data, and rand_idx is
// overwritten in place with the element's flat global index (used later by
// StoPoolBackward).
// NOTE: the `return` exits the kernel thread entirely, so under
// CUDA_KERNEL_LOOP any further grid-stride iterations of this thread are
// skipped once a sample is taken (upstream Caffe behaves the same way).
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
    const Dtype* const bottom_data,
    const int num, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, Dtype* const rand_idx, Dtype* const top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int pw = index % pooled_width;
    const int ph = (index / pooled_width) % pooled_height;
    const int c = (index / pooled_width / pooled_height) % channels;
    const int n = index / pooled_width / pooled_height / channels;
    const int hstart = ph * stride_h;
    const int hend = min(hstart + kernel_h, height);
    const int wstart = pw * stride_w;
    const int wend = min(wstart + kernel_w, width);
    Dtype cumsum = 0.;
    const Dtype* const bottom_slice =
        bottom_data + (n * channels + c) * height * width;
    // First pass: get sum
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        cumsum += bottom_slice[h * width + w];
      }
    }
    const float thres = rand_idx[index] * cumsum;
    // Second pass: get value, and set index.
    cumsum = 0;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        cumsum += bottom_slice[h * width + w];
        if (cumsum >= thres) {
          rand_idx[index] = ((n * channels + c) * height + h) * width + w;
          top_data[index] = bottom_slice[h * width + w];
          return;
        }
      }
    }
  }
}
// Forward STOCHASTIC pooling, TEST phase.  Instead of sampling, each output
// is the probability-weighted average of its window: sum(x^2) / sum(x),
// i.e. each activation weighted by its share of the window sum.
// cumsum starts at FLT_MIN (not 0) so an all-zero window cannot divide by
// zero.
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
    const Dtype* const bottom_data,
    const int num, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, Dtype* const top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int pw = index % pooled_width;
    const int ph = (index / pooled_width) % pooled_height;
    const int c = (index / pooled_width / pooled_height) % channels;
    const int n = index / pooled_width / pooled_height / channels;
    const int hstart = ph * stride_h;
    const int hend = min(hstart + kernel_h, height);
    const int wstart = pw * stride_w;
    const int wend = min(wstart + kernel_w, width);
    // We set cumsum to be 0 to avoid divide-by-zero problems
    Dtype cumsum = FLT_MIN;
    Dtype cumvalues = 0.;
    const Dtype* const bottom_slice =
        bottom_data + (n * channels + c) * height * width;
    // First pass: get sum
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        cumsum += bottom_slice[h * width + w];
        cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];
      }
    }
    top_data[index] = cumvalues / cumsum;
  }
}
// Dispatches the GPU forward pass for pooling: reads the layer geometry from
// the per-layer shape blobs, then launches MaxPoolForward / AvePoolForward /
// StoPoolForwardTrain / StoPoolForwardTest according to the configured
// pooling method (one kernel thread per pooled output element).
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  CHECK_EQ(bottom[0]->num_axes(), 4);
  const int channels = in_channel_shape_.cpu_data()[0];
  const int height = in_channel_shape_.cpu_data()[1];
  const int width = in_channel_shape_.cpu_data()[2];
  const int kernel_h = kernel_shape_.cpu_data()[0];
  const int kernel_w = kernel_shape_.cpu_data()[1];
  const int pad_h = pad_shape_.cpu_data()[0];
  const int pad_w = pad_shape_.cpu_data()[1];
  const int stride_h = stride_shape_.cpu_data()[0];
  const int stride_w = stride_shape_.cpu_data()[1];
  const int pooled_height = out_shape_.cpu_data()[0];
  const int pooled_width = out_shape_.cpu_data()[1];
  // LOG(INFO)<<"the entry in forward";
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  int count = top[0]->count();
  // We'll output the mask to top[1] if it's of size >1.
  const bool use_top_mask = top.size() > 1;
  int* mask = NULL;
  Dtype* top_mask = NULL;
  switch (this->layer_param_.pooling_param().pool()) {
  case PoolingParameter_PoolMethod_MAX:
    if (use_top_mask) {
      top_mask = top[1]->mutable_gpu_data();
    }
    else {
      mask = max_idx_.mutable_gpu_data();
    }
    // NOLINT_NEXT_LINE(whitespace/operators)
    MaxPoolForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (
      count, bottom_data, bottom[0]->num(), channels,
      height, width, pooled_height, pooled_width, kernel_h,
      kernel_w, stride_h, stride_w, pad_h, pad_w, top_data,
      mask, top_mask);
    break;
  case PoolingParameter_PoolMethod_AVE:
    // NOLINT_NEXT_LINE(whitespace/operators)
    AvePoolForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (
      count, bottom_data, bottom[0]->num(), channels,
      height, width, pooled_height, pooled_width, kernel_h,
      kernel_w, stride_h, stride_w, pad_h, pad_w, top_data);
    break;
  case PoolingParameter_PoolMethod_STOCHASTIC:
    if (this->phase_ == TRAIN) {
      // We need to create the random index as well.
      caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
        rand_idx_.mutable_gpu_data());
      // NOLINT_NEXT_LINE(whitespace/operators)
      StoPoolForwardTrain<Dtype> << <CAFFE_GET_BLOCKS(count),
        CAFFE_CUDA_NUM_THREADS >> > (
        count, bottom_data, bottom[0]->num(), channels,
        height, width, pooled_height, pooled_width, kernel_h,
        kernel_w, stride_h, stride_w,
        rand_idx_.mutable_gpu_data(), top_data);
    }
    else {
      // NOLINT_NEXT_LINE(whitespace/operators)
      StoPoolForwardTest<Dtype> << <CAFFE_GET_BLOCKS(count),
        CAFFE_CUDA_NUM_THREADS >> > (
        count, bottom_data, bottom[0]->num(), channels,
        height, width, pooled_height, pooled_width, kernel_h,
        kernel_w, stride_h, stride_w, top_data);
    }
    break;
  default:
    LOG(FATAL) << "Unknown pooling method.";
  }
  CUDA_POST_KERNEL_CHECK;
}
// Backward pass for MAX pooling: one thread per bottom element, accumulating
// top_diff from every overlapping window whose recorded argmax (int mask or
// Dtype top_mask; exactly one is non-NULL) points at this element.
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff,
    const int* const mask, const Dtype* const top_mask, const int num,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width, const int kernel_h,
    const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
    const int pad_w, Dtype* const bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local index
    // find out the local offset
    const int w = index % width;
    const int h = (index / width) % height;
    const int c = (index / width / height) % channels;
    const int n = index / width / height / channels;
    const int phstart =
        (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
    const int phend = min((h + pad_h) / stride_h + 1, pooled_height);
    const int pwstart =
        (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
    const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
    Dtype gradient = 0;
    const int offset = (n * channels + c) * pooled_height * pooled_width;
    const Dtype* const top_diff_slice = top_diff + offset;
    if (mask) {
      const int* const mask_slice = mask + offset;
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          if (mask_slice[ph * pooled_width + pw] == h * width + w) {
            gradient += top_diff_slice[ph * pooled_width + pw];
          }
        }
      }
    } else {
      const Dtype* const top_mask_slice = top_mask + offset;
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {
            gradient += top_diff_slice[ph * pooled_width + pw];
          }
        }
      }
    }
    bottom_diff[index] = gradient;
  }
}
// Backward pass for AVE pooling: one thread per bottom element; each covering
// window contributes top_diff / pool_size, where pool_size uses the padded
// window extent (mirroring AvePoolForward).
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff,
    const int num, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, const int pad_h, const int pad_w,
    Dtype* const bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local index
    // find out the local offset
    const int w = index % width + pad_w;
    const int h = (index / width) % height + pad_h;
    const int c = (index / width / height) % channels;
    const int n = index / width / height / channels;
    const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
    const int phend = min(h / stride_h + 1, pooled_height);
    const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
    const int pwend = min(w / stride_w + 1, pooled_width);
    Dtype gradient = 0;
    const Dtype* const top_diff_slice =
        top_diff + (n * channels + c) * pooled_height * pooled_width;
    for (int ph = phstart; ph < phend; ++ph) {
      for (int pw = pwstart; pw < pwend; ++pw) {
        // figure out the pooling size
        int hstart = ph * stride_h - pad_h;
        int wstart = pw * stride_w - pad_w;
        int hend = min(hstart + kernel_h, height + pad_h);
        int wend = min(wstart + kernel_w, width + pad_w);
        int pool_size = (hend - hstart) * (wend - wstart);
        gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
      }
    }
    bottom_diff[index] = gradient;
  }
}
// Backward pass for STOCHASTIC pooling: one thread per bottom element;
// top_diff flows back only from windows whose sampled flat index (stored in
// rand_idx by the TRAIN forward pass) equals this element's index.
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
    const Dtype* const rand_idx, const Dtype* const top_diff,
    const int num, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, Dtype* const bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local index
    // find out the local offset
    const int w = index % width;
    const int h = (index / width) % height;
    const int c = (index / width / height) % channels;
    const int n = index / width / height / channels;
    const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
    const int phend = min(h / stride_h + 1, pooled_height);
    const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
    const int pwend = min(w / stride_w + 1, pooled_width);
    Dtype gradient = 0;
    const Dtype* const rand_idx_slice =
        rand_idx + (n * channels + c) * pooled_height * pooled_width;
    const Dtype* const top_diff_slice =
        top_diff + (n * channels + c) * pooled_height * pooled_width;
    for (int ph = phstart; ph < phend; ++ph) {
      for (int pw = pwstart; pw < pwend; ++pw) {
        gradient += top_diff_slice[ph * pooled_width + pw] *
            (index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw]));
      }
    }
    bottom_diff[index] = gradient;
  }
}
// Dispatches the GPU backward pass for pooling: zeroes bottom_diff, then
// launches the backward kernel for the configured pooling method with one
// thread per bottom element.  MAX uses the argmax from top[1] or max_idx_;
// STOCHASTIC uses the sampled indices in rand_idx_.
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  //LOG(INFO)<<"the entry in backward";
  if (!propagate_down[0]) {
    return;
  }
  CHECK_EQ(bottom[0]->num_axes(), 4);
  const int channels = in_channel_shape_.cpu_data()[0];
  const int height = in_channel_shape_.cpu_data()[1];
  const int width = in_channel_shape_.cpu_data()[2];
  const int kernel_h = kernel_shape_.cpu_data()[0];
  const int kernel_w = kernel_shape_.cpu_data()[1];
  const int pad_h = pad_shape_.cpu_data()[0];
  const int pad_w = pad_shape_.cpu_data()[1];
  const int stride_h = stride_shape_.cpu_data()[0];
  const int stride_w = stride_shape_.cpu_data()[1];
  const int pooled_height = out_shape_.cpu_data()[0];
  const int pooled_width = out_shape_.cpu_data()[1];
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  const int count = bottom[0]->count();
  caffe_gpu_set(count, Dtype(0.), bottom_diff);
  // We'll output the mask to top[1] if it's of size >1.
  const bool use_top_mask = top.size() > 1;
  const int* mask = NULL;
  const Dtype* top_mask = NULL;
  switch (this->layer_param_.pooling_param().pool()) {
  case PoolingParameter_PoolMethod_MAX:
    if (use_top_mask) {
      top_mask = top[1]->gpu_data();
    }
    else {
      mask = max_idx_.gpu_data();
    }
    // NOLINT_NEXT_LINE(whitespace/operators)
    MaxPoolBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (
      count, top_diff, mask, top_mask, top[0]->num(), channels,
      height, width, pooled_height, pooled_width,
      kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w,
      bottom_diff);
    break;
  case PoolingParameter_PoolMethod_AVE:
    // NOLINT_NEXT_LINE(whitespace/operators)
    AvePoolBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (
      count, top_diff, top[0]->num(), channels,
      height, width, pooled_height, pooled_width, kernel_h,
      kernel_w, stride_h, stride_w, pad_h, pad_w, bottom_diff);
    break;
  case PoolingParameter_PoolMethod_STOCHASTIC:
    // NOLINT_NEXT_LINE(whitespace/operators)
    StoPoolBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (
      count, rand_idx_.gpu_data(), top_diff,
      top[0]->num(), channels, height, width, pooled_height,
      pooled_width, kernel_h, kernel_w, stride_h, stride_w,
      bottom_diff);
    break;
  default:
    LOG(FATAL) << "Unknown pooling method.";
  }
  CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer);
} // namespace caffe
|
440fd3ef431a613effeccfa47f295a3e17eeb46b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//note: please do not modify this file manually!
// this file has been generated automatically by BOAST version 0.99996
// by: make boast_kernels
/*
!=====================================================================
!
! S p e c f e m 3 D G l o b e V e r s i o n 7 . 0
! --------------------------------------------------
!
! Main historical authors: Dimitri Komatitsch and Jeroen Tromp
! Princeton University, USA
! and CNRS / University of Marseille, France
! (there are currently many more authors!)
! (c) Princeton University and CNRS / University of Marseille, April 2014
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 2 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
!=====================================================================
*/
#ifndef INDEX2
#define INDEX2(isize,i,j) i + isize*j
#endif
#ifndef INDEX3
#define INDEX3(isize,jsize,i,j,k) i + isize*(j + jsize*k)
#endif
#ifndef INDEX4
#define INDEX4(isize,jsize,ksize,i,j,k,x) i + isize*(j + jsize*(k + ksize*x))
#endif
#ifndef INDEX5
#define INDEX5(isize,jsize,ksize,xsize,i,j,k,x,y) i + isize*(j + jsize*(k + ksize*(x + xsize*y)))
#endif
#ifndef NDIM
#define NDIM 3
#endif
#ifndef NGLLX
#define NGLLX 5
#endif
#ifndef NGLL2
#define NGLL2 25
#endif
#ifndef NGLL3
#define NGLL3 125
#endif
#ifndef NGLL3_PADDED
#define NGLL3_PADDED 128
#endif
#ifndef N_SLS
#define N_SLS 3
#endif
#ifndef IREGION_CRUST_MANTLE
#define IREGION_CRUST_MANTLE 1
#endif
#ifndef IREGION_INNER_CORE
#define IREGION_INNER_CORE 3
#endif
#ifndef IFLAG_IN_FICTITIOUS_CUBE
#define IFLAG_IN_FICTITIOUS_CUBE 11
#endif
#ifndef R_EARTH_KM
#define R_EARTH_KM 6371.0f
#endif
#ifndef COLORING_MIN_NSPEC_INNER_CORE
#define COLORING_MIN_NSPEC_INNER_CORE 1000
#endif
#ifndef COLORING_MIN_NSPEC_OUTER_CORE
#define COLORING_MIN_NSPEC_OUTER_CORE 1000
#endif
#ifndef BLOCKSIZE_TRANSFER
#define BLOCKSIZE_TRANSFER 256
#endif
// Gathers boundary-node accelerations into the contiguous MPI send buffer.
// One thread per interface point (2-D grid flattened to a 1-D id); each
// thread loops over all interfaces and, where its id is below that
// interface's point count, copies the 3 acceleration components of the
// referenced global node.  d_ibool_interfaces holds 1-based (Fortran-style)
// global node numbers.
// NOTE: BOAST-generated source (see file header: do not modify manually).
__global__ void prepare_boundary_accel_on_device(const float * d_accel, float * d_send_accel_buffer, const int num_interfaces, const int max_nibool_interfaces, const int * d_nibool_interfaces, const int * d_ibool_interfaces){
  int id;
  int iglob;
  int iloc;
  int iinterface;
  id = threadIdx.x + (blockIdx.x) * (blockDim.x) + ((gridDim.x) * (blockDim.x)) * (threadIdx.y + (blockIdx.y) * (blockDim.y));
  for (iinterface = 0; iinterface <= num_interfaces - (1); iinterface += 1) {
    if (id < d_nibool_interfaces[iinterface]) {
      iloc = id + (max_nibool_interfaces) * (iinterface);
      // Convert 1-based Fortran node number to 0-based index.
      iglob = d_ibool_interfaces[iloc] - (1);
      d_send_accel_buffer[(iloc) * (3) + 0] = d_accel[(iglob) * (3) + 0];
      d_send_accel_buffer[(iloc) * (3) + 1] = d_accel[(iglob) * (3) + 1];
      d_send_accel_buffer[(iloc) * (3) + 2] = d_accel[(iglob) * (3) + 2];
    }
  }
}
| 440fd3ef431a613effeccfa47f295a3e17eeb46b.cu | //note: please do not modify this file manually!
// this file has been generated automatically by BOAST version 0.99996
// by: make boast_kernels
/*
!=====================================================================
!
! S p e c f e m 3 D G l o b e V e r s i o n 7 . 0
! --------------------------------------------------
!
! Main historical authors: Dimitri Komatitsch and Jeroen Tromp
! Princeton University, USA
! and CNRS / University of Marseille, France
! (there are currently many more authors!)
! (c) Princeton University and CNRS / University of Marseille, April 2014
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 2 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
!=====================================================================
*/
#ifndef INDEX2
#define INDEX2(isize,i,j) i + isize*j
#endif
#ifndef INDEX3
#define INDEX3(isize,jsize,i,j,k) i + isize*(j + jsize*k)
#endif
#ifndef INDEX4
#define INDEX4(isize,jsize,ksize,i,j,k,x) i + isize*(j + jsize*(k + ksize*x))
#endif
#ifndef INDEX5
#define INDEX5(isize,jsize,ksize,xsize,i,j,k,x,y) i + isize*(j + jsize*(k + ksize*(x + xsize*y)))
#endif
#ifndef NDIM
#define NDIM 3
#endif
#ifndef NGLLX
#define NGLLX 5
#endif
#ifndef NGLL2
#define NGLL2 25
#endif
#ifndef NGLL3
#define NGLL3 125
#endif
#ifndef NGLL3_PADDED
#define NGLL3_PADDED 128
#endif
#ifndef N_SLS
#define N_SLS 3
#endif
#ifndef IREGION_CRUST_MANTLE
#define IREGION_CRUST_MANTLE 1
#endif
#ifndef IREGION_INNER_CORE
#define IREGION_INNER_CORE 3
#endif
#ifndef IFLAG_IN_FICTITIOUS_CUBE
#define IFLAG_IN_FICTITIOUS_CUBE 11
#endif
#ifndef R_EARTH_KM
#define R_EARTH_KM 6371.0f
#endif
#ifndef COLORING_MIN_NSPEC_INNER_CORE
#define COLORING_MIN_NSPEC_INNER_CORE 1000
#endif
#ifndef COLORING_MIN_NSPEC_OUTER_CORE
#define COLORING_MIN_NSPEC_OUTER_CORE 1000
#endif
#ifndef BLOCKSIZE_TRANSFER
#define BLOCKSIZE_TRANSFER 256
#endif
// Gathers boundary-node accelerations into the contiguous MPI send buffer.
// One thread per interface point (2-D grid flattened to a 1-D id); each
// thread loops over all interfaces and, where its id is below that
// interface's point count, copies the 3 acceleration components of the
// referenced global node (d_ibool_interfaces is 1-based).
// NOTE: BOAST-generated source (see file header: do not modify manually).
__global__ void prepare_boundary_accel_on_device(const float * d_accel, float * d_send_accel_buffer, const int num_interfaces, const int max_nibool_interfaces, const int * d_nibool_interfaces, const int * d_ibool_interfaces){
  int id;
  int iglob;
  int iloc;
  int iinterface;
  id = threadIdx.x + (blockIdx.x) * (blockDim.x) + ((gridDim.x) * (blockDim.x)) * (threadIdx.y + (blockIdx.y) * (blockDim.y));
  for (iinterface = 0; iinterface <= num_interfaces - (1); iinterface += 1) {
    if (id < d_nibool_interfaces[iinterface]) {
      iloc = id + (max_nibool_interfaces) * (iinterface);
      // Convert 1-based Fortran node number to 0-based index.
      iglob = d_ibool_interfaces[iloc] - (1);
      d_send_accel_buffer[(iloc) * (3) + 0] = d_accel[(iglob) * (3) + 0];
      d_send_accel_buffer[(iloc) * (3) + 1] = d_accel[(iglob) * (3) + 1];
      d_send_accel_buffer[(iloc) * (3) + 2] = d_accel[(iglob) * (3) + 2];
    }
  }
}
|
d0518c4c36db0b547940d084e958a33c91b0f627.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Element-wise increment: y[i] = x[i] + 1 for the first num_comp elements.
// Expects a 1-D launch whose total thread count covers num_comp; surplus
// threads fall through the guard and do nothing.
__global__ void plus_one_kernel(int num_comp, int *y, int *x){
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= num_comp) {
    return;  // thread past the end of the array
  }
  y[idx] = x[idx] + 1;
}
| d0518c4c36db0b547940d084e958a33c91b0f627.cu | __global__ void plus_one_kernel(int num_comp, int *y, int *x){
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < num_comp){
y[i] = x[i] + 1;
}
}
|
4d683abe9f92be76fb8ed164fda817639557d632.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Constants.h"
extern "C"{
// Per-pixel SEBAL surface energy balance.
// NOTE(review): this appears to be a code-generation TEMPLATE, not
// compilable CUDA: lines of the form `equation:NAME` (e.g. `equation:z0m:*`)
// look like placeholders to be substituted with generated expressions, and
// the intended formula for each is shown in the adjacent commented-out
// line.  Confirm against the generator before building this file directly.
// Inputs are per-pixel radiative/meteorological quantities; outputs (all
// written through pointers) are roughness length, friction velocity,
// aerodynamic resistance, instantaneous fluxes (Rn, G0, H, LE), evaporative
// fraction, and daily net radiation / latent heat / evapotranspiration.
__device__ void SEBAL_EnergyBalance_G(
    float SWd,
    float LWd,
    float albedo,
    float emissivity,
    float LST_K,
    float NDVI,
    float Uref,
    float SAVI,
    float a,
    float b,
    float Rg_24h,
    float Tao_24h,
    float * z0m,
    float * U_star,
    float * r_ah,
    float * Rn,
    float * G0,
    float * H,
    float * LE,
    float * evap_fr,
    float * Rn_24h,
    float * LE_24h,
    float * ET_24h)
{
    // *z0m = expf(-5.809f+5.62f*SAVI);
    equation:z0m:*
    /* Classification */
    bool I_snow = (NDVI<0.0f) && (albedo>0.47f);
    bool I_water = (NDVI==-1.0f);
    /* % NOTE: esat_WL is only used for the wet-limit. To get a true upperlimit for the sensible heat
    % the Landsurface Temperature is used as a proxy instead of air temperature.
    %% Net Radiation */
    // float SWnet = (1.0f - albedo) * SWd; /* Shortwave Net Radiation [W/m2] */
    float SWnet = 0.0f;
    equation:SWnet
    // float LWnet = emissivity*LWd - emissivity*Sigma_SB*LST_K*LST_K*LST_K*LST_K; /* Longwave Net Radiation [W/m2] */
    float LWnet = 0.0f;
    equation:LWnet
    // *Rn = SWnet+LWnet; /* Total Net Radiation [W/m2] */
    equation:Rn:*
    /* Ground Heat Flux */
    /* Kustas et al 1993 */
    /* Kustas, W.P., Daughtry, C.S.T. van Oevelen P.J.,
    Analatytical Treatment of Relationships between Soil heat flux/net radiation and Vegetation Indices,
    Remote sensing of environment,46:319-330 (1993) */
    // *G0 = *Rn * (((LST_K-T0)/albedo)*(0.0038f*albedo+0.0074*albedo*albedo)*(1.0f-0.98f*NDVI*NDVI*NDVI*NDVI));
    equation:G0:*
    if (I_water || I_snow)
    {
        // *G0= 0.3f* *Rn;
        equation:G02:*
    }
    // *U_star = k*Uref/logf(z200/ *z0m);
    equation:U_star:*
    // *r_ah = logf(z2/z1)/(*U_star*k);
    equation:r_ah:*
    // *H = p*cp*(b+a*(LST_K - T0))/ *r_ah;
    equation:H:*
    // *LE = *Rn - *H - *G0;
    equation:LE:*
    /* Evaporative fraction */
    *evap_fr = 0.0f;
    if ((*Rn - *G0) != 0.0f)
    {
        // *evap_fr = *LE/(*Rn-*G0); /* evaporative fraction [] */
        equation:evap_fr:*
    }
    else
    {
        *evap_fr = 1.0f; /* evaporative fraction upper limit [] (for negative available energy) */
    }
    // *Rn_24h = Rg_24h*(1-albedo) - 110.0f*Tao_24h;
    equation:Rn_24h:*
    // *LE_24h = *evap_fr * *Rn_24h;
    equation:LE_24h:*
    // *ET_24h = (*evap_fr * *Rn_24h*86.4f)/2450.0f;
    equation:ET_24h:*
}
__global__ void SEBAL_EnergyBalance_Kernel(
int * comptMask,
float * SWd,
float * LWd,
float * albedo,
float * emissivity,
float * LST_K,
float * NDVI,
float * Uref,
float * SAVI,
float * a,
float * b,
float * Rg_24h,
float * Tao_24h,
float * z0m,
float * Ustar,
float * r_ah,
float * Rn,
float * G0,
float * H,
float * LE,
float * evap_fr,
float * Rn_24h,
float * LE_24h,
float * ET_24h,
int DataSize)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx<DataSize){
*(z0m+idx)=-9999.0f;
*(Ustar+idx)=-9999.0f;
*(r_ah+idx)=-9999.0f;
*(Rn+idx)=-9999.0f;
*(G0+idx)=-9999.0f;
*(H+idx)=-9999.0f;
*(LE+idx)=-9999.0f;
*(evap_fr+idx)=-9999.0f;
*(Rn_24h+idx)=-9999.0f;
*(LE_24h+idx)=-9999.0f;
*(ET_24h+idx)=-9999.0f;
if(comptMask[idx] == 1){
SEBAL_EnergyBalance_G(
SWd[idx],
LWd[idx],
albedo[idx],
emissivity[idx],
LST_K[idx],
NDVI[idx],
Uref[idx],
SAVI[idx],
a[0],
b[0],
Rg_24h[idx],
Tao_24h[idx],
(z0m+idx),
(Ustar+idx),
(r_ah+idx),
(Rn+idx),
(G0+idx),
(H+idx),
(LE+idx),
(evap_fr+idx),
(Rn_24h+idx),
(LE_24h+idx),
(ET_24h+idx));
}
}
}
}
| 4d683abe9f92be76fb8ed164fda817639557d632.cu | #include "Constants.h"
extern "C"{
__device__ void SEBAL_EnergyBalance_G(
float SWd,
float LWd,
float albedo,
float emissivity,
float LST_K,
float NDVI,
float Uref,
float SAVI,
float a,
float b,
float Rg_24h,
float Tao_24h,
float * z0m,
float * U_star,
float * r_ah,
float * Rn,
float * G0,
float * H,
float * LE,
float * evap_fr,
float * Rn_24h,
float * LE_24h,
float * ET_24h)
{
// *z0m = expf(-5.809f+5.62f*SAVI);
equation:z0m:*
/* Classification */
bool I_snow = (NDVI<0.0f) && (albedo>0.47f);
bool I_water = (NDVI==-1.0f);
/* % NOTE: esat_WL is only used for the wet-limit. To get a true upperlimit for the sensible heat
% the Landsurface Temperature is used as a proxy instead of air temperature.
%% Net Radiation */
// float SWnet = (1.0f - albedo) * SWd; /* Shortwave Net Radiation [W/m2] */
float SWnet = 0.0f;
equation:SWnet
// float LWnet = emissivity*LWd - emissivity*Sigma_SB*LST_K*LST_K*LST_K*LST_K; /* Longwave Net Radiation [W/m2] */
float LWnet = 0.0f;
equation:LWnet
// *Rn = SWnet+LWnet; /* Total Net Radiation [W/m2] */
equation:Rn:*
/* Ground Heat Flux */
/* Kustas et al 1993 */
/* Kustas, W.P., Daughtry, C.S.T. van Oevelen P.J.,
Analatytical Treatment of Relationships between Soil heat flux/net radiation and Vegetation Indices,
Remote sensing of environment,46:319-330 (1993) */
// *G0 = *Rn * (((LST_K-T0)/albedo)*(0.0038f*albedo+0.0074*albedo*albedo)*(1.0f-0.98f*NDVI*NDVI*NDVI*NDVI));
equation:G0:*
if (I_water || I_snow)
{
// *G0= 0.3f* *Rn;
equation:G02:*
}
// *U_star = k*Uref/logf(z200/ *z0m);
equation:U_star:*
// *r_ah = logf(z2/z1)/(*U_star*k);
equation:r_ah:*
// *H = p*cp*(b+a*(LST_K - T0))/ *r_ah;
equation:H:*
// *LE = *Rn - *H - *G0;
equation:LE:*
/* Evaporative fraction */
*evap_fr = 0.0f;
if ((*Rn - *G0) != 0.0f)
{
// *evap_fr = *LE/(*Rn-*G0); /* evaporative fraction [] */
equation:evap_fr:*
}
else
{
*evap_fr = 1.0f; /* evaporative fraction upper limit [] (for negative available energy) */
}
// *Rn_24h = Rg_24h*(1-albedo) - 110.0f*Tao_24h;
equation:Rn_24h:*
// *LE_24h = *evap_fr * *Rn_24h;
equation:LE_24h:*
// *ET_24h = (*evap_fr * *Rn_24h*86.4f)/2450.0f;
equation:ET_24h:*
}
__global__ void SEBAL_EnergyBalance_Kernel(
int * comptMask,
float * SWd,
float * LWd,
float * albedo,
float * emissivity,
float * LST_K,
float * NDVI,
float * Uref,
float * SAVI,
float * a,
float * b,
float * Rg_24h,
float * Tao_24h,
float * z0m,
float * Ustar,
float * r_ah,
float * Rn,
float * G0,
float * H,
float * LE,
float * evap_fr,
float * Rn_24h,
float * LE_24h,
float * ET_24h,
int DataSize)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx<DataSize){
*(z0m+idx)=-9999.0f;
*(Ustar+idx)=-9999.0f;
*(r_ah+idx)=-9999.0f;
*(Rn+idx)=-9999.0f;
*(G0+idx)=-9999.0f;
*(H+idx)=-9999.0f;
*(LE+idx)=-9999.0f;
*(evap_fr+idx)=-9999.0f;
*(Rn_24h+idx)=-9999.0f;
*(LE_24h+idx)=-9999.0f;
*(ET_24h+idx)=-9999.0f;
if(comptMask[idx] == 1){
SEBAL_EnergyBalance_G(
SWd[idx],
LWd[idx],
albedo[idx],
emissivity[idx],
LST_K[idx],
NDVI[idx],
Uref[idx],
SAVI[idx],
a[0],
b[0],
Rg_24h[idx],
Tao_24h[idx],
(z0m+idx),
(Ustar+idx),
(r_ah+idx),
(Rn+idx),
(G0+idx),
(H+idx),
(LE+idx),
(evap_fr+idx),
(Rn_24h+idx),
(LE_24h+idx),
(ET_24h+idx));
}
}
}
}
|
6f0d702dcc874bb7e5bd084b83c0c149852e84e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <numeric>
using namespace std;
__global__ void sumSingleBlock(int* d) {
int tid = threadIdx.x;
// number of participating threads halves on each iteration
for (int tc = blockDim.x, stepSize = 1; tc > 0; tc >>= 1, stepSize <<= 1) {
// thread must be allowed to write
if(tid < tc) {
int pa = tid * stepSize * 2;
int pb = pa + stepSize;
d[pa] += d[pb];
}
}
}
__global__ void sumSingleBlock2(int* d) {
extern __shared__ int dcopy[];
int tid = threadIdx.x;
dcopy[tid * 2] = d[tid * 2];
dcopy[tid * 2 + 1] = d[tid * 2 + 1];
// number of participating threads halves on each iteration
for (int tc = blockDim.x, stepSize = 1; tc > 0; tc >>= 1, stepSize <<= 1) {
// thread must be allowed to write
if(tid < tc) {
int pa = tid * stepSize * 2;
int pb = pa + stepSize;
dcopy[pa] += dcopy[pb];
}
}
if(tid == 0) {
d[0] = dcopy[0];
}
}
int main() {
hipError_t status;
const int count = 256;
const int size = count * sizeof(int);
int* h = new int[count];
for (int i = 0; i < count; ++i) {
h[i] = i + 1;
}
int* d;
status = hipMalloc(&d, size);
status = hipMemcpy(d, h, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( sumSingleBlock), dim3(1), dim3(count /2), size, 0, d);
int result;
status = hipMemcpy(&result, d, sizeof(int), hipMemcpyHostToDevice);
cout << "Sum is " << result << endl;
delete [] h;
return 0;
}
| 6f0d702dcc874bb7e5bd084b83c0c149852e84e2.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <numeric>
using namespace std;
__global__ void sumSingleBlock(int* d) {
int tid = threadIdx.x;
// number of participating threads halves on each iteration
for (int tc = blockDim.x, stepSize = 1; tc > 0; tc >>= 1, stepSize <<= 1) {
// thread must be allowed to write
if(tid < tc) {
int pa = tid * stepSize * 2;
int pb = pa + stepSize;
d[pa] += d[pb];
}
}
}
__global__ void sumSingleBlock2(int* d) {
extern __shared__ int dcopy[];
int tid = threadIdx.x;
dcopy[tid * 2] = d[tid * 2];
dcopy[tid * 2 + 1] = d[tid * 2 + 1];
// number of participating threads halves on each iteration
for (int tc = blockDim.x, stepSize = 1; tc > 0; tc >>= 1, stepSize <<= 1) {
// thread must be allowed to write
if(tid < tc) {
int pa = tid * stepSize * 2;
int pb = pa + stepSize;
dcopy[pa] += dcopy[pb];
}
}
if(tid == 0) {
d[0] = dcopy[0];
}
}
int main() {
cudaError_t status;
const int count = 256;
const int size = count * sizeof(int);
int* h = new int[count];
for (int i = 0; i < count; ++i) {
h[i] = i + 1;
}
int* d;
status = cudaMalloc(&d, size);
status = cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
sumSingleBlock<<<1, count /2, size>>>(d);
int result;
status = cudaMemcpy(&result, d, sizeof(int), cudaMemcpyHostToDevice);
cout << "Sum is " << result << endl;
delete [] h;
return 0;
}
|
c3a8e01a24387730514713232e035b775c4f308e.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Reduce.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/SharedReduceOps.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/ReduceOps.h>
namespace at::native {
template <typename scalar_t, typename out_t=scalar_t>
void std_var_kernel_impl(TensorIterator& iter, double correction, bool take_sqrt) {
// reducing unrolling factor to 2 for welford kernel
// This is necessary to lower register usage that leads to register spills.
using accscalar_t = at::acc_type<scalar_t, true>;
using ops_t = WelfordOps<scalar_t, accscalar_t, int32_t, thrust::pair<out_t, out_t>>;
ops_t ops(static_cast<accscalar_t>(correction), take_sqrt);
gpu_reduce_kernel<scalar_t, out_t, 2>(iter, ops, typename ops_t::acc_t{});
}
static void std_var_kernel_cuda(TensorIterator& iter, double correction, bool take_sqrt) {
const auto input_dtype = iter.input_dtype();
if (input_dtype == kHalf && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
std_var_kernel_impl<at::Half, float>(iter, correction, take_sqrt);
} else if (input_dtype == kBFloat16 && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
std_var_kernel_impl<at::BFloat16, float>(iter, correction, take_sqrt);
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
iter.dtype(), "std_cuda", [&]() {
std_var_kernel_impl<scalar_t>(iter, correction, take_sqrt);
});
}
}
template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t>
void mean_kernel_impl(TensorIterator& iter) {
// returns acc_t for all non-complex dtypes and returns T for c10::complex<T>
using factor_t = typename c10::scalar_value_type<acc_t>::type;
factor_t factor = static_cast<factor_t>(iter.num_output_elements()) / iter.numel();
gpu_reduce_kernel<scalar_t, out_t>(iter, MeanOps<acc_t, factor_t> {factor});
}
static void mean_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == kHalf) {
mean_kernel_impl<at::Half, float>(iter);
} else if (iter.dtype(1) == kHalf && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
mean_kernel_impl<at::Half, float, float>(iter);
} else if(iter.dtype() == kBFloat16) {
mean_kernel_impl<at::BFloat16, float>(iter);
} else if (iter.dtype(1) == kBFloat16 && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
mean_kernel_impl<at::BFloat16, float, float>(iter);
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "mean_cuda", [&]() {
mean_kernel_impl<scalar_t>(iter);
});
}
}
REGISTER_DISPATCH(std_var_stub, &std_var_kernel_cuda);
REGISTER_DISPATCH(mean_stub, &mean_kernel_cuda);
} // namespace at::native
| c3a8e01a24387730514713232e035b775c4f308e.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Reduce.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/SharedReduceOps.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/ReduceOps.h>
namespace at::native {
template <typename scalar_t, typename out_t=scalar_t>
void std_var_kernel_impl(TensorIterator& iter, double correction, bool take_sqrt) {
// reducing unrolling factor to 2 for welford kernel
// This is necessary to lower register usage that leads to register spills.
using accscalar_t = at::acc_type<scalar_t, true>;
using ops_t = WelfordOps<scalar_t, accscalar_t, int32_t, thrust::pair<out_t, out_t>>;
ops_t ops(static_cast<accscalar_t>(correction), take_sqrt);
gpu_reduce_kernel<scalar_t, out_t, 2>(iter, ops, typename ops_t::acc_t{});
}
static void std_var_kernel_cuda(TensorIterator& iter, double correction, bool take_sqrt) {
const auto input_dtype = iter.input_dtype();
if (input_dtype == kHalf && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
std_var_kernel_impl<at::Half, float>(iter, correction, take_sqrt);
} else if (input_dtype == kBFloat16 && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
std_var_kernel_impl<at::BFloat16, float>(iter, correction, take_sqrt);
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
iter.dtype(), "std_cuda", [&]() {
std_var_kernel_impl<scalar_t>(iter, correction, take_sqrt);
});
}
}
template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t>
void mean_kernel_impl(TensorIterator& iter) {
// returns acc_t for all non-complex dtypes and returns T for c10::complex<T>
using factor_t = typename c10::scalar_value_type<acc_t>::type;
factor_t factor = static_cast<factor_t>(iter.num_output_elements()) / iter.numel();
gpu_reduce_kernel<scalar_t, out_t>(iter, MeanOps<acc_t, factor_t> {factor});
}
static void mean_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == kHalf) {
mean_kernel_impl<at::Half, float>(iter);
} else if (iter.dtype(1) == kHalf && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
mean_kernel_impl<at::Half, float, float>(iter);
} else if(iter.dtype() == kBFloat16) {
mean_kernel_impl<at::BFloat16, float>(iter);
} else if (iter.dtype(1) == kBFloat16 && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
mean_kernel_impl<at::BFloat16, float, float>(iter);
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "mean_cuda", [&]() {
mean_kernel_impl<scalar_t>(iter);
});
}
}
REGISTER_DISPATCH(std_var_stub, &std_var_kernel_cuda);
REGISTER_DISPATCH(mean_stub, &mean_kernel_cuda);
} // namespace at::native
|
422231e5b2fedb5bb367fd43cdf7f6d9612a503a.hip | // !!! This is a file automatically generated by hipify!!!
/* Host code for the Jacobi method of solving a system of linear equations
* by iteration.
* Build as follws: make clean && make
* Author: Naga Kandasamy
* Date modified: May 21, 2020
*/
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "jacobi_iteration.h"
/* Include the kernel code */
#include "jacobi_iteration_kernel.hip"
/* Uncomment the line below if you want the code to spit out debug information. */
/* #define DEBUG */
int main(int argc, char **argv)
{
if (argc > 1) {
printf("This program accepts no arguments\n");
exit(EXIT_FAILURE);
}
matrix_t A; /* N x N constant matrix */
matrix_t B; /* N x 1 b matrix */
matrix_t reference_x; /* Reference solution */
matrix_t gpu_naive_solution_x; /* Solution computed by naive kernel */
matrix_t gpu_opt_solution_x; /* Solution computed by optimized kernel */
/* Initialize the random number generator */
srand(time(NULL));
/* Generate diagonally dominant matrix */
printf("\nGenerating %d x %d system\n", MATRIX_SIZE, MATRIX_SIZE);
A = create_diagonally_dominant_matrix(MATRIX_SIZE, MATRIX_SIZE);
if (A.elements == NULL) {
printf("Error creating matrix\n");
exit(EXIT_FAILURE);
}
/* Create the other vectors */
B = allocate_matrix_on_host(MATRIX_SIZE, 1, 1);
reference_x = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);
gpu_naive_solution_x = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);
gpu_opt_solution_x = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);
#ifdef DEBUG
print_matrix(A);
print_matrix(B);
print_matrix(reference_x);
#endif
struct timeval start, stop;
gettimeofday(&start, NULL);
/* Compute Jacobi solution on CPU */
printf("\nPerforming Jacobi iteration on the CPU\n");
compute_gold(A, reference_x, B);
display_jacobi_solution(A, reference_x, B); /* Display statistics */
gettimeofday(&stop, NULL);
fprintf(stderr, "Execution time for CPU = %fs\n", (float)(stop.tv_sec - start.tv_sec +\
(stop.tv_usec - start.tv_usec) / (float)1000000));
/* Compute Jacobi solution on device. Solutions are returned in gpu_naive_solution_x and gpu_opt_solution_x. */
printf("\nPerforming Jacobi iteration on device\n");
compute_on_device(A, gpu_naive_solution_x, gpu_opt_solution_x, B);
printf("\nShowing results for gpu_naive_solution\n");
display_jacobi_solution(A, gpu_naive_solution_x, B); /* Display statistics */
printf("\nShowing results for gpu_opt_solution\n");
display_jacobi_solution(A, gpu_opt_solution_x, B);
free(A.elements);
free(B.elements);
free(reference_x.elements);
free(gpu_naive_solution_x.elements);
free(gpu_opt_solution_x.elements);
exit(EXIT_SUCCESS);
}
/* FIXME: Complete this function to perform Jacobi calculation on device */
void compute_on_device(const matrix_t A, matrix_t gpu_naive_sol_x, matrix_t gpu_opt_sol_x, const matrix_t B)
{
int done = 0;
int num_iter = 0;
double ssd, mse;
double *d_ssd = NULL; /* Pointer to device address holding ssd */
/* Allocate matrices to hold iteration values */
matrix_t new_x_naive = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);
matrix_t new_x_opt = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);
struct timeval start, stop;
/* initialize solution of x for GPU */
for (unsigned int i = 0; i < A.num_rows; i++){
float e = B.elements[i];
gpu_naive_sol_x.elements[i] = e;
gpu_opt_sol_x.elements[i] = e;
}
/* Allocating space on device for matricies on the GPU with error checking */
matrix_t device_A = allocate_matrix_on_device(A);
matrix_t device_naive_sol_x = allocate_matrix_on_device(gpu_naive_sol_x);
matrix_t device_opt_sol_x = allocate_matrix_on_device(gpu_opt_sol_x);
matrix_t device_B = allocate_matrix_on_device(B);
matrix_t device_new_x_naive = allocate_matrix_on_device(new_x_naive);
matrix_t device_new_x_opt = allocate_matrix_on_device(new_x_opt);
/* Copying matricies A, B, and x solutions to GPU with error checking */
copy_matrix_to_device(device_A, A);
copy_matrix_to_device(device_B, B);
copy_matrix_to_device(device_naive_sol_x, gpu_naive_sol_x);;
copy_matrix_to_device(device_opt_sol_x, gpu_opt_sol_x);
/* Allocating space for the device ssd on the GPU */
hipMalloc((void**) &d_ssd, sizeof(double));
/* Allocating space for the lock and initializing mutex/locks on the GPU */
int *mutex_on_device = NULL;
hipMalloc((void **) &mutex_on_device, sizeof(int));
hipMemset(mutex_on_device, 0, sizeof(int));
printf("\nPerforming Jacobi Naive \n");
/* Setting up the execution configuration for the naive kernel */
dim3 thread_block(1, THREAD_BLOCK_SIZE, 1);
dim3 grid(1, (A.num_rows + THREAD_BLOCK_SIZE - 1)/ THREAD_BLOCK_SIZE);
gettimeofday(&start, NULL);
while (!done){
hipMemset(d_ssd, 0.0, sizeof(double));
/* using jacboi iteration kernel naive */
hipLaunchKernelGGL(( jacobi_iteration_kernel_naive), dim3(grid), dim3(thread_block), 0, 0, device_A, device_naive_sol_x, device_new_x_naive, device_B, mutex_on_device, d_ssd);
hipDeviceSynchronize();
check_CUDA_error("KERNEL FAILURE: jacobi_iteration_kernel_naive\n");
hipLaunchKernelGGL(( jacobi_update_x), dim3(grid),dim3(thread_block), 0, 0, device_naive_sol_x, device_new_x_naive);
hipDeviceSynchronize();
check_CUDA_error("KERNEL FAILURE: jacobi_update_x");
/* Check for convergence and update the unknowns. */
hipMemcpy(&ssd, d_ssd, sizeof(double), hipMemcpyDeviceToHost);
num_iter++;
mse = sqrt(ssd); /* Mean squared error. */
if (mse <= THRESHOLD){
done = 1;
printf ("\nConvergence achieved after %d iterations \n", num_iter);
}
// printf ("Iteration: %d. MSE = %f\n", num_iter, mse);
}
gettimeofday(&stop, NULL);
fprintf(stderr, "Execution time for GPU-Naive = %fs\n", (float)(stop.tv_sec - start.tv_sec +\
(stop.tv_usec - start.tv_usec) / (float)1000000));
printf("\nPerforming Jacobi Optimized \n");
/* Jacobi optimized kernel */
thread_block.x = thread_block.y = TILE_SIZE;
grid.x = 1;
grid.y = (gpu_opt_sol_x.num_rows + TILE_SIZE - 1)/TILE_SIZE;
done = 0;
num_iter = 0;
gettimeofday(&start, NULL);
while (!done){
hipMemset(d_ssd, 0.0, sizeof(double));
/* using jacboi iteration kernel optimized */
hipLaunchKernelGGL(( jacobi_iteration_kernel_optimized), dim3(grid), dim3(thread_block), 0, 0, device_A, device_opt_sol_x, device_new_x_opt, device_B, mutex_on_device, d_ssd);
hipDeviceSynchronize();
check_CUDA_error("KERNEL FAILURE: jacobi_iteration_kernel_optimized\n");
hipLaunchKernelGGL(( jacobi_update_x), dim3(grid),dim3(thread_block), 0, 0, device_opt_sol_x, device_new_x_opt);
hipDeviceSynchronize();
check_CUDA_error("KERNEL FAILURE: jacobi_update_x");
/* Check for convergence and update the unknowns. */
hipMemcpy(&ssd, d_ssd, sizeof (double), hipMemcpyDeviceToHost);
num_iter++;
mse = sqrt(ssd);
if (mse <= THRESHOLD){
done = 1;
printf ("\nConvergence achieved after %d iterations \n", num_iter);
}
// printf ("Iteration: %d. MSE = %f\n", num_iter, mse);
}
gettimeofday(&stop, NULL);
fprintf(stderr, "Execution time for GPU-Optimized = %fs\n", (float)(stop.tv_sec - start.tv_sec +\
(stop.tv_usec - start.tv_usec)/(float)1000000));
/* Copying the solutions back from GPU */
copy_matrix_from_device(gpu_naive_sol_x, device_naive_sol_x);
check_CUDA_error("Copying matrix device_naive_sol_x from device");
copy_matrix_from_device(gpu_opt_sol_x, device_opt_sol_x);
check_CUDA_error("Copying matrix device_opt_sol_x from device");
/* Freeing memory on GPU/ Clean up device memory */
hipFree(device_A.elements);
hipFree(device_B.elements);
hipFree(device_naive_sol_x.elements);
hipFree(device_opt_sol_x.elements);
hipFree(d_ssd);
hipFree(mutex_on_device);
hipFree(device_new_x_naive.elements);
hipFree(device_new_x_opt.elements);
free(new_x_naive.elements);
free(new_x_opt.elements);
return;
}
/* Allocate matrix on the device of same size as M */
matrix_t allocate_matrix_on_device(const matrix_t M)
{
matrix_t Mdevice = M;
int size = M.num_rows * M.num_columns * sizeof(float);
hipMalloc((void **)&Mdevice.elements, size);
return Mdevice;
}
/* Allocate a matrix of dimensions height * width.
If init == 0, initialize to all zeroes.
If init == 1, perform random initialization.
*/
matrix_t allocate_matrix_on_host(int num_rows, int num_columns, int init)
{
matrix_t M;
M.num_columns = num_columns;
M.num_rows = num_rows;
int size = M.num_rows * M.num_columns;
M.elements = (float *)malloc(size * sizeof(float));
for (unsigned int i = 0; i < size; i++) {
if (init == 0)
M.elements[i] = 0;
else
M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER);
}
return M;
}
/* Copy matrix to device */
void copy_matrix_to_device(matrix_t Mdevice, const matrix_t Mhost)
{
int size = Mhost.num_rows * Mhost.num_columns * sizeof(float);
Mdevice.num_rows = Mhost.num_rows;
Mdevice.num_columns = Mhost.num_columns;
hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice);
return;
}
/* Copy matrix from device to host */
void copy_matrix_from_device(matrix_t Mhost, const matrix_t Mdevice)
{
int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost);
return;
}
/* Prints the matrix out to screen */
void print_matrix(const matrix_t M)
{
for (unsigned int i = 0; i < M.num_rows; i++) {
for (unsigned int j = 0; j < M.num_columns; j++) {
printf("%f ", M.elements[i * M.num_columns + j]);
}
printf("\n");
}
printf("\n");
return;
}
/* Returns a floating-point value between [min, max] */
float get_random_number(int min, int max)
{
float r = rand()/(float)RAND_MAX;
return (float)floor((double)(min + (max - min + 1) * r));
}
/* Check for errors in kernel execution */
void check_CUDA_error(const char *msg)
{
hipError_t err = hipGetLastError();
if ( hipSuccess != err) {
printf("CUDA ERROR: %s (%s).\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
return;
}
/* Create diagonally dominant matrix */
matrix_t create_diagonally_dominant_matrix(unsigned int num_rows, unsigned int num_columns)
{
matrix_t M;
M.num_columns = num_columns;
M.num_rows = num_rows;
unsigned int size = M.num_rows * M.num_columns;
M.elements = (float *)malloc(size * sizeof(float));
if (M.elements == NULL)
return M;
/* Create a matrix with random numbers between [-.5 and .5] */
unsigned int i, j;
for (i = 0; i < size; i++)
M.elements[i] = get_random_number (MIN_NUMBER, MAX_NUMBER);
/* Make diagonal entries large with respect to the entries on each row. */
for (i = 0; i < num_rows; i++) {
float row_sum = 0.0;
for (j = 0; j < num_columns; j++) {
row_sum += fabs(M.elements[i * M.num_rows + j]);
}
M.elements[i * M.num_rows + i] = 0.5 + row_sum;
}
return M;
}
| 422231e5b2fedb5bb367fd43cdf7f6d9612a503a.cu | /* Host code for the Jacobi method of solving a system of linear equations
* by iteration.
* Build as follws: make clean && make
* Author: Naga Kandasamy
* Date modified: May 21, 2020
*/
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include "jacobi_iteration.h"
/* Include the kernel code */
#include "jacobi_iteration_kernel.cu"
/* Uncomment the line below if you want the code to spit out debug information. */
/* #define DEBUG */
int main(int argc, char **argv)
{
if (argc > 1) {
printf("This program accepts no arguments\n");
exit(EXIT_FAILURE);
}
matrix_t A; /* N x N constant matrix */
matrix_t B; /* N x 1 b matrix */
matrix_t reference_x; /* Reference solution */
matrix_t gpu_naive_solution_x; /* Solution computed by naive kernel */
matrix_t gpu_opt_solution_x; /* Solution computed by optimized kernel */
/* Initialize the random number generator */
srand(time(NULL));
/* Generate diagonally dominant matrix */
printf("\nGenerating %d x %d system\n", MATRIX_SIZE, MATRIX_SIZE);
A = create_diagonally_dominant_matrix(MATRIX_SIZE, MATRIX_SIZE);
if (A.elements == NULL) {
printf("Error creating matrix\n");
exit(EXIT_FAILURE);
}
/* Create the other vectors */
B = allocate_matrix_on_host(MATRIX_SIZE, 1, 1);
reference_x = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);
gpu_naive_solution_x = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);
gpu_opt_solution_x = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);
#ifdef DEBUG
print_matrix(A);
print_matrix(B);
print_matrix(reference_x);
#endif
struct timeval start, stop;
gettimeofday(&start, NULL);
/* Compute Jacobi solution on CPU */
printf("\nPerforming Jacobi iteration on the CPU\n");
compute_gold(A, reference_x, B);
display_jacobi_solution(A, reference_x, B); /* Display statistics */
gettimeofday(&stop, NULL);
fprintf(stderr, "Execution time for CPU = %fs\n", (float)(stop.tv_sec - start.tv_sec +\
(stop.tv_usec - start.tv_usec) / (float)1000000));
/* Compute Jacobi solution on device. Solutions are returned in gpu_naive_solution_x and gpu_opt_solution_x. */
printf("\nPerforming Jacobi iteration on device\n");
compute_on_device(A, gpu_naive_solution_x, gpu_opt_solution_x, B);
printf("\nShowing results for gpu_naive_solution\n");
display_jacobi_solution(A, gpu_naive_solution_x, B); /* Display statistics */
printf("\nShowing results for gpu_opt_solution\n");
display_jacobi_solution(A, gpu_opt_solution_x, B);
free(A.elements);
free(B.elements);
free(reference_x.elements);
free(gpu_naive_solution_x.elements);
free(gpu_opt_solution_x.elements);
exit(EXIT_SUCCESS);
}
/* FIXME: Complete this function to perform Jacobi calculation on device */
void compute_on_device(const matrix_t A, matrix_t gpu_naive_sol_x, matrix_t gpu_opt_sol_x, const matrix_t B)
{
int done = 0;
int num_iter = 0;
double ssd, mse;
double *d_ssd = NULL; /* Pointer to device address holding ssd */
/* Allocate matrices to hold iteration values */
matrix_t new_x_naive = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);
matrix_t new_x_opt = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);
struct timeval start, stop;
/* initialize solution of x for GPU */
for (unsigned int i = 0; i < A.num_rows; i++){
float e = B.elements[i];
gpu_naive_sol_x.elements[i] = e;
gpu_opt_sol_x.elements[i] = e;
}
/* Allocating space on device for matricies on the GPU with error checking */
matrix_t device_A = allocate_matrix_on_device(A);
matrix_t device_naive_sol_x = allocate_matrix_on_device(gpu_naive_sol_x);
matrix_t device_opt_sol_x = allocate_matrix_on_device(gpu_opt_sol_x);
matrix_t device_B = allocate_matrix_on_device(B);
matrix_t device_new_x_naive = allocate_matrix_on_device(new_x_naive);
matrix_t device_new_x_opt = allocate_matrix_on_device(new_x_opt);
/* Copying matricies A, B, and x solutions to GPU with error checking */
copy_matrix_to_device(device_A, A);
copy_matrix_to_device(device_B, B);
copy_matrix_to_device(device_naive_sol_x, gpu_naive_sol_x);;
copy_matrix_to_device(device_opt_sol_x, gpu_opt_sol_x);
/* Allocating space for the device ssd on the GPU */
cudaMalloc((void**) &d_ssd, sizeof(double));
/* Allocating space for the lock and initializing mutex/locks on the GPU */
int *mutex_on_device = NULL;
cudaMalloc((void **) &mutex_on_device, sizeof(int));
cudaMemset(mutex_on_device, 0, sizeof(int));
printf("\nPerforming Jacobi Naive \n");
/* Setting up the execution configuration for the naive kernel */
dim3 thread_block(1, THREAD_BLOCK_SIZE, 1);
dim3 grid(1, (A.num_rows + THREAD_BLOCK_SIZE - 1)/ THREAD_BLOCK_SIZE);
gettimeofday(&start, NULL);
while (!done){
cudaMemset(d_ssd, 0.0, sizeof(double));
/* using jacboi iteration kernel naive */
jacobi_iteration_kernel_naive<<<grid, thread_block>>>(device_A, device_naive_sol_x, device_new_x_naive, device_B, mutex_on_device, d_ssd);
cudaDeviceSynchronize();
check_CUDA_error("KERNEL FAILURE: jacobi_iteration_kernel_naive\n");
jacobi_update_x<<<grid,thread_block>>>(device_naive_sol_x, device_new_x_naive);
cudaDeviceSynchronize();
check_CUDA_error("KERNEL FAILURE: jacobi_update_x");
/* Check for convergence and update the unknowns. */
cudaMemcpy(&ssd, d_ssd, sizeof(double), cudaMemcpyDeviceToHost);
num_iter++;
mse = sqrt(ssd); /* Mean squared error. */
if (mse <= THRESHOLD){
done = 1;
printf ("\nConvergence achieved after %d iterations \n", num_iter);
}
// printf ("Iteration: %d. MSE = %f\n", num_iter, mse);
}
gettimeofday(&stop, NULL);
fprintf(stderr, "Execution time for GPU-Naive = %fs\n", (float)(stop.tv_sec - start.tv_sec +\
(stop.tv_usec - start.tv_usec) / (float)1000000));
printf("\nPerforming Jacobi Optimized \n");
/* Jacobi optimized kernel */
thread_block.x = thread_block.y = TILE_SIZE;
grid.x = 1;
grid.y = (gpu_opt_sol_x.num_rows + TILE_SIZE - 1)/TILE_SIZE;
done = 0;
num_iter = 0;
gettimeofday(&start, NULL);
while (!done){
cudaMemset(d_ssd, 0.0, sizeof(double));
/* using jacboi iteration kernel optimized */
jacobi_iteration_kernel_optimized<<<grid, thread_block>>>(device_A, device_opt_sol_x, device_new_x_opt, device_B, mutex_on_device, d_ssd);
cudaDeviceSynchronize();
check_CUDA_error("KERNEL FAILURE: jacobi_iteration_kernel_optimized\n");
jacobi_update_x<<<grid,thread_block>>>(device_opt_sol_x, device_new_x_opt);
cudaDeviceSynchronize();
check_CUDA_error("KERNEL FAILURE: jacobi_update_x");
/* Check for convergence and update the unknowns. */
cudaMemcpy(&ssd, d_ssd, sizeof (double), cudaMemcpyDeviceToHost);
num_iter++;
mse = sqrt(ssd);
if (mse <= THRESHOLD){
done = 1;
printf ("\nConvergence achieved after %d iterations \n", num_iter);
}
// printf ("Iteration: %d. MSE = %f\n", num_iter, mse);
}
gettimeofday(&stop, NULL);
fprintf(stderr, "Execution time for GPU-Optimized = %fs\n", (float)(stop.tv_sec - start.tv_sec +\
(stop.tv_usec - start.tv_usec)/(float)1000000));
/* Copying the solutions back from GPU */
copy_matrix_from_device(gpu_naive_sol_x, device_naive_sol_x);
check_CUDA_error("Copying matrix device_naive_sol_x from device");
copy_matrix_from_device(gpu_opt_sol_x, device_opt_sol_x);
check_CUDA_error("Copying matrix device_opt_sol_x from device");
/* Freeing memory on GPU/ Clean up device memory */
cudaFree(device_A.elements);
cudaFree(device_B.elements);
cudaFree(device_naive_sol_x.elements);
cudaFree(device_opt_sol_x.elements);
cudaFree(d_ssd);
cudaFree(mutex_on_device);
cudaFree(device_new_x_naive.elements);
cudaFree(device_new_x_opt.elements);
free(new_x_naive.elements);
free(new_x_opt.elements);
return;
}
/* Allocate matrix on the device of same size as M */
/* Allocate device storage matching the dimensions of matrix M.
 * Returns a matrix_t whose .elements points to device memory, or is NULL
 * if the allocation failed (callers should check before use).
 * Fixes: (1) the original left M's *host* pointer in .elements when
 * cudaMalloc failed, so a failure silently aliased host memory;
 * (2) the byte count is now computed in size_t to avoid int overflow
 * for large matrices. */
matrix_t allocate_matrix_on_device(const matrix_t M)
{
    matrix_t Mdevice = M;
    size_t num_bytes = (size_t)M.num_rows * M.num_columns * sizeof(float);

    Mdevice.elements = NULL; /* don't carry over the host pointer */
    if (cudaMalloc((void **)&Mdevice.elements, num_bytes) != cudaSuccess)
        Mdevice.elements = NULL;

    return Mdevice;
}
/* Allocate a matrix of dimensions height * width.
If init == 0, initialize to all zeroes.
If init == 1, perform random initialization.
*/
/* Allocate a matrix of dimensions num_rows * num_columns on the host.
 * If init == 0, initialize to all zeroes.
 * If init == 1, perform random initialization via get_random_number().
 * Returns a matrix_t whose .elements is NULL if malloc failed.
 * Fixes: the original dereferenced a NULL .elements when malloc failed,
 * and compared an unsigned loop index against a signed size. */
matrix_t allocate_matrix_on_host(int num_rows, int num_columns, int init)
{
    matrix_t M;
    M.num_columns = num_columns;
    M.num_rows = num_rows;
    size_t size = (size_t)M.num_rows * M.num_columns;

    M.elements = (float *)malloc(size * sizeof(float));
    if (M.elements == NULL) /* propagate allocation failure to the caller */
        return M;

    for (size_t i = 0; i < size; i++) {
        if (init == 0)
            M.elements[i] = 0;
        else
            M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER);
    }

    return M;
}
/* Copy matrix to device */
/* Copy host matrix Mhost's elements to device matrix Mdevice.
 * NOTE(review): Mdevice is passed by value, so the num_rows/num_columns
 * assignments below only change a local copy and are never observed by the
 * caller; only the cudaMemcpy has an external effect. Presumably harmless
 * because callers always allocate Mdevice with matching dimensions — verify. */
void copy_matrix_to_device(matrix_t Mdevice, const matrix_t Mhost)
{
    int size = Mhost.num_rows * Mhost.num_columns * sizeof(float);
    Mdevice.num_rows = Mhost.num_rows;
    Mdevice.num_columns = Mhost.num_columns;
    /* Synchronous copy: safe to reuse Mhost.elements immediately after. */
    cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice);
    return;
}
/* Copy matrix from device to host */
/* Copy the contents of device matrix Mdevice back into host matrix Mhost.
 * The element count is taken from Mdevice; Mhost must be at least as large. */
void copy_matrix_from_device(matrix_t Mhost, const matrix_t Mdevice)
{
    int num_elements = Mdevice.num_rows * Mdevice.num_columns;
    cudaMemcpy(Mhost.elements, Mdevice.elements,
               num_elements * sizeof(float), cudaMemcpyDeviceToHost);
}
/* Prints the matrix out to screen */
/* Print matrix M to stdout, one row per line, followed by a blank line. */
void print_matrix(const matrix_t M)
{
    unsigned int row, col;

    for (row = 0; row < M.num_rows; row++) {
        for (col = 0; col < M.num_columns; col++)
            printf("%f ", M.elements[row * M.num_columns + col]);
        printf("\n");
    }

    printf("\n");
}
/* Returns a floating-point value between [min, max] */
/* Returns a floating-point value uniformly drawn from [min, max].
 * Bug fix: the original computed min + (max - min + 1) * r with
 * r = rand()/RAND_MAX in [0, 1], so rand() == RAND_MAX produced max + 1,
 * outside the documented range. Dividing by RAND_MAX + 1.0 keeps r < 1,
 * so floor() always lands in [min, max]. */
float get_random_number(int min, int max)
{
    double r = rand() / ((double)RAND_MAX + 1.0); /* r in [0, 1) */
    return (float)floor(min + (max - min + 1) * r);
}
/* Check for errors in kernel execution */
/* Check for errors in kernel execution: if the most recent CUDA operation
 * recorded an error, print `msg` plus the CUDA error string and abort.
 * Note: cudaGetLastError() also clears the sticky error state. */
void check_CUDA_error(const char *msg)
{
    cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;

    printf("CUDA ERROR: %s (%s).\n", msg, cudaGetErrorString(status));
    exit(EXIT_FAILURE);
}
/* Create diagonally dominant matrix */
/* Create a row-major num_rows x num_columns matrix with random entries in
 * [MIN_NUMBER, MAX_NUMBER], then make it diagonally dominant by setting each
 * diagonal entry to 0.5 plus the sum of absolute values of its row.
 * Returns .elements == NULL on allocation failure.
 * Bug fix: row-major indexing must stride by num_columns, not num_rows —
 * the original `i * M.num_rows + j` was only correct for square matrices
 * and indexed out of bounds / wrong cells otherwise. */
matrix_t create_diagonally_dominant_matrix(unsigned int num_rows, unsigned int num_columns)
{
    matrix_t M;
    M.num_columns = num_columns;
    M.num_rows = num_rows;
    unsigned int size = M.num_rows * M.num_columns;

    M.elements = (float *)malloc(size * sizeof(float));
    if (M.elements == NULL)
        return M;

    /* Fill with random numbers in [MIN_NUMBER, MAX_NUMBER]. */
    unsigned int i, j;
    for (i = 0; i < size; i++)
        M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER);

    /* Make diagonal entries large with respect to the entries on each row. */
    for (i = 0; i < num_rows; i++) {
        float row_sum = 0.0;
        for (j = 0; j < num_columns; j++)
            row_sum += fabs(M.elements[i * M.num_columns + j]);
        M.elements[i * M.num_columns + i] = 0.5 + row_sum;
    }

    return M;
}
|
17577a1dc1eec947803557e9ac7a9f91e9f0a1af.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "IVFPQ.cuh"
#include "../GpuResources.h"
#include "BroadcastSum.cuh"
#include "Distance.cuh"
#include "FlatIndex_hip.cuh"
#include "InvertedListAppend.cuh"
#include "L2Norm_hip.cuh"
#include "PQCodeDistances_hip.cuh"
#include "PQScanMultiPassNoPrecomputed.cuh"
#include "PQScanMultiPassPrecomputed.cuh"
#include "RemapIndices.h"
#include "VectorResidual_hip.cuh"
#include "../utils/DeviceDefs.cuh"
#include "../utils/DeviceUtils.h"
#include "../utils/HostTensor.cuh"
#include "../utils/MatrixMult.cuh"
#include "../utils/NoTypeTensor.cuh"
#include "../utils/Transpose.cuh"
#include <limits>
#include <thrust/host_vector.h>
#include <unordered_map>
namespace faiss { namespace gpu {
// Construct an IVFPQ index over `quantizer`'s coarse centroids. Each
// dim_-dimensional residual is split into numSubQuantizers sub-vectors,
// each encoded with bitsPerSubQuantizer bits. pqCentroidData holds the PQ
// codebook in (sub q)(code id)(sub dim) order and is copied to the device
// by setPQCentroids_().
IVFPQ::IVFPQ(GpuResources* resources,
             FlatIndex* quantizer,
             int numSubQuantizers,
             int bitsPerSubQuantizer,
             float* pqCentroidData,
             IndicesOptions indicesOptions,
             bool useFloat16LookupTables,
             MemorySpace space) :
    IVFBase(resources,
            quantizer,
            numSubQuantizers,
            indicesOptions,
            space),
    numSubQuantizers_(numSubQuantizers),
    bitsPerSubQuantizer_(bitsPerSubQuantizer),
    numSubQuantizerCodes_(utils::pow2(bitsPerSubQuantizer_)),
    dimPerSubQuantizer_(dim_ / numSubQuantizers),
    precomputedCodes_(false),
    useFloat16LookupTables_(useFloat16LookupTables) {
  FAISS_ASSERT(pqCentroidData);

  // Each sub-quantizer code must fit in a single byte
  FAISS_ASSERT(bitsPerSubQuantizer_ <= 8);
  // The vector dimension must split evenly across sub-quantizers
  FAISS_ASSERT(dim_ % numSubQuantizers_ == 0);
  FAISS_ASSERT(isSupportedPQCodeLength(bytesPerVector_));

#ifndef FAISS_USE_FLOAT16
  // Half-precision lookup tables require a float16-enabled build
  FAISS_ASSERT(!useFloat16LookupTables_);
#endif

  setPQCentroids_(pqCentroidData);
}
// Nothing to release explicitly; members clean up in their own destructors.
IVFPQ::~IVFPQ() {
}
// Returns whether the scan kernels support `size` bytes of PQ code per
// encoded vector. Sizes 56/64/96 are only usable together with float16
// lookup tables (see the original per-case notes).
bool
IVFPQ::isSupportedPQCodeLength(int size) {
  static const int kSupportedCodeSizes[] = {
    1, 2, 3, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48,
    56,  // only supported with float16
    64,  // only supported with float16
    96,  // only supported with float16
  };

  for (int supported : kSupportedCodeSizes) {
    if (size == supported) {
      return true;
    }
  }

  return false;
}
// Returns whether the no-precomputed-codes scan path supports a
// per-sub-quantizer dimensionality of `dims`; delegates to the
// kernel-side capability check.
bool
IVFPQ::isSupportedNoPrecomputedSubDimSize(int dims) {
  return faiss::gpu::isSupportedNoPrecomputedSubDimSize(dims);
}
// Enable/disable use of the precomputed term-2 distance tables at query
// time. Building the tables is expensive, so only act when the setting
// actually changes; disabling releases the table storage.
void
IVFPQ::setPrecomputedCodes(bool enable) {
  if (precomputedCodes_ != enable) {
    precomputedCodes_ = enable;

    if (precomputedCodes_) {
      precomputeCodes_();
    } else {
      // Clear out old precomputed code data by assigning a freshly
      // default-constructed tensor. A prvalue is already a move source,
      // so the former std::move(...) around the temporaries was a
      // pessimizing no-op (flagged by -Wpessimizing-move).
      precomputedCode_ = DeviceTensor<float, 3, true>();

#ifdef FAISS_USE_FLOAT16
      precomputedCodeHalf_ = DeviceTensor<half, 3, true>();
#endif
    }
  }
}
// Assigns each input vector to its nearest coarse list, PQ-encodes the
// residual per sub-quantizer, grows the per-list device storage, and
// appends code + user index. Returns the number of vectors actually added
// (vectors whose coarse assignment is invalid, e.g. NaNs, are skipped).
int
IVFPQ::classifyAndAddVectors(Tensor<float, 2, true>& vecs,
                             Tensor<long, 1, true>& indices) {
  FAISS_ASSERT(vecs.getSize(0) == indices.getSize(0));
  FAISS_ASSERT(vecs.getSize(1) == dim_);
  FAISS_ASSERT(!quantizer_->getUseFloat16());
  auto& coarseCentroids = quantizer_->getVectorsFloat32Ref();
  auto& mem = resources_->getMemoryManagerCurrentDevice();
  auto stream = resources_->getDefaultStreamCurrentDevice();

  // Number of valid vectors that we actually add; we return this
  int numAdded = 0;

  // We don't actually need this
  DeviceTensor<float, 2, true> listDistance(mem, {vecs.getSize(0), 1}, stream);
  // We use this
  DeviceTensor<int, 2, true> listIds2d(mem, {vecs.getSize(0), 1}, stream);
  auto listIds = listIds2d.view<1>({vecs.getSize(0)});

  // Top-1 coarse assignment for every input vector
  quantizer_->query(vecs, 1, listDistance, listIds2d, false);

  // Copy the lists that we wish to append to back to the CPU
  // FIXME: really this can be into pinned memory and a true async
  // copy on a different stream; we can start the copy early, but it's
  // tiny
  HostTensor<int, 1, true> listIdsHost(listIds, stream);

  // Calculate the residual for each closest centroid
  DeviceTensor<float, 2, true> residuals(
    mem, {vecs.getSize(0), vecs.getSize(1)}, stream);
  runCalcResidual(vecs, coarseCentroids, listIds, residuals, stream);

  // Residuals are in the form
  // (vec x numSubQuantizer x dimPerSubQuantizer)
  // transpose to
  // (numSubQuantizer x vec x dimPerSubQuantizer)
  auto residualsView = residuals.view<3>(
    {residuals.getSize(0), numSubQuantizers_, dimPerSubQuantizer_});
  DeviceTensor<float, 3, true> residualsTranspose(
    mem,
    {numSubQuantizers_, residuals.getSize(0), dimPerSubQuantizer_},
    stream);
  runTransposeAny(residualsView, 0, 1, residualsTranspose, stream);

  // Get the product quantizer centroids in the form
  // (numSubQuantizer x numSubQuantizerCodes x dimPerSubQuantizer)
  // which is pqCentroidsMiddleCode_
  // We now have a batch operation to find the top-1 distances:
  // batch size: numSubQuantizer
  // centroids: (numSubQuantizerCodes x dimPerSubQuantizer)
  // residuals: (vec x dimPerSubQuantizer)
  // => (numSubQuantizer x vec x 1)
  DeviceTensor<float, 3, true> closestSubQDistance(
    mem, {numSubQuantizers_, residuals.getSize(0), 1}, stream);
  DeviceTensor<int, 3, true> closestSubQIndex(
    mem, {numSubQuantizers_, residuals.getSize(0), 1}, stream);

  for (int subQ = 0; subQ < numSubQuantizers_; ++subQ) {
    auto closestSubQDistanceView = closestSubQDistance[subQ].view();
    auto closestSubQIndexView = closestSubQIndex[subQ].view();
    auto pqCentroidsMiddleCodeView = pqCentroidsMiddleCode_[subQ].view();
    auto residualsTransposeView = residualsTranspose[subQ].view();

    runL2Distance(resources_,
                  pqCentroidsMiddleCodeView,
                  true, // pqCentroidsMiddleCodeView is row major
                  nullptr, // no precomputed norms
                  residualsTransposeView,
                  true, // residualsTransposeView is row major
                  1,
                  closestSubQDistanceView,
                  closestSubQIndexView,
                  // We don't care about distances
                  true);
  }

  // Now, we have the nearest sub-q centroid for each slice of the
  // residual vector.
  auto closestSubQIndexView = closestSubQIndex.view<2>(
    {numSubQuantizers_, residuals.getSize(0)});

  // Transpose this for easy use
  DeviceTensor<int, 2, true> encodings(
    mem, {residuals.getSize(0), numSubQuantizers_}, stream);
  runTransposeAny(closestSubQIndexView, 0, 1, encodings, stream);

  // Now we add the encoded vectors to the individual lists
  // First, make sure that there is space available for adding the new
  // encoded vectors and indices

  // list id -> # being added
  std::unordered_map<int, int> assignCounts;

  // vector id -> offset in list
  // (we already have vector id -> list id in listIds)
  HostTensor<int, 1, true> listOffsetHost({listIdsHost.getSize(0)});

  for (int i = 0; i < listIdsHost.getSize(0); ++i) {
    int listId = listIdsHost[i];

    // Add vector could be invalid (contains NaNs etc)
    if (listId < 0) {
      listOffsetHost[i] = -1;
      continue;
    }

    FAISS_ASSERT(listId < numLists_);
    ++numAdded;

    // Offset = current list length plus however many of this batch
    // already landed in the same list before vector i
    int offset = deviceListData_[listId]->size() / bytesPerVector_;

    auto it = assignCounts.find(listId);
    if (it != assignCounts.end()) {
      offset += it->second;
      it->second++;
    } else {
      assignCounts[listId] = 1;
    }

    listOffsetHost[i] = offset;
  }

  // If we didn't add anything (all invalid vectors), no need to
  // continue
  if (numAdded == 0) {
    return 0;
  }

  // We need to resize the data structures for the inverted lists on
  // the GPUs, which means that they might need reallocation, which
  // means that their base address may change. Figure out the new base
  // addresses, and update those in a batch on the device
  {
    // Resize all of the lists that we are appending to
    for (auto& counts : assignCounts) {
      auto& codes = deviceListData_[counts.first];
      codes->resize(codes->size() + counts.second * bytesPerVector_,
                    stream);
      int newNumVecs = (int) (codes->size() / bytesPerVector_);

      // NOTE: shadows the function parameter `indices` within this loop
      auto& indices = deviceListIndices_[counts.first];
      if ((indicesOptions_ == INDICES_32_BIT) ||
          (indicesOptions_ == INDICES_64_BIT)) {
        size_t indexSize =
          (indicesOptions_ == INDICES_32_BIT) ? sizeof(int) : sizeof(long);

        indices->resize(indices->size() + counts.second * indexSize, stream);
      } else if (indicesOptions_ == INDICES_CPU) {
        // indices are stored on the CPU side
        FAISS_ASSERT(counts.first < listOffsetToUserIndex_.size());

        auto& userIndices = listOffsetToUserIndex_[counts.first];
        userIndices.resize(newNumVecs);
      } else {
        // indices are not stored on the GPU or CPU side
        FAISS_ASSERT(indicesOptions_ == INDICES_IVF);
      }

      // This is used by the multi-pass query to decide how much scratch
      // space to allocate for intermediate results
      maxListLength_ = ::max(maxListLength_, newNumVecs);
    }

    // Update all pointers and sizes on the device for lists that we
    // appended to
    {
      // NOTE: shadows the outer `listIds` tensor within this scope only
      std::vector<int> listIds(assignCounts.size());
      int i = 0;
      for (auto& counts : assignCounts) {
        listIds[i++] = counts.first;
      }

      updateDeviceListInfo_(listIds, stream);
    }
  }

  // If we're maintaining the indices on the CPU side, update our
  // map. We already resized our map above.
  if (indicesOptions_ == INDICES_CPU) {
    // We need to maintain the indices on the CPU side
    HostTensor<long, 1, true> hostIndices(indices, stream);

    for (int i = 0; i < hostIndices.getSize(0); ++i) {
      int listId = listIdsHost[i];

      // Add vector could be invalid (contains NaNs etc)
      if (listId < 0) {
        continue;
      }

      int offset = listOffsetHost[i];

      FAISS_ASSERT(listId < listOffsetToUserIndex_.size());
      auto& userIndices = listOffsetToUserIndex_[listId];

      FAISS_ASSERT(offset < userIndices.size());
      userIndices[offset] = hostIndices[i];
    }
  }

  // We similarly need to actually append the new encoded vectors
  {
    DeviceTensor<int, 1, true> listOffset(mem, listOffsetHost, stream);

    // This kernel will handle appending each encoded vector + index to
    // the appropriate list
    runIVFPQInvertedListAppend(listIds,
                               listOffset,
                               encodings,
                               indices,
                               deviceListDataPointers_,
                               deviceListIndexPointers_,
                               indicesOptions_,
                               stream);
  }

  return numAdded;
}
// Appends numVecs already-encoded vectors (raw PQ code bytes in `codes`)
// plus their user indices to an existing inverted list, then refreshes the
// device-side pointer/length bookkeeping for that list.
void
IVFPQ::addCodeVectorsFromCpu(int listId,
                             const void* codes,
                             const long* indices,
                             size_t numVecs) {
  // This list must already exist
  FAISS_ASSERT(listId < deviceListData_.size());
  auto stream = resources_->getDefaultStreamCurrentDevice();

  // If there's nothing to add, then there's nothing we have to do
  if (numVecs == 0) {
    return;
  }

  size_t lengthInBytes = numVecs * bytesPerVector_;

  auto& listCodes = deviceListData_[listId];
  auto prevCodeData = listCodes->data();

  // We only have int32 length representations on the GPU per each
  // list; the length is in sizeof(char)
  FAISS_ASSERT(listCodes->size() % bytesPerVector_ == 0);
  FAISS_ASSERT(listCodes->size() + lengthInBytes <=
               (size_t) std::numeric_limits<int>::max());

  listCodes->append((unsigned char*) codes,
                    lengthInBytes,
                    stream,
                    true /* exact reserved size */);

  // Handle the indices as well
  addIndicesFromCpu_(listId, indices, numVecs);

  // This list address may have changed due to vector resizing, but
  // only bother updating it on the device if it has changed
  if (prevCodeData != listCodes->data()) {
    deviceListDataPointers_[listId] = listCodes->data();
  }

  // And our size has changed too
  int listLength = listCodes->size() / bytesPerVector_;
  deviceListLengths_[listId] = listLength;

  // We update this as well, since the multi-pass algorithm uses it
  maxListLength_ = ::max(maxListLength_, listLength);

  // device_vector add is potentially happening on a different stream
  // than our default stream
  if (resources_->getDefaultStreamCurrentDevice() != 0) {
    streamWait({stream}, {0});
  }
}
// Copies the PQ codebook (`data`, laid out (sub q)(code id)(sub dim)) to
// the device and derives the two layouts the kernels use:
//   pqCentroidsInnermostCode_: (sub q)(sub dim)(code id)
//   pqCentroidsMiddleCode_:    (sub q)(code id)(sub dim)
void
IVFPQ::setPQCentroids_(float* data) {
  size_t pqSize =
    numSubQuantizers_ * numSubQuantizerCodes_ * dimPerSubQuantizer_;

  // Make sure the data is on the host
  // FIXME: why are we doing this?
  thrust::host_vector<float> hostMemory;
  hostMemory.insert(hostMemory.end(), data, data + pqSize);

  HostTensor<float, 3, true> pqHost(
    hostMemory.data(),
    {numSubQuantizers_, numSubQuantizerCodes_, dimPerSubQuantizer_});
  DeviceTensor<float, 3, true> pqDevice(
    pqHost,
    resources_->getDefaultStreamCurrentDevice());

  DeviceTensor<float, 3, true> pqDeviceTranspose(
    {numSubQuantizers_, dimPerSubQuantizer_, numSubQuantizerCodes_});
  runTransposeAny(pqDevice, 1, 2, pqDeviceTranspose,
                  resources_->getDefaultStreamCurrentDevice());
  pqCentroidsInnermostCode_ = std::move(pqDeviceTranspose);

  // Also maintain the PQ centroids in the form
  // (sub q)(code id)(sub dim)
  DeviceTensor<float, 3, true> pqCentroidsMiddleCode(
    {numSubQuantizers_, numSubQuantizerCodes_, dimPerSubQuantizer_});
  runTransposeAny(pqCentroidsInnermostCode_, 1, 2, pqCentroidsMiddleCode,
                  resources_->getDefaultStreamCurrentDevice());
  pqCentroidsMiddleCode_ = std::move(pqCentroidsMiddleCode);
}
// Builds the precomputed "term 2" lookup tables used by the precomputed-
// codes query path; result lands in precomputedCode_ (or
// precomputedCodeHalf_ when float16 lookup tables are enabled).
void
IVFPQ::precomputeCodes_() {
  //
  //    d = || x - y_C ||^2 + || y_R ||^2 + 2 * (y_C|y_R) - 2 * (x|y_R)
  //        ---------------   ---------------------------   -------
  //            term 1                 term 2                 term 3
  //
  // Terms 1 and 3 are available only at query time. We compute term 2
  // here.
  FAISS_ASSERT(!quantizer_->getUseFloat16());
  auto& coarseCentroids = quantizer_->getVectorsFloat32Ref();

  // Compute ||y_R||^2 by treating
  // (sub q)(code id)(sub dim) as (sub q * code id)(sub dim)
  auto pqCentroidsMiddleCodeView =
    pqCentroidsMiddleCode_.view<2>(
      {numSubQuantizers_ * numSubQuantizerCodes_, dimPerSubQuantizer_});
  DeviceTensor<float, 1, true> subQuantizerNorms(
    {numSubQuantizers_ * numSubQuantizerCodes_});

  runL2Norm(pqCentroidsMiddleCodeView, true,
            subQuantizerNorms, true,
            resources_->getDefaultStreamCurrentDevice());

  // Compute 2 * (y_C|y_R) via batch matrix multiplication
  // batch size (sub q) x {(centroid id)(sub dim) x (code id)(sub dim)'}
  //         => (sub q) x {(centroid id)(code id)}
  //         => (sub q)(centroid id)(code id)

  // View (centroid id)(dim) as
  //      (centroid id)(sub q)(dim)
  // Transpose (centroid id)(sub q)(sub dim) to
  //           (sub q)(centroid id)(sub dim)
  auto centroidView = coarseCentroids.view<3>(
    {coarseCentroids.getSize(0), numSubQuantizers_, dimPerSubQuantizer_});
  DeviceTensor<float, 3, true> centroidsTransposed(
    {numSubQuantizers_, coarseCentroids.getSize(0), dimPerSubQuantizer_});

  runTransposeAny(centroidView, 0, 1, centroidsTransposed,
                  resources_->getDefaultStreamCurrentDevice());

  DeviceTensor<float, 3, true> coarsePQProduct(
    {numSubQuantizers_, coarseCentroids.getSize(0), numSubQuantizerCodes_});

  // Scale 2.0f folds the factor of 2 from the formula into the GEMM
  runIteratedMatrixMult(coarsePQProduct, false,
                        centroidsTransposed, false,
                        pqCentroidsMiddleCode_, true,
                        2.0f, 0.0f,
                        resources_->getBlasHandleCurrentDevice(),
                        resources_->getDefaultStreamCurrentDevice());

  // Transpose (sub q)(centroid id)(code id) to
  //           (centroid id)(sub q)(code id)
  DeviceTensor<float, 3, true> coarsePQProductTransposed(
    {coarseCentroids.getSize(0), numSubQuantizers_, numSubQuantizerCodes_});
  runTransposeAny(coarsePQProduct, 0, 1, coarsePQProductTransposed,
                  resources_->getDefaultStreamCurrentDevice());

  // View (centroid id)(sub q)(code id) as
  //      (centroid id)(sub q * code id)
  auto coarsePQProductTransposedView = coarsePQProductTransposed.view<2>(
    {coarseCentroids.getSize(0), numSubQuantizers_ * numSubQuantizerCodes_});

  // Sum || y_R ||^2 + 2 * (y_C|y_R)
  // i.e., add norms     (sub q * code id)
  // along columns of inner product (centroid id)(sub q * code id)
  runSumAlongColumns(subQuantizerNorms, coarsePQProductTransposedView,
                     resources_->getDefaultStreamCurrentDevice());

#ifdef FAISS_USE_FLOAT16
  if (useFloat16LookupTables_) {
    precomputedCodeHalf_ = toHalf(resources_,
                                  resources_->getDefaultStreamCurrentDevice(),
                                  coarsePQProductTransposed);
    return;
  }
#endif

  // We added into the view, so `coarsePQProductTransposed` is now our
  // precomputed term 2.
  precomputedCode_ = std::move(coarsePQProductTransposed);
}
// k-NN query: finds the `nprobe` nearest coarse centroids per query, scans
// the corresponding inverted lists (precomputed- or on-the-fly path), and
// writes the top-k distances/indices. Remaps list offsets to user indices
// on the host when indices live CPU-side.
void
IVFPQ::query(Tensor<float, 2, true>& queries,
             int nprobe,
             int k,
             Tensor<float, 2, true>& outDistances,
             Tensor<long, 2, true>& outIndices) {
  // These are caught at a higher level
  FAISS_ASSERT(nprobe <= GPU_MAX_SELECTION_K);
  FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);

  auto& mem = resources_->getMemoryManagerCurrentDevice();
  auto stream = resources_->getDefaultStreamCurrentDevice();

  // Can't probe more lists than the quantizer holds
  nprobe = ::min(nprobe, quantizer_->getSize());

  FAISS_ASSERT(queries.getSize(1) == dim_);
  FAISS_ASSERT(outDistances.getSize(0) == queries.getSize(0));
  FAISS_ASSERT(outIndices.getSize(0) == queries.getSize(0));

  // Reserve space for the closest coarse centroids
  DeviceTensor<float, 2, true>
    coarseDistances(mem, {queries.getSize(0), nprobe}, stream);
  DeviceTensor<int, 2, true>
    coarseIndices(mem, {queries.getSize(0), nprobe}, stream);

  // Find the `nprobe` closest coarse centroids; we can use int
  // indices both internally and externally
  quantizer_->query(queries,
                    nprobe,
                    coarseDistances,
                    coarseIndices,
                    true);

  if (precomputedCodes_) {
    runPQPrecomputedCodes_(queries,
                           coarseDistances,
                           coarseIndices,
                           k,
                           outDistances,
                           outIndices);
  } else {
    runPQNoPrecomputedCodes_(queries,
                             coarseDistances,
                             coarseIndices,
                             k,
                             outDistances,
                             outIndices);
  }

  // If the GPU isn't storing indices (they are on the CPU side), we
  // need to perform the re-mapping here
  // FIXME: we might ultimately be calling this function with inputs
  // from the CPU, these are unnecessary copies
  if (indicesOptions_ == INDICES_CPU) {
    HostTensor<long, 2, true> hostOutIndices(outIndices, stream);

    ivfOffsetToUserIndex(hostOutIndices.data(),
                         numLists_,
                         hostOutIndices.getSize(0),
                         hostOutIndices.getSize(1),
                         listOffsetToUserIndex_);

    // Copy back to GPU, since the input to this function is on the
    // GPU
    outIndices.copyFrom(hostOutIndices, stream);
  }
}
// Returns a host-side copy of the raw encoded bytes of inverted list
// `listId`, copied on the default stream.
std::vector<unsigned char>
IVFPQ::getListCodes(int listId) const {
  FAISS_ASSERT(listId < deviceListData_.size());

  return deviceListData_[listId]->copyToHost<unsigned char>(
    resources_->getDefaultStreamCurrentDevice());
}
// Returns the PQ codebook in (sub q)(code id)(sub dim) layout.
Tensor<float, 3, true>
IVFPQ::getPQCentroids() {
  return pqCentroidsMiddleCode_;
}
// Precomputed-codes scan path: computes query-dependent term 3 via batched
// GEMM, pairs it with the stored term-2 tables (float or half), and runs
// the multi-pass precomputed scan kernel to produce the top-k results.
void
IVFPQ::runPQPrecomputedCodes_(
  Tensor<float, 2, true>& queries,
  DeviceTensor<float, 2, true>& coarseDistances,
  DeviceTensor<int, 2, true>& coarseIndices,
  int k,
  Tensor<float, 2, true>& outDistances,
  Tensor<long, 2, true>& outIndices) {
  auto& mem = resources_->getMemoryManagerCurrentDevice();
  auto stream = resources_->getDefaultStreamCurrentDevice();

  // Compute precomputed code term 3, - 2 * (x|y_R)
  // This is done via batch MM
  // {sub q} x {(query id)(sub dim) * (code id)(sub dim)'} =>
  // {sub q} x {(query id)(code id)}
  DeviceTensor<float, 3, true> term3Transposed(
    mem,
    {queries.getSize(0), numSubQuantizers_, numSubQuantizerCodes_},
    stream);

  // These allocations within are only temporary, so release them when
  // we're done to maximize free space
  {
    auto querySubQuantizerView = queries.view<3>(
      {queries.getSize(0), numSubQuantizers_, dimPerSubQuantizer_});
    DeviceTensor<float, 3, true> queriesTransposed(
      mem,
      {numSubQuantizers_, queries.getSize(0), dimPerSubQuantizer_},
      stream);
    runTransposeAny(querySubQuantizerView, 0, 1, queriesTransposed, stream);

    DeviceTensor<float, 3, true> term3(
      mem,
      {numSubQuantizers_, queries.getSize(0), numSubQuantizerCodes_},
      stream);

    // Scale -2.0f folds the negation from the formula into the GEMM
    runIteratedMatrixMult(term3, false,
                          queriesTransposed, false,
                          pqCentroidsMiddleCode_, true,
                          -2.0f, 0.0f,
                          resources_->getBlasHandleCurrentDevice(),
                          stream);

    runTransposeAny(term3, 0, 1, term3Transposed, stream);
  }

  NoTypeTensor<3, true> term2;
  NoTypeTensor<3, true> term3;
#ifdef FAISS_USE_FLOAT16
  DeviceTensor<half, 3, true> term3Half;

  if (useFloat16LookupTables_) {
    term3Half = toHalf(resources_, stream, term3Transposed);
    term2 = NoTypeTensor<3, true>(precomputedCodeHalf_);
    term3 = NoTypeTensor<3, true>(term3Half);
  }
#endif

  if (!useFloat16LookupTables_) {
    term2 = NoTypeTensor<3, true>(precomputedCode_);
    term3 = NoTypeTensor<3, true>(term3Transposed);
  }

  runPQScanMultiPassPrecomputed(queries,
                                coarseDistances, // term 1
                                term2, // term 2
                                term3, // term 3
                                coarseIndices,
                                useFloat16LookupTables_,
                                bytesPerVector_,
                                numSubQuantizers_,
                                numSubQuantizerCodes_,
                                deviceListDataPointers_,
                                deviceListIndexPointers_,
                                indicesOptions_,
                                deviceListLengths_,
                                maxListLength_,
                                k,
                                outDistances,
                                outIndices,
                                resources_);
}
// No-precomputed-codes scan path: delegates to the multi-pass kernel that
// computes code distances on the fly from the coarse centroids and the
// innermost-code codebook layout. (coarseDistances is accepted for
// signature symmetry with the precomputed path but is not used here.)
void
IVFPQ::runPQNoPrecomputedCodes_(
  Tensor<float, 2, true>& queries,
  DeviceTensor<float, 2, true>& coarseDistances,
  DeviceTensor<int, 2, true>& coarseIndices,
  int k,
  Tensor<float, 2, true>& outDistances,
  Tensor<long, 2, true>& outIndices) {
  FAISS_ASSERT(!quantizer_->getUseFloat16());
  auto& coarseCentroids = quantizer_->getVectorsFloat32Ref();

  runPQScanMultiPassNoPrecomputed(queries,
                                  coarseCentroids,
                                  pqCentroidsInnermostCode_,
                                  coarseIndices,
                                  useFloat16LookupTables_,
                                  bytesPerVector_,
                                  numSubQuantizers_,
                                  numSubQuantizerCodes_,
                                  deviceListDataPointers_,
                                  deviceListIndexPointers_,
                                  indicesOptions_,
                                  deviceListLengths_,
                                  maxListLength_,
                                  k,
                                  outDistances,
                                  outIndices,
                                  resources_);
}
} } // namespace
| 17577a1dc1eec947803557e9ac7a9f91e9f0a1af.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "IVFPQ.cuh"
#include "../GpuResources.h"
#include "BroadcastSum.cuh"
#include "Distance.cuh"
#include "FlatIndex.cuh"
#include "InvertedListAppend.cuh"
#include "L2Norm.cuh"
#include "PQCodeDistances.cuh"
#include "PQScanMultiPassNoPrecomputed.cuh"
#include "PQScanMultiPassPrecomputed.cuh"
#include "RemapIndices.h"
#include "VectorResidual.cuh"
#include "../utils/DeviceDefs.cuh"
#include "../utils/DeviceUtils.h"
#include "../utils/HostTensor.cuh"
#include "../utils/MatrixMult.cuh"
#include "../utils/NoTypeTensor.cuh"
#include "../utils/Transpose.cuh"
#include <limits>
#include <thrust/host_vector.h>
#include <unordered_map>
namespace faiss { namespace gpu {
// Construct an IVFPQ index over `quantizer`'s coarse centroids. Each
// dim_-dimensional residual is split into numSubQuantizers sub-vectors,
// each encoded with bitsPerSubQuantizer bits. pqCentroidData holds the PQ
// codebook in (sub q)(code id)(sub dim) order and is copied to the device
// by setPQCentroids_().
IVFPQ::IVFPQ(GpuResources* resources,
             FlatIndex* quantizer,
             int numSubQuantizers,
             int bitsPerSubQuantizer,
             float* pqCentroidData,
             IndicesOptions indicesOptions,
             bool useFloat16LookupTables,
             MemorySpace space) :
    IVFBase(resources,
            quantizer,
            numSubQuantizers,
            indicesOptions,
            space),
    numSubQuantizers_(numSubQuantizers),
    bitsPerSubQuantizer_(bitsPerSubQuantizer),
    numSubQuantizerCodes_(utils::pow2(bitsPerSubQuantizer_)),
    dimPerSubQuantizer_(dim_ / numSubQuantizers),
    precomputedCodes_(false),
    useFloat16LookupTables_(useFloat16LookupTables) {
  FAISS_ASSERT(pqCentroidData);

  // Each sub-quantizer code must fit in a single byte
  FAISS_ASSERT(bitsPerSubQuantizer_ <= 8);
  // The vector dimension must split evenly across sub-quantizers
  FAISS_ASSERT(dim_ % numSubQuantizers_ == 0);
  FAISS_ASSERT(isSupportedPQCodeLength(bytesPerVector_));

#ifndef FAISS_USE_FLOAT16
  // Half-precision lookup tables require a float16-enabled build
  FAISS_ASSERT(!useFloat16LookupTables_);
#endif

  setPQCentroids_(pqCentroidData);
}
// Nothing to release explicitly; members clean up in their own destructors.
IVFPQ::~IVFPQ() {
}
// Returns whether the scan kernels support `size` bytes of PQ code per
// encoded vector. Sizes 56/64/96 are only usable together with float16
// lookup tables (see the original per-case notes).
bool
IVFPQ::isSupportedPQCodeLength(int size) {
  static const int kSupportedCodeSizes[] = {
    1, 2, 3, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48,
    56,  // only supported with float16
    64,  // only supported with float16
    96,  // only supported with float16
  };

  for (int supported : kSupportedCodeSizes) {
    if (size == supported) {
      return true;
    }
  }

  return false;
}
// Returns whether the no-precomputed-codes scan path supports a
// per-sub-quantizer dimensionality of `dims`; delegates to the
// kernel-side capability check.
bool
IVFPQ::isSupportedNoPrecomputedSubDimSize(int dims) {
  return faiss::gpu::isSupportedNoPrecomputedSubDimSize(dims);
}
// Enable/disable use of the precomputed term-2 distance tables at query
// time. Building the tables is expensive, so only act when the setting
// actually changes; disabling releases the table storage.
void
IVFPQ::setPrecomputedCodes(bool enable) {
  if (precomputedCodes_ != enable) {
    precomputedCodes_ = enable;

    if (precomputedCodes_) {
      precomputeCodes_();
    } else {
      // Clear out old precomputed code data by assigning a freshly
      // default-constructed tensor. A prvalue is already a move source,
      // so the former std::move(...) around the temporaries was a
      // pessimizing no-op (flagged by -Wpessimizing-move).
      precomputedCode_ = DeviceTensor<float, 3, true>();

#ifdef FAISS_USE_FLOAT16
      precomputedCodeHalf_ = DeviceTensor<half, 3, true>();
#endif
    }
  }
}
// Assigns each input vector to its nearest coarse list, PQ-encodes the
// residual per sub-quantizer, grows the per-list device storage, and
// appends code + user index. Returns the number of vectors actually added
// (vectors whose coarse assignment is invalid, e.g. NaNs, are skipped).
int
IVFPQ::classifyAndAddVectors(Tensor<float, 2, true>& vecs,
                             Tensor<long, 1, true>& indices) {
  FAISS_ASSERT(vecs.getSize(0) == indices.getSize(0));
  FAISS_ASSERT(vecs.getSize(1) == dim_);
  FAISS_ASSERT(!quantizer_->getUseFloat16());
  auto& coarseCentroids = quantizer_->getVectorsFloat32Ref();
  auto& mem = resources_->getMemoryManagerCurrentDevice();
  auto stream = resources_->getDefaultStreamCurrentDevice();

  // Number of valid vectors that we actually add; we return this
  int numAdded = 0;

  // We don't actually need this
  DeviceTensor<float, 2, true> listDistance(mem, {vecs.getSize(0), 1}, stream);
  // We use this
  DeviceTensor<int, 2, true> listIds2d(mem, {vecs.getSize(0), 1}, stream);
  auto listIds = listIds2d.view<1>({vecs.getSize(0)});

  // Top-1 coarse assignment for every input vector
  quantizer_->query(vecs, 1, listDistance, listIds2d, false);

  // Copy the lists that we wish to append to back to the CPU
  // FIXME: really this can be into pinned memory and a true async
  // copy on a different stream; we can start the copy early, but it's
  // tiny
  HostTensor<int, 1, true> listIdsHost(listIds, stream);

  // Calculate the residual for each closest centroid
  DeviceTensor<float, 2, true> residuals(
    mem, {vecs.getSize(0), vecs.getSize(1)}, stream);
  runCalcResidual(vecs, coarseCentroids, listIds, residuals, stream);

  // Residuals are in the form
  // (vec x numSubQuantizer x dimPerSubQuantizer)
  // transpose to
  // (numSubQuantizer x vec x dimPerSubQuantizer)
  auto residualsView = residuals.view<3>(
    {residuals.getSize(0), numSubQuantizers_, dimPerSubQuantizer_});
  DeviceTensor<float, 3, true> residualsTranspose(
    mem,
    {numSubQuantizers_, residuals.getSize(0), dimPerSubQuantizer_},
    stream);
  runTransposeAny(residualsView, 0, 1, residualsTranspose, stream);

  // Get the product quantizer centroids in the form
  // (numSubQuantizer x numSubQuantizerCodes x dimPerSubQuantizer)
  // which is pqCentroidsMiddleCode_
  // We now have a batch operation to find the top-1 distances:
  // batch size: numSubQuantizer
  // centroids: (numSubQuantizerCodes x dimPerSubQuantizer)
  // residuals: (vec x dimPerSubQuantizer)
  // => (numSubQuantizer x vec x 1)
  DeviceTensor<float, 3, true> closestSubQDistance(
    mem, {numSubQuantizers_, residuals.getSize(0), 1}, stream);
  DeviceTensor<int, 3, true> closestSubQIndex(
    mem, {numSubQuantizers_, residuals.getSize(0), 1}, stream);

  for (int subQ = 0; subQ < numSubQuantizers_; ++subQ) {
    auto closestSubQDistanceView = closestSubQDistance[subQ].view();
    auto closestSubQIndexView = closestSubQIndex[subQ].view();
    auto pqCentroidsMiddleCodeView = pqCentroidsMiddleCode_[subQ].view();
    auto residualsTransposeView = residualsTranspose[subQ].view();

    runL2Distance(resources_,
                  pqCentroidsMiddleCodeView,
                  true, // pqCentroidsMiddleCodeView is row major
                  nullptr, // no precomputed norms
                  residualsTransposeView,
                  true, // residualsTransposeView is row major
                  1,
                  closestSubQDistanceView,
                  closestSubQIndexView,
                  // We don't care about distances
                  true);
  }

  // Now, we have the nearest sub-q centroid for each slice of the
  // residual vector.
  auto closestSubQIndexView = closestSubQIndex.view<2>(
    {numSubQuantizers_, residuals.getSize(0)});

  // Transpose this for easy use
  DeviceTensor<int, 2, true> encodings(
    mem, {residuals.getSize(0), numSubQuantizers_}, stream);
  runTransposeAny(closestSubQIndexView, 0, 1, encodings, stream);

  // Now we add the encoded vectors to the individual lists
  // First, make sure that there is space available for adding the new
  // encoded vectors and indices

  // list id -> # being added
  std::unordered_map<int, int> assignCounts;

  // vector id -> offset in list
  // (we already have vector id -> list id in listIds)
  HostTensor<int, 1, true> listOffsetHost({listIdsHost.getSize(0)});

  for (int i = 0; i < listIdsHost.getSize(0); ++i) {
    int listId = listIdsHost[i];

    // Add vector could be invalid (contains NaNs etc)
    if (listId < 0) {
      listOffsetHost[i] = -1;
      continue;
    }

    FAISS_ASSERT(listId < numLists_);
    ++numAdded;

    // Offset = current list length plus however many of this batch
    // already landed in the same list before vector i
    int offset = deviceListData_[listId]->size() / bytesPerVector_;

    auto it = assignCounts.find(listId);
    if (it != assignCounts.end()) {
      offset += it->second;
      it->second++;
    } else {
      assignCounts[listId] = 1;
    }

    listOffsetHost[i] = offset;
  }

  // If we didn't add anything (all invalid vectors), no need to
  // continue
  if (numAdded == 0) {
    return 0;
  }

  // We need to resize the data structures for the inverted lists on
  // the GPUs, which means that they might need reallocation, which
  // means that their base address may change. Figure out the new base
  // addresses, and update those in a batch on the device
  {
    // Resize all of the lists that we are appending to
    for (auto& counts : assignCounts) {
      auto& codes = deviceListData_[counts.first];
      codes->resize(codes->size() + counts.second * bytesPerVector_,
                    stream);
      int newNumVecs = (int) (codes->size() / bytesPerVector_);

      // NOTE: shadows the function parameter `indices` within this loop
      auto& indices = deviceListIndices_[counts.first];
      if ((indicesOptions_ == INDICES_32_BIT) ||
          (indicesOptions_ == INDICES_64_BIT)) {
        size_t indexSize =
          (indicesOptions_ == INDICES_32_BIT) ? sizeof(int) : sizeof(long);

        indices->resize(indices->size() + counts.second * indexSize, stream);
      } else if (indicesOptions_ == INDICES_CPU) {
        // indices are stored on the CPU side
        FAISS_ASSERT(counts.first < listOffsetToUserIndex_.size());

        auto& userIndices = listOffsetToUserIndex_[counts.first];
        userIndices.resize(newNumVecs);
      } else {
        // indices are not stored on the GPU or CPU side
        FAISS_ASSERT(indicesOptions_ == INDICES_IVF);
      }

      // This is used by the multi-pass query to decide how much scratch
      // space to allocate for intermediate results
      maxListLength_ = std::max(maxListLength_, newNumVecs);
    }

    // Update all pointers and sizes on the device for lists that we
    // appended to
    {
      // NOTE: shadows the outer `listIds` tensor within this scope only
      std::vector<int> listIds(assignCounts.size());
      int i = 0;
      for (auto& counts : assignCounts) {
        listIds[i++] = counts.first;
      }

      updateDeviceListInfo_(listIds, stream);
    }
  }

  // If we're maintaining the indices on the CPU side, update our
  // map. We already resized our map above.
  if (indicesOptions_ == INDICES_CPU) {
    // We need to maintain the indices on the CPU side
    HostTensor<long, 1, true> hostIndices(indices, stream);

    for (int i = 0; i < hostIndices.getSize(0); ++i) {
      int listId = listIdsHost[i];

      // Add vector could be invalid (contains NaNs etc)
      if (listId < 0) {
        continue;
      }

      int offset = listOffsetHost[i];

      FAISS_ASSERT(listId < listOffsetToUserIndex_.size());
      auto& userIndices = listOffsetToUserIndex_[listId];

      FAISS_ASSERT(offset < userIndices.size());
      userIndices[offset] = hostIndices[i];
    }
  }

  // We similarly need to actually append the new encoded vectors
  {
    DeviceTensor<int, 1, true> listOffset(mem, listOffsetHost, stream);

    // This kernel will handle appending each encoded vector + index to
    // the appropriate list
    runIVFPQInvertedListAppend(listIds,
                               listOffset,
                               encodings,
                               indices,
                               deviceListDataPointers_,
                               deviceListIndexPointers_,
                               indicesOptions_,
                               stream);
  }

  return numAdded;
}
// Appends `numVecs` already-PQ-encoded vectors (CPU-resident `codes`, with
// their user-visible `indices`) to inverted list `listId`, then refreshes the
// device-side pointer and length bookkeeping for that list.
// Assumes `codes` holds numVecs * bytesPerVector_ bytes -- TODO confirm with
// callers.
void
IVFPQ::addCodeVectorsFromCpu(int listId,
const void* codes,
const long* indices,
size_t numVecs) {
// This list must already exist
FAISS_ASSERT(listId < deviceListData_.size());
auto stream = resources_->getDefaultStreamCurrentDevice();
// If there's nothing to add, then there's nothing we have to do
if (numVecs == 0) {
return;
}
size_t lengthInBytes = numVecs * bytesPerVector_;
auto& listCodes = deviceListData_[listId];
// Remember the old base pointer so a reallocation can be detected below.
auto prevCodeData = listCodes->data();
// We only have int32 length representations on the GPU per each
// list; the length is in sizeof(char)
FAISS_ASSERT(listCodes->size() % bytesPerVector_ == 0);
FAISS_ASSERT(listCodes->size() + lengthInBytes <=
(size_t) std::numeric_limits<int>::max());
listCodes->append((unsigned char*) codes,
lengthInBytes,
stream,
true /* exact reserved size */);
// Handle the indices as well
addIndicesFromCpu_(listId, indices, numVecs);
// This list address may have changed due to vector resizing, but
// only bother updating it on the device if it has changed
if (prevCodeData != listCodes->data()) {
deviceListDataPointers_[listId] = listCodes->data();
}
// And our size has changed too
int listLength = listCodes->size() / bytesPerVector_;
deviceListLengths_[listId] = listLength;
// We update this as well, since the multi-pass algorithm uses it
maxListLength_ = std::max(maxListLength_, listLength);
// device_vector add is potentially happening on a different stream
// than our default stream
if (resources_->getDefaultStreamCurrentDevice() != 0) {
streamWait({stream}, {0});
}
}
// Installs the product-quantizer centroid table from host memory `data`,
// laid out (sub q)(code id)(sub dim), and caches it on the device in the two
// transposed layouts consumed by the query paths:
// pqCentroidsInnermostCode_ as (sub q)(sub dim)(code id), and
// pqCentroidsMiddleCode_ as (sub q)(code id)(sub dim).
void
IVFPQ::setPQCentroids_(float* data) {
size_t pqSize =
numSubQuantizers_ * numSubQuantizerCodes_ * dimPerSubQuantizer_;
// Make sure the data is on the host
// FIXME: why are we doing this?
thrust::host_vector<float> hostMemory;
hostMemory.insert(hostMemory.end(), data, data + pqSize);
HostTensor<float, 3, true> pqHost(
hostMemory.data(),
{numSubQuantizers_, numSubQuantizerCodes_, dimPerSubQuantizer_});
// Copy to the device in the original layout, then transpose in place on
// the device.
DeviceTensor<float, 3, true> pqDevice(
pqHost,
resources_->getDefaultStreamCurrentDevice());
DeviceTensor<float, 3, true> pqDeviceTranspose(
{numSubQuantizers_, dimPerSubQuantizer_, numSubQuantizerCodes_});
runTransposeAny(pqDevice, 1, 2, pqDeviceTranspose,
resources_->getDefaultStreamCurrentDevice());
pqCentroidsInnermostCode_ = std::move(pqDeviceTranspose);
// Also maintain the PQ centroids in the form
// (sub q)(code id)(sub dim)
DeviceTensor<float, 3, true> pqCentroidsMiddleCode(
{numSubQuantizers_, numSubQuantizerCodes_, dimPerSubQuantizer_});
runTransposeAny(pqCentroidsInnermostCode_, 1, 2, pqCentroidsMiddleCode,
resources_->getDefaultStreamCurrentDevice());
pqCentroidsMiddleCode_ = std::move(pqCentroidsMiddleCode);
}
// Builds the query-independent "term 2" lookup table
// (|| y_R ||^2 + 2 * (y_C|y_R)) used by the precomputed-code query path,
// storing it as float32 in precomputedCode_ or, when float16 lookup tables
// are enabled, as half in precomputedCodeHalf_.
// Requires float32 coarse centroids (asserted below).
void
IVFPQ::precomputeCodes_() {
//
// d = || x - y_C ||^2 + || y_R ||^2 + 2 * (y_C|y_R) - 2 * (x|y_R)
// --------------- --------------------------- -------
// term 1 term 2 term 3
//
// Terms 1 and 3 are available only at query time. We compute term 2
// here.
FAISS_ASSERT(!quantizer_->getUseFloat16());
auto& coarseCentroids = quantizer_->getVectorsFloat32Ref();
// Compute ||y_R||^2 by treating
// (sub q)(code id)(sub dim) as (sub q * code id)(sub dim)
auto pqCentroidsMiddleCodeView =
pqCentroidsMiddleCode_.view<2>(
{numSubQuantizers_ * numSubQuantizerCodes_, dimPerSubQuantizer_});
DeviceTensor<float, 1, true> subQuantizerNorms(
{numSubQuantizers_ * numSubQuantizerCodes_});
runL2Norm(pqCentroidsMiddleCodeView, true,
subQuantizerNorms, true,
resources_->getDefaultStreamCurrentDevice());
// Compute 2 * (y_C|y_R) via batch matrix multiplication
// batch size (sub q) x {(centroid id)(sub dim) x (code id)(sub dim)'}
// => (sub q) x {(centroid id)(code id)}
// => (sub q)(centroid id)(code id)
// View (centroid id)(dim) as
// (centroid id)(sub q)(dim)
// Transpose (centroid id)(sub q)(sub dim) to
// (sub q)(centroid id)(sub dim)
auto centroidView = coarseCentroids.view<3>(
{coarseCentroids.getSize(0), numSubQuantizers_, dimPerSubQuantizer_});
DeviceTensor<float, 3, true> centroidsTransposed(
{numSubQuantizers_, coarseCentroids.getSize(0), dimPerSubQuantizer_});
runTransposeAny(centroidView, 0, 1, centroidsTransposed,
resources_->getDefaultStreamCurrentDevice());
// alpha = 2.0f bakes the factor of 2 from the distance decomposition
// directly into the GEMM.
DeviceTensor<float, 3, true> coarsePQProduct(
{numSubQuantizers_, coarseCentroids.getSize(0), numSubQuantizerCodes_});
runIteratedMatrixMult(coarsePQProduct, false,
centroidsTransposed, false,
pqCentroidsMiddleCode_, true,
2.0f, 0.0f,
resources_->getBlasHandleCurrentDevice(),
resources_->getDefaultStreamCurrentDevice());
// Transpose (sub q)(centroid id)(code id) to
// (centroid id)(sub q)(code id)
DeviceTensor<float, 3, true> coarsePQProductTransposed(
{coarseCentroids.getSize(0), numSubQuantizers_, numSubQuantizerCodes_});
runTransposeAny(coarsePQProduct, 0, 1, coarsePQProductTransposed,
resources_->getDefaultStreamCurrentDevice());
// View (centroid id)(sub q)(code id) as
// (centroid id)(sub q * code id)
auto coarsePQProductTransposedView = coarsePQProductTransposed.view<2>(
{coarseCentroids.getSize(0), numSubQuantizers_ * numSubQuantizerCodes_});
// Sum || y_R ||^2 + 2 * (y_C|y_R)
// i.e., add norms (sub q * code id)
// along columns of inner product (centroid id)(sub q * code id)
runSumAlongColumns(subQuantizerNorms, coarsePQProductTransposedView,
resources_->getDefaultStreamCurrentDevice());
#ifdef FAISS_USE_FLOAT16
// When float16 lookup tables are in use, keep only the half-precision
// copy; the float32 buffer is discarded on return.
if (useFloat16LookupTables_) {
precomputedCodeHalf_ = toHalf(resources_,
resources_->getDefaultStreamCurrentDevice(),
coarsePQProductTransposed);
return;
}
#endif
// We added into the view, so `coarsePQProductTransposed` is now our
// precomputed term 2.
precomputedCode_ = std::move(coarsePQProductTransposed);
}
// Runs a k-nearest-neighbor IVFPQ search over device-resident `queries`:
// (1) rank coarse centroids, (2) scan the top-`nprobe` inverted lists
// (precomputed or on-the-fly path), (3) write per-query results into
// outDistances/outIndices; row i holds the results for query i.
void
IVFPQ::query(Tensor<float, 2, true>& queries,
int nprobe,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<long, 2, true>& outIndices) {
// These are caught at a higher level
FAISS_ASSERT(nprobe <= GPU_MAX_SELECTION_K);
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
auto& mem = resources_->getMemoryManagerCurrentDevice();
auto stream = resources_->getDefaultStreamCurrentDevice();
// Cannot probe more lists than the coarse quantizer has centroids.
nprobe = std::min(nprobe, quantizer_->getSize());
FAISS_ASSERT(queries.getSize(1) == dim_);
FAISS_ASSERT(outDistances.getSize(0) == queries.getSize(0));
FAISS_ASSERT(outIndices.getSize(0) == queries.getSize(0));
// Reserve space for the closest coarse centroids
DeviceTensor<float, 2, true>
coarseDistances(mem, {queries.getSize(0), nprobe}, stream);
DeviceTensor<int, 2, true>
coarseIndices(mem, {queries.getSize(0), nprobe}, stream);
// Find the `nprobe` closest coarse centroids; we can use int
// indices both internally and externally
quantizer_->query(queries,
nprobe,
coarseDistances,
coarseIndices,
true);
if (precomputedCodes_) {
runPQPrecomputedCodes_(queries,
coarseDistances,
coarseIndices,
k,
outDistances,
outIndices);
} else {
runPQNoPrecomputedCodes_(queries,
coarseDistances,
coarseIndices,
k,
outDistances,
outIndices);
}
// If the GPU isn't storing indices (they are on the CPU side), we
// need to perform the re-mapping here
// FIXME: we might ultimately be calling this function with inputs
// from the CPU, these are unnecessary copies
if (indicesOptions_ == INDICES_CPU) {
HostTensor<long, 2, true> hostOutIndices(outIndices, stream);
ivfOffsetToUserIndex(hostOutIndices.data(),
numLists_,
hostOutIndices.getSize(0),
hostOutIndices.getSize(1),
listOffsetToUserIndex_);
// Copy back to GPU, since the input to this function is on the
// GPU
outIndices.copyFrom(hostOutIndices, stream);
}
}
// Returns a host-side copy of the raw encoded bytes of inverted list
// `listId`.
std::vector<unsigned char>
IVFPQ::getListCodes(int listId) const {
FAISS_ASSERT(listId < deviceListData_.size());
return deviceListData_[listId]->copyToHost<unsigned char>(
resources_->getDefaultStreamCurrentDevice());
}
// Returns the PQ centroid table in (sub q)(code id)(sub dim) layout.
Tensor<float, 3, true>
IVFPQ::getPQCentroids() {
return pqCentroidsMiddleCode_;
}
// Precomputed-table query path. Computes term 3 (-2 * (x|y_R)) for all
// queries via batched GEMM, pairs it with the cached term 2 table
// (precomputedCode_ / precomputedCodeHalf_), and runs the multi-pass PQ
// scan. coarseDistances supplies term 1; see precomputeCodes_() for the
// distance decomposition.
void
IVFPQ::runPQPrecomputedCodes_(
Tensor<float, 2, true>& queries,
DeviceTensor<float, 2, true>& coarseDistances,
DeviceTensor<int, 2, true>& coarseIndices,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<long, 2, true>& outIndices) {
auto& mem = resources_->getMemoryManagerCurrentDevice();
auto stream = resources_->getDefaultStreamCurrentDevice();
// Compute precomputed code term 3, - 2 * (x|y_R)
// This is done via batch MM
// {sub q} x {(query id)(sub dim) * (code id)(sub dim)'} =>
// {sub q} x {(query id)(code id)}
DeviceTensor<float, 3, true> term3Transposed(
mem,
{queries.getSize(0), numSubQuantizers_, numSubQuantizerCodes_},
stream);
// These allocations within are only temporary, so release them when
// we're done to maximize free space
{
auto querySubQuantizerView = queries.view<3>(
{queries.getSize(0), numSubQuantizers_, dimPerSubQuantizer_});
DeviceTensor<float, 3, true> queriesTransposed(
mem,
{numSubQuantizers_, queries.getSize(0), dimPerSubQuantizer_},
stream);
runTransposeAny(querySubQuantizerView, 0, 1, queriesTransposed, stream);
DeviceTensor<float, 3, true> term3(
mem,
{numSubQuantizers_, queries.getSize(0), numSubQuantizerCodes_},
stream);
// alpha = -2.0f folds the sign and factor of 2 into the GEMM itself.
runIteratedMatrixMult(term3, false,
queriesTransposed, false,
pqCentroidsMiddleCode_, true,
-2.0f, 0.0f,
resources_->getBlasHandleCurrentDevice(),
stream);
runTransposeAny(term3, 0, 1, term3Transposed, stream);
}
// Type-erased views so the scan kernel can consume either float32 or
// float16 lookup tables.
NoTypeTensor<3, true> term2;
NoTypeTensor<3, true> term3;
#ifdef FAISS_USE_FLOAT16
DeviceTensor<half, 3, true> term3Half;
if (useFloat16LookupTables_) {
term3Half = toHalf(resources_, stream, term3Transposed);
term2 = NoTypeTensor<3, true>(precomputedCodeHalf_);
term3 = NoTypeTensor<3, true>(term3Half);
}
#endif
if (!useFloat16LookupTables_) {
term2 = NoTypeTensor<3, true>(precomputedCode_);
term3 = NoTypeTensor<3, true>(term3Transposed);
}
runPQScanMultiPassPrecomputed(queries,
coarseDistances, // term 1
term2, // term 2
term3, // term 3
coarseIndices,
useFloat16LookupTables_,
bytesPerVector_,
numSubQuantizers_,
numSubQuantizerCodes_,
deviceListDataPointers_,
deviceListIndexPointers_,
indicesOptions_,
deviceListLengths_,
maxListLength_,
k,
outDistances,
outIndices,
resources_);
}
// Non-precomputed query path: the scan kernel computes the distance terms
// on the fly from the raw coarse centroids and the innermost-code PQ
// centroid layout. Requires float32 coarse centroids (asserted below).
void
IVFPQ::runPQNoPrecomputedCodes_(
Tensor<float, 2, true>& queries,
DeviceTensor<float, 2, true>& coarseDistances,
DeviceTensor<int, 2, true>& coarseIndices,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<long, 2, true>& outIndices) {
FAISS_ASSERT(!quantizer_->getUseFloat16());
auto& coarseCentroids = quantizer_->getVectorsFloat32Ref();
runPQScanMultiPassNoPrecomputed(queries,
coarseCentroids,
pqCentroidsInnermostCode_,
coarseIndices,
useFloat16LookupTables_,
bytesPerVector_,
numSubQuantizers_,
numSubQuantizerCodes_,
deviceListDataPointers_,
deviceListIndexPointers_,
indicesOptions_,
deviceListLengths_,
maxListLength_,
k,
outDistances,
outIndices,
resources_);
}
} } // namespace
|
4cfb7cd60ceb220ddf3a282ff2d5991327cfe7d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/ztranspose.cu normal z -> d, Tue Feb 9 16:05:33 2016
@author Stan Tomov
@author Mark Gates
*/
#include "magma_internal.h"
#define PRECISION_d
#if defined(PRECISION_z)
#define NX 16
#else
#define NX 32
#endif
#define NB 32
#define NY 8
// tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB.
// uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly.
// subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB
// for each subtile
// load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY
// save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY
// A += NX
// AT += NX*ldat
//
// e.g., with NB=32, NX=32, NY=8 ([sdc] precisions)
// load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 )
// save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14)
//
// e.g., with NB=32, NX=16, NY=8 (z precision)
// load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14)
// save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12)
// (AT21 AT22)
// Transposes the NB-by-NB tile of A (m x n, leading dim lda) selected by
// blockIdx into AT (leading dim ldat), staging NX-by-NB subtiles through
// shared memory. See the subtile layout notes above this function.
__device__ void
dtranspose_device(
int m, int n,
const double *A, int lda,
double *AT, int ldat)
{
// +1 column of padding avoids shared-memory bank conflicts on the
// transposed (column-wise) reads in the store phase.
__shared__ double sA[NB][NX+1];
int tx = threadIdx.x;
int ty = threadIdx.y;
int ibx = blockIdx.x*NB;
int iby = blockIdx.y*NB;
int i, j;
// Advance both pointers to this thread's starting element within the
// block's tile.
A += ibx + tx + (iby + ty)*lda;
AT += iby + tx + (ibx + ty)*ldat;
#pragma unroll
for( int tile=0; tile < NB/NX; ++tile ) {
// load NX-by-NB subtile transposed from A into sA
i = ibx + tx + tile*NX;
j = iby + ty;
// Bounds checks guard partial tiles at the matrix edges.
if (i < m) {
#pragma unroll
for( int j2=0; j2 < NB; j2 += NY ) {
if (j + j2 < n) {
sA[ty + j2][tx] = A[j2*lda];
}
}
}
__syncthreads();
// save NB-by-NX subtile from sA into AT
i = iby + tx;
j = ibx + ty + tile*NX;
#pragma unroll
for( int i2=0; i2 < NB; i2 += NX ) {
if (i + i2 < n) {
#pragma unroll
for( int j2=0; j2 < NX; j2 += NY ) {
if (j + j2 < m) {
AT[i2 + j2*ldat] = sA[tx + i2][ty + j2];
}
}
}
}
// Barrier before the next iteration overwrites sA.
__syncthreads();
// move to next subtile
A += NX;
AT += NX*ldat;
}
}
/*
kernel wrapper to call the device function.
*/
// Thin __global__ entry point for the single-matrix transpose; one thread
// block handles one NB-by-NB tile (grid set up by magmablas_dtranspose_q).
__global__
void dtranspose_kernel(
int m, int n,
const double *A, int lda,
double *AT, int ldat)
{
dtranspose_device(m, n, A, lda, AT, ldat);
}
// Batched variant: blockIdx.z selects the matrix of the batch; x/y tile
// that matrix exactly as in dtranspose_kernel.
__global__
void dtranspose_kernel_batched(
int m, int n,
double **dA_array, int lda,
double **dAT_array, int ldat)
{
int batchid = blockIdx.z;
dtranspose_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat);
}
/**
Purpose
-------
dtranspose copies and transposes a matrix dA to matrix dAT.
Same as dtranspose, but adds queue argument.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The M-by-N matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= M.
@param[in]
dAT DOUBLE PRECISION array, dimension (LDDAT,M)
The N-by-M matrix dAT.
@param[in]
lddat INTEGER
The leading dimension of the array dAT. LDDAT >= N.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dtranspose_q(
magma_int_t m, magma_int_t n,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_ptr dAT, magma_int_t lddat,
magma_queue_t queue )
{
// Validate arguments; on error, report via magma_xerbla (negative info
// encodes the offending argument position) and return without launching.
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < m )
info = -4;
else if ( lddat < n )
info = -6;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
/* Quick return */
if ( (m == 0) || (n == 0) )
return;
// NX x NY threads per block; one block per NB-by-NB tile of dA.
dim3 threads( NX, NY );
dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ) );
hipLaunchKernelGGL(( dtranspose_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dA, ldda, dAT, lddat );
}
/**
Purpose
-------
dtranspose_batched copies and transposes a matrix dA_array[i] to matrix dAT_array[i].
Same as dtranspose_batched, but adds queue argument.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA_array
DOUBLE PRECISION* array, dimension (batchCount)
array of pointers to the matrices dA, where each dA is of dimension (LDDA,N)
The M-by-N matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= M.
@param[in]
dAT_array
DOUBLE PRECISION* array, dimension (batchCount)
array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M)
The N-by-M matrix dAT.
@param[in]
lddat INTEGER
The leading dimension of the array dAT. LDDAT >= N.
@param[in]
queue magma_queue_t
Queue to execute in.
@param[in]
batchCount Number of matrices in dA_array and dAT_array
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dtranspose_batched(
magma_int_t m, magma_int_t n,
double **dA_array, magma_int_t ldda,
double **dAT_array, magma_int_t lddat,
magma_int_t batchCount,
magma_queue_t queue )
{
// Validate arguments; on error, report via magma_xerbla and return
// without launching. batchCount itself is not validated here.
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < m )
info = -4;
else if ( lddat < n )
info = -6;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
/* Quick return */
if ( (m == 0) || (n == 0) )
return;
// One grid z-slice per batch entry.
// NOTE(review): batchCount maps to gridDim.z, which has a hardware upper
// bound (65535 on most GPUs) -- confirm callers never exceed it.
dim3 threads( NX, NY, 1 );
dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ), batchCount );
hipLaunchKernelGGL(( dtranspose_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dA_array, ldda, dAT_array, lddat );
}
| 4cfb7cd60ceb220ddf3a282ff2d5991327cfe7d8.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/ztranspose.cu normal z -> d, Tue Feb 9 16:05:33 2016
@author Stan Tomov
@author Mark Gates
*/
#include "magma_internal.h"
#define PRECISION_d
#if defined(PRECISION_z)
#define NX 16
#else
#define NX 32
#endif
#define NB 32
#define NY 8
// tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB.
// uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly.
// subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB
// for each subtile
// load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY
// save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY
// A += NX
// AT += NX*ldat
//
// e.g., with NB=32, NX=32, NY=8 ([sdc] precisions)
// load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 )
// save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14)
//
// e.g., with NB=32, NX=16, NY=8 (z precision)
// load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14)
// save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12)
// (AT21 AT22)
// Transposes the NB-by-NB tile of A (m x n, leading dim lda) selected by
// blockIdx into AT (leading dim ldat), staging NX-by-NB subtiles through
// shared memory. See the subtile layout notes above this function.
__device__ void
dtranspose_device(
int m, int n,
const double *A, int lda,
double *AT, int ldat)
{
// +1 column of padding avoids shared-memory bank conflicts on the
// transposed (column-wise) reads in the store phase.
__shared__ double sA[NB][NX+1];
int tx = threadIdx.x;
int ty = threadIdx.y;
int ibx = blockIdx.x*NB;
int iby = blockIdx.y*NB;
int i, j;
// Advance both pointers to this thread's starting element within the
// block's tile.
A += ibx + tx + (iby + ty)*lda;
AT += iby + tx + (ibx + ty)*ldat;
#pragma unroll
for( int tile=0; tile < NB/NX; ++tile ) {
// load NX-by-NB subtile transposed from A into sA
i = ibx + tx + tile*NX;
j = iby + ty;
// Bounds checks guard partial tiles at the matrix edges.
if (i < m) {
#pragma unroll
for( int j2=0; j2 < NB; j2 += NY ) {
if (j + j2 < n) {
sA[ty + j2][tx] = A[j2*lda];
}
}
}
__syncthreads();
// save NB-by-NX subtile from sA into AT
i = iby + tx;
j = ibx + ty + tile*NX;
#pragma unroll
for( int i2=0; i2 < NB; i2 += NX ) {
if (i + i2 < n) {
#pragma unroll
for( int j2=0; j2 < NX; j2 += NY ) {
if (j + j2 < m) {
AT[i2 + j2*ldat] = sA[tx + i2][ty + j2];
}
}
}
}
// Barrier before the next iteration overwrites sA.
__syncthreads();
// move to next subtile
A += NX;
AT += NX*ldat;
}
}
/*
kernel wrapper to call the device function.
*/
// Thin __global__ entry point for the single-matrix transpose; one thread
// block handles one NB-by-NB tile (grid set up by magmablas_dtranspose_q).
__global__
void dtranspose_kernel(
int m, int n,
const double *A, int lda,
double *AT, int ldat)
{
dtranspose_device(m, n, A, lda, AT, ldat);
}
// Batched variant: blockIdx.z selects the matrix of the batch; x/y tile
// that matrix exactly as in dtranspose_kernel.
__global__
void dtranspose_kernel_batched(
int m, int n,
double **dA_array, int lda,
double **dAT_array, int ldat)
{
int batchid = blockIdx.z;
dtranspose_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat);
}
/**
Purpose
-------
dtranspose copies and transposes a matrix dA to matrix dAT.
Same as dtranspose, but adds queue argument.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The M-by-N matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= M.
@param[in]
dAT DOUBLE PRECISION array, dimension (LDDAT,M)
The N-by-M matrix dAT.
@param[in]
lddat INTEGER
The leading dimension of the array dAT. LDDAT >= N.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dtranspose_q(
magma_int_t m, magma_int_t n,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_ptr dAT, magma_int_t lddat,
magma_queue_t queue )
{
// Validate arguments; on error, report via magma_xerbla (negative info
// encodes the offending argument position) and return without launching.
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < m )
info = -4;
else if ( lddat < n )
info = -6;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
/* Quick return */
if ( (m == 0) || (n == 0) )
return;
// NX x NY threads per block; one block per NB-by-NB tile of dA.
dim3 threads( NX, NY );
dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ) );
dtranspose_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, dA, ldda, dAT, lddat );
}
/**
Purpose
-------
dtranspose_batched copies and transposes a matrix dA_array[i] to matrix dAT_array[i].
Same as dtranspose_batched, but adds queue argument.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA_array
DOUBLE PRECISION* array, dimension (batchCount)
array of pointers to the matrices dA, where each dA is of dimension (LDDA,N)
The M-by-N matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= M.
@param[in]
dAT_array
DOUBLE PRECISION* array, dimension (batchCount)
array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M)
The N-by-M matrix dAT.
@param[in]
lddat INTEGER
The leading dimension of the array dAT. LDDAT >= N.
@param[in]
queue magma_queue_t
Queue to execute in.
@param[in]
batchCount Number of matrices in dA_array and dAT_array
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dtranspose_batched(
magma_int_t m, magma_int_t n,
double **dA_array, magma_int_t ldda,
double **dAT_array, magma_int_t lddat,
magma_int_t batchCount,
magma_queue_t queue )
{
// Validate arguments; on error, report via magma_xerbla and return
// without launching. batchCount itself is not validated here.
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < m )
info = -4;
else if ( lddat < n )
info = -6;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
/* Quick return */
if ( (m == 0) || (n == 0) )
return;
// One grid z-slice per batch entry.
// NOTE(review): batchCount maps to gridDim.z, which has a hardware upper
// bound (65535 on most GPUs) -- confirm callers never exceed it.
dim3 threads( NX, NY, 1 );
dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ), batchCount );
dtranspose_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, dA_array, ldda, dAT_array, lddat );
}
|
0cece5dd1c81de8d77b895151b8eb4af64c95aa4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// [Z,Y,X]_THREADS is the number of participating threads in the z, y, x
// dimension of the block. If set to 0 it means that dimension doesn't
// participate, otherwise it is the number of threads. We could start with warp
// reductions, then reduce the warps, this could save some shared memory, but
// may actually be slower.
//
// EXAMPLE USAGE:
// blockReduceSum<X_THREADS, Y_THREADS, Z_THREADS>
// (output[output_index], inputs[input_index],
// [] __device__ (T& a, const T b) { a += b; });
//
// Note: We agressively template functions taking dim3 in the functions below
// because ROCM uses different types for the various dim3 and maps them
// directly to intrinsics, but they're dim3 when used after modification.
//
// Block-wide reduction of one value per thread into `out` on the thread(s)
// with index 0 along every reduced dimension.
// X/Y/Z_REDUCE select which block dimensions participate; non-reduced
// dimensions produce independent results.
// `shared_mem` must hold at least blockDim.x*blockDim.y*blockDim.z elements
// of T (linear_tid below indexes the full block).
// Threads with read_write_pred == false contribute `init_val` (expected to
// be the reduction identity) and skip the final write.
template <
bool X_REDUCE,
bool Y_REDUCE,
bool Z_REDUCE,
typename T,
typename Func,
typename _dim3ti,
typename _dim3bd>
__device__ void blockReduce(
T& out,
const T inp_val,
Func reduction_op,
const _dim3ti& thread_idx,
const _dim3bd& block_dim,
T* shared_mem,
bool read_write_pred,
T init_val) {
unsigned int reduction_size = (X_REDUCE ? block_dim.x : 1) *
(Y_REDUCE ? block_dim.y : 1) * (Z_REDUCE ? block_dim.z : 1);
// If this thread will output a final result
bool should_write = true;
if (X_REDUCE)
should_write = should_write && thread_idx.x == 0;
if (Y_REDUCE)
should_write = should_write && thread_idx.y == 0;
if (Z_REDUCE)
should_write = should_write && thread_idx.z == 0;
unsigned int reduction_stride;
unsigned int reduction_tid;
unsigned int linear_tid;
if (X_REDUCE && !Y_REDUCE && Z_REDUCE) {
// Transpose Z and Y in the shared memory so Z and X dims are contiguous in
// smem
reduction_stride = 1;
// NOTE(review): this branch reads the CUDA builtins threadIdx/blockDim
// directly instead of the thread_idx/block_dim parameters used in the
// else-branch -- presumably intentional (the params mirror the
// builtins), but confirm.
linear_tid = threadIdx.y * blockDim.z * blockDim.x +
threadIdx.z * blockDim.x + threadIdx.x;
reduction_tid = threadIdx.z * blockDim.x + threadIdx.x;
} else {
// Normal reduction in order
reduction_stride =
(X_REDUCE ? 1
: (Y_REDUCE ? block_dim.x
: (Z_REDUCE ? block_dim.x * block_dim.y : 0)));
linear_tid = thread_idx.z * block_dim.y * block_dim.x +
thread_idx.y * block_dim.x + thread_idx.x;
reduction_tid = (Z_REDUCE ? thread_idx.z : 0) *
(Y_REDUCE ? block_dim.y : 1) * (X_REDUCE ? block_dim.x : 1) +
(Y_REDUCE ? thread_idx.y : 0) * (X_REDUCE ? block_dim.x : 1) +
(X_REDUCE ? thread_idx.x : 0);
}
assert(reduction_stride != 0);
// Stage each thread's contribution (or the identity) in shared memory.
if (read_write_pred) {
shared_mem[linear_tid] = inp_val;
} else {
shared_mem[linear_tid] = init_val;
}
__syncthreads();
// Reduce down to nearest power of 2:
// np2 = largest power of two <= reduction_size; this first step folds the
// excess elements in, then the loop below halves np2 down to 1.
int np2 = 1 << (31 - __clz(reduction_size));
if (reduction_tid < np2) {
if (reduction_tid + np2 < reduction_size) {
reduction_op(
shared_mem[linear_tid],
shared_mem[linear_tid + np2 * reduction_stride]);
}
}
__syncthreads();
// for (int factor = np2/2; factor > contig_threads / 2; factor>>=1) {
for (int factor = np2 / 2; factor > 0; factor >>= 1) {
if (reduction_tid < factor) {
reduction_op(
shared_mem[linear_tid],
shared_mem[linear_tid + factor * reduction_stride]);
}
__syncthreads();
}
if (should_write && read_write_pred)
out = shared_mem[linear_tid];
}
| 0cece5dd1c81de8d77b895151b8eb4af64c95aa4.cu | // [Z,Y,X]_THREADS is the number of participating threads in the z, y, x
// dimension of the block. If set to 0 it means that dimension doesn't
// participate, otherwise it is the number of threads. We could start with warp
// reductions, then reduce the warps, this could save some shared memory, but
// may actually be slower.
//
// EXAMPLE USAGE:
// blockReduceSum<X_THREADS, Y_THREADS, Z_THREADS>
// (output[output_index], inputs[input_index],
// [] __device__ (T& a, const T b) { a += b; });
//
// Note: We agressively template functions taking dim3 in the functions below
// because ROCM uses different types for the various dim3 and maps them
// directly to intrinsics, but they're dim3 when used after modification.
//
// Block-wide reduction of one value per thread into `out` on the thread(s)
// with index 0 along every reduced dimension.
// X/Y/Z_REDUCE select which block dimensions participate; non-reduced
// dimensions produce independent results.
// `shared_mem` must hold at least blockDim.x*blockDim.y*blockDim.z elements
// of T (linear_tid below indexes the full block).
// Threads with read_write_pred == false contribute `init_val` (expected to
// be the reduction identity) and skip the final write.
template <
bool X_REDUCE,
bool Y_REDUCE,
bool Z_REDUCE,
typename T,
typename Func,
typename _dim3ti,
typename _dim3bd>
__device__ void blockReduce(
T& out,
const T inp_val,
Func reduction_op,
const _dim3ti& thread_idx,
const _dim3bd& block_dim,
T* shared_mem,
bool read_write_pred,
T init_val) {
unsigned int reduction_size = (X_REDUCE ? block_dim.x : 1) *
(Y_REDUCE ? block_dim.y : 1) * (Z_REDUCE ? block_dim.z : 1);
// If this thread will output a final result
bool should_write = true;
if (X_REDUCE)
should_write = should_write && thread_idx.x == 0;
if (Y_REDUCE)
should_write = should_write && thread_idx.y == 0;
if (Z_REDUCE)
should_write = should_write && thread_idx.z == 0;
unsigned int reduction_stride;
unsigned int reduction_tid;
unsigned int linear_tid;
if (X_REDUCE && !Y_REDUCE && Z_REDUCE) {
// Transpose Z and Y in the shared memory so Z and X dims are contiguous in
// smem
reduction_stride = 1;
// NOTE(review): this branch reads the CUDA builtins threadIdx/blockDim
// directly instead of the thread_idx/block_dim parameters used in the
// else-branch -- presumably intentional (the params mirror the
// builtins), but confirm.
linear_tid = threadIdx.y * blockDim.z * blockDim.x +
threadIdx.z * blockDim.x + threadIdx.x;
reduction_tid = threadIdx.z * blockDim.x + threadIdx.x;
} else {
// Normal reduction in order
reduction_stride =
(X_REDUCE ? 1
: (Y_REDUCE ? block_dim.x
: (Z_REDUCE ? block_dim.x * block_dim.y : 0)));
linear_tid = thread_idx.z * block_dim.y * block_dim.x +
thread_idx.y * block_dim.x + thread_idx.x;
reduction_tid = (Z_REDUCE ? thread_idx.z : 0) *
(Y_REDUCE ? block_dim.y : 1) * (X_REDUCE ? block_dim.x : 1) +
(Y_REDUCE ? thread_idx.y : 0) * (X_REDUCE ? block_dim.x : 1) +
(X_REDUCE ? thread_idx.x : 0);
}
assert(reduction_stride != 0);
// Stage each thread's contribution (or the identity) in shared memory.
if (read_write_pred) {
shared_mem[linear_tid] = inp_val;
} else {
shared_mem[linear_tid] = init_val;
}
__syncthreads();
// Reduce down to nearest power of 2:
// np2 = largest power of two <= reduction_size; this first step folds the
// excess elements in, then the loop below halves np2 down to 1.
int np2 = 1 << (31 - __clz(reduction_size));
if (reduction_tid < np2) {
if (reduction_tid + np2 < reduction_size) {
reduction_op(
shared_mem[linear_tid],
shared_mem[linear_tid + np2 * reduction_stride]);
}
}
__syncthreads();
// for (int factor = np2/2; factor > contig_threads / 2; factor>>=1) {
for (int factor = np2 / 2; factor > 0; factor >>= 1) {
if (reduction_tid < factor) {
reduction_op(
shared_mem[linear_tid],
shared_mem[linear_tid + factor * reduction_stride]);
}
__syncthreads();
}
if (should_write && read_write_pred)
out = shared_mem[linear_tid];
}
|
e6e513c013075453bc23b654963f3491790c5256.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#define N 32
/* Adapted from: https://wiki.tiker.net/PyCuda/Examples/MatrixmulSimple */
// Naive single-block matrix multiply: thread (x, y) of the one launched
// block computes c[y][x] as the dot product of row y of `a` with column x
// of `b`. All matrices are N x N, row-major.
__global__ void mm(double *a, double *b, double *c)
{
const int col = threadIdx.x;
const int row = threadIdx.y;
// Accumulate the dot product for this output element.
double acc = 0;
for (int k = 0; k < N; ++k) {
acc += a[row * N + k] * b[k * N + col];
}
// Each thread writes exactly one element of the result.
c[row * N + col] = acc;
}
// Host driver: builds two random N x N matrices, multiplies them on the GPU
// (single-block kernel, launched repeatedly as a simple stress loop), and
// prints one element of the product as a sanity check.
int main () {
double *a, *b, *c;
double *d_a, *d_b, *d_c;
// Byte count for one N x N matrix of doubles. Must be an integral type:
// the original declared this as `double`, which only worked through
// implicit conversion at every malloc/hipMalloc/hipMemcpy call site.
size_t size = sizeof(double) * N*N;
// Single block of N x N threads; the kernel assumes exactly one block.
dim3 grid(1, 1);
dim3 block(N, N);
a = (double *) malloc (size);
b = (double *) malloc (size);
c = (double *) malloc (size);
if (a == NULL || b == NULL || c == NULL) {
fprintf(stderr, "host allocation failed\n");
return 1;
}
if (hipMalloc ((void**)&d_a, size) != hipSuccess ||
hipMalloc ((void**)&d_b, size) != hipSuccess ||
hipMalloc ((void**)&d_c, size) != hipSuccess) {
fprintf(stderr, "device allocation failed\n");
return 1;
}
// Fill A and B with uniform [0, 1) values; C starts at zero.
for( int i = 0; i < N*N; i++ )
{
a[i] = (double) ( rand() ) / ( RAND_MAX + 1.0 );
b[i] = (double) ( rand() ) / ( RAND_MAX + 1.0 );
c[i] = 0;
}
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
hipMemcpy(d_c, c, size, hipMemcpyHostToDevice);
// Repeated launches; each one overwrites d_c with the same product, so
// the final result is unchanged.
for (int i=1; i<100; i++) {
hipLaunchKernelGGL(( mm) , dim3(grid), dim3(block), 0, 0, d_a, d_b, d_c);
}
// Launch-configuration errors and asynchronous kernel faults both
// surface here.
if (hipGetLastError() != hipSuccess ||
hipDeviceSynchronize() != hipSuccess) {
fprintf(stderr, "kernel execution failed\n");
return 1;
}
hipMemcpy (c, d_c, size, hipMemcpyDeviceToHost);
printf("%f\n", c[N * N/2]);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(a);
free(b);
free(c);
return 0;
}
| e6e513c013075453bc23b654963f3491790c5256.cu | #include <stdio.h>
#include <cuda.h>
#define N 32
/* Adapted from: https://wiki.tiker.net/PyCuda/Examples/MatrixmulSimple */
// Naive single-block matrix multiply: thread (x, y) of the one launched
// block computes c[y][x] as the dot product of row y of `a` with column x
// of `b`. All matrices are N x N, row-major.
__global__ void mm(double *a, double *b, double *c)
{
const int col = threadIdx.x;
const int row = threadIdx.y;
// Accumulate the dot product for this output element.
double acc = 0;
for (int k = 0; k < N; ++k) {
acc += a[row * N + k] * b[k * N + col];
}
// Each thread writes exactly one element of the result.
c[row * N + col] = acc;
}
// Host driver: builds two random N x N matrices, multiplies them on the GPU
// (single-block kernel, launched repeatedly as a simple stress loop), and
// prints one element of the product as a sanity check.
int main () {
double *a, *b, *c;
double *d_a, *d_b, *d_c;
// Byte count for one N x N matrix of doubles. Must be an integral type:
// the original declared this as `double`, which only worked through
// implicit conversion at every malloc/cudaMalloc/cudaMemcpy call site.
size_t size = sizeof(double) * N*N;
// Single block of N x N threads; the kernel assumes exactly one block.
dim3 grid(1, 1);
dim3 block(N, N);
a = (double *) malloc (size);
b = (double *) malloc (size);
c = (double *) malloc (size);
if (a == NULL || b == NULL || c == NULL) {
fprintf(stderr, "host allocation failed\n");
return 1;
}
if (cudaMalloc ((void**)&d_a, size) != cudaSuccess ||
cudaMalloc ((void**)&d_b, size) != cudaSuccess ||
cudaMalloc ((void**)&d_c, size) != cudaSuccess) {
fprintf(stderr, "device allocation failed\n");
return 1;
}
// Fill A and B with uniform [0, 1) values; C starts at zero.
for( int i = 0; i < N*N; i++ )
{
a[i] = (double) ( rand() ) / ( RAND_MAX + 1.0 );
b[i] = (double) ( rand() ) / ( RAND_MAX + 1.0 );
c[i] = 0;
}
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_c, c, size, cudaMemcpyHostToDevice);
// Repeated launches; each one overwrites d_c with the same product, so
// the final result is unchanged.
for (int i=1; i<100; i++) {
mm <<<grid, block>>> (d_a, d_b, d_c);
}
// Launch-configuration errors and asynchronous kernel faults both
// surface here.
if (cudaGetLastError() != cudaSuccess ||
cudaDeviceSynchronize() != cudaSuccess) {
fprintf(stderr, "kernel execution failed\n");
return 1;
}
cudaMemcpy (c, d_c, size, cudaMemcpyDeviceToHost);
printf("%f\n", c[N * N/2]);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(a);
free(b);
free(c);
return 0;
}
|
4586479d2b2a804466a8add54940ca81371345ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Replaces every element whose mask entry is false with the value `nan`.
// Launch with >= len total threads; extra threads exit immediately.
__global__ void nan_kernel(float* data, const bool* mask, int len, float nan) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Guard against threads past the end of the array.
if (tid >= len) return;
if (!mask[tid]) data[tid] = nan;
__global__ void nan_kernel(float* data, const bool* mask, int len, float nan) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= len) return;
if (!mask[tid]) data[tid] = nan;
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.