hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
d920349aca63419f44401f5280d7c35e09a33453.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ElementCuda.h"
// Kernel: computes the six per-element geometric constants later used to build
// the element Jacobian (three from x, three from y) and stores them in c,
// 6 floats per element.
// n    -> number of elements
// c    -> output constants array (length n*6)
// x, y -> nodal coordinate arrays, addressed through the mesh connectivity
// mesh -> element connectivity, 4 node indices per element
__global__ void constantCreatorGpu(int n, float* c, float *x, float *y,unsigned int* mesh)
{
// grid-stride loop: each thread processes elements index, index+stride, ...
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
{
unsigned int counter = i*6;
// signed quarter-sums of the four nodal x values of element i ...
c[counter++] = (x[mesh[i*4+0]] - x[mesh[i*4+1]] + x[mesh[i*4+2]] - x[mesh[i*4+3]])/4;
c[counter++] = (x[mesh[i*4+0]] - x[mesh[i*4+1]] - x[mesh[i*4+2]] + x[mesh[i*4+3]])/4;
c[counter++] = (x[mesh[i*4+0]] - x[mesh[i*4+3]] - x[mesh[i*4+2]] + x[mesh[i*4+1]])/4;
// ... and of the four nodal y values
c[counter++] = (y[mesh[i*4+0]] - y[mesh[i*4+1]] + y[mesh[i*4+2]] - y[mesh[i*4+3]])/4;
c[counter++] = (y[mesh[i*4+0]] - y[mesh[i*4+1]] - y[mesh[i*4+2]] + y[mesh[i*4+3]])/4;
c[counter++] = (y[mesh[i*4+0]] - y[mesh[i*4+3]] - y[mesh[i*4+2]] + y[mesh[i*4+1]])/4;
//printf("element Number %d and c is %.2f %.2f %.2f %.2f %.2f %.2f\n",i,c[i*6+0], c[i*6+1],c[i*6+2],c[i*6+3],c[i*6+4],c[i*6+5]);
// defined the constants c1x to c3y on GPU
}
};
// Kernel: evaluates the 36 independent entries of the symmetric 8x8 first-order
// quadrilateral element stiffness matrix at one (element, integration point)
// pair per work item and stores them contiguously in k.
__global__ void stiffnessMatrixFirstOrderGpu(unsigned int n, unsigned int nip ,float* in, unsigned int* ip, float* iw, float* c, float* D, float* k)
// n is the total simulation size (number of elements * integration points)
// nip is the number of integration points squared.
// in is the integrationNode
// ip -> integrationPos
// iw -> integrationWeight
// c -> constants
// D -> material matrix
// k -> stiffness matrix
{
// grid-stride loop over all (element, integration point) work items
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
//printf("index is = %d\n",index );
for (int i = index; i < n; i += stride)
{
// decompose the flat work-item index into integration point and element
unsigned int noIP = i%nip;
unsigned int numberElement = i/nip;
// natural coordinates of this integration point
double XI = in[ip[2*noIP]]; double YI = in[ip[2*noIP+1]];
// Jacobian
double J11 = c[numberElement*6+0]*YI-c[numberElement*6+1]; double J12 = c[numberElement*6+3]*YI-c[numberElement*6+4];
double J21 = c[numberElement*6+0]*XI-c[numberElement*6+2]; double J22 = c[numberElement*6+3]*XI-c[numberElement*6+5];
double detJ = J11*J22-J12*J21;
// combined Gauss weight divided by the Jacobian determinant
double WeightPerDetJ = (iw[ip[2*noIP]]*iw[ip[2*noIP+1]])/detJ;
// derivatives of the shape functions N1x N2x ... N1y N2y ...
double Ni[8] = {J22*( YI-1)/4 - J12*( XI-1)/4, J22*(-YI+1)/4 - J12*(-XI-1)/4, \
J22*( YI+1)/4 - J12*( XI+1)/4, J22*(-YI-1)/4 - J12*(-XI+1)/4, \
J11*( XI-1)/4 - J21*( YI-1)/4, J11*(-XI-1)/4 - J21*(-YI+1)/4, \
J11*( XI+1)/4 - J21*( YI+1)/4, J11*(-XI+1)/4 - J21*(-YI-1)/4};
// pairwise products of shape-function derivatives N1x^2 N1x*N2x ....
// (upper triangle of the 8x8 product matrix, 36 entries)
double N[36];
unsigned int counterN = 0;
// NOTE(review): the inner loop variables i/j shadow the grid-stride loop's i;
// functionally fine here, but easy to misread.
for (unsigned int i = 0; i < 8; i++)
{
for (unsigned int j = i; j < 8 ; j++)
N[counterN++] = Ni[i]*Ni[j];
};
// find the position to start filling the stiffness matrix
unsigned int counter = 36*(numberElement*nip+noIP);
// writes all 36 components of the 8 by 8 stiffness Matrix considering symmetry
k[counter+0] = WeightPerDetJ*(D[0]*N[0] + 2*D[4]*N[4] + D[2]*N[26]);
k[counter+1] = WeightPerDetJ*(D[4]*N[0] + D[5]*N[26] + D[3]*N[4] + D[2]*N[4]);
k[counter+2] = WeightPerDetJ*(D[2]*N[0] + 2*D[5]*N[4] + D[1]*N[26]);
k[counter+3] = WeightPerDetJ*(D[0]*N[1] + D[4]*N[5] + D[4]*N[11] + D[2]*N[27]);
k[counter+4] = WeightPerDetJ*(D[4]*N[1] + D[3]*N[11] + D[2]*N[5] + D[5]*N[27]);
k[counter+5] = WeightPerDetJ*(D[0]*N[8] + 2*D[4]*N[12] + D[2]*N[30]);
k[counter+6] = WeightPerDetJ*(D[4]*N[1] + D[3]*N[5] + D[2]*N[11] + D[5]*N[27]);
k[counter+7] = WeightPerDetJ*(D[2]*N[1] + D[5]*N[5] + D[5]*N[11] + D[1]*N[27]);
k[counter+8] = WeightPerDetJ*(D[4]*N[8] + D[5]*N[30] + D[3]*N[12] + D[2]*N[12]);
k[counter+9] = WeightPerDetJ*(D[2]*N[8] + 2*D[5]*N[12] + D[1]*N[30]);
k[counter+10] = WeightPerDetJ*(D[0]*N[2] + D[4]*N[6] + D[4]*N[17] + D[2]*N[28]);
k[counter+11] = WeightPerDetJ*(D[4]*N[2] + D[3]*N[17] + D[2]*N[6] + D[5]*N[28]);
k[counter+12] = WeightPerDetJ*(D[0]*N[9] + D[4]*N[13] + D[4]*N[18] + D[2]*N[31]);
k[counter+13] = WeightPerDetJ*(D[4]*N[9] + D[3]*N[18] + D[2]*N[13] + D[5]*N[31]);
k[counter+14] = WeightPerDetJ*(D[0]*N[15] + 2*D[4]*N[19] + D[2]*N[33]);
k[counter+15] = WeightPerDetJ*(D[4]*N[2] + D[3]*N[6] + D[2]*N[17] + D[5]*N[28]);
k[counter+16] = WeightPerDetJ*(D[2]*N[2] + D[5]*N[6] + D[5]*N[17] + D[1]*N[28]);
k[counter+17] = WeightPerDetJ*(D[4]*N[9] + D[3]*N[13] + D[2]*N[18] + D[5]*N[31]);
k[counter+18] = WeightPerDetJ*(D[2]*N[9] + D[5]*N[13] + D[5]*N[18] + D[1]*N[31]);
k[counter+19] = WeightPerDetJ*(D[4]*N[15] + D[5]*N[33] + D[3]*N[19] + D[2]*N[19]);
k[counter+20] = WeightPerDetJ*(D[2]*N[15] + 2*D[5]*N[19] + D[1]*N[33]);
k[counter+21] = WeightPerDetJ*(D[0]*N[3] + D[4]*N[7] + D[4]*N[22] + D[2]*N[29]);
k[counter+22] = WeightPerDetJ*(D[4]*N[3] + D[3]*N[22] + D[2]*N[7] + D[5]*N[29]);
k[counter+23] = WeightPerDetJ*(D[0]*N[10] + D[4]*N[14] + D[4]*N[23] + D[2]*N[32]);
k[counter+24] = WeightPerDetJ*(D[4]*N[10] + D[3]*N[23] + D[2]*N[14] + D[5]*N[32]);
k[counter+25] = WeightPerDetJ*(D[0]*N[16] + D[4]*N[20] + D[4]*N[24] + D[2]*N[34]);
k[counter+26] = WeightPerDetJ*(D[4]*N[16] + D[3]*N[24] + D[2]*N[20] + D[5]*N[34]);
k[counter+27] = WeightPerDetJ*(D[0]*N[21] + 2*D[4]*N[25] + D[2]*N[35]);
k[counter+28] = WeightPerDetJ*(D[4]*N[3] + D[3]*N[7] + D[2]*N[22] + D[5]*N[29]);
k[counter+29] = WeightPerDetJ*(D[2]*N[3] + D[5]*N[7] + D[5]*N[22] + D[1]*N[29]);
k[counter+30] = WeightPerDetJ*(D[4]*N[10] + D[3]*N[14] + D[2]*N[23] + D[5]*N[32]);
k[counter+31] = WeightPerDetJ*(D[2]*N[10] + D[5]*N[14] + D[5]*N[23] + D[1]*N[32]);
k[counter+32] = WeightPerDetJ*(D[4]*N[16] + D[3]*N[20] + D[2]*N[24] + D[5]*N[34]);
k[counter+33] = WeightPerDetJ*(D[2]*N[16] + D[5]*N[20] + D[5]*N[24] + D[1]*N[34]);
k[counter+34] = WeightPerDetJ*(D[4]*N[21] + D[5]*N[35] + D[3]*N[25] + D[2]*N[25]);
k[counter+35] = WeightPerDetJ*(D[2]*N[21] + 2*D[5]*N[25] + D[1]*N[35]);
}
};
// Constructs the element assembler: caches problem sizes, builds the Gauss
// integration tables, then immediately computes the stiffness matrices on GPU.
// mat -> material model supplying the material matrix D
// geo -> geometry supplying nodal coordinates and mesh connectivity
// n   -> number of integration points per direction (1..5 are tabulated)
ElementCuda::ElementCuda(Material& mat, Geometry &geo, unsigned int n)
: material(&mat), geometry(&geo), numberOfIntegrationPoint(n)
{
// Initialize
numberOfElements = geometry->numberOfElementsG;
nipSquared = numberOfIntegrationPoint*numberOfIntegrationPoint;
stiffMatSize = numberOfElements*sizeStiffMatPerEle*nipSquared;
integrationPoint(); // builds the integration point arrays
// cuda code
stiffnessMatrixFirstOrder();
};
// Destructor: releases every device/managed allocation owned by this object.
ElementCuda::~ElementCuda()
{
Log::Logger().Info("ElementCuda Deleted");
hipFree(integrationNode);
hipFree(integrationPos);
hipFree(integrationWeight);
hipFree(c);
hipFree(stiffMat);
hipFree(x_d);
hipFree(y_d);
hipFree(mesh_d);
hipFree(D_d);
};
// Computes the per-integration-point element stiffness contributions on GPU.
// Allocates the output storage, uploads the material matrix D, then launches
// one work item per (element, integration point) pair.
void ElementCuda::stiffnessMatrixFirstOrder()
{
constantCreator(); // fill the per-element geometric constants array c first
simulationSize = numberOfElements*nipSquared; // one work item per (element, IP)
// allocate the stiffness output and copy the material matrix
hipMallocManaged(&stiffMat, stiffMatSize*sizeof(float));
hipMallocManaged(&D_d, 6*sizeof(float));
hipMemcpy(D_d, material->materialMatrix, 6*sizeof(float), hipMemcpyHostToDevice);
// Run the stiffness kernel on the GPU and time it
Log::Logger().Info("time for running on GPU");
Timer timer;
int numBlocks = (simulationSize + blockSize-1)/blockSize; //number of blocks used to run on GPU
// BUG FIX: the kernel's first argument is the total work-item count
// (elements * integration points). Previously numberOfElements was passed,
// so only the first numberOfElements (element, IP) pairs were computed even
// though the grid was sized for simulationSize.
hipLaunchKernelGGL(( stiffnessMatrixFirstOrderGpu), dim3(numBlocks), dim3(blockSize), 0, 0, simulationSize, nipSquared,integrationNode, integrationPos, integrationWeight, c, D_d, stiffMat);
hipDeviceSynchronize();
}
// Allocates managed buffers, uploads the geometry (nodal coordinates and mesh
// connectivity), and launches constantCreatorGpu to fill the per-element
// constants array c (6 floats per element).
void ElementCuda::constantCreator()
{
int device = -1;
hipGetDevice(&device);
hipMallocManaged(&c, numberOfElements*6*sizeof(float));
// copying arrays in the geometry class to the gpu
hipMallocManaged(&x_d, geometry->numberOfNodes*sizeof(float));
hipMallocManaged(&y_d, geometry->numberOfNodes*sizeof(float));
// BUG FIX: mesh_d used to be allocated twice in a row, leaking the first
// managed allocation; a single allocation suffices.
hipMallocManaged(&mesh_d, geometry->numberOfElementsG*4*sizeof(unsigned int));
hipMemcpy(x_d, geometry->x, geometry->numberOfNodes*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(y_d, geometry->y, geometry->numberOfNodes*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(mesh_d, geometry->mesh, geometry->numberOfElementsG*4*sizeof(unsigned int), hipMemcpyHostToDevice);
// launch: one thread per element, grid rounded up to whole blocks
int numBlocks = (numberOfElements + blockSize-1)/blockSize; //number of blocks used to run on GPU
hipLaunchKernelGGL(( constantCreatorGpu), dim3(numBlocks), dim3(blockSize), 0, 0, numberOfElements, c, x_d, y_d, mesh_d);
hipDeviceSynchronize();
}
// Builds the Gauss-Legendre integration tables in managed memory:
//   integrationNode   -> 1-D Gauss node coordinates
//   integrationWeight -> matching 1-D Gauss weights
//   integrationPos    -> (i, j) index pairs so that
//                        XI = integrationNode[integrationPos[2k]] and
//                        YI = integrationNode[integrationPos[2k+1]]
// Only 1 to 5 points per direction are tabulated.
void ElementCuda::integrationPoint()
// Creates the integration points
// XI = integrationNode[integrationPos[i]] YI = integrationNode[integrationPos[i+1]]
{
hipMallocManaged(&integrationNode, numberOfIntegrationPoint*sizeof(float));
// NOTE(review): the kernels read integrationPos as unsigned int*, but the size
// here uses sizeof(float); both are 4 bytes so the byte count works out, yet
// sizeof(unsigned int) would state the intent -- confirm the member's type.
hipMallocManaged(&integrationPos, numberOfIntegrationPoint*dimention*numberOfIntegrationPoint*sizeof(float));
hipMallocManaged(&integrationWeight, numberOfIntegrationPoint*sizeof(float));
// enumerate every (i, j) node-index pair in row-major order
unsigned int counter = 0;
for (unsigned int i = 0; i < numberOfIntegrationPoint; i++)
for (unsigned int j = 0; j < numberOfIntegrationPoint; j++)
{
integrationPos[counter++] = i;
integrationPos[counter++] = j;
};
// tabulated Gauss-Legendre nodes and weights per point count
if (numberOfIntegrationPoint == 1) {
integrationNode[0] = 0.0f; integrationWeight[0] = 4.0f;
} else if (numberOfIntegrationPoint == 2) {
integrationNode[0] = -0.57735f; integrationWeight[0] = 1.0f;
integrationNode[1] = 0.57735f; integrationWeight[1] = 1.0f;
} else if (numberOfIntegrationPoint == 3) {
integrationNode[0] = -0.774596f; integrationWeight[0] = 0.555556f;
integrationNode[1] = 0.0f ; integrationWeight[1] = 0.888889f;
integrationNode[2] = 0.774596f; integrationWeight[2] = 0.555556f;
} else if (numberOfIntegrationPoint == 4) {
integrationNode[0] = -0.861136f; integrationWeight[0] = 0.347855f;
integrationNode[1] = -0.339981f; integrationWeight[1] = 0.652145f;
integrationNode[2] = 0.339981f; integrationWeight[2] = 0.652145f;
integrationNode[3] = 0.861136f; integrationWeight[3] = 0.347855f;
} else if (numberOfIntegrationPoint == 5) {
integrationNode[0] = -0.90618f; integrationWeight[0] = 0.236927f;
integrationNode[1] = -0.538469f; integrationWeight[1] = 0.478629f;
integrationNode[2] = 0.0f; integrationWeight[2] = 0.568889f;
integrationNode[3] = 0.538469f; integrationWeight[3] = 0.478629f;
integrationNode[4] = 0.90618f; integrationWeight[4] = 0.236927f;
} else {
Log::Logger().Error("Integration points more than five is under construction");
}
};
| d920349aca63419f44401f5280d7c35e09a33453.cu | #include "ElementCuda.h"
// Kernel: computes the six per-element geometric constants later used to build
// the element Jacobian (three from x, three from y) and stores them in c,
// 6 floats per element.
// n    -> number of elements
// c    -> output constants array (length n*6)
// x, y -> nodal coordinate arrays, addressed through the mesh connectivity
// mesh -> element connectivity, 4 node indices per element
__global__ void constantCreatorGpu(int n, float* c, float *x, float *y,unsigned int* mesh)
{
// grid-stride loop: each thread processes elements index, index+stride, ...
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
{
unsigned int counter = i*6;
// signed quarter-sums of the four nodal x values of element i ...
c[counter++] = (x[mesh[i*4+0]] - x[mesh[i*4+1]] + x[mesh[i*4+2]] - x[mesh[i*4+3]])/4;
c[counter++] = (x[mesh[i*4+0]] - x[mesh[i*4+1]] - x[mesh[i*4+2]] + x[mesh[i*4+3]])/4;
c[counter++] = (x[mesh[i*4+0]] - x[mesh[i*4+3]] - x[mesh[i*4+2]] + x[mesh[i*4+1]])/4;
// ... and of the four nodal y values
c[counter++] = (y[mesh[i*4+0]] - y[mesh[i*4+1]] + y[mesh[i*4+2]] - y[mesh[i*4+3]])/4;
c[counter++] = (y[mesh[i*4+0]] - y[mesh[i*4+1]] - y[mesh[i*4+2]] + y[mesh[i*4+3]])/4;
c[counter++] = (y[mesh[i*4+0]] - y[mesh[i*4+3]] - y[mesh[i*4+2]] + y[mesh[i*4+1]])/4;
//printf("element Number %d and c is %.2f %.2f %.2f %.2f %.2f %.2f\n",i,c[i*6+0], c[i*6+1],c[i*6+2],c[i*6+3],c[i*6+4],c[i*6+5]);
// defined the constants c1x to c3y on GPU
}
};
// Kernel: evaluates the 36 independent entries of the symmetric 8x8 first-order
// quadrilateral element stiffness matrix at one (element, integration point)
// pair per work item and stores them contiguously in k.
__global__ void stiffnessMatrixFirstOrderGpu(unsigned int n, unsigned int nip ,float* in, unsigned int* ip, float* iw, float* c, float* D, float* k)
// n is the total simulation size (number of elements * integration points)
// nip is the number of integration points squared.
// in is the integrationNode
// ip -> integrationPos
// iw -> integrationWeight
// c -> constants
// D -> material matrix
// k -> stiffness matrix
{
// grid-stride loop over all (element, integration point) work items
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
//printf("index is = %d\n",index );
for (int i = index; i < n; i += stride)
{
// decompose the flat work-item index into integration point and element
unsigned int noIP = i%nip;
unsigned int numberElement = i/nip;
// natural coordinates of this integration point
double XI = in[ip[2*noIP]]; double YI = in[ip[2*noIP+1]];
// Jacobian
double J11 = c[numberElement*6+0]*YI-c[numberElement*6+1]; double J12 = c[numberElement*6+3]*YI-c[numberElement*6+4];
double J21 = c[numberElement*6+0]*XI-c[numberElement*6+2]; double J22 = c[numberElement*6+3]*XI-c[numberElement*6+5];
double detJ = J11*J22-J12*J21;
// combined Gauss weight divided by the Jacobian determinant
double WeightPerDetJ = (iw[ip[2*noIP]]*iw[ip[2*noIP+1]])/detJ;
// derivatives of the shape functions N1x N2x ... N1y N2y ...
double Ni[8] = {J22*( YI-1)/4 - J12*( XI-1)/4, J22*(-YI+1)/4 - J12*(-XI-1)/4, \
J22*( YI+1)/4 - J12*( XI+1)/4, J22*(-YI-1)/4 - J12*(-XI+1)/4, \
J11*( XI-1)/4 - J21*( YI-1)/4, J11*(-XI-1)/4 - J21*(-YI+1)/4, \
J11*( XI+1)/4 - J21*( YI+1)/4, J11*(-XI+1)/4 - J21*(-YI-1)/4};
// pairwise products of shape-function derivatives N1x^2 N1x*N2x ....
// (upper triangle of the 8x8 product matrix, 36 entries)
double N[36];
unsigned int counterN = 0;
// NOTE(review): the inner loop variables i/j shadow the grid-stride loop's i;
// functionally fine here, but easy to misread.
for (unsigned int i = 0; i < 8; i++)
{
for (unsigned int j = i; j < 8 ; j++)
N[counterN++] = Ni[i]*Ni[j];
};
// find the position to start filling the stiffness matrix
unsigned int counter = 36*(numberElement*nip+noIP);
// writes all 36 components of the 8 by 8 stiffness Matrix considering symmetry
k[counter+0] = WeightPerDetJ*(D[0]*N[0] + 2*D[4]*N[4] + D[2]*N[26]);
k[counter+1] = WeightPerDetJ*(D[4]*N[0] + D[5]*N[26] + D[3]*N[4] + D[2]*N[4]);
k[counter+2] = WeightPerDetJ*(D[2]*N[0] + 2*D[5]*N[4] + D[1]*N[26]);
k[counter+3] = WeightPerDetJ*(D[0]*N[1] + D[4]*N[5] + D[4]*N[11] + D[2]*N[27]);
k[counter+4] = WeightPerDetJ*(D[4]*N[1] + D[3]*N[11] + D[2]*N[5] + D[5]*N[27]);
k[counter+5] = WeightPerDetJ*(D[0]*N[8] + 2*D[4]*N[12] + D[2]*N[30]);
k[counter+6] = WeightPerDetJ*(D[4]*N[1] + D[3]*N[5] + D[2]*N[11] + D[5]*N[27]);
k[counter+7] = WeightPerDetJ*(D[2]*N[1] + D[5]*N[5] + D[5]*N[11] + D[1]*N[27]);
k[counter+8] = WeightPerDetJ*(D[4]*N[8] + D[5]*N[30] + D[3]*N[12] + D[2]*N[12]);
k[counter+9] = WeightPerDetJ*(D[2]*N[8] + 2*D[5]*N[12] + D[1]*N[30]);
k[counter+10] = WeightPerDetJ*(D[0]*N[2] + D[4]*N[6] + D[4]*N[17] + D[2]*N[28]);
k[counter+11] = WeightPerDetJ*(D[4]*N[2] + D[3]*N[17] + D[2]*N[6] + D[5]*N[28]);
k[counter+12] = WeightPerDetJ*(D[0]*N[9] + D[4]*N[13] + D[4]*N[18] + D[2]*N[31]);
k[counter+13] = WeightPerDetJ*(D[4]*N[9] + D[3]*N[18] + D[2]*N[13] + D[5]*N[31]);
k[counter+14] = WeightPerDetJ*(D[0]*N[15] + 2*D[4]*N[19] + D[2]*N[33]);
k[counter+15] = WeightPerDetJ*(D[4]*N[2] + D[3]*N[6] + D[2]*N[17] + D[5]*N[28]);
k[counter+16] = WeightPerDetJ*(D[2]*N[2] + D[5]*N[6] + D[5]*N[17] + D[1]*N[28]);
k[counter+17] = WeightPerDetJ*(D[4]*N[9] + D[3]*N[13] + D[2]*N[18] + D[5]*N[31]);
k[counter+18] = WeightPerDetJ*(D[2]*N[9] + D[5]*N[13] + D[5]*N[18] + D[1]*N[31]);
k[counter+19] = WeightPerDetJ*(D[4]*N[15] + D[5]*N[33] + D[3]*N[19] + D[2]*N[19]);
k[counter+20] = WeightPerDetJ*(D[2]*N[15] + 2*D[5]*N[19] + D[1]*N[33]);
k[counter+21] = WeightPerDetJ*(D[0]*N[3] + D[4]*N[7] + D[4]*N[22] + D[2]*N[29]);
k[counter+22] = WeightPerDetJ*(D[4]*N[3] + D[3]*N[22] + D[2]*N[7] + D[5]*N[29]);
k[counter+23] = WeightPerDetJ*(D[0]*N[10] + D[4]*N[14] + D[4]*N[23] + D[2]*N[32]);
k[counter+24] = WeightPerDetJ*(D[4]*N[10] + D[3]*N[23] + D[2]*N[14] + D[5]*N[32]);
k[counter+25] = WeightPerDetJ*(D[0]*N[16] + D[4]*N[20] + D[4]*N[24] + D[2]*N[34]);
k[counter+26] = WeightPerDetJ*(D[4]*N[16] + D[3]*N[24] + D[2]*N[20] + D[5]*N[34]);
k[counter+27] = WeightPerDetJ*(D[0]*N[21] + 2*D[4]*N[25] + D[2]*N[35]);
k[counter+28] = WeightPerDetJ*(D[4]*N[3] + D[3]*N[7] + D[2]*N[22] + D[5]*N[29]);
k[counter+29] = WeightPerDetJ*(D[2]*N[3] + D[5]*N[7] + D[5]*N[22] + D[1]*N[29]);
k[counter+30] = WeightPerDetJ*(D[4]*N[10] + D[3]*N[14] + D[2]*N[23] + D[5]*N[32]);
k[counter+31] = WeightPerDetJ*(D[2]*N[10] + D[5]*N[14] + D[5]*N[23] + D[1]*N[32]);
k[counter+32] = WeightPerDetJ*(D[4]*N[16] + D[3]*N[20] + D[2]*N[24] + D[5]*N[34]);
k[counter+33] = WeightPerDetJ*(D[2]*N[16] + D[5]*N[20] + D[5]*N[24] + D[1]*N[34]);
k[counter+34] = WeightPerDetJ*(D[4]*N[21] + D[5]*N[35] + D[3]*N[25] + D[2]*N[25]);
k[counter+35] = WeightPerDetJ*(D[2]*N[21] + 2*D[5]*N[25] + D[1]*N[35]);
}
};
// Constructs the element assembler: caches problem sizes, builds the Gauss
// integration tables, then immediately computes the stiffness matrices on GPU.
// mat -> material model supplying the material matrix D
// geo -> geometry supplying nodal coordinates and mesh connectivity
// n   -> number of integration points per direction (1..5 are tabulated)
ElementCuda::ElementCuda(Material& mat, Geometry &geo, unsigned int n)
: material(&mat), geometry(&geo), numberOfIntegrationPoint(n)
{
// Initialize
numberOfElements = geometry->numberOfElementsG;
nipSquared = numberOfIntegrationPoint*numberOfIntegrationPoint;
stiffMatSize = numberOfElements*sizeStiffMatPerEle*nipSquared;
integrationPoint(); // builds the integration point arrays
// cuda code
stiffnessMatrixFirstOrder();
};
// Destructor: releases every device/managed allocation owned by this object.
ElementCuda::~ElementCuda()
{
Log::Logger().Info("ElementCuda Deleted");
cudaFree(integrationNode);
cudaFree(integrationPos);
cudaFree(integrationWeight);
cudaFree(c);
cudaFree(stiffMat);
cudaFree(x_d);
cudaFree(y_d);
cudaFree(mesh_d);
cudaFree(D_d);
};
// Computes the per-integration-point element stiffness contributions on GPU.
// Allocates the output storage, uploads the material matrix D, then launches
// one work item per (element, integration point) pair.
void ElementCuda::stiffnessMatrixFirstOrder()
{
constantCreator(); // fill the per-element geometric constants array c first
simulationSize = numberOfElements*nipSquared; // one work item per (element, IP)
// allocate the stiffness output and copy the material matrix
cudaMallocManaged(&stiffMat, stiffMatSize*sizeof(float));
cudaMallocManaged(&D_d, 6*sizeof(float));
cudaMemcpy(D_d, material->materialMatrix, 6*sizeof(float), cudaMemcpyHostToDevice);
// Run the stiffness kernel on the GPU and time it
Log::Logger().Info("time for running on GPU");
Timer timer;
int numBlocks = (simulationSize + blockSize-1)/blockSize; //number of blocks used to run on GPU
// BUG FIX: the kernel's first argument is the total work-item count
// (elements * integration points). Previously numberOfElements was passed,
// so only the first numberOfElements (element, IP) pairs were computed even
// though the grid was sized for simulationSize.
stiffnessMatrixFirstOrderGpu<<<numBlocks, blockSize>>>(simulationSize, nipSquared,integrationNode, integrationPos, integrationWeight, c, D_d, stiffMat);
cudaDeviceSynchronize();
}
// Allocates managed buffers, uploads the geometry (nodal coordinates and mesh
// connectivity), and launches constantCreatorGpu to fill the per-element
// constants array c (6 floats per element).
void ElementCuda::constantCreator()
{
int device = -1;
cudaGetDevice(&device);
cudaMallocManaged(&c, numberOfElements*6*sizeof(float));
// copying arrays in the geometry class to the gpu
cudaMallocManaged(&x_d, geometry->numberOfNodes*sizeof(float));
cudaMallocManaged(&y_d, geometry->numberOfNodes*sizeof(float));
// BUG FIX: mesh_d used to be allocated twice in a row, leaking the first
// managed allocation; a single allocation suffices.
cudaMallocManaged(&mesh_d, geometry->numberOfElementsG*4*sizeof(unsigned int));
cudaMemcpy(x_d, geometry->x, geometry->numberOfNodes*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(y_d, geometry->y, geometry->numberOfNodes*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(mesh_d, geometry->mesh, geometry->numberOfElementsG*4*sizeof(unsigned int), cudaMemcpyHostToDevice);
// launch: one thread per element, grid rounded up to whole blocks
int numBlocks = (numberOfElements + blockSize-1)/blockSize; //number of blocks used to run on GPU
constantCreatorGpu<<<numBlocks, blockSize>>>(numberOfElements, c, x_d, y_d, mesh_d);
cudaDeviceSynchronize();
}
// Builds the Gauss-Legendre integration tables in managed memory:
//   integrationNode   -> 1-D Gauss node coordinates
//   integrationWeight -> matching 1-D Gauss weights
//   integrationPos    -> (i, j) index pairs so that
//                        XI = integrationNode[integrationPos[2k]] and
//                        YI = integrationNode[integrationPos[2k+1]]
// Only 1 to 5 points per direction are tabulated.
void ElementCuda::integrationPoint()
// Creates the integration points
// XI = integrationNode[integrationPos[i]] YI = integrationNode[integrationPos[i+1]]
{
cudaMallocManaged(&integrationNode, numberOfIntegrationPoint*sizeof(float));
// NOTE(review): the kernels read integrationPos as unsigned int*, but the size
// here uses sizeof(float); both are 4 bytes so the byte count works out, yet
// sizeof(unsigned int) would state the intent -- confirm the member's type.
cudaMallocManaged(&integrationPos, numberOfIntegrationPoint*dimention*numberOfIntegrationPoint*sizeof(float));
cudaMallocManaged(&integrationWeight, numberOfIntegrationPoint*sizeof(float));
// enumerate every (i, j) node-index pair in row-major order
unsigned int counter = 0;
for (unsigned int i = 0; i < numberOfIntegrationPoint; i++)
for (unsigned int j = 0; j < numberOfIntegrationPoint; j++)
{
integrationPos[counter++] = i;
integrationPos[counter++] = j;
};
// tabulated Gauss-Legendre nodes and weights per point count
if (numberOfIntegrationPoint == 1) {
integrationNode[0] = 0.0f; integrationWeight[0] = 4.0f;
} else if (numberOfIntegrationPoint == 2) {
integrationNode[0] = -0.57735f; integrationWeight[0] = 1.0f;
integrationNode[1] = 0.57735f; integrationWeight[1] = 1.0f;
} else if (numberOfIntegrationPoint == 3) {
integrationNode[0] = -0.774596f; integrationWeight[0] = 0.555556f;
integrationNode[1] = 0.0f ; integrationWeight[1] = 0.888889f;
integrationNode[2] = 0.774596f; integrationWeight[2] = 0.555556f;
} else if (numberOfIntegrationPoint == 4) {
integrationNode[0] = -0.861136f; integrationWeight[0] = 0.347855f;
integrationNode[1] = -0.339981f; integrationWeight[1] = 0.652145f;
integrationNode[2] = 0.339981f; integrationWeight[2] = 0.652145f;
integrationNode[3] = 0.861136f; integrationWeight[3] = 0.347855f;
} else if (numberOfIntegrationPoint == 5) {
integrationNode[0] = -0.90618f; integrationWeight[0] = 0.236927f;
integrationNode[1] = -0.538469f; integrationWeight[1] = 0.478629f;
integrationNode[2] = 0.0f; integrationWeight[2] = 0.568889f;
integrationNode[3] = 0.538469f; integrationWeight[3] = 0.478629f;
integrationNode[4] = 0.90618f; integrationWeight[4] = 0.236927f;
} else {
Log::Logger().Error("Integration points more than five is under construction");
}
};
|
988b2b29cac2270074e5851088bdf285667145b4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <hip/hip_runtime_api.h>
typedef struct point_t {
double x;
double y;
} point_t;
int n_data = 1000;
__device__ int d_n_data = 1000;
point_t data[] = {
{70.49,130.80},{69.15,120.96},{73.09,128.52},{86.47,121.82},
{79.33,135.78},{82.97,146.94},{78.52,137.85},{73.39,136.96},
{75.61,114.34},{79.52,146.23},{76.70,133.10},{65.33,122.13},
{79.50,129.38},{85.38,157.06},{72.33,133.95},{35.91,75.40},
{80.77,143.67},{52.68,89.78},{36.13,77.22},{83.90,142.29},
{59.04,106.05},{71.12,130.59},{32.06,71.46},{40.77,93.83},
{46.91,81.58},{94.85,156.72},{12.06,43.28},{ 0.71,30.59},
{21.39,61.08},{88.48,157.99},{50.25,118.75},{31.08,96.31},
{58.31,100.58},{27.50,76.70},{10.69,38.32},{95.66,150.32},
{15.97,68.61},{80.36,139.70},{71.97,130.69},{ 7.44,39.10},
{74.91,138.56},{ 5.87,38.76},{38.96,94.86},{ 8.20,38.92},
{57.23,103.56},{29.79,82.30},{64.80,112.26},{20.84,60.04},
{82.50,136.68},{12.69,56.76},{ 7.73,41.58},{79.38,150.25},
{99.73,182.45},{64.77,107.66},{38.09,78.58},{75.72,143.27},
{93.61,171.88},{46.59,116.39},{31.18,76.09},{89.36,167.09},
{57.97,100.04},{30.65,59.97},{57.40,107.18},{78.58,141.45},
{28.35,66.02},{72.36,132.13},{12.30,50.43},{42.29,86.56},
{42.23,74.52},{91.38,147.51},{35.52,95.32},{75.56,144.53},
{92.61,164.53},{21.57,81.55},{81.52,146.53},{62.55,108.13},
{45.86,94.30},{27.38,56.80},{98.47,164.28},{79.60,132.17},
{49.49,101.96},{99.59,171.38},{91.11,158.91},{52.61,99.92},
{51.00,105.16},{46.34,97.15},{60.72,90.68},{53.70,116.41},
{16.92,71.76},{75.14,129.96},{56.44,114.93},{63.62,122.81},
{87.88,145.18},{93.38,150.65},{24.15,70.67},{70.89,125.06},
{57.82,120.58},{27.41,77.74},{35.24,76.88},{75.73,146.45},
{72.88,135.76},{56.38,118.97},{ 9.35,53.94},{ 6.65,43.07},
{92.78,145.83},{60.23,103.33},{82.59,138.65},{82.59,147.20},
{ 5.38,47.28},{ 9.85,58.44},{45.16,104.55},{20.85,57.53},
{93.27,159.89},{56.15,94.30},{34.84,84.45},{49.78,105.58},
{24.81,73.02},{12.36,45.98},{51.76,116.24},{14.80,50.91},
{58.76,114.94},{ 4.65,34.34},{74.99,126.14},{45.12,99.17},
{75.16,120.98},{20.96,59.97},{62.03,120.94},{29.02,57.61},
{50.10,109.93},{25.70,67.21},{53.37,121.01},{69.83,106.86},
{98.40,167.39},{62.79,103.27},{73.46,129.78},{94.55,146.92},
{71.17,128.64},{ 9.89,61.96},{71.64,99.54},{ 4.08,39.81},
{86.30,139.84},{57.12,119.67},{23.03,69.35},{13.05,50.76},
{87.04,165.95},{ 6.30,42.04},{46.82,111.49},{29.52,74.86},
{57.05,102.30},{44.34,107.69},{85.27,135.83},{10.74,47.09},
{45.03,90.95},{35.25,98.93},{49.98,101.17},{62.33,110.82},
{83.44,128.79},{49.31,80.08},{98.90,162.01},{73.81,113.52},
{17.62,50.47},{63.13,116.08},{13.46,51.40},{27.67,70.81},
{ 5.54,38.33},{83.67,145.25},{49.77,93.82},{44.96,117.71},
{90.09,165.72},{29.28,76.51},{ 2.22,36.30},{ 6.36,32.50},
{96.08,156.79},{62.72,119.35},{ 2.24,32.55},{85.64,154.80},
{76.93,148.22},{ 3.74,32.51},{80.07,129.92},{ 7.57,47.93},
{ 9.00,52.97},{72.69,123.67},{72.76,149.85},{45.74,103.78},
{66.00,121.69},{59.09,124.34},{ 4.60,47.04},{69.41,133.60},
{ 9.09,63.43},{45.74,103.93},{56.63,111.44},{76.34,136.06},
{97.77,165.84},{80.20,147.33},{16.71,43.05},{13.95,65.55},
{96.06,172.59},{ 5.44,48.49},{12.06,58.95},{36.61,73.82},
{29.29,68.38},{37.42,90.24},{50.49,95.83},{47.45,110.52},
{40.96,92.52},{38.24,78.03},{55.77,105.52},{42.17,93.48},
{40.53,82.97},{22.55,73.32},{20.23,65.30},{ 1.88,54.37},
{ 6.84,45.82},{73.05,130.63},{83.94,158.91},{90.96,161.00},
{10.94,41.62},{78.06,146.69},{45.97,112.26},{79.55,133.24},
{ 0.25,28.96},{98.80,171.38},{95.77,176.06},{ 0.82,31.52},
{40.03,96.99},{22.06,81.54},{21.25,57.42},{26.64,60.34},
{ 3.87,29.19},{79.36,135.16},{88.96,145.04},{90.13,146.75},
{42.63,85.20},{68.11,135.21},{94.64,163.26},{31.01,76.30},
{78.95,132.19},{89.73,153.83},{24.83,65.97},{18.69,50.48},
{45.29,74.50},{ 2.68,37.48},{75.57,134.16},{37.04,97.58},
{53.59,88.16},{66.96,141.13},{ 8.31,61.03},{ 4.53,57.53},
{41.66,97.85},{72.11,132.50},{71.86,138.16},{72.87,121.14},
{87.34,143.48},{95.03,141.95},{85.67,167.74},{83.99,148.48},
{84.18,136.06},{59.05,110.64},{75.45,148.87},{10.60,48.52},
{81.85,144.43},{29.44,76.63},{10.76,52.81},{46.80,103.01},
{10.39,47.90},{35.43,82.97},{11.96,52.36},{41.33,74.35},
{34.32,100.25},{90.90,160.50},{89.02,144.90},{32.94,76.30},
{35.15,67.46},{49.81,105.50},{58.97,114.68},{61.15,85.18},
{53.52,105.59},{80.53,136.80},{ 8.25,57.08},{88.87,142.03},
{63.05,116.95},{50.19,110.48},{32.94,72.63},{75.91,128.73},
{98.59,162.18},{ 6.66,53.32},{66.68,116.98},{46.53,95.66},
{80.56,157.54},{ 4.04,39.16},{84.05,142.18},{ 8.82,51.97},
{94.05,154.96},{14.54,71.12},{51.22,96.16},{53.00,102.82},
{65.99,122.14},{80.88,146.18},{ 2.80,28.08},{62.66,127.25},
{81.08,135.03},{11.56,58.03},{46.56,85.84},{48.72,108.46},
{34.11,77.80},{25.72,69.80},{91.39,149.30},{54.37,99.88},
{46.36,107.12},{35.15,75.41},{57.05,105.71},{41.58,83.45},
{22.83,62.83},{23.79,76.76},{53.68,97.62},{40.51,85.35},
{50.73,98.67},{64.43,125.08},{92.16,155.77},{56.76,110.60},
{20.14,61.83},{72.47,131.58},{78.09,141.29},{ 3.64,32.93},
{ 6.73,65.31},{30.47,75.97},{44.28,85.69},{96.01,159.51},
{10.44,53.52},{45.94,100.93},{35.94,92.95},{79.84,148.83},
{42.10,83.36},{48.75,107.21},{66.31,129.78},{ 8.26,45.71},
{19.01,57.81},{12.89,52.33},{34.53,79.61},{57.75,104.38},
{47.06,97.03},{41.79,96.89},{21.96,67.98},{29.73,75.70},
{ 6.13,26.46},{13.22,76.73},{66.13,103.21},{18.58,62.21},
{30.37,42.98},{20.71,54.73},{63.10,130.17},{52.73,105.14},
{38.51,89.64},{10.37,52.75},{13.14,53.41},{23.17,57.08},
{96.43,158.01},{71.44,124.79},{38.76,93.08},{50.23,112.39},
{84.90,144.26},{37.25,88.01},{ 6.49,42.67},{64.16,100.96},
{ 1.50,44.62},{29.76,60.69},{67.03,111.56},{31.42,70.82},
{85.35,142.18},{59.23,107.16},{64.07,104.07},{90.84,151.87},
{75.77,135.87},{59.10,99.71},{88.05,157.91},{25.65,70.84},
{95.05,154.88},{65.83,127.48},{90.27,149.50},{15.15,43.70},
{83.34,130.41},{53.43,118.60},{68.00,121.65},{95.21,160.78},
{74.74,150.54},{42.66,79.34},{25.30,53.27},{25.99,85.53},
{43.21,90.96},{13.26,49.37},{41.67,77.87},{47.19,88.81},
{90.32,166.44},{19.76,60.65},{81.70,130.94},{88.04,138.64},
{32.64,87.28},{52.97,94.55},{ 6.54,44.90},{12.62,62.43},
{ 1.99,60.37},{24.10,66.02},{39.88,97.35},{85.36,149.25},
{89.75,149.66},{85.48,135.83},{64.70,115.71},{99.39,155.02},
{66.68,111.85},{88.72,146.10},{24.89,49.71},{97.13,163.31},
{ 8.04,42.95},{72.37,123.91},{ 6.28,47.33},{ 9.04,36.47},
{27.92,52.38},{76.89,125.98},{86.88,149.43},{62.49,113.11},
{75.93,136.43},{39.81,88.85},{37.76,78.30},{48.62,105.76},
{11.06,47.21},{21.55,64.30},{18.34,65.05},{60.29,94.85},
{97.17,144.12},{55.76,94.79},{74.14,112.63},{50.67,102.12},
{78.33,147.34},{87.66,139.50},{95.28,142.74},{15.79,60.86},
{51.55,86.75},{66.70,117.50},{16.42,74.91},{96.96,173.36},
{72.45,117.10},{82.60,150.17},{67.67,130.28},{ 1.46,40.37},
{65.45,118.92},{80.27,139.24},{88.31,144.56},{77.83,139.24},
{16.50,61.86},{ 3.68,43.30},{86.42,146.72},{82.20,144.05},
{60.26,122.63},{35.91,79.84},{ 6.38,36.90},{61.15,133.48},
{75.59,130.90},{66.25,122.05},{39.81,65.67},{22.03,72.67},
{49.81,97.68},{42.75,72.90},{79.72,135.65},{14.02,43.73},
{50.97,113.88},{25.92,75.34},{71.34,131.50},{90.16,159.00},
{90.00,173.48},{ 5.93,32.51},{93.47,164.66},{80.15,137.14},
{96.50,161.72},{ 5.22,36.44},{59.09,127.71},{67.61,142.50},
{37.95,72.89},{36.28,80.51},{ 1.75,37.11},{32.50,81.37},
{68.29,110.92},{ 3.19,38.92},{10.42,47.43},{23.38,68.25},
{ 1.24,39.85},{95.36,147.53},{14.70,39.25},{16.27,49.69},
{78.54,121.12},{20.68,61.07},{89.24,153.20},{37.41,92.99},
{31.54,72.54},{ 9.04,36.12},{71.16,157.70},{40.54,101.05},
{87.40,146.76},{40.03,64.89},{65.93,106.88},{51.99,91.49},
{30.11,69.89},{ 4.20,29.63},{72.94,121.91},{84.03,140.15},
{18.21,65.22},{22.75,72.70},{ 5.03,62.94},{84.19,121.12},
{49.73,109.18},{50.97,96.52},{17.84,61.19},{22.23,63.79},
{98.64,161.96},{47.67,98.78},{95.89,164.82},{17.60,40.57},
{19.55,60.32},{39.65,100.65},{78.04,145.50},{21.25,57.30},
{75.44,132.79},{20.74,51.29},{99.76,167.69},{24.02,68.15},
{83.83,144.62},{28.83,79.70},{81.39,140.50},{54.20,114.42},
{65.66,114.08},{38.43,82.74},{45.69,81.81},{30.16,71.89},
{ 5.60,54.27},{83.32,146.93},{11.91,37.69},{72.86,145.12},
{94.26,157.64},{77.50,145.38},{28.53,70.20},{62.64,144.67},
{46.98,87.65},{17.94,66.43},{94.83,154.61},{70.00,115.57},
{81.49,146.60},{53.42,112.37},{73.41,122.83},{28.85,77.99},
{61.51,103.53},{ 9.43,45.86},{61.79,112.81},{22.91,62.04},
{18.97,73.47},{71.89,125.20},{21.33,49.19},{60.95,107.95},
{50.48,100.19},{44.09,102.06},{90.72,162.02},{54.67,95.87},
{80.13,146.92},{19.49,64.20},{22.27,51.03},{65.80,125.90},
{84.97,142.32},{61.33,129.41},{81.98,151.09},{41.93,94.51},
{69.72,122.51},{20.44,59.72},{52.94,92.47},{53.87,108.83},
{66.10,131.38},{53.89,118.39},{90.61,141.08},{ 1.48,43.49},
{55.65,104.78},{15.90,60.60},{46.88,105.80},{64.44,112.85},
{52.33,117.11},{85.09,153.46},{73.22,115.56},{ 3.81,49.12},
{10.66,30.84},{23.27,55.05},{48.66,109.58},{23.29,50.31},
{88.92,150.73},{26.52,73.72},{65.10,115.43},{17.14,69.33},
{90.44,164.86},{40.92,92.54},{29.13,54.22},{36.01,92.08},
{62.79,95.56},{21.66,69.26},{41.24,83.40},{22.49,75.67},
{60.91,120.06},{94.45,165.13},{13.20,56.43},{59.92,90.43},
{39.19,80.79},{76.59,139.35},{36.67,81.34},{11.06,32.61},
{88.81,151.04},{44.19,86.43},{98.74,170.51},{14.20,57.27},
{ 0.12,34.46},{80.95,146.47},{80.91,137.87},{41.60,89.96},
{74.73,146.95},{10.15,34.76},{99.40,156.65},{ 2.58,40.48},
{97.86,172.37},{78.82,139.27},{58.57,109.60},{96.57,169.35},
{79.00,152.23},{39.99,94.14},{66.95,126.50},{59.33,105.83},
{13.71,60.63},{45.88,100.72},{ 5.73,42.26},{73.24,138.38},
{18.70,59.33},{44.16,103.88},{18.93,63.40},{ 8.89,56.46},
{64.87,119.64},{59.27,128.50},{65.70,125.98},{31.45,76.90},
{47.62,106.65},{55.24,102.65},{66.98,129.90},{67.20,120.15},
{82.89,160.45},{87.63,156.09},{86.84,154.94},{49.71,106.31},
{81.13,141.18},{83.95,148.70},{24.82,68.16},{ 6.29,36.96},
{45.53,100.22},{54.86,118.40},{20.11,73.23},{36.27,77.63},
{34.99,87.72},{82.93,147.98},{15.79,47.57},{16.52,38.24},
{41.72,91.70},{88.28,162.99},{41.99,86.34},{19.14,71.88},
{46.82,92.30},{63.26,119.18},{95.62,168.26},{16.65,53.28},
{37.05,97.31},{23.12,52.65},{94.77,164.76},{92.08,141.33},
{73.24,117.79},{26.84,57.89},{79.50,144.53},{ 4.19,28.60},
{72.43,135.74},{53.96,102.81},{34.51,71.36},{ 8.26,36.34},
{70.16,133.65},{58.46,96.95},{95.49,147.11},{61.54,129.50},
{53.80,99.09},{20.07,70.56},{92.32,161.17},{77.15,131.94},
{13.48,47.35},{98.88,169.61},{54.80,84.26},{29.52,77.65},
{46.78,81.14},{50.98,100.42},{34.22,71.59},{92.79,162.00},
{41.44,107.55},{65.00,105.16},{25.10,75.73},{ 5.68,47.49},
{55.63,122.32},{59.70,105.98},{ 0.83,18.38},{93.49,170.66},
{74.24,125.12},{21.73,56.04},{69.00,129.79},{74.33,131.77},
{87.29,162.96},{49.45,108.64},{39.85,95.13},{65.94,128.56},
{96.42,164.68},{75.72,135.47},{74.00,128.64},{22.69,79.03},
{16.49,49.83},{51.75,105.92},{18.35,39.89},{12.11,47.51},
{11.40,53.07},{42.69,75.97},{34.09,90.98},{89.58,138.92},
{61.38,116.03},{15.99,53.12},{51.36,98.31},{ 8.43,41.23},
{99.48,160.38},{28.02,72.25},{18.56,67.65},{20.40,75.66},
{ 9.16,51.61},{16.99,69.22},{16.63,62.37},{98.07,172.62},
{ 2.13,37.96},{34.13,90.66},{46.26,90.77},{91.73,155.07},
{38.47,84.49},{62.03,123.28},{22.39,52.12},{32.11,73.83},
{90.83,141.57},{55.57,125.48},{31.37,72.59},{74.83,150.41},
{84.81,158.26},{68.49,137.47},{28.18,66.69},{30.45,95.28},
{35.25,85.16},{68.88,111.04},{69.27,138.32},{99.21,173.21},
{12.99,44.58},{33.35,93.76},{51.33,90.40},{61.72,112.41},
{59.57,115.05},{68.79,118.10},{43.68,103.93},{28.34,72.36},
{65.11,117.06},{80.55,143.77},{19.12,65.14},{19.35,73.32},
{ 5.25,43.43},{61.76,111.72},{72.75,138.36},{57.36,101.49},
{49.69,113.93},{86.72,139.78},{87.23,144.77},{82.63,140.81},
{86.35,146.08},{85.91,147.89},{98.85,174.96},{92.35,159.01},
{25.75,70.99},{39.70,81.86},{ 3.86,33.06},{61.49,112.99},
{55.07,136.48},{70.31,120.27},{74.20,122.49},{76.62,139.04},
{59.92,107.95},{67.72,150.15},{90.39,159.74},{56.12,99.84},
{25.27,65.76},{47.30,88.16},{87.88,125.22},{66.52,121.60},
{56.18,105.57},{23.84,65.30},{47.42,103.93},{14.72,54.60},
{55.42,93.79},{72.59,123.12},{97.52,153.87},{57.87,111.74},
{16.32,67.04},{61.16,108.39},{10.41,59.99},{21.46,50.16},
{88.81,161.65},{87.42,146.69},{58.95,125.92},{76.51,138.78},
{ 9.07,60.01},{23.03,70.96},{ 0.74,43.37},{94.22,142.83},
{39.50,74.27},{ 9.36,54.88},{39.38,108.91},{47.11,97.19},
{ 8.02,27.25},{ 2.14,30.21},{ 2.24,47.15},{28.53,75.91},
{53.16,116.06},{67.95,131.53},{39.90,96.56},{ 4.89,46.30},
{96.71,151.69},{52.71,86.57},{72.33,127.71},{57.81,113.64},
{20.66,50.51},{60.82,122.96},{52.86,93.88},{14.65,64.75},
{74.36,132.70},{46.84,81.11},{ 3.79,32.15},{39.85,87.00},
{42.20,88.52},{78.22,130.13},{93.58,152.52},{57.03,92.98},
{26.96,71.50},{ 3.42,36.65},{ 2.61,34.84},{88.96,150.75},
{92.04,157.28},{51.04,108.19},{59.44,120.82},{55.34,95.53},
{41.00,96.36},{59.79,131.01},{30.89,63.48},{43.47,90.16},
{18.84,72.90},{42.70,78.42},{44.85,90.97},{41.23,99.03},
{16.14,52.82},{10.22,69.66},{86.11,150.33},{43.47,96.18},
{97.45,180.39},{31.67,77.81},{75.57,130.89},{16.87,45.23},
{ 6.68,42.93},{11.99,46.31},{93.15,165.13},{25.97,61.79},
{ 1.98,52.17},{50.93,91.84},{19.96,38.01},{51.04,110.55},
{ 2.94,44.35},{38.64,78.52},{87.43,142.52},{67.31,141.90},
{97.56,162.61},{23.24,58.72},{88.40,126.06},{97.41,152.38},
{ 8.99,60.09},{62.95,121.42},{39.19,78.97},{68.34,124.26},
{67.92,126.91},{18.55,59.65},{ 0.52,42.03},{63.22,127.39},
{61.12,108.44},{38.83,76.44},{75.92,123.50},{24.70,61.13},
{34.53,63.04},{30.55,69.85},{93.81,158.14},{17.02,58.94},
{39.86,86.69},{13.91,43.15},{43.07,80.31},{14.22,52.39},
{28.01,64.04},{17.66,51.30},{64.87,127.50},{68.69,129.09},
{ 3.99,46.66},{27.77,79.85},{82.46,133.97},{11.77,51.57},
{ 3.29,42.13},{28.30,80.83},{56.98,102.61},{41.17,97.33},
{50.10,94.36},{89.95,144.63},{13.52,43.10},{38.27,106.86},
{29.52,59.80},{78.72,146.92},{34.18,96.12},{85.06,152.50},
{79.77,122.94},{36.97,84.69},{16.15,48.64},{80.74,110.63},
{73.75,133.85},{98.49,171.85},{22.60,60.53},{49.58,112.72},
{35.70,75.85},{55.94,117.99},{21.88,51.47},{14.56,45.53},
{12.98,48.68},{61.74,108.64},{84.13,156.45},{10.53,63.37},
{67.73,117.73},{28.39,78.10},{83.21,138.03},{76.86,135.79},
{67.45,121.59},{54.79,102.89},{87.09,145.27},{78.89,141.41},
{93.95,154.16},{82.44,149.57},{46.98,99.33},{52.73,110.86},
{74.92,127.56},{18.70,67.47},{28.05,67.85},{17.31,50.26},
{51.58,107.92},{ 6.23,51.92},{ 3.91,30.74},{69.02,125.15},
{80.46,138.83},{35.14,80.49},{92.95,163.01},{ 8.26,53.66},
{39.88,96.76},{55.01,105.77},{55.70,105.97},{ 7.84,49.25},
{ 7.46,32.19},{ 6.66,43.31},{82.11,133.48},{87.68,144.55},
{ 9.06,45.72},{50.11,90.64},{85.47,162.49},{53.97,96.09},
{ 3.95,43.61},{70.93,114.76},{63.70,121.28},{12.35,41.48},
{61.28,108.55},{36.19,71.01},{ 5.82,46.69},{31.71,88.30},
{70.95,121.80},{28.23,69.52},{ 7.46,38.60},{85.07,137.40},
{38.88,85.77},{41.81,81.44},{ 9.77,46.36},{84.85,146.87},
{49.52,113.65},{58.38,108.35},{19.87,65.23},{71.50,130.83},
{71.13,127.30},{80.05,139.42},{27.85,76.60},{37.16,76.01}
};
/* Compute (finish - start) in nanoseconds and store it in *difference.
   Returns 0 when the interval is positive, non-zero otherwise. */
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
    long long int secs  = finish->tv_sec  - start->tv_sec;
    long long int nsecs = finish->tv_nsec - start->tv_nsec;
    /* Borrow one second when the nanosecond field underflows. */
    if (nsecs < 0) {
        secs -= 1;
        nsecs += 1000000000;
    }
    *difference = secs * 1000000000 + nsecs;
    return !(*difference > 0);
}
/* Squared vertical distance between the point (x, y) and the line y = m*x + c. */
double residual_error(double x, double y, double m, double c) {
    double predicted = (m * x) + c;
    double diff = predicted - y;
    return diff * diff;
}
/* Root-mean-square error of the line y = m*x + c over the global data set
   (host-side reference implementation). */
double rms_error(double m, double c) {
    double error_sum = 0;
    for (int i = 0; i < n_data; i++) {
        error_sum += residual_error(data[i].x, data[i].y, m, c);
    }
    return sqrt(error_sum / n_data);
}
// Device-side squared vertical distance of (x, y) from the line y = m*x + c.
__device__ double d_residual_error(double x, double y, double m, double c) {
    double predicted = (m * x) + c;
    double diff = predicted - y;
    return diff * diff;
}
// One thread per data point: store the squared residual of point i against the
// candidate line (*m, *c) into error_sum_arr[i]; the host reduces the array.
// NOTE(review): no bounds guard — the launch configuration (100 blocks x 10
// threads = 1000) must exactly match the 1000-element buffer and data set;
// confirm before changing either.
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c);
}
/*
 * Hill-climbing search for the least-squares line y = m*x + c over the global
 * `data` array.  Each sweep evaluates the 8 neighbouring (m, c) candidates;
 * per-point squared errors are computed on the GPU by d_rms_error and reduced
 * on the host.  Stops when no neighbour improves on the current best error.
 *
 * Fixes vs. the original:
 *  - error_sum_total was read before being initialized (undefined behaviour on
 *    the first candidate); now initialized to 0 at declaration.
 *  - best_error is reset at the start of every sweep so the printed "best
 *    direction" always refers to the current candidates rather than a stale
 *    index carried over from an earlier sweep.
 */
int main() {
    int i;
    double bm = 1.3;                 /* best slope so far */
    double bc = 10;                  /* best intercept so far */
    double be;                       /* RMS error of (bm, bc) */
    double dm[8];                    /* candidate slopes, one per direction */
    double dc[8];                    /* candidate intercepts */
    double e[8];                     /* RMS error of each candidate */
    double step = 0.01;              /* search step size */
    double best_error = 999999999;   /* best candidate error in current sweep */
    int best_error_i;                /* index of that candidate */
    int minimum_found = 0;
    /* Offsets defining the 8 neighbours of (bm, bc) on the step grid. */
    double om[] = {0,1,1, 1, 0,-1,-1,-1};
    double oc[] = {1,1,0,-1,-1,-1, 0, 1};
    struct timespec start, finish;
    long long int time_elapsed;
    clock_gettime(CLOCK_MONOTONIC, &start);
    hipError_t error;
    double *d_dm;
    double *d_dc;
    double *d_error_sum_arr;
    point_t *d_data;
    be = rms_error(bm, bc);
    /* Device buffers: candidate m/c values, per-point squared errors, data. */
    error = hipMalloc(&d_dm, (sizeof(double) * 8));
    if(error){
        fprintf(stderr, "hipMalloc on d_dm returned %d %s\n", error,
                hipGetErrorString(error));
        exit(1);
    }
    error = hipMalloc(&d_dc, (sizeof(double) * 8));
    if(error){
        fprintf(stderr, "hipMalloc on d_dc returned %d %s\n", error,
                hipGetErrorString(error));
        exit(1);
    }
    error = hipMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
    if(error){
        fprintf(stderr, "hipMalloc on d_error_sum_arr returned %d %s\n", error,
                hipGetErrorString(error));
        exit(1);
    }
    error = hipMalloc(&d_data, sizeof(data));
    if(error){
        fprintf(stderr, "hipMalloc on d_data returned %d %s\n", error,
                hipGetErrorString(error));
        exit(1);
    }
    while(!minimum_found) {
        /* Fresh neighbourhood comparison each sweep (see header note). */
        best_error = 999999999;
        for(i=0;i<8;i++) {
            dm[i] = bm + (om[i] * step);
            dc[i] = bc + (oc[i] * step);
        }
        error = hipMemcpy(d_dm, dm, (sizeof(double) * 8), hipMemcpyHostToDevice);
        if(error){
            fprintf(stderr, "hipMemcpy to d_dm returned %d %s\n", error,
                    hipGetErrorString(error));
        }
        error = hipMemcpy(d_dc, dc, (sizeof(double) * 8), hipMemcpyHostToDevice);
        if(error){
            fprintf(stderr, "hipMemcpy to d_dc returned %d %s\n", error,
                    hipGetErrorString(error));
        }
        error = hipMemcpy(d_data, data, sizeof(data), hipMemcpyHostToDevice);
        if(error){
            fprintf(stderr, "hipMemcpy to d_data returned %d %s\n", error,
                    hipGetErrorString(error));
        }
        for(i=0;i<8;i++) {
            double h_error_sum_arr[1000];
            double error_sum_total = 0;  /* was read uninitialized before */
            double error_sum_mean;
            /* One thread per data point: 100 blocks x 10 threads = 1000. */
            hipLaunchKernelGGL(( d_rms_error) , dim3(100),dim3(10), 0, 0, &d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
            hipDeviceSynchronize();
            error = hipMemcpy(h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), hipMemcpyDeviceToHost);
            if(error){
                fprintf(stderr, "hipMemcpy to error_sum returned %d %s\n", error,
                        hipGetErrorString(error));
            }
            /* Host-side reduction of the per-point squared errors. */
            for(int j=0; j<n_data; j++) {
                error_sum_total += h_error_sum_arr[j];
            }
            error_sum_mean = error_sum_total / n_data;
            e[i] = sqrt(error_sum_mean);
            if(e[i] < best_error) {
                best_error = e[i];
                best_error_i = i;
            }
        }
        printf("best m,c is %lf,%lf with error %lf in direction %d\n",
               dm[best_error_i], dc[best_error_i], best_error, best_error_i);
        if(best_error < be) {
            be = best_error;
            bm = dm[best_error_i];
            bc = dc[best_error_i];
        } else {
            minimum_found = 1;
        }
    }
    /* Release all device buffers. */
    error = hipFree(d_dm);
    if(error){
        fprintf(stderr, "hipFree on d_dm returned %d %s\n", error,
                hipGetErrorString(error));
        exit(1);
    }
    error = hipFree(d_dc);
    if(error){
        fprintf(stderr, "hipFree on d_dc returned %d %s\n", error,
                hipGetErrorString(error));
        exit(1);
    }
    error = hipFree(d_data);
    if(error){
        fprintf(stderr, "hipFree on d_data returned %d %s\n", error,
                hipGetErrorString(error));
        exit(1);
    }
    error = hipFree(d_error_sum_arr);
    if(error){
        fprintf(stderr, "hipFree on d_error_sum_arr returned %d %s\n", error,
                hipGetErrorString(error));
        exit(1);
    }
    printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
           (time_elapsed/1.0e9));
    return 0;
}
| 988b2b29cac2270074e5851088bdf285667145b4.cu | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
typedef struct point_t {
double x;
double y;
} point_t;
int n_data = 1000;
__device__ int d_n_data = 1000;
point_t data[] = {
{70.49,130.80},{69.15,120.96},{73.09,128.52},{86.47,121.82},
{79.33,135.78},{82.97,146.94},{78.52,137.85},{73.39,136.96},
{75.61,114.34},{79.52,146.23},{76.70,133.10},{65.33,122.13},
{79.50,129.38},{85.38,157.06},{72.33,133.95},{35.91,75.40},
{80.77,143.67},{52.68,89.78},{36.13,77.22},{83.90,142.29},
{59.04,106.05},{71.12,130.59},{32.06,71.46},{40.77,93.83},
{46.91,81.58},{94.85,156.72},{12.06,43.28},{ 0.71,30.59},
{21.39,61.08},{88.48,157.99},{50.25,118.75},{31.08,96.31},
{58.31,100.58},{27.50,76.70},{10.69,38.32},{95.66,150.32},
{15.97,68.61},{80.36,139.70},{71.97,130.69},{ 7.44,39.10},
{74.91,138.56},{ 5.87,38.76},{38.96,94.86},{ 8.20,38.92},
{57.23,103.56},{29.79,82.30},{64.80,112.26},{20.84,60.04},
{82.50,136.68},{12.69,56.76},{ 7.73,41.58},{79.38,150.25},
{99.73,182.45},{64.77,107.66},{38.09,78.58},{75.72,143.27},
{93.61,171.88},{46.59,116.39},{31.18,76.09},{89.36,167.09},
{57.97,100.04},{30.65,59.97},{57.40,107.18},{78.58,141.45},
{28.35,66.02},{72.36,132.13},{12.30,50.43},{42.29,86.56},
{42.23,74.52},{91.38,147.51},{35.52,95.32},{75.56,144.53},
{92.61,164.53},{21.57,81.55},{81.52,146.53},{62.55,108.13},
{45.86,94.30},{27.38,56.80},{98.47,164.28},{79.60,132.17},
{49.49,101.96},{99.59,171.38},{91.11,158.91},{52.61,99.92},
{51.00,105.16},{46.34,97.15},{60.72,90.68},{53.70,116.41},
{16.92,71.76},{75.14,129.96},{56.44,114.93},{63.62,122.81},
{87.88,145.18},{93.38,150.65},{24.15,70.67},{70.89,125.06},
{57.82,120.58},{27.41,77.74},{35.24,76.88},{75.73,146.45},
{72.88,135.76},{56.38,118.97},{ 9.35,53.94},{ 6.65,43.07},
{92.78,145.83},{60.23,103.33},{82.59,138.65},{82.59,147.20},
{ 5.38,47.28},{ 9.85,58.44},{45.16,104.55},{20.85,57.53},
{93.27,159.89},{56.15,94.30},{34.84,84.45},{49.78,105.58},
{24.81,73.02},{12.36,45.98},{51.76,116.24},{14.80,50.91},
{58.76,114.94},{ 4.65,34.34},{74.99,126.14},{45.12,99.17},
{75.16,120.98},{20.96,59.97},{62.03,120.94},{29.02,57.61},
{50.10,109.93},{25.70,67.21},{53.37,121.01},{69.83,106.86},
{98.40,167.39},{62.79,103.27},{73.46,129.78},{94.55,146.92},
{71.17,128.64},{ 9.89,61.96},{71.64,99.54},{ 4.08,39.81},
{86.30,139.84},{57.12,119.67},{23.03,69.35},{13.05,50.76},
{87.04,165.95},{ 6.30,42.04},{46.82,111.49},{29.52,74.86},
{57.05,102.30},{44.34,107.69},{85.27,135.83},{10.74,47.09},
{45.03,90.95},{35.25,98.93},{49.98,101.17},{62.33,110.82},
{83.44,128.79},{49.31,80.08},{98.90,162.01},{73.81,113.52},
{17.62,50.47},{63.13,116.08},{13.46,51.40},{27.67,70.81},
{ 5.54,38.33},{83.67,145.25},{49.77,93.82},{44.96,117.71},
{90.09,165.72},{29.28,76.51},{ 2.22,36.30},{ 6.36,32.50},
{96.08,156.79},{62.72,119.35},{ 2.24,32.55},{85.64,154.80},
{76.93,148.22},{ 3.74,32.51},{80.07,129.92},{ 7.57,47.93},
{ 9.00,52.97},{72.69,123.67},{72.76,149.85},{45.74,103.78},
{66.00,121.69},{59.09,124.34},{ 4.60,47.04},{69.41,133.60},
{ 9.09,63.43},{45.74,103.93},{56.63,111.44},{76.34,136.06},
{97.77,165.84},{80.20,147.33},{16.71,43.05},{13.95,65.55},
{96.06,172.59},{ 5.44,48.49},{12.06,58.95},{36.61,73.82},
{29.29,68.38},{37.42,90.24},{50.49,95.83},{47.45,110.52},
{40.96,92.52},{38.24,78.03},{55.77,105.52},{42.17,93.48},
{40.53,82.97},{22.55,73.32},{20.23,65.30},{ 1.88,54.37},
{ 6.84,45.82},{73.05,130.63},{83.94,158.91},{90.96,161.00},
{10.94,41.62},{78.06,146.69},{45.97,112.26},{79.55,133.24},
{ 0.25,28.96},{98.80,171.38},{95.77,176.06},{ 0.82,31.52},
{40.03,96.99},{22.06,81.54},{21.25,57.42},{26.64,60.34},
{ 3.87,29.19},{79.36,135.16},{88.96,145.04},{90.13,146.75},
{42.63,85.20},{68.11,135.21},{94.64,163.26},{31.01,76.30},
{78.95,132.19},{89.73,153.83},{24.83,65.97},{18.69,50.48},
{45.29,74.50},{ 2.68,37.48},{75.57,134.16},{37.04,97.58},
{53.59,88.16},{66.96,141.13},{ 8.31,61.03},{ 4.53,57.53},
{41.66,97.85},{72.11,132.50},{71.86,138.16},{72.87,121.14},
{87.34,143.48},{95.03,141.95},{85.67,167.74},{83.99,148.48},
{84.18,136.06},{59.05,110.64},{75.45,148.87},{10.60,48.52},
{81.85,144.43},{29.44,76.63},{10.76,52.81},{46.80,103.01},
{10.39,47.90},{35.43,82.97},{11.96,52.36},{41.33,74.35},
{34.32,100.25},{90.90,160.50},{89.02,144.90},{32.94,76.30},
{35.15,67.46},{49.81,105.50},{58.97,114.68},{61.15,85.18},
{53.52,105.59},{80.53,136.80},{ 8.25,57.08},{88.87,142.03},
{63.05,116.95},{50.19,110.48},{32.94,72.63},{75.91,128.73},
{98.59,162.18},{ 6.66,53.32},{66.68,116.98},{46.53,95.66},
{80.56,157.54},{ 4.04,39.16},{84.05,142.18},{ 8.82,51.97},
{94.05,154.96},{14.54,71.12},{51.22,96.16},{53.00,102.82},
{65.99,122.14},{80.88,146.18},{ 2.80,28.08},{62.66,127.25},
{81.08,135.03},{11.56,58.03},{46.56,85.84},{48.72,108.46},
{34.11,77.80},{25.72,69.80},{91.39,149.30},{54.37,99.88},
{46.36,107.12},{35.15,75.41},{57.05,105.71},{41.58,83.45},
{22.83,62.83},{23.79,76.76},{53.68,97.62},{40.51,85.35},
{50.73,98.67},{64.43,125.08},{92.16,155.77},{56.76,110.60},
{20.14,61.83},{72.47,131.58},{78.09,141.29},{ 3.64,32.93},
{ 6.73,65.31},{30.47,75.97},{44.28,85.69},{96.01,159.51},
{10.44,53.52},{45.94,100.93},{35.94,92.95},{79.84,148.83},
{42.10,83.36},{48.75,107.21},{66.31,129.78},{ 8.26,45.71},
{19.01,57.81},{12.89,52.33},{34.53,79.61},{57.75,104.38},
{47.06,97.03},{41.79,96.89},{21.96,67.98},{29.73,75.70},
{ 6.13,26.46},{13.22,76.73},{66.13,103.21},{18.58,62.21},
{30.37,42.98},{20.71,54.73},{63.10,130.17},{52.73,105.14},
{38.51,89.64},{10.37,52.75},{13.14,53.41},{23.17,57.08},
{96.43,158.01},{71.44,124.79},{38.76,93.08},{50.23,112.39},
{84.90,144.26},{37.25,88.01},{ 6.49,42.67},{64.16,100.96},
{ 1.50,44.62},{29.76,60.69},{67.03,111.56},{31.42,70.82},
{85.35,142.18},{59.23,107.16},{64.07,104.07},{90.84,151.87},
{75.77,135.87},{59.10,99.71},{88.05,157.91},{25.65,70.84},
{95.05,154.88},{65.83,127.48},{90.27,149.50},{15.15,43.70},
{83.34,130.41},{53.43,118.60},{68.00,121.65},{95.21,160.78},
{74.74,150.54},{42.66,79.34},{25.30,53.27},{25.99,85.53},
{43.21,90.96},{13.26,49.37},{41.67,77.87},{47.19,88.81},
{90.32,166.44},{19.76,60.65},{81.70,130.94},{88.04,138.64},
{32.64,87.28},{52.97,94.55},{ 6.54,44.90},{12.62,62.43},
{ 1.99,60.37},{24.10,66.02},{39.88,97.35},{85.36,149.25},
{89.75,149.66},{85.48,135.83},{64.70,115.71},{99.39,155.02},
{66.68,111.85},{88.72,146.10},{24.89,49.71},{97.13,163.31},
{ 8.04,42.95},{72.37,123.91},{ 6.28,47.33},{ 9.04,36.47},
{27.92,52.38},{76.89,125.98},{86.88,149.43},{62.49,113.11},
{75.93,136.43},{39.81,88.85},{37.76,78.30},{48.62,105.76},
{11.06,47.21},{21.55,64.30},{18.34,65.05},{60.29,94.85},
{97.17,144.12},{55.76,94.79},{74.14,112.63},{50.67,102.12},
{78.33,147.34},{87.66,139.50},{95.28,142.74},{15.79,60.86},
{51.55,86.75},{66.70,117.50},{16.42,74.91},{96.96,173.36},
{72.45,117.10},{82.60,150.17},{67.67,130.28},{ 1.46,40.37},
{65.45,118.92},{80.27,139.24},{88.31,144.56},{77.83,139.24},
{16.50,61.86},{ 3.68,43.30},{86.42,146.72},{82.20,144.05},
{60.26,122.63},{35.91,79.84},{ 6.38,36.90},{61.15,133.48},
{75.59,130.90},{66.25,122.05},{39.81,65.67},{22.03,72.67},
{49.81,97.68},{42.75,72.90},{79.72,135.65},{14.02,43.73},
{50.97,113.88},{25.92,75.34},{71.34,131.50},{90.16,159.00},
{90.00,173.48},{ 5.93,32.51},{93.47,164.66},{80.15,137.14},
{96.50,161.72},{ 5.22,36.44},{59.09,127.71},{67.61,142.50},
{37.95,72.89},{36.28,80.51},{ 1.75,37.11},{32.50,81.37},
{68.29,110.92},{ 3.19,38.92},{10.42,47.43},{23.38,68.25},
{ 1.24,39.85},{95.36,147.53},{14.70,39.25},{16.27,49.69},
{78.54,121.12},{20.68,61.07},{89.24,153.20},{37.41,92.99},
{31.54,72.54},{ 9.04,36.12},{71.16,157.70},{40.54,101.05},
{87.40,146.76},{40.03,64.89},{65.93,106.88},{51.99,91.49},
{30.11,69.89},{ 4.20,29.63},{72.94,121.91},{84.03,140.15},
{18.21,65.22},{22.75,72.70},{ 5.03,62.94},{84.19,121.12},
{49.73,109.18},{50.97,96.52},{17.84,61.19},{22.23,63.79},
{98.64,161.96},{47.67,98.78},{95.89,164.82},{17.60,40.57},
{19.55,60.32},{39.65,100.65},{78.04,145.50},{21.25,57.30},
{75.44,132.79},{20.74,51.29},{99.76,167.69},{24.02,68.15},
{83.83,144.62},{28.83,79.70},{81.39,140.50},{54.20,114.42},
{65.66,114.08},{38.43,82.74},{45.69,81.81},{30.16,71.89},
{ 5.60,54.27},{83.32,146.93},{11.91,37.69},{72.86,145.12},
{94.26,157.64},{77.50,145.38},{28.53,70.20},{62.64,144.67},
{46.98,87.65},{17.94,66.43},{94.83,154.61},{70.00,115.57},
{81.49,146.60},{53.42,112.37},{73.41,122.83},{28.85,77.99},
{61.51,103.53},{ 9.43,45.86},{61.79,112.81},{22.91,62.04},
{18.97,73.47},{71.89,125.20},{21.33,49.19},{60.95,107.95},
{50.48,100.19},{44.09,102.06},{90.72,162.02},{54.67,95.87},
{80.13,146.92},{19.49,64.20},{22.27,51.03},{65.80,125.90},
{84.97,142.32},{61.33,129.41},{81.98,151.09},{41.93,94.51},
{69.72,122.51},{20.44,59.72},{52.94,92.47},{53.87,108.83},
{66.10,131.38},{53.89,118.39},{90.61,141.08},{ 1.48,43.49},
{55.65,104.78},{15.90,60.60},{46.88,105.80},{64.44,112.85},
{52.33,117.11},{85.09,153.46},{73.22,115.56},{ 3.81,49.12},
{10.66,30.84},{23.27,55.05},{48.66,109.58},{23.29,50.31},
{88.92,150.73},{26.52,73.72},{65.10,115.43},{17.14,69.33},
{90.44,164.86},{40.92,92.54},{29.13,54.22},{36.01,92.08},
{62.79,95.56},{21.66,69.26},{41.24,83.40},{22.49,75.67},
{60.91,120.06},{94.45,165.13},{13.20,56.43},{59.92,90.43},
{39.19,80.79},{76.59,139.35},{36.67,81.34},{11.06,32.61},
{88.81,151.04},{44.19,86.43},{98.74,170.51},{14.20,57.27},
{ 0.12,34.46},{80.95,146.47},{80.91,137.87},{41.60,89.96},
{74.73,146.95},{10.15,34.76},{99.40,156.65},{ 2.58,40.48},
{97.86,172.37},{78.82,139.27},{58.57,109.60},{96.57,169.35},
{79.00,152.23},{39.99,94.14},{66.95,126.50},{59.33,105.83},
{13.71,60.63},{45.88,100.72},{ 5.73,42.26},{73.24,138.38},
{18.70,59.33},{44.16,103.88},{18.93,63.40},{ 8.89,56.46},
{64.87,119.64},{59.27,128.50},{65.70,125.98},{31.45,76.90},
{47.62,106.65},{55.24,102.65},{66.98,129.90},{67.20,120.15},
{82.89,160.45},{87.63,156.09},{86.84,154.94},{49.71,106.31},
{81.13,141.18},{83.95,148.70},{24.82,68.16},{ 6.29,36.96},
{45.53,100.22},{54.86,118.40},{20.11,73.23},{36.27,77.63},
{34.99,87.72},{82.93,147.98},{15.79,47.57},{16.52,38.24},
{41.72,91.70},{88.28,162.99},{41.99,86.34},{19.14,71.88},
{46.82,92.30},{63.26,119.18},{95.62,168.26},{16.65,53.28},
{37.05,97.31},{23.12,52.65},{94.77,164.76},{92.08,141.33},
{73.24,117.79},{26.84,57.89},{79.50,144.53},{ 4.19,28.60},
{72.43,135.74},{53.96,102.81},{34.51,71.36},{ 8.26,36.34},
{70.16,133.65},{58.46,96.95},{95.49,147.11},{61.54,129.50},
{53.80,99.09},{20.07,70.56},{92.32,161.17},{77.15,131.94},
{13.48,47.35},{98.88,169.61},{54.80,84.26},{29.52,77.65},
{46.78,81.14},{50.98,100.42},{34.22,71.59},{92.79,162.00},
{41.44,107.55},{65.00,105.16},{25.10,75.73},{ 5.68,47.49},
{55.63,122.32},{59.70,105.98},{ 0.83,18.38},{93.49,170.66},
{74.24,125.12},{21.73,56.04},{69.00,129.79},{74.33,131.77},
{87.29,162.96},{49.45,108.64},{39.85,95.13},{65.94,128.56},
{96.42,164.68},{75.72,135.47},{74.00,128.64},{22.69,79.03},
{16.49,49.83},{51.75,105.92},{18.35,39.89},{12.11,47.51},
{11.40,53.07},{42.69,75.97},{34.09,90.98},{89.58,138.92},
{61.38,116.03},{15.99,53.12},{51.36,98.31},{ 8.43,41.23},
{99.48,160.38},{28.02,72.25},{18.56,67.65},{20.40,75.66},
{ 9.16,51.61},{16.99,69.22},{16.63,62.37},{98.07,172.62},
{ 2.13,37.96},{34.13,90.66},{46.26,90.77},{91.73,155.07},
{38.47,84.49},{62.03,123.28},{22.39,52.12},{32.11,73.83},
{90.83,141.57},{55.57,125.48},{31.37,72.59},{74.83,150.41},
{84.81,158.26},{68.49,137.47},{28.18,66.69},{30.45,95.28},
{35.25,85.16},{68.88,111.04},{69.27,138.32},{99.21,173.21},
{12.99,44.58},{33.35,93.76},{51.33,90.40},{61.72,112.41},
{59.57,115.05},{68.79,118.10},{43.68,103.93},{28.34,72.36},
{65.11,117.06},{80.55,143.77},{19.12,65.14},{19.35,73.32},
{ 5.25,43.43},{61.76,111.72},{72.75,138.36},{57.36,101.49},
{49.69,113.93},{86.72,139.78},{87.23,144.77},{82.63,140.81},
{86.35,146.08},{85.91,147.89},{98.85,174.96},{92.35,159.01},
{25.75,70.99},{39.70,81.86},{ 3.86,33.06},{61.49,112.99},
{55.07,136.48},{70.31,120.27},{74.20,122.49},{76.62,139.04},
{59.92,107.95},{67.72,150.15},{90.39,159.74},{56.12,99.84},
{25.27,65.76},{47.30,88.16},{87.88,125.22},{66.52,121.60},
{56.18,105.57},{23.84,65.30},{47.42,103.93},{14.72,54.60},
{55.42,93.79},{72.59,123.12},{97.52,153.87},{57.87,111.74},
{16.32,67.04},{61.16,108.39},{10.41,59.99},{21.46,50.16},
{88.81,161.65},{87.42,146.69},{58.95,125.92},{76.51,138.78},
{ 9.07,60.01},{23.03,70.96},{ 0.74,43.37},{94.22,142.83},
{39.50,74.27},{ 9.36,54.88},{39.38,108.91},{47.11,97.19},
{ 8.02,27.25},{ 2.14,30.21},{ 2.24,47.15},{28.53,75.91},
{53.16,116.06},{67.95,131.53},{39.90,96.56},{ 4.89,46.30},
{96.71,151.69},{52.71,86.57},{72.33,127.71},{57.81,113.64},
{20.66,50.51},{60.82,122.96},{52.86,93.88},{14.65,64.75},
{74.36,132.70},{46.84,81.11},{ 3.79,32.15},{39.85,87.00},
{42.20,88.52},{78.22,130.13},{93.58,152.52},{57.03,92.98},
{26.96,71.50},{ 3.42,36.65},{ 2.61,34.84},{88.96,150.75},
{92.04,157.28},{51.04,108.19},{59.44,120.82},{55.34,95.53},
{41.00,96.36},{59.79,131.01},{30.89,63.48},{43.47,90.16},
{18.84,72.90},{42.70,78.42},{44.85,90.97},{41.23,99.03},
{16.14,52.82},{10.22,69.66},{86.11,150.33},{43.47,96.18},
{97.45,180.39},{31.67,77.81},{75.57,130.89},{16.87,45.23},
{ 6.68,42.93},{11.99,46.31},{93.15,165.13},{25.97,61.79},
{ 1.98,52.17},{50.93,91.84},{19.96,38.01},{51.04,110.55},
{ 2.94,44.35},{38.64,78.52},{87.43,142.52},{67.31,141.90},
{97.56,162.61},{23.24,58.72},{88.40,126.06},{97.41,152.38},
{ 8.99,60.09},{62.95,121.42},{39.19,78.97},{68.34,124.26},
{67.92,126.91},{18.55,59.65},{ 0.52,42.03},{63.22,127.39},
{61.12,108.44},{38.83,76.44},{75.92,123.50},{24.70,61.13},
{34.53,63.04},{30.55,69.85},{93.81,158.14},{17.02,58.94},
{39.86,86.69},{13.91,43.15},{43.07,80.31},{14.22,52.39},
{28.01,64.04},{17.66,51.30},{64.87,127.50},{68.69,129.09},
{ 3.99,46.66},{27.77,79.85},{82.46,133.97},{11.77,51.57},
{ 3.29,42.13},{28.30,80.83},{56.98,102.61},{41.17,97.33},
{50.10,94.36},{89.95,144.63},{13.52,43.10},{38.27,106.86},
{29.52,59.80},{78.72,146.92},{34.18,96.12},{85.06,152.50},
{79.77,122.94},{36.97,84.69},{16.15,48.64},{80.74,110.63},
{73.75,133.85},{98.49,171.85},{22.60,60.53},{49.58,112.72},
{35.70,75.85},{55.94,117.99},{21.88,51.47},{14.56,45.53},
{12.98,48.68},{61.74,108.64},{84.13,156.45},{10.53,63.37},
{67.73,117.73},{28.39,78.10},{83.21,138.03},{76.86,135.79},
{67.45,121.59},{54.79,102.89},{87.09,145.27},{78.89,141.41},
{93.95,154.16},{82.44,149.57},{46.98,99.33},{52.73,110.86},
{74.92,127.56},{18.70,67.47},{28.05,67.85},{17.31,50.26},
{51.58,107.92},{ 6.23,51.92},{ 3.91,30.74},{69.02,125.15},
{80.46,138.83},{35.14,80.49},{92.95,163.01},{ 8.26,53.66},
{39.88,96.76},{55.01,105.77},{55.70,105.97},{ 7.84,49.25},
{ 7.46,32.19},{ 6.66,43.31},{82.11,133.48},{87.68,144.55},
{ 9.06,45.72},{50.11,90.64},{85.47,162.49},{53.97,96.09},
{ 3.95,43.61},{70.93,114.76},{63.70,121.28},{12.35,41.48},
{61.28,108.55},{36.19,71.01},{ 5.82,46.69},{31.71,88.30},
{70.95,121.80},{28.23,69.52},{ 7.46,38.60},{85.07,137.40},
{38.88,85.77},{41.81,81.44},{ 9.77,46.36},{84.85,146.87},
{49.52,113.65},{58.38,108.35},{19.87,65.23},{71.50,130.83},
{71.13,127.30},{80.05,139.42},{27.85,76.60},{37.16,76.01}
};
/* Compute (finish - start) in nanoseconds and store it in *difference.
   Returns 0 when the interval is positive, non-zero otherwise. */
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
    long long int secs  = finish->tv_sec  - start->tv_sec;
    long long int nsecs = finish->tv_nsec - start->tv_nsec;
    /* Borrow one second when the nanosecond field underflows. */
    if (nsecs < 0) {
        secs -= 1;
        nsecs += 1000000000;
    }
    *difference = secs * 1000000000 + nsecs;
    return !(*difference > 0);
}
/* Squared vertical distance between the point (x, y) and the line y = m*x + c. */
double residual_error(double x, double y, double m, double c) {
    double predicted = (m * x) + c;
    double diff = predicted - y;
    return diff * diff;
}
/* Root-mean-square error of the line y = m*x + c over the global data set
   (host-side reference implementation). */
double rms_error(double m, double c) {
    double error_sum = 0;
    for (int i = 0; i < n_data; i++) {
        error_sum += residual_error(data[i].x, data[i].y, m, c);
    }
    return sqrt(error_sum / n_data);
}
// Device-side squared vertical distance of (x, y) from the line y = m*x + c.
__device__ double d_residual_error(double x, double y, double m, double c) {
    double predicted = (m * x) + c;
    double diff = predicted - y;
    return diff * diff;
}
// Per-thread squared residual for one data point against the candidate line
// (*m, *c).  Writes into error_sum_arr[i]; the host reduces the array.
// Guarded with d_n_data (device copy of the data-set size, declared above) so
// launches whose thread count exceeds the data size stay in bounds.
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < d_n_data)
        error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c);
}
/*
 * Hill-climbing search for the least-squares line y = m*x + c over the global
 * `data` array.  Each sweep evaluates the 8 neighbouring (m, c) candidates;
 * per-point squared errors are computed on the GPU by d_rms_error and reduced
 * on the host.  Stops when no neighbour improves on the current best error.
 *
 * Fixes vs. the original:
 *  - error_sum_total was read before being initialized (undefined behaviour on
 *    the first candidate); now initialized to 0 at declaration.
 *  - best_error is reset at the start of every sweep so the printed "best
 *    direction" always refers to the current candidates rather than a stale
 *    index carried over from an earlier sweep.
 *  - cudaThreadSynchronize() (deprecated) replaced by cudaDeviceSynchronize().
 */
int main() {
    int i;
    double bm = 1.3;                 /* best slope so far */
    double bc = 10;                  /* best intercept so far */
    double be;                       /* RMS error of (bm, bc) */
    double dm[8];                    /* candidate slopes, one per direction */
    double dc[8];                    /* candidate intercepts */
    double e[8];                     /* RMS error of each candidate */
    double step = 0.01;              /* search step size */
    double best_error = 999999999;   /* best candidate error in current sweep */
    int best_error_i;                /* index of that candidate */
    int minimum_found = 0;
    /* Offsets defining the 8 neighbours of (bm, bc) on the step grid. */
    double om[] = {0,1,1, 1, 0,-1,-1,-1};
    double oc[] = {1,1,0,-1,-1,-1, 0, 1};
    struct timespec start, finish;
    long long int time_elapsed;
    clock_gettime(CLOCK_MONOTONIC, &start);
    cudaError_t error;
    double *d_dm;
    double *d_dc;
    double *d_error_sum_arr;
    point_t *d_data;
    be = rms_error(bm, bc);
    /* Device buffers: candidate m/c values, per-point squared errors, data. */
    error = cudaMalloc(&d_dm, (sizeof(double) * 8));
    if(error){
        fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error,
                cudaGetErrorString(error));
        exit(1);
    }
    error = cudaMalloc(&d_dc, (sizeof(double) * 8));
    if(error){
        fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error,
                cudaGetErrorString(error));
        exit(1);
    }
    error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
    if(error){
        fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error,
                cudaGetErrorString(error));
        exit(1);
    }
    error = cudaMalloc(&d_data, sizeof(data));
    if(error){
        fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error,
                cudaGetErrorString(error));
        exit(1);
    }
    while(!minimum_found) {
        /* Fresh neighbourhood comparison each sweep (see header note). */
        best_error = 999999999;
        for(i=0;i<8;i++) {
            dm[i] = bm + (om[i] * step);
            dc[i] = bc + (oc[i] * step);
        }
        error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice);
        if(error){
            fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error,
                    cudaGetErrorString(error));
        }
        error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice);
        if(error){
            fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error,
                    cudaGetErrorString(error));
        }
        error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
        if(error){
            fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error,
                    cudaGetErrorString(error));
        }
        for(i=0;i<8;i++) {
            double h_error_sum_arr[1000];
            double error_sum_total = 0;  /* was read uninitialized before */
            double error_sum_mean;
            /* One thread per data point: 100 blocks x 10 threads = 1000. */
            d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
            cudaDeviceSynchronize();
            error = cudaMemcpy(h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost);
            if(error){
                fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error,
                        cudaGetErrorString(error));
            }
            /* Host-side reduction of the per-point squared errors. */
            for(int j=0; j<n_data; j++) {
                error_sum_total += h_error_sum_arr[j];
            }
            error_sum_mean = error_sum_total / n_data;
            e[i] = sqrt(error_sum_mean);
            if(e[i] < best_error) {
                best_error = e[i];
                best_error_i = i;
            }
        }
        printf("best m,c is %lf,%lf with error %lf in direction %d\n",
               dm[best_error_i], dc[best_error_i], best_error, best_error_i);
        if(best_error < be) {
            be = best_error;
            bm = dm[best_error_i];
            bc = dc[best_error_i];
        } else {
            minimum_found = 1;
        }
    }
    /* Release all device buffers. */
    error = cudaFree(d_dm);
    if(error){
        fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error,
                cudaGetErrorString(error));
        exit(1);
    }
    error = cudaFree(d_dc);
    if(error){
        fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error,
                cudaGetErrorString(error));
        exit(1);
    }
    error = cudaFree(d_data);
    if(error){
        fprintf(stderr, "cudaFree on d_data returned %d %s\n", error,
                cudaGetErrorString(error));
        exit(1);
    }
    error = cudaFree(d_error_sum_arr);
    if(error){
        fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error,
                cudaGetErrorString(error));
        exit(1);
    }
    printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
           (time_elapsed/1.0e9));
    return 0;
}
|
b3a15c208835f3c934a140865556996b2fa99216.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by saleh on 10/8/18.
//
// Element-wise ReLU: g_odata[i] = g_idata[i] if positive, else 0.
// One thread per element; the guard handles the grid tail.
__global__ void kernel_relu(const float * __restrict__ g_idata, float * __restrict__ g_odata, unsigned long len){
    unsigned long gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= len) return;
    const float v = g_idata[gid];
    g_odata[gid] = (v > 0) ? v : 0;
}
// Host wrapper: apply element-wise ReLU to the device buffer g_idata,
// writing the result to the device buffer g_odata (len elements).
void activation_relu(
const float *g_idata,
float *g_odata,
unsigned long len){
unsigned long blocksize, gridsize;
blocksize = 256;
// Ceiling division: enough blocks to cover all len elements.
gridsize = (len + blocksize -1 )/blocksize;
hipLaunchKernelGGL(( kernel_relu), dim3(gridsize),dim3(blocksize), 0, 0, g_idata,g_odata,len);
} | b3a15c208835f3c934a140865556996b2fa99216.cu | //
// Created by saleh on 10/8/18.
//
// Element-wise ReLU: g_odata[i] = g_idata[i] if positive, else 0.
// One thread per element; the guard handles the grid tail.
__global__ void kernel_relu(const float * __restrict__ g_idata, float * __restrict__ g_odata, unsigned long len){
    unsigned long gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= len) return;
    const float v = g_idata[gid];
    g_odata[gid] = (v > 0) ? v : 0;
}
// Host wrapper: apply element-wise ReLU to the device buffer g_idata,
// writing the result to the device buffer g_odata (len elements).
void activation_relu(
const float *g_idata,
float *g_odata,
unsigned long len){
unsigned long blocksize, gridsize;
blocksize = 256;
// Ceiling division: enough blocks to cover all len elements.
gridsize = (len + blocksize -1 )/blocksize;
kernel_relu<<<gridsize,blocksize>>>(g_idata,g_odata,len);
} | c8e779b35c9fa8af2159bb1f17301f958a80176a.hip | //
c8e779b35c9fa8af2159bb1f17301f958a80176a.hip | // !!! This is a file automatically generated by hipify!!!
// ======================================================================== //
// Copyright 2019-2020 The Collaborators //
// //
// Licensed under the Apache License, Version 2.0 (the "License"); //
// you may not use this file except in compliance with the License. //
// You may obtain a copy of the License at //
// //
// http://www.apache.org/licenses/LICENSE-2.0 //
// //
// Unless required by applicable law or agreed to in writing, software //
// distributed under the License is distributed on an "AS IS" BASIS, //
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
// See the License for the specific language governing permissions and //
// limitations under the License. //
// ======================================================================== //
#include "hip/hip_runtime_api.h"
#include "owl/common/math/vec.h"
#include "owl/common/math/AffineSpace.h"
#include <thrust/device_vector.h>
#include "RTQuery.h"
//#define DEBUG_RT
namespace advect {
// Result of a barycentric walk (see baryTetSearch below).
struct SearchInfo {
int tetID;// >= 0: 0-based ID of the tet containing the point; < 0: the point
          // left the mesh, encoded as -(lastInsideTet + 1) (1-based to avoid -0)
int faceID;// global ID of the last facet crossed during the walk (-1 if none)
};
// Walk the tet mesh from tetID_start toward the tetrahedron containing point P.
// At each step the barycentric coordinates of P in the current tet are computed;
// if all are non-negative the tet hosts P, otherwise the walk crosses the facet
// with the most negative weight into the neighbouring tet.  At most 50 steps.
// Returns SearchInfo with:
//   tetID  >= 0 : 0-based ID of the hosting tet;
//   tetID  <  0 : P is outside the mesh; -(lastInsideTet + 1) (1-based encoding
//                 so that tet 0 is distinguishable from "none");
//   faceID      : global ID of the last facet crossed (-1 if no facet crossed).
// particleID is only used by the (commented-out) debug printf.
__device__ SearchInfo baryTetSearch(int particleID,vec3d P, int tetID_start, vec3d* d_tetVerts, vec4i* d_tetInds,
vec4i* d_facets, vec4i* d_tetfacets, FaceInfo* d_faceinfos) {
//Search hosting cell of point P starting from tetID_start
int tetID_search = tetID_start;
int faceID = -1;
int tetID_previous = tetID_search;
for (int i = 0; i < 50; ++i) {//Search maximum 50 times
// Barycentric coordinates of P in the current candidate tet.
const vec4i index_tet = d_tetInds[tetID_search];
const vec3d A = d_tetVerts[index_tet.x];
const vec3d B = d_tetVerts[index_tet.y];
const vec3d C = d_tetVerts[index_tet.z];
const vec3d D = d_tetVerts[index_tet.w];
const vec4d bary = tetBaryCoord(P, A, B, C, D);
const double wmin = reduce_min(bary);
//Case1. All weights non-negative: this tet hosts P.
if (wmin >= 0.0) break;
else {//Otherwise move to one of its neighbors
//The facet with the minimum (most negative) weight is the best exit guess.
const int exitfaceID = arg_min(bary);
faceID = d_tetfacets[tetID_search][exitfaceID];
//If the facet neighbor is out of domain (ID<0), tetID_previous is the exit tet
tetID_previous = tetID_search;
//Advance to the tet on the other side of the facet.
tetID_search =
d_faceinfos[faceID].front == tetID_search
? d_faceinfos[faceID].back
: d_faceinfos[faceID].front;
/*
if (particleID== 9670)
printf("%d SearchID%d tetID=%d tetID_i-1 =%d tetSearch=%d \nP (%f,%f,%f)\n Bary(%f,%f,%f,%f)\n",
particleID,i,
tetID_start, tetID_previous,tetID_search+1,//1-based to 0-based index facetID
P.x, P.y, P.z,
bary.x, bary.y, bary.z, bary.w);
*/
//Case 2. The particle is out of domain, tetID_search = -(last in domain tet+1)
if (tetID_search < 0) {
tetID_search = -(tetID_previous+1);//Set as 1-based index to avoid -0=0
break;
}
//Case3. Continue to the next loop of the search.
}
}
SearchInfo info;
info.tetID = tetID_search;
info.faceID = faceID;
return info;
}
// Mirror the point P_end and velocity Vel across the plane of facet faceID,
// writing the results to P_reflect / u_reflect (outputs may alias the inputs
// at call sites; each output is written once after all reads).
// tetID identifies the tet on the inside of the facet so the normal can be
// oriented; the reflection formula itself is direction-insensitive.
// NOTE(review): the explicit (1.0 + 1.0) factor reads as (1 + restitution)
// with restitution = 1 (perfectly elastic) — presumably kept in this form for
// tuning; confirm before simplifying to 2.0.
__device__ void specularReflect(vec3d P_end, vec3d Vel, int tetID, int faceID,
vec3d* d_tetVerts, FaceInfo* d_faceinfos, vec4i* d_facets,
vec3d &P_reflect, vec3d &u_reflect) {
// Facet plane from its first three vertices.
const vec4i index = d_facets[faceID];
const vec3d A = d_tetVerts[index.x];
const vec3d B = d_tetVerts[index.y];
const vec3d C = d_tetVerts[index.z];
vec3d norm = triNorm(A, B, C);//DeviceTetMesh.cuh
if (d_faceinfos[faceID].back == tetID)//inner normal vector
norm = -norm;
//this equation is not sensitive to norm direction
P_reflect = P_end - (1.0 + 1.0) * dot(P_end - A, norm) * norm;
u_reflect = Vel - (1.0 + 1.0) * dot(Vel, norm) * norm;
}
// One thread per particle: for particles flagged as out-of-domain (negative
// tetID, encoded -(lastInsideTet + 1)), reflect their displacement back into
// the mesh by repeated specular reflection on boundary facets (up to 10
// bounces), then update the particle's displacement, velocity, and tet ID.
// Particles with tetID >= 0 are inside the domain and are left untouched.
__global__ void RTreflection(Particle* d_particles, vec4d* d_disps, vec4d* d_vels,
int* d_tetIDs, int numParticles,
vec3d* d_tetVerts, vec4i* d_tetInds,
vec4i* d_facets, vec4i* d_tetfacets, FaceInfo* d_faceinfos)
{
int particleID = threadIdx.x + blockDim.x * blockIdx.x;
if (particleID >= numParticles) return;
int tetID = d_tetIDs[particleID];
// Only out-of-domain particles (negative, 1-based-encoded IDs) need work.
if (tetID >= 0) return;
tetID = -(tetID + 1);//Convert 1-based index back to 0-based index
Particle& p = d_particles[particleID];
vec4d& disp = d_disps[particleID];
vec4d& vel = d_vels[particleID];
// Start/end of this particle's step; reflection iterates on P_reflect.
const vec3d P = vec3d(p.x, p.y, p.z);
const vec3d P_end = P + vec3d(disp.x, disp.y, disp.z);
vec3d u_reflect = vec3d(vel.x, vel.y, vel.z);
vec3d P_reflect = P_end;
int tetID_bd = tetID;
for (int i = 0; i < 10; ++i) {//Maximum reflect 10 times
/*
const vec4i index_tet = d_tetInds[tetID_bd];
const vec3d A = d_tetVerts[index_tet.x];
const vec3d B = d_tetVerts[index_tet.y];
const vec3d C = d_tetVerts[index_tet.z];
const vec3d D = d_tetVerts[index_tet.w];
const vec4d bary = tetBaryCoord(P, A, B, C, D);
const vec4d bary_end = tetBaryCoord(P_reflect, A, B, C, D);
if (particleID == 9670)
printf("%d ReflectID%d tetID=%d tetbd=%d \nP (%f,%f,%f)->(%f,%f,%f)\nBary(%f,%f,%f,%f)->(%f,%f,%f,%f)\n",
particleID, i,
tetID, tetID_bd,
P.x, P.y, P.z, P_end.x, P_end.y, P_end.z,
bary.x, bary.y, bary.z, bary.w,
bary_end.x, bary_end.y, bary_end.z, bary_end.w);
*/
//P_end may still be out of the domain after a few reflection iterations,
//in which case the reflection is performed again.
SearchInfo info = baryTetSearch(particleID, P_reflect, tetID_bd, d_tetVerts, d_tetInds,
d_facets, d_tetfacets, d_faceinfos);
//P_end is within the domain now
if (info.tetID >= 0) {
tetID_bd = info.tetID;
break;
}
//P_end is still out of domain: decode the last in-domain tet and the facet
//that was crossed on exit.
tetID_bd = -(info.tetID + 1);//convert to 0-based index
const int faceID = info.faceID;
//[TODO] We can perform many different boundary treatment if based on faceID tags
//[TODO] Now we perform specular reflection on all boundaries
//Reflect P_end back into the domain (outputs alias the inputs).
specularReflect(P_reflect, u_reflect, //P_input, U_input
tetID_bd, faceID,d_tetVerts, d_faceinfos, d_facets,
P_reflect, u_reflect);//P_reflect, U_reflect
}
//Update reflected position (as a displacement from P) and velocity.
vec3d P_disp = P_reflect - P;
disp.x = P_disp.x;
disp.y = P_disp.y;
disp.z = P_disp.z;
vel.x = u_reflect.x;
vel.y = u_reflect.y;
vel.z = u_reflect.z;
//Update tetID (now a valid 0-based in-domain ID).
//disp.w = double(tetID);
d_tetIDs[particleID] = tetID_bd;
}
//Point-wise barycentric searching (initial valid tetID (>=0) should be provided by RTX locator)
// One thread per particle: refine d_tetIDs[particleID] by walking the mesh
// from the current tet to the tet actually containing the particle position.
// Particles already flagged out-of-domain (negative ID) are reported and skipped.
__global__ void baryQuery(Particle* d_particles,
int* d_tetIDs, int numParticles,
vec3d* d_tetVerts, vec4i* d_tetInds,
vec4i* d_facets, vec4i* d_tetfacets, FaceInfo* d_faceinfos
)
{
int particleID = threadIdx.x + blockDim.x * blockIdx.x;
if (particleID >= numParticles) return;
if (d_tetIDs[particleID] < 0) {
printf("[Warnning!] Particle %d is out of domain %d\n", particleID, d_tetIDs[particleID]);
return;
}
Particle& p = d_particles[particleID];
vec3d P = vec3d(p.x, p.y, p.z);
//In displacement mode the query point would be pts+disp; here the raw
//particle position is located.  The previous-timestep tetID is also
//accessible (useful when a particle exits the domain).
int tetID_init = d_tetIDs[particleID];
SearchInfo info = baryTetSearch(particleID,P, tetID_init, d_tetVerts, d_tetInds,
d_facets, d_tetfacets, d_faceinfos);
int tetID_search = info.tetID;
//if (tetID_search != tetID_init)
//	printf("Particle%d RTX TetID is not correct! %d->%d\n", particleID, tetID_init, tetID_search);
d_tetIDs[particleID] = tetID_search;
}
//Displacement mode searching (initial tetID from last timestep)
// One thread per particle: locates pos+disp by a barycentric walk seeded with
// the cell the particle occupied last timestep; result overwrites d_tetIDs.
__global__ void baryQueryDisp(Particle* d_particles, vec4d* d_disps,
	int* d_tetIDs, int numParticles,
	vec3d* d_tetVerts, vec4i* d_tetInds,
	vec4i* d_facets, vec4i* d_tetfacets, FaceInfo* d_faceinfos
)
{
	int particleID = threadIdx.x + blockDim.x * blockIdx.x;
	if (particleID >= numParticles) return;
	Particle& p = d_particles[particleID];
	vec4d& disp = d_disps[particleID];
	vec3d P = vec3d(p.x, p.y, p.z);
	// Query point is the displaced position.
	P = P + vec3d(disp.x, disp.y, disp.z);
	//Displacement mode is query pts+disp
	//Also we can access the tetID in previous timestep (useful for particle exit the domain)
	int tetID_init = d_tetIDs[particleID];
	SearchInfo info = baryTetSearch(particleID, P, tetID_init, d_tetVerts, d_tetInds,
		d_facets, d_tetfacets, d_faceinfos);
	int tetID_search = info.tetID;
	//if (tetID_search != tetID_init)
	//	printf("Particle%d RTX TetID is not correct! %d->%d\n", particleID, tetID_RTX, tetID_search);
	//disp.w = double(tetID_init);
	d_tetIDs[particleID] = tetID_search;
}
// Displacement-mode searching with an RTX-provided start cell.
// A negative entry in d_tetIDs encodes -(lastInDomainTet + 1), i.e. the last
// tet the particle was inside before leaving the domain.
__global__ void baryQueryDisp_RTX(Particle* d_particles, vec4d* d_disps,
	int* d_tetIDs, int numParticles,
	vec3d* d_tetVerts, vec4i* d_tetInds,
	vec4i* d_facets, vec4i* d_tetfacets, FaceInfo* d_faceinfos
)
{
	int particleID = threadIdx.x + blockDim.x * blockIdx.x;
	if (particleID >= numParticles) return;
	Particle& p = d_particles[particleID];
	vec4d& disp = d_disps[particleID];
	vec3d P = vec3d(p.x, p.y, p.z);
	// Query point is the displaced position.
	P = P + vec3d(disp.x, disp.y, disp.z);
	int tetID_RTX = d_tetIDs[particleID];
	int tetID_last = -(tetID_RTX + 1);//Decode the 1-based out-of-domain encoding
	// Barycentric searching needs a valid (>=0) start ID: keep the RTX hit when
	// it is in-domain, otherwise fall back to the decoded last-timestep tet.
	tetID_RTX = tetID_RTX < 0
		? tetID_last
		: tetID_RTX;
	if (tetID_RTX < 0 && tetID_last < 0) {//No GOOD tetID available for this particle
		printf("[Warnning] particle %d is really out of domain %d/%d\n", particleID, tetID_RTX, tetID_last);
		return;
	}
	// BUGFIX: the original unconditionally did `tetID_RTX = tetID_last;` here,
	// which made the fallback selection above dead code and, whenever the RTX
	// hit was valid (>= 0), fed a negative start index into baryTetSearch
	// (out-of-bounds read of d_tetInds). That line is removed.
	SearchInfo info = baryTetSearch(particleID, P, tetID_RTX, d_tetVerts, d_tetInds,
		d_facets, d_tetfacets, d_faceinfos);
	d_tetIDs[particleID] = info.tetID;
}
// Point-query mode: RTX broad phase locates each particle's cell, then a
// barycentric narrow phase walks the mesh to correct any mislocation.
// Results are written into out_tetIDs (negative = out of domain).
void RTQuery(OptixQuery& cellLocator, DeviceTetMesh devMesh, double4* d_particles, int* out_tetIDs, int numParticles)
{
	// Fixed 128-thread blocks, one thread per particle.
	const int threadsPerBlock = 128;
	const int numBlocks = divRoundUp(numParticles, threadsPerBlock);
	// Broad phase: hardware-accelerated cell location.
	cellLocator.query_sync(d_particles, out_tetIDs, numParticles);
	cudaCheck(hipDeviceSynchronize());
	// Narrow phase: barycentric refinement of the RTX result.
	baryQuery << <numBlocks, threadsPerBlock >> > (d_particles,
		out_tetIDs, numParticles,
		devMesh.d_positions, devMesh.d_indices,
		devMesh.d_facets, devMesh.d_tetfacets, devMesh.d_faceinfos);
	cudaCheck(hipDeviceSynchronize());
}
// Displacement-query mode: the RTX locator queries pos+disp directly; the
// barycentric narrow phase is currently disabled (commented out below).
// NOTE(review): blockDims/gridDims are only used by the disabled launch.
void RTQuery(OptixQuery& cellLocator, DeviceTetMesh devMesh,
	double4* d_particles, vec4d* d_disps, int* out_tetIDs, int numParticles)
{
	int blockDims = 128;
	int gridDims = divRoundUp(numParticles, blockDims);
	//Board-phase fast RTX location (optional)
	cellLocator.query_sync(d_particles, (double4*)d_disps, out_tetIDs, numParticles);
	cudaCheck(hipDeviceSynchronize());
	//Narrow-phase barycentric searching location to fix some floating point error (optional)
	//If barycentric searching is not enabled, some particle may locate in one of neigoring cell
	//or considered as out-of-domain particle
	/*
	baryQueryDisp_RTX << <gridDims, blockDims >> > (d_particles, d_disps,
		out_tetIDs, numParticles,
		devMesh.d_positions, devMesh.d_indices,
		devMesh.d_facets, devMesh.d_tetfacets, devMesh.d_faceinfos);
	cudaCheck(hipDeviceSynchronize());
	*/
}
// Software-only displacement query: no RTX broad phase; the barycentric walk
// alone relocates every particle, seeded with its last-timestep cell.
void RTQuery(DeviceTetMesh devMesh, double4* d_particles, vec4d* d_disps, int* out_tetIDs, int numParticles)
{
	const int threadsPerBlock = 128;
	const int numBlocks = divRoundUp(numParticles, threadsPerBlock);
	baryQueryDisp << <numBlocks, threadsPerBlock >> > (d_particles, d_disps,
		out_tetIDs, numParticles,
		devMesh.d_positions, devMesh.d_indices,
		devMesh.d_facets, devMesh.d_tetfacets, devMesh.d_faceinfos);
	cudaCheck(hipDeviceSynchronize());
}
void RTWallReflect(DeviceTetMesh devMesh, int* d_tetIDs, Particle* d_particles, vec4d* d_disps, vec4d* d_vels, int numParticles)
{
	/*
	Reflect particles back into the domain when their displaced position has
	left the mesh (encoded as a negative entry in d_tetIDs).
	Updates d_disps and d_vels in place and rewrites d_tetIDs with the final
	in-domain cell. Multiple reflections are applied if a particle exits
	through a corner. Boundary treatment is currently specular reflection.
	*/
	int blockDims = 128;
	int gridDims = divRoundUp(numParticles, blockDims);
	//Boundary condition: currently it is specular reflection
	RTreflection << <gridDims, blockDims >> > (d_particles, d_disps, d_vels,
		d_tetIDs, numParticles,
		devMesh.d_positions, devMesh.d_indices,
		devMesh.d_facets, devMesh.d_tetfacets, devMesh.d_faceinfos);
	cudaCheck(hipDeviceSynchronize());
}
//-----------------------Debug------------------------
// Debug kernel: dumps one particle's position, displaced position, their
// barycentric coordinates w.r.t. its current tet, and the tet's vertices.
__global__
void printRTTet(Particle* d_particles, vec4d* d_disps,
	int* d_tetIDs, int numParticles,
	vec3d* d_tetVerts, vec4i* d_tetInds
) {
	int particleID = threadIdx.x + blockDim.x * blockIdx.x;
	if (particleID >= numParticles) return;
	Particle& p = d_particles[particleID];
	vec4d& disp = d_disps[particleID];
	const vec3d P = vec3d(p.x, p.y, p.z);
	const vec3d P_next = P + vec3d(disp.x, disp.y, disp.z);
	// NOTE(review): assumes d_tetIDs[particleID] >= 0 — a negative (out-of-
	// domain) ID would index d_tetInds out of bounds; debug use only.
	int tetID = d_tetIDs[particleID];
	const vec4i index_tet = d_tetInds[tetID];
	const vec3d A = d_tetVerts[index_tet.x];
	const vec3d B = d_tetVerts[index_tet.y];
	const vec3d C = d_tetVerts[index_tet.z];
	const vec3d D = d_tetVerts[index_tet.w];
	//
	const vec4d baryCoord = tetBaryCoord(P, A, B, C, D);
	const vec4d baryCoord_next = tetBaryCoord(P_next, A, B, C, D);
	printf("\nParticle (%.15lf,%.15lf,%.15lf)->(%.15lf,%.15lf,%.15lf) \nBaryP (%.15lf,%.15lf,%.15lf,%.15f)\nBaryP_next (%.15lf,%.15lf,%.15lf,%.15f)\n",
		P.x, P.y, P.z, P_next.x, P_next.y, P_next.z,
		baryCoord.x, baryCoord.y, baryCoord.z, baryCoord.w,
		baryCoord_next.x, baryCoord_next.y, baryCoord_next.z, baryCoord_next.w);
	printf("Tet%d (%d,%d,%d,%d)\n (%.15lf,%.15lf,%.15lf)\n (%.15lf,%.15lf,%.15lf)\n (%.15lf,%.15lf,%.15lf)\n (%.15lf,%.15lf,%.15lf)\n",
		tetID, index_tet.x, index_tet.y, index_tet.z, index_tet.w,
		A.x, A.y, A.z, B.x, B.y, B.z, C.x, C.y, C.z, D.x, D.y, D.z);
	printf("TetID orig %d\n", tetID);
}
// Manual regression check: a single hard-coded particle whose RTX location
// was once wrong is located and its tet/barycentric data printed.
// NOTE(review): d_vels and d_triIDs are allocated but unused here.
void advect::testRT(OptixQuery& cellLocator, DeviceTetMesh devMesh)
{
	int numParticles = 1;
	int blockDims = 128;
	int gridDims = divRoundUp(numParticles, blockDims);
	//Example 1. Cell locator find a wrong tetId
	thrust::device_vector<Particle> particles(numParticles, make_double4(77.407653286554819, -0.226291355139613, -655.599157683661019, true));
	thrust::device_vector<vec4d> disps(numParticles, vec4d(-0.000234741263335, -0.000385634696309, 0.000501310939382, 0.0));
	Particle* d_particles = thrust::raw_pointer_cast(particles.data());
	vec4d* d_disps = thrust::raw_pointer_cast(disps.data());
	thrust::device_vector<vec4d> vels(numParticles, vec4d(0.1, -0.1, 0.01, 0.0));
	vec4d* d_vels = thrust::raw_pointer_cast(vels.data());
	thrust::device_vector<int> triIDs(numParticles, -2);
	int* d_triIDs = thrust::raw_pointer_cast(triIDs.data());
	thrust::device_vector<int> tetIDs(numParticles, -1);
	int* d_tetIDs = thrust::raw_pointer_cast(tetIDs.data());
	//Get the initial cell location
	cellLocator.query_sync(d_particles, d_tetIDs, numParticles);
	cudaCheck(hipDeviceSynchronize());
	//Narrow-phase barycentric location
	baryQuery << <gridDims, blockDims >> > (d_particles,
		d_tetIDs, numParticles,
		devMesh.d_positions, devMesh.d_indices,
		devMesh.d_facets, devMesh.d_tetfacets, devMesh.d_faceinfos);
	cudaCheck(hipDeviceSynchronize());
	//Some movement to obtain the disp
	printRTTet << <gridDims, blockDims >> > (d_particles, d_disps,
		d_tetIDs, numParticles,
		devMesh.d_positions, devMesh.d_indices);
	cudaCheck(hipDeviceSynchronize());
}
}
| c8e779b35c9fa8af2159bb1f17301f958a80176a.cu | // ======================================================================== //
// Copyright 2019-2020 The Collaborators //
// //
// Licensed under the Apache License, Version 2.0 (the "License"); //
// you may not use this file except in compliance with the License. //
// You may obtain a copy of the License at //
// //
// http://www.apache.org/licenses/LICENSE-2.0 //
// //
// Unless required by applicable law or agreed to in writing, software //
// distributed under the License is distributed on an "AS IS" BASIS, //
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
// See the License for the specific language governing permissions and //
// limitations under the License. //
// ======================================================================== //
#include "cuda_runtime_api.h"
#include "owl/common/math/vec.h"
#include "owl/common/math/AffineSpace.h"
#include <thrust/device_vector.h>
#include "RTQuery.h"
//#define DEBUG_RT
namespace advect {
// Result record of a barycentric walk through the tet mesh.
struct SearchInfo {
	int tetID;//global tetID, 1-based index(tetID<0), 0-based index (tetID>0)
	int faceID;//global face ID (last facet crossed; -1 if the walk never left the start tet)
};
// Walks the tet mesh from tetID_start toward the tet containing P, at each
// step crossing the facet whose barycentric weight is most negative.
// Returns the hosting tet (>= 0) or -(lastInDomainTet + 1) if P lies outside
// the mesh, plus the last facet crossed. The walk is capped at 50 hops.
__device__ SearchInfo baryTetSearch(int particleID,vec3d P, int tetID_start, vec3d* d_tetVerts, vec4i* d_tetInds,
	vec4i* d_facets, vec4i* d_tetfacets, FaceInfo* d_faceinfos) {
	//Search hosting cell of point P start from tetID_start
	int tetID_search = tetID_start;
	int faceID = -1;
	int tetID_previous = tetID_search;
	for (int i = 0; i < 50; ++i) {//Search maximum 50 times
		const vec4i index_tet = d_tetInds[tetID_search];
		const vec3d A = d_tetVerts[index_tet.x];
		const vec3d B = d_tetVerts[index_tet.y];
		const vec3d C = d_tetVerts[index_tet.z];
		const vec3d D = d_tetVerts[index_tet.w];
		const vec4d bary = tetBaryCoord(P, A, B, C, D);
		const double wmin = reduce_min(bary);
		//Case1. This is a right cell (all barycentric weights non-negative)
		if (wmin >= 0.0) break;
		else {//Start to move one of its neighbors
			//where the facet has minimum negative number has largest possibility
			const int exitfaceID = arg_min(bary);
			faceID = d_tetfacets[tetID_search][exitfaceID];
			//If facet neighbor is out of domain(ID<0), tetID_previous is the exit tet
			tetID_previous = tetID_search;
			//Update search tetID to facet neighbor (the tet on the other side
			//of the crossed facet, whichever of front/back we are not in)
			tetID_search =
				d_faceinfos[faceID].front == tetID_search
				? d_faceinfos[faceID].back
				: d_faceinfos[faceID].front;
			/*
			if (particleID== 9670)
				printf("%d SearchID%d tetID=%d tetID_i-1 =%d tetSearch=%d \nP (%f,%f,%f)\n Bary(%f,%f,%f,%f)\n",
					particleID,i,
					tetID_start, tetID_previous,tetID_search+1,//1-based to 0-based index facetID
					P.x, P.y, P.z,
					bary.x, bary.y, bary.z, bary.w);
			*/
			//Case 2. The particle is out of domain, tetID_search = -(last in domain tet+1)
			if (tetID_search < 0) {
				tetID_search = -(tetID_previous+1);//Set as 1-based index to avoid -0=0
				break;
			}
			//Case3. start to next loop of search
		}
	}
	SearchInfo info;
	info.tetID = tetID_search;
	info.faceID = faceID;
	return info;
}
// Mirrors position P_end and velocity Vel about the plane of boundary facet
// faceID (plane through vertex A with normal from triNorm). Outputs are
// written to P_reflect / u_reflect; aliasing outputs with inputs is allowed
// since both inputs are taken by value.
// NOTE(review): assumes triNorm returns a unit normal — confirm in DeviceTetMesh.cuh.
__device__ void specularReflect(vec3d P_end, vec3d Vel, int tetID, int faceID,
	vec3d* d_tetVerts, FaceInfo* d_faceinfos, vec4i* d_facets,
	vec3d &P_reflect, vec3d &u_reflect) {
	const vec4i index = d_facets[faceID];
	const vec3d A = d_tetVerts[index.x];
	const vec3d B = d_tetVerts[index.y];
	const vec3d C = d_tetVerts[index.z];
	vec3d norm = triNorm(A, B, C);//DeviceTetMesh.cuh
	if (d_faceinfos[faceID].back == tetID)//inner normal vector
		norm = -norm;
	//this equation is not sensitive to norm direction
	P_reflect = P_end - (1.0 + 1.0) * dot(P_end - A, norm) * norm;
	u_reflect = Vel - (1.0 + 1.0) * dot(Vel, norm) * norm;
}
// One thread per particle: if the particle's displaced position left the
// domain (negative encoded tetID), bounce it back in via up to 10 specular
// reflections, then update its displacement, velocity, and tetID in place.
__global__ void RTreflection(Particle* d_particles, vec4d* d_disps, vec4d* d_vels,
	int* d_tetIDs, int numParticles,
	vec3d* d_tetVerts, vec4i* d_tetInds,
	vec4i* d_facets, vec4i* d_tetfacets, FaceInfo* d_faceinfos)
{
	int particleID = threadIdx.x + blockDim.x * blockIdx.x;
	if (particleID >= numParticles) return;
	int tetID = d_tetIDs[particleID];
	// In-domain particles (tetID >= 0) need no boundary treatment.
	if (tetID >= 0) return;
	tetID = -(tetID + 1);//Convert 1-based index back to 0-based index
	Particle& p = d_particles[particleID];
	vec4d& disp = d_disps[particleID];
	vec4d& vel = d_vels[particleID];
	const vec3d P = vec3d(p.x, p.y, p.z);
	const vec3d P_end = P + vec3d(disp.x, disp.y, disp.z);
	vec3d u_reflect = vec3d(vel.x, vel.y, vel.z);
	vec3d P_reflect = P_end;
	int tetID_bd = tetID;
	for (int i = 0; i < 10; ++i) {//Maximum reflect 10 times
		/*
		const vec4i index_tet = d_tetInds[tetID_bd];
		const vec3d A = d_tetVerts[index_tet.x];
		const vec3d B = d_tetVerts[index_tet.y];
		const vec3d C = d_tetVerts[index_tet.z];
		const vec3d D = d_tetVerts[index_tet.w];
		const vec4d bary = tetBaryCoord(P, A, B, C, D);
		const vec4d bary_end = tetBaryCoord(P_reflect, A, B, C, D);
		if (particleID == 9670)
			printf("%d ReflectID%d tetID=%d tetbd=%d \nP (%f,%f,%f)->(%f,%f,%f)\nBary(%f,%f,%f,%f)->(%f,%f,%f,%f)\n",
				particleID, i,
				tetID, tetID_bd,
				P.x, P.y, P.z, P_end.x, P_end.y, P_end.z,
				bary.x, bary.y, bary.z, bary.w,
				bary_end.x, bary_end.y, bary_end.z, bary_end.w);
		*/
		//P_end may still out of domain after few reflection iters
		//and perform reflection again
		SearchInfo info = baryTetSearch(particleID, P_reflect, tetID_bd, d_tetVerts, d_tetInds,
			d_facets, d_tetfacets, d_faceinfos);
		//P_end is within the domain now
		if (info.tetID >= 0) {
			tetID_bd = info.tetID;
			break;
		}
		//P_end is still out of domain
		tetID_bd = -(info.tetID + 1);//convert to 0-based index
		const int faceID = info.faceID;
		//[TODO] We can perform many different boundary treatment if based on faceID tags
		//[TODO] Now we perform specular reflection on all boundaries
		//Reflect P_end back to domain
		specularReflect(P_reflect, u_reflect, //P_input, U_input
			tetID_bd, faceID,d_tetVerts, d_faceinfos, d_facets,
			P_reflect, u_reflect);//P_reflect, U_reflect
	}
	//Update reflect pos, disp and velocity so P + disp lands in-domain
	vec3d P_disp = P_reflect - P;
	disp.x = P_disp.x;
	disp.y = P_disp.y;
	disp.z = P_disp.z;
	vel.x = u_reflect.x;
	vel.y = u_reflect.y;
	vel.z = u_reflect.z;
	//Update tetID
	//disp.w = double(tetID);
	d_tetIDs[particleID] = tetID_bd;
}
//Point-wise barycentric searching (initial valid tetID (>=0) should be provided by RTX locator)
//One thread per particle: refines the locator-provided cell ID by walking the
//tet mesh and writes the refined ID back into d_tetIDs.
__global__ void baryQuery(Particle* d_particles,
	int* d_tetIDs, int numParticles,
	vec3d* d_tetVerts, vec4i* d_tetInds,
	vec4i* d_facets, vec4i* d_tetfacets, FaceInfo* d_faceinfos
)
{
	const int particleID = threadIdx.x + blockDim.x * blockIdx.x;
	if (particleID >= numParticles) return;
	const int startTet = d_tetIDs[particleID];
	// No valid start cell: the locator flagged this particle out of domain.
	if (startTet < 0) {
		printf("[Warnning!] Particle %d is out of domain %d\n", particleID, startTet);
		return;
	}
	Particle& p = d_particles[particleID];
	const vec3d pos = vec3d(p.x, p.y, p.z);
	// Walk the mesh from the provided cell and store the refined ID.
	const SearchInfo result = baryTetSearch(particleID, pos, startTet,
		d_tetVerts, d_tetInds, d_facets, d_tetfacets, d_faceinfos);
	d_tetIDs[particleID] = result.tetID;
}
//Displacement mode searching (initial tetID from last timestep)
//One thread per particle: locates pos+disp by a barycentric walk seeded with
//the cell the particle occupied last timestep; result overwrites d_tetIDs.
__global__ void baryQueryDisp(Particle* d_particles, vec4d* d_disps,
	int* d_tetIDs, int numParticles,
	vec3d* d_tetVerts, vec4i* d_tetInds,
	vec4i* d_facets, vec4i* d_tetfacets, FaceInfo* d_faceinfos
)
{
	const int particleID = threadIdx.x + blockDim.x * blockIdx.x;
	if (particleID >= numParticles) return;
	Particle& p = d_particles[particleID];
	const vec4d& disp = d_disps[particleID];
	// Query point = current position + stored displacement.
	const vec3d target = vec3d(p.x, p.y, p.z) + vec3d(disp.x, disp.y, disp.z);
	// The previous timestep's cell seeds the walk.
	const int seedTet = d_tetIDs[particleID];
	const SearchInfo found = baryTetSearch(particleID, target, seedTet,
		d_tetVerts, d_tetInds, d_facets, d_tetfacets, d_faceinfos);
	d_tetIDs[particleID] = found.tetID;
}
// Displacement-mode searching with an RTX-provided start cell.
// A negative entry in d_tetIDs encodes -(lastInDomainTet + 1), i.e. the last
// tet the particle was inside before leaving the domain.
__global__ void baryQueryDisp_RTX(Particle* d_particles, vec4d* d_disps,
	int* d_tetIDs, int numParticles,
	vec3d* d_tetVerts, vec4i* d_tetInds,
	vec4i* d_facets, vec4i* d_tetfacets, FaceInfo* d_faceinfos
)
{
	int particleID = threadIdx.x + blockDim.x * blockIdx.x;
	if (particleID >= numParticles) return;
	Particle& p = d_particles[particleID];
	vec4d& disp = d_disps[particleID];
	vec3d P = vec3d(p.x, p.y, p.z);
	// Query point is the displaced position.
	P = P + vec3d(disp.x, disp.y, disp.z);
	int tetID_RTX = d_tetIDs[particleID];
	int tetID_last = -(tetID_RTX + 1);//Decode the 1-based out-of-domain encoding
	// Barycentric searching needs a valid (>=0) start ID: keep the RTX hit when
	// it is in-domain, otherwise fall back to the decoded last-timestep tet.
	tetID_RTX = tetID_RTX < 0
		? tetID_last
		: tetID_RTX;
	if (tetID_RTX < 0 && tetID_last < 0) {//No GOOD tetID available for this particle
		printf("[Warnning] particle %d is really out of domain %d/%d\n", particleID, tetID_RTX, tetID_last);
		return;
	}
	// BUGFIX: the original unconditionally did `tetID_RTX = tetID_last;` here,
	// which made the fallback selection above dead code and, whenever the RTX
	// hit was valid (>= 0), fed a negative start index into baryTetSearch
	// (out-of-bounds read of d_tetInds). That line is removed.
	SearchInfo info = baryTetSearch(particleID, P, tetID_RTX, d_tetVerts, d_tetInds,
		d_facets, d_tetfacets, d_faceinfos);
	d_tetIDs[particleID] = info.tetID;
}
// Point-query mode: RTX broad phase locates each particle's cell, then a
// barycentric narrow phase walks the mesh to correct any mislocation.
// Results are written into out_tetIDs (negative = out of domain).
void RTQuery(OptixQuery& cellLocator, DeviceTetMesh devMesh, double4* d_particles, int* out_tetIDs, int numParticles)
{
	// Fixed 128-thread blocks, one thread per particle.
	const int threadsPerBlock = 128;
	const int numBlocks = divRoundUp(numParticles, threadsPerBlock);
	// Broad phase: hardware-accelerated cell location.
	cellLocator.query_sync(d_particles, out_tetIDs, numParticles);
	cudaCheck(cudaDeviceSynchronize());
	// Narrow phase: barycentric refinement of the RTX result.
	baryQuery << <numBlocks, threadsPerBlock >> > (d_particles,
		out_tetIDs, numParticles,
		devMesh.d_positions, devMesh.d_indices,
		devMesh.d_facets, devMesh.d_tetfacets, devMesh.d_faceinfos);
	cudaCheck(cudaDeviceSynchronize());
}
// Displacement-query mode: the RTX locator queries pos+disp directly; the
// barycentric narrow phase is currently disabled (commented out below).
// NOTE(review): blockDims/gridDims are only used by the disabled launch.
void RTQuery(OptixQuery& cellLocator, DeviceTetMesh devMesh,
	double4* d_particles, vec4d* d_disps, int* out_tetIDs, int numParticles)
{
	int blockDims = 128;
	int gridDims = divRoundUp(numParticles, blockDims);
	//Board-phase fast RTX location (optional)
	cellLocator.query_sync(d_particles, (double4*)d_disps, out_tetIDs, numParticles);
	cudaCheck(cudaDeviceSynchronize());
	//Narrow-phase barycentric searching location to fix some floating point error (optional)
	//If barycentric searching is not enabled, some particle may locate in one of neigoring cell
	//or considered as out-of-domain particle
	/*
	baryQueryDisp_RTX << <gridDims, blockDims >> > (d_particles, d_disps,
		out_tetIDs, numParticles,
		devMesh.d_positions, devMesh.d_indices,
		devMesh.d_facets, devMesh.d_tetfacets, devMesh.d_faceinfos);
	cudaCheck(cudaDeviceSynchronize());
	*/
}
// Software-only displacement query: no RTX broad phase; the barycentric walk
// alone relocates every particle, seeded with its last-timestep cell.
void RTQuery(DeviceTetMesh devMesh, double4* d_particles, vec4d* d_disps, int* out_tetIDs, int numParticles)
{
	const int threadsPerBlock = 128;
	const int numBlocks = divRoundUp(numParticles, threadsPerBlock);
	baryQueryDisp << <numBlocks, threadsPerBlock >> > (d_particles, d_disps,
		out_tetIDs, numParticles,
		devMesh.d_positions, devMesh.d_indices,
		devMesh.d_facets, devMesh.d_tetfacets, devMesh.d_faceinfos);
	cudaCheck(cudaDeviceSynchronize());
}
void RTWallReflect(DeviceTetMesh devMesh, int* d_tetIDs, Particle* d_particles, vec4d* d_disps, vec4d* d_vels, int numParticles)
{
	/*
	Reflect particles back into the domain when their displaced position has
	left the mesh (encoded as a negative entry in d_tetIDs).
	Updates d_disps and d_vels in place and rewrites d_tetIDs with the final
	in-domain cell. Multiple reflections are applied if a particle exits
	through a corner. Boundary treatment is currently specular reflection.
	*/
	int blockDims = 128;
	int gridDims = divRoundUp(numParticles, blockDims);
	//Boundary condition: currently it is specular reflection
	RTreflection << <gridDims, blockDims >> > (d_particles, d_disps, d_vels,
		d_tetIDs, numParticles,
		devMesh.d_positions, devMesh.d_indices,
		devMesh.d_facets, devMesh.d_tetfacets, devMesh.d_faceinfos);
	cudaCheck(cudaDeviceSynchronize());
}
//-----------------------Debug------------------------
// Debug kernel: dumps one particle's position, displaced position, their
// barycentric coordinates w.r.t. its current tet, and the tet's vertices.
__global__
void printRTTet(Particle* d_particles, vec4d* d_disps,
	int* d_tetIDs, int numParticles,
	vec3d* d_tetVerts, vec4i* d_tetInds
) {
	int particleID = threadIdx.x + blockDim.x * blockIdx.x;
	if (particleID >= numParticles) return;
	Particle& p = d_particles[particleID];
	vec4d& disp = d_disps[particleID];
	const vec3d P = vec3d(p.x, p.y, p.z);
	const vec3d P_next = P + vec3d(disp.x, disp.y, disp.z);
	// NOTE(review): assumes d_tetIDs[particleID] >= 0 — a negative (out-of-
	// domain) ID would index d_tetInds out of bounds; debug use only.
	int tetID = d_tetIDs[particleID];
	const vec4i index_tet = d_tetInds[tetID];
	const vec3d A = d_tetVerts[index_tet.x];
	const vec3d B = d_tetVerts[index_tet.y];
	const vec3d C = d_tetVerts[index_tet.z];
	const vec3d D = d_tetVerts[index_tet.w];
	//
	const vec4d baryCoord = tetBaryCoord(P, A, B, C, D);
	const vec4d baryCoord_next = tetBaryCoord(P_next, A, B, C, D);
	printf("\nParticle (%.15lf,%.15lf,%.15lf)->(%.15lf,%.15lf,%.15lf) \nBaryP (%.15lf,%.15lf,%.15lf,%.15f)\nBaryP_next (%.15lf,%.15lf,%.15lf,%.15f)\n",
		P.x, P.y, P.z, P_next.x, P_next.y, P_next.z,
		baryCoord.x, baryCoord.y, baryCoord.z, baryCoord.w,
		baryCoord_next.x, baryCoord_next.y, baryCoord_next.z, baryCoord_next.w);
	printf("Tet%d (%d,%d,%d,%d)\n (%.15lf,%.15lf,%.15lf)\n (%.15lf,%.15lf,%.15lf)\n (%.15lf,%.15lf,%.15lf)\n (%.15lf,%.15lf,%.15lf)\n",
		tetID, index_tet.x, index_tet.y, index_tet.z, index_tet.w,
		A.x, A.y, A.z, B.x, B.y, B.z, C.x, C.y, C.z, D.x, D.y, D.z);
	printf("TetID orig %d\n", tetID);
}
// Manual regression check: a single hard-coded particle whose RTX location
// was once wrong is located and its tet/barycentric data printed.
// NOTE(review): d_vels and d_triIDs are allocated but unused here.
void advect::testRT(OptixQuery& cellLocator, DeviceTetMesh devMesh)
{
	int numParticles = 1;
	int blockDims = 128;
	int gridDims = divRoundUp(numParticles, blockDims);
	//Example 1. Cell locator find a wrong tetId
	thrust::device_vector<Particle> particles(numParticles, make_double4(77.407653286554819, -0.226291355139613, -655.599157683661019, true));
	thrust::device_vector<vec4d> disps(numParticles, vec4d(-0.000234741263335, -0.000385634696309, 0.000501310939382, 0.0));
	Particle* d_particles = thrust::raw_pointer_cast(particles.data());
	vec4d* d_disps = thrust::raw_pointer_cast(disps.data());
	thrust::device_vector<vec4d> vels(numParticles, vec4d(0.1, -0.1, 0.01, 0.0));
	vec4d* d_vels = thrust::raw_pointer_cast(vels.data());
	thrust::device_vector<int> triIDs(numParticles, -2);
	int* d_triIDs = thrust::raw_pointer_cast(triIDs.data());
	thrust::device_vector<int> tetIDs(numParticles, -1);
	int* d_tetIDs = thrust::raw_pointer_cast(tetIDs.data());
	//Get the initial cell location
	cellLocator.query_sync(d_particles, d_tetIDs, numParticles);
	cudaCheck(cudaDeviceSynchronize());
	//Narrow-phase barycentric location
	baryQuery << <gridDims, blockDims >> > (d_particles,
		d_tetIDs, numParticles,
		devMesh.d_positions, devMesh.d_indices,
		devMesh.d_facets, devMesh.d_tetfacets, devMesh.d_faceinfos);
	cudaCheck(cudaDeviceSynchronize());
	//Some movement to obtain the disp
	printRTTet << <gridDims, blockDims >> > (d_particles, d_disps,
		d_tetIDs, numParticles,
		devMesh.d_positions, devMesh.d_indices);
	cudaCheck(cudaDeviceSynchronize());
}
}
|
9c1308a331140dd951b22d21a26fdcdefd66302e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zgeadd.cu, normal z -> d, Wed Jan 2 14:18:50 2019
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to dlaset.
*/
// Computes dB = alpha*dA + dB elementwise for an m x n column-major matrix.
// Grid: ceil(m/BLK_X) x ceil(n/BLK_Y) blocks of BLK_X threads; each thread
// owns one row of a BLK_X x BLK_Y tile and loops across BLK_Y columns.
__global__
void dgeadd_full(
    int m, int n,
    double alpha,
    const double *dA, int ldda,
    double *dB, int lddb )
{
    int ind = blockIdx.x*BLK_X + threadIdx.x;
    int iby = blockIdx.y*BLK_Y;
    /* check if full block-column */
    bool full = (iby + BLK_Y <= n);
    /* do only rows inside matrix */
    if ( ind < m ) {
        dA += ind + iby*ldda;
        dB += ind + iby*lddb;
        if ( full ) {
            // full block-column: fixed trip count lets the compiler unroll
            #pragma unroll
            for( int j=0; j < BLK_Y; ++j ) {
                dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb];
            }
        }
        else {
            // partial block-column: ragged right edge, stop at column n
            for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
                dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb];
            }
        }
    }
}
/***************************************************************************//**
Purpose
-------
ZGEADD adds two matrices, dB = alpha*dA + dB.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
alpha DOUBLE PRECISION
The scalar alpha.
@param[in]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in,out]
dB DOUBLE PRECISION array, dimension (LDDB,N)
The m by n matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd
*******************************************************************************/
extern "C" void
magmablas_dgeadd(
    magma_int_t m, magma_int_t n,
    double alpha,
    magmaDouble_const_ptr dA, magma_int_t ldda,
    magmaDouble_ptr dB, magma_int_t lddb,
    magma_queue_t queue )
{
    // Validate arguments; info holds the negated 1-based index of the first
    // invalid one, reported through magma_xerbla.
    magma_int_t info = 0;
    if      ( m < 0 )             { info = -1; }
    else if ( n < 0 )             { info = -2; }
    else if ( ldda < max(1,m) )   { info = -5; }
    else if ( lddb < max(1,m) )   { info = -7; }
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }
    // Quick return for an empty matrix.
    if ( m == 0 || n == 0 ) {
        return;
    }
    // One BLK_X-thread block per BLK_X x BLK_Y tile of the matrix.
    dim3 threads( BLK_X, 1 );
    dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
    hipLaunchKernelGGL(( dgeadd_full), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
        m, n, alpha, dA, ldda, dB, lddb );
}
| 9c1308a331140dd951b22d21a26fdcdefd66302e.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zgeadd.cu, normal z -> d, Wed Jan 2 14:18:50 2019
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to dlaset.
*/
// Computes dB = alpha*dA + dB elementwise for an m x n column-major matrix.
// Grid: ceil(m/BLK_X) x ceil(n/BLK_Y) blocks of BLK_X threads; each thread
// owns one row of a BLK_X x BLK_Y tile and loops across BLK_Y columns.
__global__
void dgeadd_full(
    int m, int n,
    double alpha,
    const double *dA, int ldda,
    double *dB, int lddb )
{
    // Row handled by this thread and first column of this block's tile.
    const int row  = blockIdx.x*BLK_X + threadIdx.x;
    const int col0 = blockIdx.y*BLK_Y;
    // Threads mapped past the last row have nothing to do.
    if ( row >= m )
        return;
    const double *pA = dA + row + col0*ldda;
    double       *pB = dB + row + col0*lddb;
    if ( col0 + BLK_Y <= n ) {
        // Full tile column: fixed trip count lets the compiler unroll.
        #pragma unroll
        for( int j=0; j < BLK_Y; ++j ) {
            pB[j*lddb] = alpha*pA[j*ldda] + pB[j*lddb];
        }
    }
    else {
        // Ragged right edge: stop at column n.
        for( int j=0; j < BLK_Y && col0+j < n; ++j ) {
            pB[j*lddb] = alpha*pA[j*ldda] + pB[j*lddb];
        }
    }
}
/***************************************************************************//**
Purpose
-------
ZGEADD adds two matrices, dB = alpha*dA + dB.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
alpha DOUBLE PRECISION
The scalar alpha.
@param[in]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in,out]
dB DOUBLE PRECISION array, dimension (LDDB,N)
The m by n matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd
*******************************************************************************/
extern "C" void
magmablas_dgeadd(
    magma_int_t m, magma_int_t n,
    double alpha,
    magmaDouble_const_ptr dA, magma_int_t ldda,
    magmaDouble_ptr dB, magma_int_t lddb,
    magma_queue_t queue )
{
    // Validate arguments; info holds the negated 1-based index of the first
    // invalid one, reported through magma_xerbla.
    magma_int_t info = 0;
    if      ( m < 0 )             { info = -1; }
    else if ( n < 0 )             { info = -2; }
    else if ( ldda < max(1,m) )   { info = -5; }
    else if ( lddb < max(1,m) )   { info = -7; }
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }
    // Quick return for an empty matrix.
    if ( m == 0 || n == 0 ) {
        return;
    }
    // One BLK_X-thread block per BLK_X x BLK_Y tile of the matrix.
    dim3 threads( BLK_X, 1 );
    dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
    dgeadd_full<<< grid, threads, 0, queue->cuda_stream() >>>
        ( m, n, alpha, dA, ldda, dB, lddb );
}
|
17005266a13941eb83899552932a9616b3f89842.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**********************************************************************
* DistributorKernel.cu
* Copyright @ Cloud Computing Lab, CS, Wuhan University
* Author: Chundan Wei
* Email: danuno@qq.com
* Version: 1.0
* Date: Oct 22, 2014 | 10:36:45 AM
* Description:*
* Licence:*
**********************************************************************/
#include <memory.h>
#include <string.h>
#include <stdio.h>
#include "config/GConfig.h"
#include "misc/BaseStruct.h"
#include "misc/Buffer.h"
#include "device/DeviceGlobalVar.cuh"
#include "DistributorKernel.cuh"
#include "misc/Cell.cuh"
#include "misc/ObjBox.cuh"
#include "misc/UpdateQNode.cuh"
#include "misc/UpdateCacheArea.cuh"
#include "misc/QueryCacheArea.h"
#include "misc/SyncFuncGPU.cuh"
// Dynamic-parallelism child kernel: registers query `anchor_idx` into every
// grid cell of the rectangle [row_start..row_end] x [col_start..col_end].
// One thread per covered cell (flat thread index k maps to (i,j)); each thread
// appends the anchor to its cell's fixed-size query segment and records
// first-touch coverage of the cell. place_holder_query_dispatch_local is
// atomically bumped once per thread as a completion counter.
// NOTE(review): row_end is unused — the row is derived from k and the column
// span; the launcher presumably sizes the block to the rectangle. Confirm.
__global__ void DpProcess(int *place_holder_query_dispatch_local,
		int anchor_idx, int row_start, int row_end, int col_start, int col_end,
		GConfig *p_config, QueryQNode *node_enqueue_query_local) {
	int k = threadIdx.x;
	int i = row_start + k / (col_end - col_start + 1);
	int j = col_start + k % (col_end - col_start + 1);
	const int EDGE_CELL_NUM = p_config->edge_cell_num;
	const int LEN_SEG_CACHE_QUERY = p_config->len_seg_cache_query;
	int *cnt_queries_per_cell = node_enqueue_query_local->cnt_queries_per_cell;
	int *flag_cells_covered = node_enqueue_query_local->flag_cells_covered;
	volatile int *volatile idx_cells_covered =
			node_enqueue_query_local->idx_cells_covered;
	int *queries_per_cell = node_enqueue_query_local->queries_per_cell;
	int idx_cell = i * EDGE_CELL_NUM + j;
	int idx_tmp_0;
	// Reserve a slot in this cell's query segment.
	int idx_tmp = atomicAdd(&cnt_queries_per_cell[idx_cell], 1);
	atomicAdd(&check_tot_covered, 1);
	// Segment overflow: undo the reservation and count the overflow.
	if (idx_tmp >= LEN_SEG_CACHE_QUERY) {
		atomicAdd(&cnt_queries_per_cell[idx_cell], -1);
		atomicAdd(&cnt_over_seg_query, 1);
		atomicAdd(place_holder_query_dispatch_local, 1);
		return;
	}
	if (idx_tmp < 0) {
		atomicAdd(place_holder_query_dispatch_local, 1);
		return;
	}
	// First thread to touch this cell records it in the covered-cell list.
	if (atomicCAS(&flag_cells_covered[idx_cell], 0, 1) == 0) {
		atomicExch(&flag_cells_covered[idx_cell], 1);
		idx_tmp_0 = atomicAdd(&node_enqueue_query_local->tot_cells_covered, 1);
		idx_cells_covered[idx_tmp_0] = idx_cell;
	}
	queries_per_cell[idx_cell * LEN_SEG_CACHE_QUERY + idx_tmp] = anchor_idx;
	// Signal completion of this thread's dispatch work.
	atomicAdd(place_holder_query_dispatch_local, 1);
	return;
}
__global__ void DistributorKernel(GConfig * dev_p_gconfig,
UpdateType * dev_buffer_update, int * dev_cnt_update,
QueryType * dev_buffer_query, int * dev_cnt_query,
UpdateCacheArea * d_req_cache_update, QueryCacheArea * d_req_cache_query,
Grid * d_index_A, Grid * d_index_B, CircularQueue * d_queue_bkts_free,
MemItem<QueryType> * d_qd_obj_pool,
CircularQueue * d_queue_idx_anchor_free, QueryType * d_qd_query_type_pool,
int * d_qd_anchor_pool, int * d_place_holder, ManagedMemory * d_mm) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int tid_b = threadIdx.x;
const unsigned int barrier_step = gridDim.x;
volatile unsigned int barrier_fence = 0;
int anchor = 0, anchor_i = 0;
//__shared__ int occupation[12288];
//extern __shared__ int s[];
const int WRAPSIZE = 32;
int realWrapSize = 0;
int realBlockSize = 0;
const int wid = tid / WRAPSIZE;
const int tid_w = tid % WRAPSIZE;
GConfig * p_config = dev_p_gconfig;
//for debug
int check_dist_query_local = 0;
//int check_tot_covered_local = 0;
int check_cnt_enqueue = 0;
int check_lb_null = 0;
int check_rt_null = 0;
long long int start = -1, end;
//long long int copytime = 0;
//double sum_time = 0;
// printf("%d\n", gp_config->block_analysis_num);
// Initialize global variables
if (tid == 0) {
atomicExch(&buffer_exhausted, 0);
atomicExch(&rebalance, 0);
atomicExch(&launch_signal, 0);
atomicExch(&exit_query, 0);
atomicExch(&exit_update, 0);
}
__threadfence_system();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
printf("sync-1 over\n");
}
__syncthreads();
// Initialize local variables
const int EDGE_CELL_NUM = p_config->edge_cell_num;
const int TOT_CELLS = p_config->edge_cell_num * p_config->edge_cell_num;
const int TOT_VGROUP_UPDATE = p_config->side_len_vgroup
* p_config->side_len_vgroup;
//int query_dispatch_fence = 0;
//int *sync_holder_query_dispatch_local = &sync_holder_query_dispatch[wid];
int *place_holder_query_dispatch_local = &place_holder_query_dispatch[wid];
int *cache_memory_idx_query_dispatch_local =
&cache_memory_idx_query_dispatch[wid];
int *place_holder_update_dispatch_local = &place_holder_update_dispatch[wid
* UPDATE_DISPATCH_SEG];
ManagedMemory lmm = *d_mm;
const int QUERY_DISPATCH_WRAP_SIZE = 512;
// Initialize local update variables
Grid * local_index = NULL;
SecIndex * local_seci = NULL;
UpdateType *buffer_update = dev_buffer_update;
int offset_buffer_update = offset_buffer_update_rec;
const int TOTAL_UPDATE = p_config->max_obj_num * p_config->round_num;
int buffer_block_size_update = p_config->buffer_block_size;
int len_seg_cache_update_local = len_seg_cache_update;
int *local_d;
int *local_d_pool;
int *local_d_cnt;
int *local_i;
UpdateType *local_i_pool;
int *local_i_cnt;
int *local_f;
UpdateType *local_f_pool;
int *local_f_cnt;
CircularQueue * local_d_fqueue, *local_i_fqueue, *local_f_fqueue;
int idx_start, idx_last;
MemElement *tmp_me;
int d_seg, i_seg, f_seg;
Cell *p_cell_new = NULL;
Cell *p_cell_old = NULL;
//Cell *p_cell_marked = NULL;
int oid = -1;
SIEntry * p_sie = NULL;
UpdateType *ins_update;
// Initialize local query variables
QueryType *buffer_query = dev_buffer_query, *p_buffer_block_query,
req_query;
int offset_buffer_query = 0;
const int TOTAL_QUERY = p_config->max_query_num;
const int LEFT_UPDATE_AFTERSKIP = TOTAL_UPDATE
- p_config->query_skip_round_num * p_config->max_obj_num;
int buffer_block_size_query = offset_buffer_query_rec;
if (LEFT_UPDATE_AFTERSKIP > p_config->buffer_block_size) {
buffer_block_size_query = (int) ((double) p_config->buffer_block_size
* (double) p_config->max_query_num
/ (double) LEFT_UPDATE_AFTERSKIP);
} else {
buffer_block_size_query = p_config->max_query_num;
}
const int LEN_SEG_CACHE_QUERY = p_config->len_seg_cache_query;
QueryType *d_qd_query_type_local;
int *d_qd_anchor_local;
const int QUERY_TYPE_POOL_SIZE = lmm.mm_qd_query_type_pool.len;
const int QUERY_SKIP_NUM = p_config->query_skip_round_num
* p_config->max_obj_num;
const int QT_SIZE = p_config->qt_size;
const int QUEUE_SEG_LEN = p_config->len_seg_multiqueue;
const int MQUEUE_SIZE = p_config->len_multiqueue;
//int idx_query = 0;
int idx_cell = 0, idx_tmp = 0, idx_tmp_0 = 0;
Grid * p_grid = NULL;
Cell *p_cell_rt = NULL, *p_cell_lb = NULL;
int left_bottom;
int right_top;
int cell_num;
int row_start;
int col_end;
int row_end;
int col_start;
float xmin, ymin, xmax, ymax;
QueryQNode * node_enqueue_query_local = NULL;
//int tot_cells_covered = 0;
int *flag_cells_covered = NULL;
volatile int *volatile idx_cells_covered = NULL;
int *cnt_queries_per_cell = NULL;
int *queries_per_cell = NULL;
QueryType *buffer_block_query = NULL;
if (tid == 0) {
atomicExch(&launch_signal, 1);
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
printf("sync-2 over\n");
}
__syncthreads();
if (tid == 0) {
atomicExch(&req_cache_update->token0, 0);
atomicExch(&req_cache_update->token1, 0);
atomicExch(&req_cache_query->token0, 0);
atomicExch(&req_cache_query->token1, 0);
}
/* for(int k = 0; k<2; k++){
CircularQueue* fqueue_local;
for (int i = 0; i<3 ;i++)
{
if (i == 0)
fqueue_local = req_cache_update->array[k].fqueue_delete;
else if (i == 1)
fqueue_local = req_cache_update->array[k].fqueue_insert;
else if (i == 2)
fqueue_local = req_cache_update->array[k].fqueue_fresh;
anchor = tid;
while(anchor < fqueue_local->capacity){
if(anchor<TOT_VGROUP_UPDATE)
{
}
atomicExch(&fqueue_local->avail_idx_bkt[anchor], anchor);
anchor += blockDim.x*gridDim.x;
}
if(tid == 0){
atomicExch(&fqueue_local->cnt_elem, fqueue_local->capacity);
atomicExch(&fqueue_local->head, 0);
atomicExch(&fqueue_local->rear, 0);
}
}
}*/
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
while (offset_buffer_update < TOTAL_UPDATE) {
// Distribute Updates
if (offset_buffer_update + buffer_block_size_update >= TOTAL_UPDATE) {
buffer_block_size_update = TOTAL_UPDATE - offset_buffer_update;
}
while ((flag_switch_dist == 1
&& (req_cache_update->token0 == 1
|| req_cache_query->token0 == 1))
|| (flag_switch_dist == 0
&& (req_cache_update->token1 == 1
|| req_cache_query->token1 == 1))) {
if (tid == 0)
exp_hunger_dist0++;
}
if (query_time_per_period != 0 && update_time_per_period != 0) {
double Tdis = dis_time_per_period;
double Tupd = update_time_per_period;
double Tque = query_time_per_period;
double Dis = p_config->block_analysis_num;
double Upd = p_config->block_update_num;
double Que = p_config->block_query_num;
double TdisDis = Tdis * (double) Dis;
double TupdUpd = Tupd * (double) Upd;
double TqueQue = Tque * (double) Que;
double x_double =
(TdisDis * (Upd + Que) - Dis * (TupdUpd + TqueQue))\
/ (TdisDis + TupdUpd + TqueQue);
double y_double = TupdUpd / TdisDis * x_double + TupdUpd / Tdis
- Upd;
if (tid == 0)
printf("relance, x is %.2f, y is %.2f \n", x_double, y_double);
int x_int = (int) (x_double);
int y_int = (int) (y_double);
if (offset_buffer_update
<= (p_config->query_skip_round_num + 2)
* p_config->max_obj_num) {
x_double = .0f;
y_double = .0f;
x_int = 0;
y_int = 0;
}
#if REBALANCE == 0
x_double = 0;
y_int = 0;
x_int = 0;
y_double = 0;
#endif
if (abs(x_int) >= 1 || abs(y_int) >= 1) {
int newDis = Dis + x_int;
int newQue = Que - x_int;
if (abs(newDis) < 1) {
x_int = 1 - Dis;
newDis = Dis + x_int;
newQue = Que - x_int;
}
if (abs(newQue) < 1) {
x_int = Que - 1;
newDis = Dis + x_int;
newQue = Que - x_int;
}
int newUpd = Upd + y_int;
int newQue2 = newQue - y_int;
if (abs(newUpd) < 1) {
y_int = 1 - Upd;
newUpd = Upd + y_int;
newQue2 = newQue - y_int;
}
if (abs(newQue2) < 1) {
y_int = newQue - 1;
newUpd = Upd + y_int;
newQue2 = newQue - y_int;
}
p_config->block_analysis_num = newDis;
p_config->block_update_num = newUpd;
p_config->block_query_num = newQue2;
atomicExch(&buffer_exhausted, 1);
atomicExch(&rebalance, 1);
if (tid == 0)
printf("new kernels' rate is %d : %d : %d\n",
p_config->block_analysis_num,
p_config->block_update_num,
p_config->block_query_num);
break;
}
}
buffer_update = dev_buffer_update
+ (offset_buffer_update_rec
% (p_config->buffer_block_size
* p_config->buffer_update_round));
while (update_map[(int) (offset_buffer_update_rec
/ p_config->buffer_block_size)\
% p_config->buffer_update_round]
== 0)
__threadfence_system();
if (tid == 0) {
if (flag_switch_dist == 1 && req_cache_update->token0 == 0
&& req_cache_query->token0 == 0) {
node_enqueue_update = &req_cache_update->array[0];
flag_switch_dist = 0;
} else if (flag_switch_dist == 0 && req_cache_update->token1 == 0
&& req_cache_query->token1 == 0) {
node_enqueue_update = &req_cache_update->array[1];
flag_switch_dist = 1;
}
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
start = clock64();
if (flag_switch_version == 0) {
local_index = index_A;
local_seci = sec_index_A;
} else if (flag_switch_version == 1) {
local_index = index_B;
local_seci = sec_index_B;
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
local_d = node_enqueue_update->mtx_delete_idx; //local_d refers to Delete Cache Area(oid1, oid2,...,oidn), local_d_cnt refers to its oid num;
local_d_cnt = node_enqueue_update->sum_d;
//local_d_pool = node_enqueue_update->mtx_delete_pool;
local_d_fqueue = node_enqueue_update->fqueue_delete;
//d_seg = node_dequeue_update->d_size/TOT_CELLS;
local_i = node_enqueue_update->mtx_insert_idx; //local_i refers to Insert Cache Area(req1, req2,..., reqn), local_i_cnt refers to its request num;
local_i_cnt = node_enqueue_update->sum_i;
//local_i_pool = node_enqueue_update->mtx_insert_pool;
local_i_fqueue = node_enqueue_update->fqueue_insert;
//i_seg = node_dequeue_update->i_size/TOT_CELLS;
local_f = node_enqueue_update->mtx_fresh_idx; //local_f refers to Fresh Cache Area(req1, req2,..., reqn), local_f_cnt refers to its request num;
local_f_cnt = node_enqueue_update->sum_f;
//local_f_pool = node_enqueue_update->mtx_fresh_pool;
local_f_fqueue = node_enqueue_update->fqueue_fresh;
//f_seg = node_dequeue_update->f_size/TOT_CELLS;
CircularQueue * fqueue_local;
for (int i = 0; i < 2; i++) {
if (i == 0)
fqueue_local = node_enqueue_update->fqueue_delete;
else if (i == 1)
fqueue_local = node_enqueue_update->fqueue_insert;
anchor = tid;
while (anchor < fqueue_local->capacity) {
fqueue_local->avail_idx_bkt[anchor] = anchor;
anchor += blockDim.x * gridDim.x;
}
if (tid == 0) {
atomicExch(&fqueue_local->cnt_elem, fqueue_local->capacity);
atomicExch(&fqueue_local->head, 0);
atomicExch(&fqueue_local->rear, 0);
}
}
#if CHECK_SI == 1
if (tid_b == 0)
while (local_seci->index[p_config->max_obj_num].idx_cell != -1);
#endif
__threadfence_system();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
#if CHECK_SI == 1
atomicExch(&(local_seci->index[p_config->max_obj_num].idx_cell),
p_config->max_obj_num);
#endif
int *mtx_idx;
fqueue_local = node_enqueue_update->fqueue_delete;
mtx_idx = node_enqueue_update->mtx_delete_idx;
anchor = tid;
idx_tmp = fqueue_local->head;
while (anchor < TOT_VGROUP_UPDATE) {
fqueue_local->avail_idx_bkt[(idx_tmp + anchor)
% fqueue_local->capacity] = -1;
mtx_idx[anchor] = idx_tmp + anchor;
node_enqueue_update->mtx_delete_nodes->last[anchor] = idx_tmp
+ anchor;
anchor += blockDim.x * gridDim.x;
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
if (tid == 0) {
atomicAdd(&fqueue_local->cnt_elem, -TOT_VGROUP_UPDATE);
atomicAdd(&fqueue_local->head, TOT_VGROUP_UPDATE);
hipMemcpyAsync(node_enqueue_update->mtx_delete_nodes->mes,
node_enqueue_update->mtx_delete_nodes_bak,
sizeof(MemElement) * node_enqueue_update->d_size,
hipMemcpyDeviceToDevice);
hipMemsetAsync(node_enqueue_update->mtx_delete_nodes->cnt, 0,
sizeof(int) * node_enqueue_update->d_size);
}
hipDeviceSynchronize();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
fqueue_local = node_enqueue_update->fqueue_insert;
mtx_idx = node_enqueue_update->mtx_insert_idx;
anchor = tid;
idx_tmp = fqueue_local->head;
while (anchor < TOT_VGROUP_UPDATE) {
fqueue_local->avail_idx_bkt[(idx_tmp + anchor)
% fqueue_local->capacity] = -1;
mtx_idx[anchor] = idx_tmp + anchor;
node_enqueue_update->mtx_insert_nodes->last[anchor] = idx_tmp
+ anchor;
anchor += blockDim.x * gridDim.x;
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
if (tid == 0) {
atomicAdd(&fqueue_local->cnt_elem, -TOT_VGROUP_UPDATE);
atomicAdd(&fqueue_local->head, TOT_VGROUP_UPDATE);
hipMemcpyAsync(node_enqueue_update->mtx_insert_nodes->mes,
node_enqueue_update->mtx_insert_nodes_bak,
sizeof(MemElement) * node_enqueue_update->i_size,
hipMemcpyDeviceToDevice);
hipMemsetAsync(node_enqueue_update->mtx_insert_nodes->cnt, 0,
sizeof(int) * node_enqueue_update->i_size);
}
hipDeviceSynchronize();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
fqueue_local = node_enqueue_update->fqueue_insert;
mtx_idx = node_enqueue_update->mtx_fresh_idx;
anchor = tid;
idx_tmp = fqueue_local->head;
while (anchor < TOT_VGROUP_UPDATE) {
fqueue_local->avail_idx_bkt[(anchor + idx_tmp)
% fqueue_local->capacity] = -1;
mtx_idx[anchor] = idx_tmp + anchor;
node_enqueue_update->mtx_insert_nodes->last[TOT_VGROUP_UPDATE
+ anchor] = idx_tmp + anchor;
anchor += blockDim.x * gridDim.x;
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
if (tid == 0) {
atomicAdd(&fqueue_local->cnt_elem, -TOT_VGROUP_UPDATE);
atomicAdd(&fqueue_local->head, TOT_VGROUP_UPDATE);
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
anchor = tid;
while (anchor < TOT_VGROUP_UPDATE) {
local_d_cnt[anchor] = 0;
local_i_cnt[anchor] = 0;
local_f_cnt[anchor] = 0;
anchor += blockDim.x * gridDim.x;
}
__threadfence_system();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
/* if(tid == 0){
end = clock64();
double temp_sum_time = ((double)(end - start))/p_config->clockRate;
sum_time += temp_sum_time;
printf("d1-0 %.4f ms\n", temp_sum_time);
}
start = clock64();
*/
if (tid_w == 0)
place_holder_update_dispatch_local[0] = 0;
MemElementCollection<UpdateType> *local_node_i =
node_enqueue_update->mtx_insert_nodes;
MemElementCollection<int> *local_node_d =
node_enqueue_update->mtx_delete_nodes;
CircularQueue * local_fqueue_u; //= local_i_fqueue;
anchor = tid;
//anchor = buffer_block_size_update;
while (anchor < buffer_block_size_update) {
#if SEG_CACHE == 0
if (place_holder_update_dispatch_local[0] < 0)
place_holder_update_dispatch_local[0] = 0;
if (place_holder_update_dispatch_local[0] < UPDATE_DISPATCH_SEG - 1)
{
if (tid_w
< UPDATE_DISPATCH_SEG - 1
- place_holder_update_dispatch_local[0])
{
int cnt_elem = atomicAdd(&local_i_fqueue->cnt_elem, -1);
int idx_in_queue = atomicAdd(&local_i_fqueue->head, 1);
int idx_anchor_in_pool =
local_i_fqueue->avail_idx_bkt[idx_in_queue
% local_i_fqueue->capacity];
if (idx_anchor_in_pool == -1)
{
printf("local_f_fqueue empty error!");
atomicAdd(&cnt_over_seg_update, 1);
break;
}
else
{
atomicExch(
&local_i_fqueue->avail_idx_bkt[idx_in_queue
% local_i_fqueue->capacity], -1);
}
place_holder_update_dispatch_local[place_holder_update_dispatch_local[0]
+ 1 + tid_w] = idx_anchor_in_pool;
}
if (tid_w == 0)
{
place_holder_update_dispatch_local[0] = UPDATE_DISPATCH_SEG
- 1;
}
}
#endif
ins_update = &buffer_update[anchor];
if (ins_update->oid > p_config->max_obj_num) {
anchor += gridDim.x * blockDim.x;
continue;
}
if (ins_update->x == 0 && ins_update->y == 0)
printf("load data error\n");
//ins_update = &buffer_update[anchor];
oid = ins_update->oid;
p_sie = &(local_seci->index[oid]); // p_sie refers to a SIEntry object; local_seci refers to a SecIndex object;
if (p_sie->idx_cell >= 0) {
p_cell_old = &local_index->arr_cell[p_sie->idx_cell]; //local_index refers to the Grid
} else {
#if IGNORE_CNT == 0
atomicAdd(&exp_old_cell_null, 1);
#endif
p_cell_old = NULL;
}
p_cell_new = local_index->getCellByXY(ins_update->x, ins_update->y);
if (p_cell_new == NULL) {
atomicAdd(&exp_new_cell_null, 1);
}
// (p_cell_new != NULL) && (p_cell_new != p_cell_old) insert
// (p_cell_old != NULL) && (p_cell_old == p_cell_new) refresh
if (p_cell_new != NULL) {
local_fqueue_u = local_i_fqueue;
//int *local_cnt;
if (p_cell_new != p_cell_old) {
//local_cnt = local_i_cnt;
idx_tmp_0 = p_cell_new->subgrid;
mtx_idx = local_i;
} else {
//local_cnt = local_f_cnt;
idx_tmp_0 = p_cell_new->subgrid + TOT_VGROUP_UPDATE;
mtx_idx = local_f;
}
//atomicAdd(&local_cnt[p_cell_new->subgrid], 1); //p_cell_new->subgrid refers to the Cell ID
idx_last = 0; //last
while (true) {
idx_last = local_node_i->last[idx_tmp_0];
if (local_node_i->cnt[idx_last] < local_node_i->LEN) {
idx_tmp = atomicAdd(&(local_node_i->cnt[idx_last]), 1);
//idx_tmp = atomicAdd(&(local_node_i->cnt[idx_last]), -1);
if (idx_tmp + 1 >= local_node_i->LEN) {
atomicAdd(&(local_node_i->cnt[idx_last]), -1);
#if SEG_CACHE == 0
tmp_me = &(local_node_i->mes[idx_last]);
if (tmp_me->next == -1)
{
if (atomicCAS(&tmp_me->lock, 0, tid + 1) == 0)
{
if (atomicCAS(&tmp_me->next, -1, -1) == -1)
{
int idx_anchor_in_pool;
int local_cache_page_idx =
atomicAdd(
place_holder_update_dispatch_local,
-1);
if (local_cache_page_idx <= 0)
{
int cnt_elem = atomicAdd(
&local_fqueue_u->cnt_elem,
-1);
int idx_in_queue = atomicAdd(
&local_fqueue_u->head, 1);
idx_anchor_in_pool =
local_fqueue_u->avail_idx_bkt[idx_in_queue
% local_fqueue_u->capacity];
if (idx_anchor_in_pool == -1)
{
printf(
"local_i&f_fqueue empty error!\t");
atomicAdd(&cnt_over_seg_update,
1);
atomicCAS(&tmp_me->lock,
tid + 1, 0);
break;
}
else
{
atomicExch(
&local_fqueue_u->avail_idx_bkt[idx_in_queue
% local_fqueue_u->capacity],
-1);
}
}
else
{
//atomicAdd(&exp_new_cell_null, 1);
idx_anchor_in_pool =
place_holder_update_dispatch_local[local_cache_page_idx];
}
atomicExch(&tmp_me->next,
idx_anchor_in_pool);
atomicExch(
&(local_node_i->last[idx_tmp_0]),
idx_anchor_in_pool);
}
atomicCAS(&tmp_me->lock, tid + 1, 0);
}
}
#else
atomicAdd(&cnt_over_seg_update, 1);
break;
#endif
} else {
local_node_i->pool[idx_last * local_node_i->LEN
+ idx_tmp] = *ins_update;
break;
}
//break;
}
#if SEG_CACHE == 1
break;
#endif
}
}
if ((p_cell_new != NULL) && (p_cell_old != NULL)
&& (p_cell_old != p_cell_new)) {
local_fqueue_u = local_d_fqueue;
while (true) {
idx_last = local_node_d->last[p_cell_old->subgrid];
if (local_node_d->cnt[idx_last] < local_node_d->LEN) {
idx_tmp = atomicAdd(&(local_node_d->cnt[idx_last]), 1);
//idx_tmp = atomicAdd(&(local_node_d->cnt[idx_last]), -1);
if (idx_tmp + 1 >= local_node_d->LEN) {
atomicAdd(&(local_node_d->cnt[idx_last]), -1);
#if SEG_CACHE == 0
tmp_me = &(local_node_d->mes[idx_last]);
if (tmp_me->next == -1)
{
if (atomicCAS(&tmp_me->lock, 0, tid + 1) == 0)
{
if (atomicCAS(&tmp_me->next, -1, -1) == -1)
{
int idx_anchor_in_pool;
int cnt_elem = atomicAdd(
&local_fqueue_u->cnt_elem, -1);
int idx_in_queue = atomicAdd(
&local_fqueue_u->head, 1);
idx_anchor_in_pool =
local_fqueue_u->avail_idx_bkt[idx_in_queue
% local_fqueue_u->capacity];
if (idx_anchor_in_pool == -1)
{
printf(
"local_d_fqueue empty error!");
atomicAdd(&cnt_over_seg_update, 1);
atomicCAS(&tmp_me->lock, tid + 1,
0);
break;
}
else
{
atomicExch(
&local_fqueue_u->avail_idx_bkt[idx_in_queue
% local_fqueue_u->capacity],
-1);
}
atomicExch(&tmp_me->next,
idx_anchor_in_pool);
atomicExch(
&(local_node_d->last[p_cell_old->subgrid]),
idx_anchor_in_pool);
}
atomicCAS(&tmp_me->lock, tid + 1, 0);
}
}
#else
atomicAdd(&cnt_over_seg_update, 1);
break;
#endif
} else {
if (idx_tmp > local_node_d->LEN)
printf("idx_tmp error\n");
local_node_d->pool[idx_last * local_node_d->LEN
+ idx_tmp] = ins_update->oid;
break;
}
//break;
}
#if SEG_CACHE == 1
break;
#endif
}
}
anchor += blockDim.x * gridDim.x;
}
offset_buffer_update += buffer_block_size_update;
__threadfence_system();
hipDeviceSynchronize();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
if (tid == 0)
atomicExch(
update_map
+ (int) (offset_buffer_update_rec
/ p_config->buffer_block_size)
% p_config->buffer_update_round, 0);
//if(tid == 0) printf("local_i_fqueue used %d \n", local_i_fqueue->capacity-local_fqueue_u->cnt_elem);
#if CHECK_UPDATE_MEMPOOL == 1
anchor = tid;
while(anchor < TOT_VGROUP_UPDATE)
{
int tmp_idx = node_enqueue_update->mtx_insert_idx[anchor];
int tmp_cnt = 0;
do
{
MemItem<UpdateType>* qn_cursor = &node_enqueue_update->mtx_insert_node[tmp_idx];
tmp_cnt += qn_cursor->cnt;
tmp_idx = qn_cursor->next;
}while(tmp_idx != -1);
atomicAdd(&cnt_over_seg_update, tmp_cnt);
atomicAdd(&cnt_over_seg_query, local_i_cnt[anchor]);
anchor+=gridDim.x*blockDim.x;
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0)
{
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
#endif
if (tid == 0) {
#if CHECK_UPDATE_MEMPOOL == 1
printf("\nInsert:\n");
for(int i = 0; i<TOT_VGROUP_UPDATE; i+=100)
{
printf("%d ",local_i_cnt[i]);
}
printf("\n\n");
printf("\nInsert check:\n");
for(int i = 0; i<TOT_VGROUP_UPDATE; i+=100)
{
int tmp_idx = node_enqueue_update->mtx_insert_idx[i];
int tmp_cnt = 0;
do
{
MemItem<UpdateType>* qn_cursor = &node_enqueue_update->mtx_insert_node[tmp_idx];
tmp_cnt += qn_cursor->cnt;
tmp_idx = qn_cursor->next;
}while(tmp_idx != -1);
printf("%d ",tmp_cnt);
}
printf("\n\n");
#endif
end = clock64();
long long int temp_sum_time = ((double) (end - start));
atomicExch(&dis_time_per_period, (int) (end - start));
if (offset_buffer_update > QUERY_SKIP_NUM)
distribute_sumtime += temp_sum_time;
printf("d1 %.4f ms\n",
(double) temp_sum_time / (double) p_config->clockRate);
atomicExch(&offset_buffer_update_rec, offset_buffer_update);
if (flag_switch_dist == 0) {
atomicExch(&req_cache_update->cnt0, cnt_enqueue_update);
atomicExch(&req_cache_update->token0, 1);
}
if (flag_switch_dist == 1) {
atomicExch(&req_cache_update->cnt1, cnt_enqueue_update);
atomicExch(&req_cache_update->token1, 1);
}
node_enqueue_update = NULL;
atomicAdd(&cnt_enqueue_update, 1);
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
if (offset_buffer_update <= QUERY_SKIP_NUM) {
if (tid == 0) {
atomicAdd(&cnt_enqueue_query, 1);
atomicAdd(&cnt_dequeue_query, 1);
}
continue;
}
if (offset_buffer_update <= QUERY_SKIP_NUM + buffer_block_size_update) {
if (tid == 0) {
//start = clock64();
}
}
// ------------------------------------------------------------------------------------------------------------
// Distribute Queries
if (offset_buffer_query + buffer_block_size_query >= TOTAL_QUERY) {
buffer_block_size_query = TOTAL_QUERY - offset_buffer_query;
}
if (flag_switch_dist == 0) {
while (req_cache_query->token0 == 1) {
if (tid == 0)
exp_hunger_dist1++;
}
} else if (flag_switch_dist == 1) {
while (req_cache_query->token1 == 1) {
if (tid == 0)
exp_hunger_dist1++;
}
}
start = clock64();
if (tid == 0) {
if (flag_switch_dist == 0) {
node_enqueue_query = &req_cache_query->array[0];
} else if (flag_switch_dist == 1) {
node_enqueue_query = &req_cache_query->array[1];
}
node_enqueue_query->tot_cells_covered = 0;
node_enqueue_query->bound_btw_cell[0] = 0;
node_enqueue_query->buffer_block_size_query =
buffer_block_size_query;
atomicExch(&cnt_singular, 0);
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
node_enqueue_query_local = node_enqueue_query;
flag_cells_covered = node_enqueue_query->flag_cells_covered;
idx_cells_covered = node_enqueue_query->idx_cells_covered;
cnt_queries_per_cell = node_enqueue_query->cnt_queries_per_cell;
queries_per_cell = node_enqueue_query->queries_per_cell;
buffer_block_query = node_enqueue_query->buffer_block_query;
node_enqueue_query->offset_buffer_query = offset_buffer_query;
if (flag_switch_version == 0) {
p_grid = index_A;
} else if (flag_switch_version == 1) {
p_grid = index_B;
}
anchor = tid;
while (anchor < TOT_CELLS) {
cnt_queries_per_cell[anchor] = 0;
anchor += blockDim.x * gridDim.x;
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
p_buffer_block_query = buffer_query + offset_buffer_query;
#if (QUERY_PATTERN != 3 && QUERY_PATTERN != 30)
for (int i = 0; i < 1; i++) {
fqueue_local = node_enqueue_query->fqueue_query;
anchor = tid;
while (anchor < fqueue_local->capacity) {
fqueue_local->avail_idx_bkt[anchor] = anchor;
anchor += blockDim.x * gridDim.x;
}
if (tid == 0) {
atomicExch(&fqueue_local->cnt_elem, fqueue_local->capacity);
atomicExch(&fqueue_local->head, 0);
atomicExch(&fqueue_local->rear, 0);
}
}
__threadfence_system();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
fqueue_local = node_enqueue_query->fqueue_query;
mtx_idx = node_enqueue_query->mtx_query_idx;
anchor = tid;
idx_tmp = fqueue_local->head;
while (anchor < TOT_VGROUP_UPDATE) {
fqueue_local->avail_idx_bkt[(idx_tmp + anchor)
% fqueue_local->capacity] = -1;
mtx_idx[anchor] = idx_tmp + anchor;
node_enqueue_query->mtx_query_nodes->last[anchor] = idx_tmp
+ anchor;
anchor += blockDim.x * gridDim.x;
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
if (tid == 0) {
atomicAdd(&fqueue_local->cnt_elem, -TOT_VGROUP_UPDATE);
atomicAdd(&fqueue_local->head, TOT_VGROUP_UPDATE);
hipMemcpyAsync(node_enqueue_query->mtx_query_nodes->mes,
node_enqueue_query->mtx_query_nodes_bak,
sizeof(MemElement) * node_enqueue_query->q_size,
hipMemcpyDeviceToDevice);
hipMemsetAsync(node_enqueue_query->mtx_query_nodes->cnt, 0,
sizeof(int) * node_enqueue_query->q_size);
}
hipDeviceSynchronize();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
anchor = tid;
#if (USE_MULTIQUEUE == 1)
anchor = tid;
while (anchor < QUERY_TYPE_POOL_SIZE)
{
d_qd_anchor_pool[anchor] = -1;
anchor += blockDim.x * gridDim.x;
}
if (tid == 0)
{
atomicExch(&d_queue_idx_anchor_free->cnt_elem,
d_queue_idx_anchor_free->capacity);
atomicExch(&d_queue_idx_anchor_free->head, 0);
atomicExch(&d_queue_idx_anchor_free->rear, 0);
}
anchor = tid;
while (anchor < d_queue_idx_anchor_free->capacity)
{
atomicExch(&d_queue_idx_anchor_free->avail_idx_bkt[anchor], anchor);
anchor += blockDim.x * gridDim.x;
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0)
{
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence)
;
}
__syncthreads();
anchor = tid;
while (anchor < MQUEUE_SIZE)
{
atomicAdd(&d_queue_idx_anchor_free->cnt_elem, -1);
int idx_in_queue = atomicAdd(&d_queue_idx_anchor_free->head, 1);
int idx_anchor_in_pool =
d_queue_idx_anchor_free->avail_idx_bkt[idx_in_queue
% d_queue_idx_anchor_free->capacity];
d_queue_idx_anchor_free->avail_idx_bkt[idx_in_queue
% d_queue_idx_anchor_free->capacity] = -1;
MemItem<QueryType>* qn = &d_qd_obj_pool[idx_anchor_in_pool];
qn->id = idx_anchor_in_pool;
qn->cnt = 0;
qn->queuelen = 1;
qn->len = QT_SIZE;
qn->next = -1;
qn->last = idx_anchor_in_pool;
qn->lock = 0;
multiqueue[anchor] = idx_anchor_in_pool;
anchor += gridDim.x * blockDim.x;
}
anchor = tid;
while (anchor < 512)
{
cache_memory_idx_query_dispatch[anchor] = -1;
anchor += gridDim.x * blockDim.x;
}
__threadfence_system();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0)
{
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence)
;
}
__syncthreads();
anchor = tid;
while (anchor < buffer_block_size_query)
{
req_query = p_buffer_block_query[anchor];
xmin = req_query.minX;
ymin = req_query.minY;
xmax = req_query.maxX;
ymax = req_query.maxY;
p_cell_lb = p_grid->getNearestCellByXY(xmin, ymin);
p_cell_rt = p_grid->getNearestCellByXY(xmax, ymax);
left_bottom = p_cell_lb->idx;
right_top = p_cell_rt->idx;
cell_num = p_grid->cell_num;
row_start = right_top / cell_num;
col_end = right_top % cell_num;
row_end = left_bottom / cell_num;
col_start = left_bottom % cell_num;
int cellNum = (row_end - row_start + 1) * (col_end - col_start + 1);
int count = cellNum / QUEUE_SEG_LEN + 1;
if (count >= MQUEUE_SIZE)
{
printf("count over MQUEUE_SIZE\n");
continue;
}
MemItem<QueryType>* qn_start = &d_qd_obj_pool[multiqueue[count]];
MemItem<QueryType>* qn_last;
bool errorFlag = false;
while (true)
{
qn_last = &d_qd_obj_pool[qn_start->last];
if (qn_last->cnt >= qn_last->len)
{
while (qn_last->cnt >= qn_last->len)
ins_update = &buffer_update[anchor]; //donothing
continue;
}
if (*cache_memory_idx_query_dispatch_local == -1)
{
if (tid_w == 0)
{
int cnt_elem = atomicAdd(
&d_queue_idx_anchor_free->cnt_elem, -1);
int idx_in_queue = atomicAdd(
&d_queue_idx_anchor_free->head, 1);
int idx_anchor_in_pool =
d_queue_idx_anchor_free->avail_idx_bkt[idx_in_queue
% d_queue_idx_anchor_free->capacity];
if (idx_anchor_in_pool == -1)
{
printf("idx_anchor_in_pool empty error!");
errorFlag = true;
break;
}
else
{
atomicExch(
&d_queue_idx_anchor_free->avail_idx_bkt[idx_in_queue
% d_queue_idx_anchor_free->capacity],
-1);
MemItem<QueryType>* newQn =
&d_qd_obj_pool[idx_anchor_in_pool];
atomicExch(&newQn->id, idx_anchor_in_pool);
atomicExch(&newQn->cnt, 0);
atomicExch(&newQn->queuelen, 1);
atomicExch(&newQn->len, QT_SIZE);
atomicExch(&newQn->next, -1);
atomicExch(&newQn->last, idx_anchor_in_pool);
atomicExch(&newQn->lock, 0);
atomicExch(cache_memory_idx_query_dispatch_local,
idx_anchor_in_pool);
}
}
}
idx_tmp = atomicAdd(&qn_last->cnt, 1);
if (idx_tmp + 1 >= qn_last->len)
{
atomicAdd(&qn_last->cnt, -1);
idx_tmp_0 = atomicExch(
cache_memory_idx_query_dispatch_local, -1);
if (idx_tmp_0 > 0)
{
if (qn_last->next == -1)
{
if (atomicCAS(&qn_last->lock, 0, 1) == 0)
{
if (atomicCAS(&qn_last->next, -1, -1) == -1)
{
atomicExch(&qn_last->next, idx_tmp_0);
atomicExch(&qn_start->last, idx_tmp_0);
atomicAdd(&qn_start->queuelen, 1);
atomicCAS(&qn_last->lock, 1, 0);
continue;
}
atomicCAS(&qn_last->lock, 1, 0);
}
}
atomicExch(cache_memory_idx_query_dispatch_local,
idx_tmp_0);
}
continue;
}
break;
}
if (!errorFlag)
{
qn_last->pool[idx_tmp] = req_query;
qn_last->cache_anchor[idx_tmp] = anchor;
}
#if IGNORE_CNT==0
atomicAdd(&cnt_queries, 1);
#endif
anchor += blockDim.x * gridDim.x;
}
__threadfence_system();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0)
{
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence)
;
}
__syncthreads();
anchor = tid;
while (anchor < blockDim.x * gridDim.x)
{
if (*cache_memory_idx_query_dispatch_local != -1)
{
if (tid_w == 0)
{
*cache_memory_idx_query_dispatch_local = -1;
}
}
anchor += blockDim.x * gridDim.x;
}
#if CHECK_QUERY_MEMPOOL==1
if(tid == 0)
{
atomicExch(&query_sum_formempool, 0);
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0)
{
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
anchor = tid;
while(anchor < MQUEUE_SIZE)
{
int cursor_idx = multiqueue[anchor];
MemItem<QueryType>* qn_cursor;
int query_cnt_temp = 0;
while(cursor_idx != -1)
{
qn_cursor = &d_qd_obj_pool[cursor_idx];
query_cnt_temp += qn_cursor->cnt;
cursor_idx = qn_cursor->next;
}
printf("%d ", query_cnt_temp);
atomicAdd(&query_sum_formempool, query_cnt_temp);
anchor += gridDim.x*blockDim.x;
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0)
{
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
if(tid == 0)
{
printf("\n");
printf("SUM : %d\n", query_sum_formempool);
}
#endif
if (tid == 0)
{
atomicExch(&cursor_distribute_wrap, 0);
atomicExch(&cnt_distribute_wrap, 0);
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0)
{
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence)
;
}
__syncthreads();
MemElementCollection<int>* local_node_q =
node_enqueue_query->mtx_query_nodes;
CircularQueue* local_fqueue_q = node_enqueue_query->fqueue_query;
while (true)
{
if (cursor_distribute_wrap * QUERY_DISPATCH_WRAP_SIZE
>= QUERY_TYPE_POOL_SIZE)
break;
if (tid_w == 0)
{
*place_holder_query_dispatch_local = QUERY_DISPATCH_WRAP_SIZE
* atomicAdd(&cursor_distribute_wrap, 1);
}
if (*place_holder_query_dispatch_local >= QUERY_TYPE_POOL_SIZE)
{
break; //exit;
}
if (*place_holder_query_dispatch_local < 0)
{
break;
}
int anchor_end = QUERY_TYPE_POOL_SIZE
- *place_holder_query_dispatch_local;
if (anchor_end > QUERY_DISPATCH_WRAP_SIZE)
anchor_end = QUERY_DISPATCH_WRAP_SIZE;
d_qd_anchor_local = d_qd_anchor_pool
+ (*place_holder_query_dispatch_local);
d_qd_query_type_local = d_qd_query_type_pool
+ (*place_holder_query_dispatch_local);
anchor = tid_w;
int anchor_idx;
while (anchor < QUERY_DISPATCH_WRAP_SIZE)
{
if (anchor >= anchor_end)
break;
anchor_idx = d_qd_anchor_local[anchor];
if (anchor_idx < 0)
{
break;
}
req_query = d_qd_query_type_local[anchor];
xmin = req_query.minX;
ymin = req_query.minY;
xmax = req_query.maxX;
ymax = req_query.maxY;
p_cell_lb = p_grid->getNearestCellByXY(xmin, ymin);
p_cell_rt = p_grid->getNearestCellByXY(xmax, ymax);
left_bottom = p_cell_lb->idx;
right_top = p_cell_rt->idx;
cell_num = p_grid->cell_num;
row_start = right_top / cell_num;
col_end = right_top % cell_num;
row_end = left_bottom / cell_num;
col_start = left_bottom % cell_num;
anchor_idx = atomicAdd(&cnt_distribute_wrap, 1);
int cellNum = (row_end - row_start + 1)
* (col_end - col_start + 1);
for (int k = 0; k < cellNum; k++)
{
int i = row_start + k / (col_end - col_start + 1);
int j = col_start + k % (col_end - col_start + 1);
idx_cell = i * EDGE_CELL_NUM + j;
idx_tmp = atomicAdd(&cnt_queries_per_cell[idx_cell], 1);
#if IGNORE_CNT==0
atomicAdd(&check_tot_covered, 1);
#endif
#if SEG_CACHE == 0
if (idx_tmp >= local_node_q->LEN)
{
atomicAdd(&cnt_queries_per_cell[idx_cell], -1);
atomicAdd(&cnt_over_seg_query, 1);
continue;
}
#endif
if (idx_tmp < 0)
continue;
if (atomicCAS(&flag_cells_covered[idx_cell], 0, 1) == 0)
{
//atomicExch(&flag_cells_covered[idx_cell], 1);
idx_tmp_0 = atomicAdd(
&node_enqueue_query->tot_cells_covered, 1);
idx_cells_covered[idx_tmp_0] = idx_cell;
}
//queries_per_cell[idx_cell * LEN_SEG_CACHE_QUERY + idx_tmp] = anchor_idx;
while (true)
{
idx_last = local_node_q->last[idx_cell];
if (local_node_q->cnt[idx_last] < local_node_q->LEN)
{
idx_tmp = atomicAdd(&(local_node_q->cnt[idx_last]),
1);
if (idx_tmp + 1 >= local_node_q->LEN)
{
atomicAdd(&(local_node_q->cnt[idx_last]), -1);
#if SEG_CACHE == 0
tmp_me = &(local_node_q->mes[idx_last]);
if (tmp_me->next == -1)
{
if (atomicCAS(&tmp_me->lock, 0, tid + 1)
== 0)
{
if (atomicCAS(&tmp_me->next, -1, -1)
== -1)
{
int idx_anchor_in_pool;
int cnt_elem = atomicAdd(
&local_fqueue_q->cnt_elem,
-1);
int idx_in_queue = atomicAdd(
&local_fqueue_q->head, 1);
idx_anchor_in_pool =
local_fqueue_q->avail_idx_bkt[idx_in_queue
% local_fqueue_q->capacity];
if (idx_anchor_in_pool == -1)
{
printf(
"local_fqueue_q empty error!");
atomicAdd(&cnt_over_seg_query,
1);
atomicCAS(&tmp_me->lock,
tid + 1, 0);
break;
}
else
{
atomicExch(
&local_fqueue_q->avail_idx_bkt[idx_in_queue
% local_fqueue_q->capacity],
-1);
}
atomicExch(&tmp_me->next,
idx_anchor_in_pool);
atomicExch(
&(local_node_q->last[idx_cell]),
idx_anchor_in_pool);
}
atomicCAS(&tmp_me->lock, tid + 1, 0);
}
}
#else
atomicAdd(&cnt_over_seg_query, 1);
break;
#endif
}
else
{
if (idx_tmp > local_node_q->LEN)
printf("idx_tmp error\n");
local_node_q->pool[idx_last * local_node_q->LEN
+ idx_tmp] = anchor_idx;
break;
}
}
}
}
buffer_block_query[anchor_idx] = req_query;
check_dist_query_local++;
#if IGNORE_CNT==0
atomicAdd(&cnt_queries, 1);
#endif
__threadfence_system();
anchor += WRAPSIZE;
}
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0)
{
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence)
;
}
__syncthreads();
#else
//lwh
MemElementCollection<int> *local_node_q = node_enqueue_query->mtx_query_nodes;
CircularQueue * local_fqueue_q = node_enqueue_query->fqueue_query;
int anchor_idx;
anchor = tid;
while (anchor < buffer_block_size_query) {
req_query = p_buffer_block_query[anchor];
//lwh
anchor_idx = d_qd_anchor_local[anchor];
xmin = req_query.minX;
ymin = req_query.minY;
xmax = req_query.maxX;
ymax = req_query.maxY;
p_cell_lb = p_grid->getNearestCellByXY(xmin, ymin);
p_cell_rt = p_grid->getNearestCellByXY(xmax, ymax);
left_bottom = p_cell_lb->idx;
right_top = p_cell_rt->idx;
cell_num = p_grid->cell_num;
row_start = right_top / cell_num;
col_end = right_top % cell_num;
row_end = left_bottom / cell_num;
col_start = left_bottom % cell_num;
anchor_idx = atomicAdd(&cnt_distribute_wrap, 1);
int cellNum = (row_end - row_start + 1) * (col_end - col_start + 1);
int count = cellNum / QUEUE_SEG_LEN + 1;
if (count >= MQUEUE_SIZE) {
printf("count over MQUEUE_SIZE\n");
continue;
}
#if IGNORE_CNT == 0
atomicAdd(multiqueue + count, 1);
#endif
#if USE_DPPROCESS == 1
hipLaunchKernelGGL(( DpProcess), dim3(1), dim3(cellNum), 0, 0, place_holder_query_dispatch_local, anchor, row_start, row_end, col_start, col_end, dev_p_gconfig, node_enqueue_query);
hipDeviceSynchronize();
#else
for (int k = 0; k < cellNum; k++) {
int i = row_start + k / (col_end - col_start + 1);
int j = col_start + k % (col_end - col_start + 1);
idx_cell = i * EDGE_CELL_NUM + j;
idx_tmp = atomicAdd(&cnt_queries_per_cell[idx_cell], 1);
#if IGNORE_CNT == 0
atomicAdd(&check_tot_covered, 1);
#endif
#if SEG_CACHE == 0
if(idx_tmp >= local_node_q->LEN)
{
atomicAdd(&cnt_queries_per_cell[idx_cell], -1);
atomicAdd(&cnt_over_seg_query, 1);
continue;
}
#endif
if (atomicCAS(&flag_cells_covered[idx_cell], 0, 1) == 0) {
//atomicExch(&flag_cells_covered[idx_cell], 1);
idx_tmp_0 = atomicAdd(&node_enqueue_query->tot_cells_covered, 1);
idx_cells_covered[idx_tmp_0] = idx_cell;
}
//queries_per_cell[idx_cell * LEN_SEG_CACHE_QUERY + idx_tmp] = anchor;
while (true) {
idx_last = local_node_q->last[idx_cell];
if (local_node_q->cnt[idx_last] < local_node_q->LEN) {
idx_tmp = atomicAdd(&(local_node_q->cnt[idx_last]), 1);
if (idx_tmp + 1 >= local_node_q->LEN) {
atomicAdd(&(local_node_q->cnt[idx_last]), -1);
#if SEG_CACHE == 0
tmp_me = &(local_node_q->mes[idx_last]);
if(tmp_me->next == -1)
{
if(atomicCAS(&tmp_me->lock,0,tid+1) == 0)
{
if(atomicCAS(&tmp_me->next,-1,-1) == -1)
{
int idx_anchor_in_pool;
int cnt_elem = atomicAdd(&local_fqueue_q->cnt_elem, -1);
int idx_in_queue = atomicAdd(&local_fqueue_q->head, 1);
idx_anchor_in_pool = local_fqueue_q->avail_idx_bkt[idx_in_queue % local_fqueue_q->capacity];
if(idx_anchor_in_pool == -1)
{
printf("local_fqueue_q empty error!");
atomicAdd(&cnt_over_seg_query, 1);
atomicCAS(&tmp_me->lock,tid+1,0);
break;
}
else
{
atomicExch(&local_fqueue_q->avail_idx_bkt[idx_in_queue % local_fqueue_q->capacity], -1);
}
atomicExch(&tmp_me->next, idx_anchor_in_pool);
atomicExch(&(local_node_q->last[idx_cell]), idx_anchor_in_pool);
}
atomicCAS(&tmp_me->lock,tid+1,0);
}
}
#else
atomicAdd(&cnt_over_seg_query, 1);
break;
#endif
} else {
if (idx_tmp > local_node_q->LEN)
printf("idx_tmp error\n");
local_node_q->pool[idx_last * local_node_q->LEN + idx_tmp] = anchor_idx;
break;
}
}
}
}
#endif
buffer_block_query[anchor] = req_query;
check_dist_query_local++;
#if IGNORE_CNT == 0
atomicAdd(&cnt_queries, 1);
#endif
anchor += blockDim.x * gridDim.x;
}
#endif
// if(tid == 0)
// {
// for(int k = 0; k<MQUEUE_SIZE; k++){
// printf("%d ", multiqueue[k]);
// }
// printf("\n");
// }
#else //QUERY PATTHERN ELSE
MemElementCollection<int>* local_node_q = node_enqueue_query->mtx_query_nodes;
atomicExch(&local_node_q->globalCnt, 0);
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0)
{
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
anchor = tid;
while (anchor < buffer_block_size_query)
{
req_query = p_buffer_block_query[anchor];
atomicAdd(&local_node_q->globalCnt,1);
buffer_block_query[anchor] = req_query;
check_dist_query_local++;
#if IGNORE_CNT==0
atomicAdd(&cnt_queries,1);
#endif
anchor += blockDim.x * gridDim.x;
}
#endif
check_cnt_enqueue++;
__threadfence_system();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
// anchor = tid;
// while(anchor < tot_cells_covered)
// {
// flag_cells_covered[ idx_cells_covered[anchor] ] = 0;
// anchor += blockDim.x * gridDim.x;
// }
//
// __syncthreads();
// barrier_fence += barrier_step;
// if (tid_b == 0)
// {
// atomicAdd(&barrier_dist, 1);
// while (barrier_dist < barrier_fence);
// }
// __syncthreads();
offset_buffer_query += buffer_block_size_query;
if (tid == 0) {
end = clock64();
long long int temp_sum_time = (end - start);
atomicAdd(&dis_time_per_period, (int) (end - start));
distribute_sumtime += temp_sum_time;
printf("d2 %.4f ms\n",
(double) temp_sum_time / p_config->clockRate);
atomicExch(&offset_buffer_query_rec, offset_buffer_query);
if (flag_switch_dist == 0) {
atomicExch(&req_cache_query->cnt0, cnt_enqueue_query);
atomicExch(&req_cache_query->token0, 1);
}
if (flag_switch_dist == 1) {
atomicExch(&req_cache_query->cnt1, cnt_enqueue_query);
atomicExch(&req_cache_query->token1, 1);
}
node_enqueue_query = NULL;
atomicAdd(&cnt_enqueue_query, 1);
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
//end = clock64();
if (tid == 0) {
*dev_cnt_update = offset_buffer_update;
*dev_cnt_query = offset_buffer_query;
}
if (tid == 0 && offset_buffer_update >= TOTAL_UPDATE) {
atomicExch(&p_config->terminalFlag, 1);
atomicExch(&buffer_exhausted, 1);
if (start != -1) {
printf("\nDistributorKernel clock: %f ms\n",
((double) distribute_sumtime) / p_config->clockRate);
}
printf("\n");
printf("len_seg_cache_update_local: %d\n", len_seg_cache_update_local);
printf("cnt_enqueue_update: %d\n", cnt_enqueue_update);
printf("cnt_enqueue_query: %d\n", cnt_enqueue_query);
printf("exp_hunger_dist0: %d\n", exp_hunger_dist0);
printf("exp_hunger_dist1: %d\n", exp_hunger_dist1);
printf("offset_buffer_update: %d\n", offset_buffer_update);
printf("dev_cnt_update: %d\n", *dev_cnt_update);
printf("offset_buffer_query: %d\n", offset_buffer_query);
printf("dev_cnt_query: %d\n", *dev_cnt_query);
printf("len_seg_cache_query in Distribute Kernel: %d\n",
LEN_SEG_CACHE_QUERY);
printf("buffer_block_size_update: %d\n", buffer_block_size_update);
printf("buffer_block_size_query: %d\n", buffer_block_size_query);
printf("exp_new_cell_null: %d\n", exp_new_cell_null);
printf("exp_update_in_spec_cell: %d\n", exp_update_in_spec_cell);
printf("exp_old_cell_null: %d\n", exp_old_cell_null);
printf("check_tot_covered: %d %d\n",
check_tot_covered / cnt_enqueue_query, cnt_enqueue_query);
printf("check_dist_query_local: %d\n", check_dist_query_local);
printf("check_cnt_enqueue: %d\n", check_cnt_enqueue);
printf("check_lb_null: %d\n", check_lb_null);
printf("check_rt_null: %d\n", check_rt_null);
printf("cnt_over_seg_update: %d\n", cnt_over_seg_update);
printf("cnt_over_seg_query: %d\n", cnt_over_seg_query);
printf("cnt_queries: %d\n", cnt_queries);
// printf("Sizeof UpdateType: %d\n", sizeof(UpdateType));
// printf("Sizeof QueryType: %d\n", sizeof(QueryType));
// printf("Sizeof ObjBox: %d\n", sizeof(ObjBox));
// printf("Sizeof QueryQNode: %d\n", sizeof(QueryQNode));
// printf("Sizeof QueryType *: %d\n", sizeof(QueryType *));
// printf("Sizeof int *: %d\n", sizeof(int *));
// printf("Sizeof SIEntry: %d\n", sizeof(SIEntry));
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
if (tid == 0) {
atomicExch(&barrier_dist, 0);
}
}
| 17005266a13941eb83899552932a9616b3f89842.cu | /**********************************************************************
* DistributorKernel.cu
* Copyright @ Cloud Computing Lab, CS, Wuhan University
* Author: Chundan Wei
* Email: danuno@qq.com
* Version: 1.0
* Date: Oct 22, 2014 | 10:36:45 AM
 * Description:
 * Licence:
**********************************************************************/
#include <memory.h>
#include <string.h>
#include <stdio.h>
#include "config/GConfig.h"
#include "misc/BaseStruct.h"
#include "misc/Buffer.h"
#include "device/DeviceGlobalVar.cuh"
#include "DistributorKernel.cuh"
#include "misc/Cell.cuh"
#include "misc/ObjBox.cuh"
#include "misc/UpdateQNode.cuh"
#include "misc/UpdateCacheArea.cuh"
#include "misc/QueryCacheArea.h"
#include "misc/SyncFuncGPU.cuh"
/**
 * Dynamic-parallelism child kernel: registers query `anchor_idx` with every
 * grid cell in the rectangle [row_start..row_end] x [col_start..col_end].
 *
 * Expected launch configuration: <<<1, cellNum>>> where
 *   cellNum == (row_end - row_start + 1) * (col_end - col_start + 1),
 * i.e. exactly one thread per covered cell (thread k maps to one (i, j)).
 *
 * Completion contract: every exit path performs exactly one
 * atomicAdd(place_holder_query_dispatch_local, 1), so the parent kernel can
 * detect completion by comparing the counter against cellNum.
 *
 * Side effects (all via atomics on node_enqueue_query_local and the device
 * globals check_tot_covered / cnt_over_seg_query):
 *   - reserves a slot in cnt_queries_per_cell[idx_cell] (rolled back on
 *     segment overflow),
 *   - the first thread to cover a cell appends it to idx_cells_covered,
 *   - writes anchor_idx into the cell's segment of queries_per_cell.
 */
__global__ void DpProcess(int *place_holder_query_dispatch_local,
		int anchor_idx, int row_start, int row_end, int col_start, int col_end,
		GConfig *p_config, QueryQNode *node_enqueue_query_local) {
	// Map the 1-D thread index onto the (row, col) cell rectangle.
	int k = threadIdx.x;
	int i = row_start + k / (col_end - col_start + 1);
	int j = col_start + k % (col_end - col_start + 1);
	const int EDGE_CELL_NUM = p_config->edge_cell_num;
	const int LEN_SEG_CACHE_QUERY = p_config->len_seg_cache_query;
	int *cnt_queries_per_cell = node_enqueue_query_local->cnt_queries_per_cell;
	int *flag_cells_covered = node_enqueue_query_local->flag_cells_covered;
	volatile int *volatile idx_cells_covered =
			node_enqueue_query_local->idx_cells_covered;
	int *queries_per_cell = node_enqueue_query_local->queries_per_cell;
	int idx_cell = i * EDGE_CELL_NUM + j;
	int idx_tmp_0;
	// Reserve a slot for this query in the cell's fixed-size query segment.
	int idx_tmp = atomicAdd(&cnt_queries_per_cell[idx_cell], 1);
	atomicAdd(&check_tot_covered, 1);
	if (idx_tmp >= LEN_SEG_CACHE_QUERY) {
		// Segment full: roll back the reservation and count the overflow.
		atomicAdd(&cnt_queries_per_cell[idx_cell], -1);
		atomicAdd(&cnt_over_seg_query, 1);
		atomicAdd(place_holder_query_dispatch_local, 1);
		return;
	}
	if (idx_tmp < 0) {
		// Defensive: a concurrent rollback drove the counter negative.
		atomicAdd(place_holder_query_dispatch_local, 1);
		return;
	}
	// Exactly one thread wins the 0->1 transition per cell and appends the
	// cell to the covered-cells list. The CAS itself stores the 1, so the
	// follow-up atomicExch the original performed here was a redundant
	// atomic write and has been removed (the inlined copy of this logic in
	// the distributor loop already had it commented out).
	if (atomicCAS(&flag_cells_covered[idx_cell], 0, 1) == 0) {
		idx_tmp_0 = atomicAdd(&node_enqueue_query_local->tot_cells_covered, 1);
		idx_cells_covered[idx_tmp_0] = idx_cell;
	}
	// Publish the query id into this cell's segment at the reserved slot.
	queries_per_cell[idx_cell * LEN_SEG_CACHE_QUERY + idx_tmp] = anchor_idx;
	atomicAdd(place_holder_query_dispatch_local, 1);
	return;
}
__global__ void DistributorKernel(GConfig * dev_p_gconfig,
UpdateType * dev_buffer_update, int * dev_cnt_update,
QueryType * dev_buffer_query, int * dev_cnt_query,
UpdateCacheArea * d_req_cache_update, QueryCacheArea * d_req_cache_query,
Grid * d_index_A, Grid * d_index_B, CircularQueue * d_queue_bkts_free,
MemItem<QueryType> * d_qd_obj_pool,
CircularQueue * d_queue_idx_anchor_free, QueryType * d_qd_query_type_pool,
int * d_qd_anchor_pool, int * d_place_holder, ManagedMemory * d_mm) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int tid_b = threadIdx.x;
const unsigned int barrier_step = gridDim.x;
volatile unsigned int barrier_fence = 0;
int anchor = 0, anchor_i = 0;
//__shared__ int occupation[12288];
//extern __shared__ int s[];
const int WRAPSIZE = 32;
int realWrapSize = 0;
int realBlockSize = 0;
const int wid = tid / WRAPSIZE;
const int tid_w = tid % WRAPSIZE;
GConfig * p_config = dev_p_gconfig;
//for debug
int check_dist_query_local = 0;
//int check_tot_covered_local = 0;
int check_cnt_enqueue = 0;
int check_lb_null = 0;
int check_rt_null = 0;
long long int start = -1, end;
//long long int copytime = 0;
//double sum_time = 0;
// printf("%d\n", gp_config->block_analysis_num);
// Initialize global variables
if (tid == 0) {
atomicExch(&buffer_exhausted, 0);
atomicExch(&rebalance, 0);
atomicExch(&launch_signal, 0);
atomicExch(&exit_query, 0);
atomicExch(&exit_update, 0);
}
__threadfence_system();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
printf("sync-1 over\n");
}
__syncthreads();
// Initialize local variables
const int EDGE_CELL_NUM = p_config->edge_cell_num;
const int TOT_CELLS = p_config->edge_cell_num * p_config->edge_cell_num;
const int TOT_VGROUP_UPDATE = p_config->side_len_vgroup
* p_config->side_len_vgroup;
//int query_dispatch_fence = 0;
//int *sync_holder_query_dispatch_local = &sync_holder_query_dispatch[wid];
int *place_holder_query_dispatch_local = &place_holder_query_dispatch[wid];
int *cache_memory_idx_query_dispatch_local =
&cache_memory_idx_query_dispatch[wid];
int *place_holder_update_dispatch_local = &place_holder_update_dispatch[wid
* UPDATE_DISPATCH_SEG];
ManagedMemory lmm = *d_mm;
const int QUERY_DISPATCH_WRAP_SIZE = 512;
// Initialize local update variables
Grid * local_index = NULL;
SecIndex * local_seci = NULL;
UpdateType *buffer_update = dev_buffer_update;
int offset_buffer_update = offset_buffer_update_rec;
const int TOTAL_UPDATE = p_config->max_obj_num * p_config->round_num;
int buffer_block_size_update = p_config->buffer_block_size;
int len_seg_cache_update_local = len_seg_cache_update;
int *local_d;
int *local_d_pool;
int *local_d_cnt;
int *local_i;
UpdateType *local_i_pool;
int *local_i_cnt;
int *local_f;
UpdateType *local_f_pool;
int *local_f_cnt;
CircularQueue * local_d_fqueue, *local_i_fqueue, *local_f_fqueue;
int idx_start, idx_last;
MemElement *tmp_me;
int d_seg, i_seg, f_seg;
Cell *p_cell_new = NULL;
Cell *p_cell_old = NULL;
//Cell *p_cell_marked = NULL;
int oid = -1;
SIEntry * p_sie = NULL;
UpdateType *ins_update;
// Initialize local query variables
QueryType *buffer_query = dev_buffer_query, *p_buffer_block_query,
req_query;
int offset_buffer_query = 0;
const int TOTAL_QUERY = p_config->max_query_num;
const int LEFT_UPDATE_AFTERSKIP = TOTAL_UPDATE
- p_config->query_skip_round_num * p_config->max_obj_num;
int buffer_block_size_query = offset_buffer_query_rec;
if (LEFT_UPDATE_AFTERSKIP > p_config->buffer_block_size) {
buffer_block_size_query = (int) ((double) p_config->buffer_block_size
* (double) p_config->max_query_num
/ (double) LEFT_UPDATE_AFTERSKIP);
} else {
buffer_block_size_query = p_config->max_query_num;
}
const int LEN_SEG_CACHE_QUERY = p_config->len_seg_cache_query;
QueryType *d_qd_query_type_local;
int *d_qd_anchor_local;
const int QUERY_TYPE_POOL_SIZE = lmm.mm_qd_query_type_pool.len;
const int QUERY_SKIP_NUM = p_config->query_skip_round_num
* p_config->max_obj_num;
const int QT_SIZE = p_config->qt_size;
const int QUEUE_SEG_LEN = p_config->len_seg_multiqueue;
const int MQUEUE_SIZE = p_config->len_multiqueue;
//int idx_query = 0;
int idx_cell = 0, idx_tmp = 0, idx_tmp_0 = 0;
Grid * p_grid = NULL;
Cell *p_cell_rt = NULL, *p_cell_lb = NULL;
int left_bottom;
int right_top;
int cell_num;
int row_start;
int col_end;
int row_end;
int col_start;
float xmin, ymin, xmax, ymax;
QueryQNode * node_enqueue_query_local = NULL;
//int tot_cells_covered = 0;
int *flag_cells_covered = NULL;
volatile int *volatile idx_cells_covered = NULL;
int *cnt_queries_per_cell = NULL;
int *queries_per_cell = NULL;
QueryType *buffer_block_query = NULL;
if (tid == 0) {
atomicExch(&launch_signal, 1);
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
printf("sync-2 over\n");
}
__syncthreads();
if (tid == 0) {
atomicExch(&req_cache_update->token0, 0);
atomicExch(&req_cache_update->token1, 0);
atomicExch(&req_cache_query->token0, 0);
atomicExch(&req_cache_query->token1, 0);
}
/* for(int k = 0; k<2; k++){
CircularQueue* fqueue_local;
for (int i = 0; i<3 ;i++)
{
if (i == 0)
fqueue_local = req_cache_update->array[k].fqueue_delete;
else if (i == 1)
fqueue_local = req_cache_update->array[k].fqueue_insert;
else if (i == 2)
fqueue_local = req_cache_update->array[k].fqueue_fresh;
anchor = tid;
while(anchor < fqueue_local->capacity){
if(anchor<TOT_VGROUP_UPDATE)
{
}
atomicExch(&fqueue_local->avail_idx_bkt[anchor], anchor);
anchor += blockDim.x*gridDim.x;
}
if(tid == 0){
atomicExch(&fqueue_local->cnt_elem, fqueue_local->capacity);
atomicExch(&fqueue_local->head, 0);
atomicExch(&fqueue_local->rear, 0);
}
}
}*/
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
while (offset_buffer_update < TOTAL_UPDATE) {
// Distribute Updates
if (offset_buffer_update + buffer_block_size_update >= TOTAL_UPDATE) {
buffer_block_size_update = TOTAL_UPDATE - offset_buffer_update;
}
while ((flag_switch_dist == 1
&& (req_cache_update->token0 == 1
|| req_cache_query->token0 == 1))
|| (flag_switch_dist == 0
&& (req_cache_update->token1 == 1
|| req_cache_query->token1 == 1))) {
if (tid == 0)
exp_hunger_dist0++;
}
if (query_time_per_period != 0 && update_time_per_period != 0) {
double Tdis = dis_time_per_period;
double Tupd = update_time_per_period;
double Tque = query_time_per_period;
double Dis = p_config->block_analysis_num;
double Upd = p_config->block_update_num;
double Que = p_config->block_query_num;
double TdisDis = Tdis * (double) Dis;
double TupdUpd = Tupd * (double) Upd;
double TqueQue = Tque * (double) Que;
double x_double =
(TdisDis * (Upd + Que) - Dis * (TupdUpd + TqueQue))\
/ (TdisDis + TupdUpd + TqueQue);
double y_double = TupdUpd / TdisDis * x_double + TupdUpd / Tdis
- Upd;
if (tid == 0)
printf("relance, x is %.2f, y is %.2f \n", x_double, y_double);
int x_int = (int) (x_double);
int y_int = (int) (y_double);
if (offset_buffer_update
<= (p_config->query_skip_round_num + 2)
* p_config->max_obj_num) {
x_double = .0f;
y_double = .0f;
x_int = 0;
y_int = 0;
}
#if REBALANCE == 0
x_double = 0;
y_int = 0;
x_int = 0;
y_double = 0;
#endif
if (abs(x_int) >= 1 || abs(y_int) >= 1) {
int newDis = Dis + x_int;
int newQue = Que - x_int;
if (abs(newDis) < 1) {
x_int = 1 - Dis;
newDis = Dis + x_int;
newQue = Que - x_int;
}
if (abs(newQue) < 1) {
x_int = Que - 1;
newDis = Dis + x_int;
newQue = Que - x_int;
}
int newUpd = Upd + y_int;
int newQue2 = newQue - y_int;
if (abs(newUpd) < 1) {
y_int = 1 - Upd;
newUpd = Upd + y_int;
newQue2 = newQue - y_int;
}
if (abs(newQue2) < 1) {
y_int = newQue - 1;
newUpd = Upd + y_int;
newQue2 = newQue - y_int;
}
p_config->block_analysis_num = newDis;
p_config->block_update_num = newUpd;
p_config->block_query_num = newQue2;
atomicExch(&buffer_exhausted, 1);
atomicExch(&rebalance, 1);
if (tid == 0)
printf("new kernels' rate is %d : %d : %d\n",
p_config->block_analysis_num,
p_config->block_update_num,
p_config->block_query_num);
break;
}
}
buffer_update = dev_buffer_update
+ (offset_buffer_update_rec
% (p_config->buffer_block_size
* p_config->buffer_update_round));
while (update_map[(int) (offset_buffer_update_rec
/ p_config->buffer_block_size)\
% p_config->buffer_update_round]
== 0)
__threadfence_system();
if (tid == 0) {
if (flag_switch_dist == 1 && req_cache_update->token0 == 0
&& req_cache_query->token0 == 0) {
node_enqueue_update = &req_cache_update->array[0];
flag_switch_dist = 0;
} else if (flag_switch_dist == 0 && req_cache_update->token1 == 0
&& req_cache_query->token1 == 0) {
node_enqueue_update = &req_cache_update->array[1];
flag_switch_dist = 1;
}
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
start = clock64();
if (flag_switch_version == 0) {
local_index = index_A;
local_seci = sec_index_A;
} else if (flag_switch_version == 1) {
local_index = index_B;
local_seci = sec_index_B;
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
local_d = node_enqueue_update->mtx_delete_idx; //local_d refers to Delete Cache Area(oid1, oid2,...,oidn), local_d_cnt refers to its oid num;
local_d_cnt = node_enqueue_update->sum_d;
//local_d_pool = node_enqueue_update->mtx_delete_pool;
local_d_fqueue = node_enqueue_update->fqueue_delete;
//d_seg = node_dequeue_update->d_size/TOT_CELLS;
local_i = node_enqueue_update->mtx_insert_idx; //local_i refers to Insert Cache Area(req1, req2,..., reqn), local_i_cnt refers to its request num;
local_i_cnt = node_enqueue_update->sum_i;
//local_i_pool = node_enqueue_update->mtx_insert_pool;
local_i_fqueue = node_enqueue_update->fqueue_insert;
//i_seg = node_dequeue_update->i_size/TOT_CELLS;
local_f = node_enqueue_update->mtx_fresh_idx; //local_f refers to Fresh Cache Area(req1, req2,..., reqn), local_f_cnt refers to its request num;
local_f_cnt = node_enqueue_update->sum_f;
//local_f_pool = node_enqueue_update->mtx_fresh_pool;
local_f_fqueue = node_enqueue_update->fqueue_fresh;
//f_seg = node_dequeue_update->f_size/TOT_CELLS;
CircularQueue * fqueue_local;
for (int i = 0; i < 2; i++) {
if (i == 0)
fqueue_local = node_enqueue_update->fqueue_delete;
else if (i == 1)
fqueue_local = node_enqueue_update->fqueue_insert;
anchor = tid;
while (anchor < fqueue_local->capacity) {
fqueue_local->avail_idx_bkt[anchor] = anchor;
anchor += blockDim.x * gridDim.x;
}
if (tid == 0) {
atomicExch(&fqueue_local->cnt_elem, fqueue_local->capacity);
atomicExch(&fqueue_local->head, 0);
atomicExch(&fqueue_local->rear, 0);
}
}
#if CHECK_SI == 1
if (tid_b == 0)
while (local_seci->index[p_config->max_obj_num].idx_cell != -1);
#endif
__threadfence_system();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
#if CHECK_SI == 1
atomicExch(&(local_seci->index[p_config->max_obj_num].idx_cell),
p_config->max_obj_num);
#endif
int *mtx_idx;
fqueue_local = node_enqueue_update->fqueue_delete;
mtx_idx = node_enqueue_update->mtx_delete_idx;
anchor = tid;
idx_tmp = fqueue_local->head;
while (anchor < TOT_VGROUP_UPDATE) {
fqueue_local->avail_idx_bkt[(idx_tmp + anchor)
% fqueue_local->capacity] = -1;
mtx_idx[anchor] = idx_tmp + anchor;
node_enqueue_update->mtx_delete_nodes->last[anchor] = idx_tmp
+ anchor;
anchor += blockDim.x * gridDim.x;
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
if (tid == 0) {
atomicAdd(&fqueue_local->cnt_elem, -TOT_VGROUP_UPDATE);
atomicAdd(&fqueue_local->head, TOT_VGROUP_UPDATE);
cudaMemcpyAsync(node_enqueue_update->mtx_delete_nodes->mes,
node_enqueue_update->mtx_delete_nodes_bak,
sizeof(MemElement) * node_enqueue_update->d_size,
cudaMemcpyDeviceToDevice);
cudaMemsetAsync(node_enqueue_update->mtx_delete_nodes->cnt, 0,
sizeof(int) * node_enqueue_update->d_size);
}
cudaDeviceSynchronize();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
fqueue_local = node_enqueue_update->fqueue_insert;
mtx_idx = node_enqueue_update->mtx_insert_idx;
anchor = tid;
idx_tmp = fqueue_local->head;
while (anchor < TOT_VGROUP_UPDATE) {
fqueue_local->avail_idx_bkt[(idx_tmp + anchor)
% fqueue_local->capacity] = -1;
mtx_idx[anchor] = idx_tmp + anchor;
node_enqueue_update->mtx_insert_nodes->last[anchor] = idx_tmp
+ anchor;
anchor += blockDim.x * gridDim.x;
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
if (tid == 0) {
atomicAdd(&fqueue_local->cnt_elem, -TOT_VGROUP_UPDATE);
atomicAdd(&fqueue_local->head, TOT_VGROUP_UPDATE);
cudaMemcpyAsync(node_enqueue_update->mtx_insert_nodes->mes,
node_enqueue_update->mtx_insert_nodes_bak,
sizeof(MemElement) * node_enqueue_update->i_size,
cudaMemcpyDeviceToDevice);
cudaMemsetAsync(node_enqueue_update->mtx_insert_nodes->cnt, 0,
sizeof(int) * node_enqueue_update->i_size);
}
cudaDeviceSynchronize();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
fqueue_local = node_enqueue_update->fqueue_insert;
mtx_idx = node_enqueue_update->mtx_fresh_idx;
anchor = tid;
idx_tmp = fqueue_local->head;
while (anchor < TOT_VGROUP_UPDATE) {
fqueue_local->avail_idx_bkt[(anchor + idx_tmp)
% fqueue_local->capacity] = -1;
mtx_idx[anchor] = idx_tmp + anchor;
node_enqueue_update->mtx_insert_nodes->last[TOT_VGROUP_UPDATE
+ anchor] = idx_tmp + anchor;
anchor += blockDim.x * gridDim.x;
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
if (tid == 0) {
atomicAdd(&fqueue_local->cnt_elem, -TOT_VGROUP_UPDATE);
atomicAdd(&fqueue_local->head, TOT_VGROUP_UPDATE);
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
anchor = tid;
while (anchor < TOT_VGROUP_UPDATE) {
local_d_cnt[anchor] = 0;
local_i_cnt[anchor] = 0;
local_f_cnt[anchor] = 0;
anchor += blockDim.x * gridDim.x;
}
__threadfence_system();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
/* if(tid == 0){
end = clock64();
double temp_sum_time = ((double)(end - start))/p_config->clockRate;
sum_time += temp_sum_time;
printf("d1-0 %.4f ms\n", temp_sum_time);
}
start = clock64();
*/
if (tid_w == 0)
place_holder_update_dispatch_local[0] = 0;
MemElementCollection<UpdateType> *local_node_i =
node_enqueue_update->mtx_insert_nodes;
MemElementCollection<int> *local_node_d =
node_enqueue_update->mtx_delete_nodes;
CircularQueue * local_fqueue_u; //= local_i_fqueue;
anchor = tid;
//anchor = buffer_block_size_update;
while (anchor < buffer_block_size_update) {
#if SEG_CACHE == 0
if (place_holder_update_dispatch_local[0] < 0)
place_holder_update_dispatch_local[0] = 0;
if (place_holder_update_dispatch_local[0] < UPDATE_DISPATCH_SEG - 1)
{
if (tid_w
< UPDATE_DISPATCH_SEG - 1
- place_holder_update_dispatch_local[0])
{
int cnt_elem = atomicAdd(&local_i_fqueue->cnt_elem, -1);
int idx_in_queue = atomicAdd(&local_i_fqueue->head, 1);
int idx_anchor_in_pool =
local_i_fqueue->avail_idx_bkt[idx_in_queue
% local_i_fqueue->capacity];
if (idx_anchor_in_pool == -1)
{
printf("local_f_fqueue empty error!");
atomicAdd(&cnt_over_seg_update, 1);
break;
}
else
{
atomicExch(
&local_i_fqueue->avail_idx_bkt[idx_in_queue
% local_i_fqueue->capacity], -1);
}
place_holder_update_dispatch_local[place_holder_update_dispatch_local[0]
+ 1 + tid_w] = idx_anchor_in_pool;
}
if (tid_w == 0)
{
place_holder_update_dispatch_local[0] = UPDATE_DISPATCH_SEG
- 1;
}
}
#endif
ins_update = &buffer_update[anchor];
if (ins_update->oid > p_config->max_obj_num) {
anchor += gridDim.x * blockDim.x;
continue;
}
if (ins_update->x == 0 && ins_update->y == 0)
printf("load data error\n");
//ins_update = &buffer_update[anchor];
oid = ins_update->oid;
p_sie = &(local_seci->index[oid]); // p_sie refers to a SIEntry object; local_seci refers to a SecIndex object;
if (p_sie->idx_cell >= 0) {
p_cell_old = &local_index->arr_cell[p_sie->idx_cell]; //local_index refers to the Grid
} else {
#if IGNORE_CNT == 0
atomicAdd(&exp_old_cell_null, 1);
#endif
p_cell_old = NULL;
}
p_cell_new = local_index->getCellByXY(ins_update->x, ins_update->y);
if (p_cell_new == NULL) {
atomicAdd(&exp_new_cell_null, 1);
}
// (p_cell_new != NULL) && (p_cell_new != p_cell_old) insert
// (p_cell_old != NULL) && (p_cell_old == p_cell_new) refresh
if (p_cell_new != NULL) {
local_fqueue_u = local_i_fqueue;
//int *local_cnt;
if (p_cell_new != p_cell_old) {
//local_cnt = local_i_cnt;
idx_tmp_0 = p_cell_new->subgrid;
mtx_idx = local_i;
} else {
//local_cnt = local_f_cnt;
idx_tmp_0 = p_cell_new->subgrid + TOT_VGROUP_UPDATE;
mtx_idx = local_f;
}
//atomicAdd(&local_cnt[p_cell_new->subgrid], 1); //p_cell_new->subgrid refers to the Cell ID
idx_last = 0; //last
while (true) {
idx_last = local_node_i->last[idx_tmp_0];
if (local_node_i->cnt[idx_last] < local_node_i->LEN) {
idx_tmp = atomicAdd(&(local_node_i->cnt[idx_last]), 1);
//idx_tmp = atomicAdd(&(local_node_i->cnt[idx_last]), -1);
if (idx_tmp + 1 >= local_node_i->LEN) {
atomicAdd(&(local_node_i->cnt[idx_last]), -1);
#if SEG_CACHE == 0
tmp_me = &(local_node_i->mes[idx_last]);
if (tmp_me->next == -1)
{
if (atomicCAS(&tmp_me->lock, 0, tid + 1) == 0)
{
if (atomicCAS(&tmp_me->next, -1, -1) == -1)
{
int idx_anchor_in_pool;
int local_cache_page_idx =
atomicAdd(
place_holder_update_dispatch_local,
-1);
if (local_cache_page_idx <= 0)
{
int cnt_elem = atomicAdd(
&local_fqueue_u->cnt_elem,
-1);
int idx_in_queue = atomicAdd(
&local_fqueue_u->head, 1);
idx_anchor_in_pool =
local_fqueue_u->avail_idx_bkt[idx_in_queue
% local_fqueue_u->capacity];
if (idx_anchor_in_pool == -1)
{
printf(
"local_i&f_fqueue empty error!\t");
atomicAdd(&cnt_over_seg_update,
1);
atomicCAS(&tmp_me->lock,
tid + 1, 0);
break;
}
else
{
atomicExch(
&local_fqueue_u->avail_idx_bkt[idx_in_queue
% local_fqueue_u->capacity],
-1);
}
}
else
{
//atomicAdd(&exp_new_cell_null, 1);
idx_anchor_in_pool =
place_holder_update_dispatch_local[local_cache_page_idx];
}
atomicExch(&tmp_me->next,
idx_anchor_in_pool);
atomicExch(
&(local_node_i->last[idx_tmp_0]),
idx_anchor_in_pool);
}
atomicCAS(&tmp_me->lock, tid + 1, 0);
}
}
#else
atomicAdd(&cnt_over_seg_update, 1);
break;
#endif
} else {
local_node_i->pool[idx_last * local_node_i->LEN
+ idx_tmp] = *ins_update;
break;
}
//break;
}
#if SEG_CACHE == 1
break;
#endif
}
}
if ((p_cell_new != NULL) && (p_cell_old != NULL)
&& (p_cell_old != p_cell_new)) {
local_fqueue_u = local_d_fqueue;
while (true) {
idx_last = local_node_d->last[p_cell_old->subgrid];
if (local_node_d->cnt[idx_last] < local_node_d->LEN) {
idx_tmp = atomicAdd(&(local_node_d->cnt[idx_last]), 1);
//idx_tmp = atomicAdd(&(local_node_d->cnt[idx_last]), -1);
if (idx_tmp + 1 >= local_node_d->LEN) {
atomicAdd(&(local_node_d->cnt[idx_last]), -1);
#if SEG_CACHE == 0
tmp_me = &(local_node_d->mes[idx_last]);
if (tmp_me->next == -1)
{
if (atomicCAS(&tmp_me->lock, 0, tid + 1) == 0)
{
if (atomicCAS(&tmp_me->next, -1, -1) == -1)
{
int idx_anchor_in_pool;
int cnt_elem = atomicAdd(
&local_fqueue_u->cnt_elem, -1);
int idx_in_queue = atomicAdd(
&local_fqueue_u->head, 1);
idx_anchor_in_pool =
local_fqueue_u->avail_idx_bkt[idx_in_queue
% local_fqueue_u->capacity];
if (idx_anchor_in_pool == -1)
{
printf(
"local_d_fqueue empty error!");
atomicAdd(&cnt_over_seg_update, 1);
atomicCAS(&tmp_me->lock, tid + 1,
0);
break;
}
else
{
atomicExch(
&local_fqueue_u->avail_idx_bkt[idx_in_queue
% local_fqueue_u->capacity],
-1);
}
atomicExch(&tmp_me->next,
idx_anchor_in_pool);
atomicExch(
&(local_node_d->last[p_cell_old->subgrid]),
idx_anchor_in_pool);
}
atomicCAS(&tmp_me->lock, tid + 1, 0);
}
}
#else
atomicAdd(&cnt_over_seg_update, 1);
break;
#endif
} else {
if (idx_tmp > local_node_d->LEN)
printf("idx_tmp error\n");
local_node_d->pool[idx_last * local_node_d->LEN
+ idx_tmp] = ins_update->oid;
break;
}
//break;
}
#if SEG_CACHE == 1
break;
#endif
}
}
anchor += blockDim.x * gridDim.x;
}
offset_buffer_update += buffer_block_size_update;
__threadfence_system();
cudaDeviceSynchronize();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
if (tid == 0)
atomicExch(
update_map
+ (int) (offset_buffer_update_rec
/ p_config->buffer_block_size)
% p_config->buffer_update_round, 0);
//if(tid == 0) printf("local_i_fqueue used %d \n", local_i_fqueue->capacity-local_fqueue_u->cnt_elem);
#if CHECK_UPDATE_MEMPOOL == 1
anchor = tid;
while(anchor < TOT_VGROUP_UPDATE)
{
int tmp_idx = node_enqueue_update->mtx_insert_idx[anchor];
int tmp_cnt = 0;
do
{
MemItem<UpdateType>* qn_cursor = &node_enqueue_update->mtx_insert_node[tmp_idx];
tmp_cnt += qn_cursor->cnt;
tmp_idx = qn_cursor->next;
}while(tmp_idx != -1);
atomicAdd(&cnt_over_seg_update, tmp_cnt);
atomicAdd(&cnt_over_seg_query, local_i_cnt[anchor]);
anchor+=gridDim.x*blockDim.x;
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0)
{
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
#endif
if (tid == 0) {
#if CHECK_UPDATE_MEMPOOL == 1
printf("\nInsert:\n");
for(int i = 0; i<TOT_VGROUP_UPDATE; i+=100)
{
printf("%d ",local_i_cnt[i]);
}
printf("\n\n");
printf("\nInsert check:\n");
for(int i = 0; i<TOT_VGROUP_UPDATE; i+=100)
{
int tmp_idx = node_enqueue_update->mtx_insert_idx[i];
int tmp_cnt = 0;
do
{
MemItem<UpdateType>* qn_cursor = &node_enqueue_update->mtx_insert_node[tmp_idx];
tmp_cnt += qn_cursor->cnt;
tmp_idx = qn_cursor->next;
}while(tmp_idx != -1);
printf("%d ",tmp_cnt);
}
printf("\n\n");
#endif
end = clock64();
long long int temp_sum_time = ((double) (end - start));
atomicExch(&dis_time_per_period, (int) (end - start));
if (offset_buffer_update > QUERY_SKIP_NUM)
distribute_sumtime += temp_sum_time;
printf("d1 %.4f ms\n",
(double) temp_sum_time / (double) p_config->clockRate);
atomicExch(&offset_buffer_update_rec, offset_buffer_update);
if (flag_switch_dist == 0) {
atomicExch(&req_cache_update->cnt0, cnt_enqueue_update);
atomicExch(&req_cache_update->token0, 1);
}
if (flag_switch_dist == 1) {
atomicExch(&req_cache_update->cnt1, cnt_enqueue_update);
atomicExch(&req_cache_update->token1, 1);
}
node_enqueue_update = NULL;
atomicAdd(&cnt_enqueue_update, 1);
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
if (offset_buffer_update <= QUERY_SKIP_NUM) {
if (tid == 0) {
atomicAdd(&cnt_enqueue_query, 1);
atomicAdd(&cnt_dequeue_query, 1);
}
continue;
}
if (offset_buffer_update <= QUERY_SKIP_NUM + buffer_block_size_update) {
if (tid == 0) {
//start = clock64();
}
}
// ------------------------------------------------------------------------------------------------------------
// Distribute Queries
if (offset_buffer_query + buffer_block_size_query >= TOTAL_QUERY) {
buffer_block_size_query = TOTAL_QUERY - offset_buffer_query;
}
if (flag_switch_dist == 0) {
while (req_cache_query->token0 == 1) {
if (tid == 0)
exp_hunger_dist1++;
}
} else if (flag_switch_dist == 1) {
while (req_cache_query->token1 == 1) {
if (tid == 0)
exp_hunger_dist1++;
}
}
start = clock64();
if (tid == 0) {
if (flag_switch_dist == 0) {
node_enqueue_query = &req_cache_query->array[0];
} else if (flag_switch_dist == 1) {
node_enqueue_query = &req_cache_query->array[1];
}
node_enqueue_query->tot_cells_covered = 0;
node_enqueue_query->bound_btw_cell[0] = 0;
node_enqueue_query->buffer_block_size_query =
buffer_block_size_query;
atomicExch(&cnt_singular, 0);
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
node_enqueue_query_local = node_enqueue_query;
flag_cells_covered = node_enqueue_query->flag_cells_covered;
idx_cells_covered = node_enqueue_query->idx_cells_covered;
cnt_queries_per_cell = node_enqueue_query->cnt_queries_per_cell;
queries_per_cell = node_enqueue_query->queries_per_cell;
buffer_block_query = node_enqueue_query->buffer_block_query;
node_enqueue_query->offset_buffer_query = offset_buffer_query;
if (flag_switch_version == 0) {
p_grid = index_A;
} else if (flag_switch_version == 1) {
p_grid = index_B;
}
anchor = tid;
while (anchor < TOT_CELLS) {
cnt_queries_per_cell[anchor] = 0;
anchor += blockDim.x * gridDim.x;
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
p_buffer_block_query = buffer_query + offset_buffer_query;
#if (QUERY_PATTERN != 3 && QUERY_PATTERN != 30)
for (int i = 0; i < 1; i++) {
fqueue_local = node_enqueue_query->fqueue_query;
anchor = tid;
while (anchor < fqueue_local->capacity) {
fqueue_local->avail_idx_bkt[anchor] = anchor;
anchor += blockDim.x * gridDim.x;
}
if (tid == 0) {
atomicExch(&fqueue_local->cnt_elem, fqueue_local->capacity);
atomicExch(&fqueue_local->head, 0);
atomicExch(&fqueue_local->rear, 0);
}
}
__threadfence_system();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
fqueue_local = node_enqueue_query->fqueue_query;
mtx_idx = node_enqueue_query->mtx_query_idx;
anchor = tid;
idx_tmp = fqueue_local->head;
while (anchor < TOT_VGROUP_UPDATE) {
fqueue_local->avail_idx_bkt[(idx_tmp + anchor)
% fqueue_local->capacity] = -1;
mtx_idx[anchor] = idx_tmp + anchor;
node_enqueue_query->mtx_query_nodes->last[anchor] = idx_tmp
+ anchor;
anchor += blockDim.x * gridDim.x;
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
if (tid == 0) {
atomicAdd(&fqueue_local->cnt_elem, -TOT_VGROUP_UPDATE);
atomicAdd(&fqueue_local->head, TOT_VGROUP_UPDATE);
cudaMemcpyAsync(node_enqueue_query->mtx_query_nodes->mes,
node_enqueue_query->mtx_query_nodes_bak,
sizeof(MemElement) * node_enqueue_query->q_size,
cudaMemcpyDeviceToDevice);
cudaMemsetAsync(node_enqueue_query->mtx_query_nodes->cnt, 0,
sizeof(int) * node_enqueue_query->q_size);
}
cudaDeviceSynchronize();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
anchor = tid;
#if (USE_MULTIQUEUE == 1)
anchor = tid;
while (anchor < QUERY_TYPE_POOL_SIZE)
{
d_qd_anchor_pool[anchor] = -1;
anchor += blockDim.x * gridDim.x;
}
if (tid == 0)
{
atomicExch(&d_queue_idx_anchor_free->cnt_elem,
d_queue_idx_anchor_free->capacity);
atomicExch(&d_queue_idx_anchor_free->head, 0);
atomicExch(&d_queue_idx_anchor_free->rear, 0);
}
anchor = tid;
while (anchor < d_queue_idx_anchor_free->capacity)
{
atomicExch(&d_queue_idx_anchor_free->avail_idx_bkt[anchor], anchor);
anchor += blockDim.x * gridDim.x;
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0)
{
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence)
;
}
__syncthreads();
anchor = tid;
while (anchor < MQUEUE_SIZE)
{
atomicAdd(&d_queue_idx_anchor_free->cnt_elem, -1);
int idx_in_queue = atomicAdd(&d_queue_idx_anchor_free->head, 1);
int idx_anchor_in_pool =
d_queue_idx_anchor_free->avail_idx_bkt[idx_in_queue
% d_queue_idx_anchor_free->capacity];
d_queue_idx_anchor_free->avail_idx_bkt[idx_in_queue
% d_queue_idx_anchor_free->capacity] = -1;
MemItem<QueryType>* qn = &d_qd_obj_pool[idx_anchor_in_pool];
qn->id = idx_anchor_in_pool;
qn->cnt = 0;
qn->queuelen = 1;
qn->len = QT_SIZE;
qn->next = -1;
qn->last = idx_anchor_in_pool;
qn->lock = 0;
multiqueue[anchor] = idx_anchor_in_pool;
anchor += gridDim.x * blockDim.x;
}
anchor = tid;
while (anchor < 512)
{
cache_memory_idx_query_dispatch[anchor] = -1;
anchor += gridDim.x * blockDim.x;
}
__threadfence_system();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0)
{
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence)
;
}
__syncthreads();
anchor = tid;
while (anchor < buffer_block_size_query)
{
req_query = p_buffer_block_query[anchor];
xmin = req_query.minX;
ymin = req_query.minY;
xmax = req_query.maxX;
ymax = req_query.maxY;
p_cell_lb = p_grid->getNearestCellByXY(xmin, ymin);
p_cell_rt = p_grid->getNearestCellByXY(xmax, ymax);
left_bottom = p_cell_lb->idx;
right_top = p_cell_rt->idx;
cell_num = p_grid->cell_num;
row_start = right_top / cell_num;
col_end = right_top % cell_num;
row_end = left_bottom / cell_num;
col_start = left_bottom % cell_num;
int cellNum = (row_end - row_start + 1) * (col_end - col_start + 1);
int count = cellNum / QUEUE_SEG_LEN + 1;
if (count >= MQUEUE_SIZE)
{
printf("count over MQUEUE_SIZE\n");
continue;
}
MemItem<QueryType>* qn_start = &d_qd_obj_pool[multiqueue[count]];
MemItem<QueryType>* qn_last;
bool errorFlag = false;
while (true)
{
qn_last = &d_qd_obj_pool[qn_start->last];
if (qn_last->cnt >= qn_last->len)
{
while (qn_last->cnt >= qn_last->len)
ins_update = &buffer_update[anchor]; //donothing
continue;
}
if (*cache_memory_idx_query_dispatch_local == -1)
{
if (tid_w == 0)
{
int cnt_elem = atomicAdd(
&d_queue_idx_anchor_free->cnt_elem, -1);
int idx_in_queue = atomicAdd(
&d_queue_idx_anchor_free->head, 1);
int idx_anchor_in_pool =
d_queue_idx_anchor_free->avail_idx_bkt[idx_in_queue
% d_queue_idx_anchor_free->capacity];
if (idx_anchor_in_pool == -1)
{
printf("idx_anchor_in_pool empty error!");
errorFlag = true;
break;
}
else
{
atomicExch(
&d_queue_idx_anchor_free->avail_idx_bkt[idx_in_queue
% d_queue_idx_anchor_free->capacity],
-1);
MemItem<QueryType>* newQn =
&d_qd_obj_pool[idx_anchor_in_pool];
atomicExch(&newQn->id, idx_anchor_in_pool);
atomicExch(&newQn->cnt, 0);
atomicExch(&newQn->queuelen, 1);
atomicExch(&newQn->len, QT_SIZE);
atomicExch(&newQn->next, -1);
atomicExch(&newQn->last, idx_anchor_in_pool);
atomicExch(&newQn->lock, 0);
atomicExch(cache_memory_idx_query_dispatch_local,
idx_anchor_in_pool);
}
}
}
idx_tmp = atomicAdd(&qn_last->cnt, 1);
if (idx_tmp + 1 >= qn_last->len)
{
atomicAdd(&qn_last->cnt, -1);
idx_tmp_0 = atomicExch(
cache_memory_idx_query_dispatch_local, -1);
if (idx_tmp_0 > 0)
{
if (qn_last->next == -1)
{
if (atomicCAS(&qn_last->lock, 0, 1) == 0)
{
if (atomicCAS(&qn_last->next, -1, -1) == -1)
{
atomicExch(&qn_last->next, idx_tmp_0);
atomicExch(&qn_start->last, idx_tmp_0);
atomicAdd(&qn_start->queuelen, 1);
atomicCAS(&qn_last->lock, 1, 0);
continue;
}
atomicCAS(&qn_last->lock, 1, 0);
}
}
atomicExch(cache_memory_idx_query_dispatch_local,
idx_tmp_0);
}
continue;
}
break;
}
if (!errorFlag)
{
qn_last->pool[idx_tmp] = req_query;
qn_last->cache_anchor[idx_tmp] = anchor;
}
#if IGNORE_CNT==0
atomicAdd(&cnt_queries, 1);
#endif
anchor += blockDim.x * gridDim.x;
}
__threadfence_system();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0)
{
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence)
;
}
__syncthreads();
anchor = tid;
while (anchor < blockDim.x * gridDim.x)
{
if (*cache_memory_idx_query_dispatch_local != -1)
{
if (tid_w == 0)
{
*cache_memory_idx_query_dispatch_local = -1;
}
}
anchor += blockDim.x * gridDim.x;
}
#if CHECK_QUERY_MEMPOOL==1
if(tid == 0)
{
atomicExch(&query_sum_formempool, 0);
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0)
{
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
anchor = tid;
while(anchor < MQUEUE_SIZE)
{
int cursor_idx = multiqueue[anchor];
MemItem<QueryType>* qn_cursor;
int query_cnt_temp = 0;
while(cursor_idx != -1)
{
qn_cursor = &d_qd_obj_pool[cursor_idx];
query_cnt_temp += qn_cursor->cnt;
cursor_idx = qn_cursor->next;
}
printf("%d ", query_cnt_temp);
atomicAdd(&query_sum_formempool, query_cnt_temp);
anchor += gridDim.x*blockDim.x;
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0)
{
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
if(tid == 0)
{
printf("\n");
printf("SUM : %d\n", query_sum_formempool);
}
#endif
if (tid == 0)
{
atomicExch(&cursor_distribute_wrap, 0);
atomicExch(&cnt_distribute_wrap, 0);
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0)
{
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence)
;
}
__syncthreads();
MemElementCollection<int>* local_node_q =
node_enqueue_query->mtx_query_nodes;
CircularQueue* local_fqueue_q = node_enqueue_query->fqueue_query;
while (true)
{
if (cursor_distribute_wrap * QUERY_DISPATCH_WRAP_SIZE
>= QUERY_TYPE_POOL_SIZE)
break;
if (tid_w == 0)
{
*place_holder_query_dispatch_local = QUERY_DISPATCH_WRAP_SIZE
* atomicAdd(&cursor_distribute_wrap, 1);
}
if (*place_holder_query_dispatch_local >= QUERY_TYPE_POOL_SIZE)
{
break; //exit;
}
if (*place_holder_query_dispatch_local < 0)
{
break;
}
int anchor_end = QUERY_TYPE_POOL_SIZE
- *place_holder_query_dispatch_local;
if (anchor_end > QUERY_DISPATCH_WRAP_SIZE)
anchor_end = QUERY_DISPATCH_WRAP_SIZE;
d_qd_anchor_local = d_qd_anchor_pool
+ (*place_holder_query_dispatch_local);
d_qd_query_type_local = d_qd_query_type_pool
+ (*place_holder_query_dispatch_local);
anchor = tid_w;
int anchor_idx;
while (anchor < QUERY_DISPATCH_WRAP_SIZE)
{
if (anchor >= anchor_end)
break;
anchor_idx = d_qd_anchor_local[anchor];
if (anchor_idx < 0)
{
break;
}
req_query = d_qd_query_type_local[anchor];
xmin = req_query.minX;
ymin = req_query.minY;
xmax = req_query.maxX;
ymax = req_query.maxY;
p_cell_lb = p_grid->getNearestCellByXY(xmin, ymin);
p_cell_rt = p_grid->getNearestCellByXY(xmax, ymax);
left_bottom = p_cell_lb->idx;
right_top = p_cell_rt->idx;
cell_num = p_grid->cell_num;
row_start = right_top / cell_num;
col_end = right_top % cell_num;
row_end = left_bottom / cell_num;
col_start = left_bottom % cell_num;
anchor_idx = atomicAdd(&cnt_distribute_wrap, 1);
int cellNum = (row_end - row_start + 1)
* (col_end - col_start + 1);
for (int k = 0; k < cellNum; k++)
{
int i = row_start + k / (col_end - col_start + 1);
int j = col_start + k % (col_end - col_start + 1);
idx_cell = i * EDGE_CELL_NUM + j;
idx_tmp = atomicAdd(&cnt_queries_per_cell[idx_cell], 1);
#if IGNORE_CNT==0
atomicAdd(&check_tot_covered, 1);
#endif
#if SEG_CACHE == 0
if (idx_tmp >= local_node_q->LEN)
{
atomicAdd(&cnt_queries_per_cell[idx_cell], -1);
atomicAdd(&cnt_over_seg_query, 1);
continue;
}
#endif
if (idx_tmp < 0)
continue;
if (atomicCAS(&flag_cells_covered[idx_cell], 0, 1) == 0)
{
//atomicExch(&flag_cells_covered[idx_cell], 1);
idx_tmp_0 = atomicAdd(
&node_enqueue_query->tot_cells_covered, 1);
idx_cells_covered[idx_tmp_0] = idx_cell;
}
//queries_per_cell[idx_cell * LEN_SEG_CACHE_QUERY + idx_tmp] = anchor_idx;
while (true)
{
idx_last = local_node_q->last[idx_cell];
if (local_node_q->cnt[idx_last] < local_node_q->LEN)
{
idx_tmp = atomicAdd(&(local_node_q->cnt[idx_last]),
1);
if (idx_tmp + 1 >= local_node_q->LEN)
{
atomicAdd(&(local_node_q->cnt[idx_last]), -1);
#if SEG_CACHE == 0
tmp_me = &(local_node_q->mes[idx_last]);
if (tmp_me->next == -1)
{
if (atomicCAS(&tmp_me->lock, 0, tid + 1)
== 0)
{
if (atomicCAS(&tmp_me->next, -1, -1)
== -1)
{
int idx_anchor_in_pool;
int cnt_elem = atomicAdd(
&local_fqueue_q->cnt_elem,
-1);
int idx_in_queue = atomicAdd(
&local_fqueue_q->head, 1);
idx_anchor_in_pool =
local_fqueue_q->avail_idx_bkt[idx_in_queue
% local_fqueue_q->capacity];
if (idx_anchor_in_pool == -1)
{
printf(
"local_fqueue_q empty error!");
atomicAdd(&cnt_over_seg_query,
1);
atomicCAS(&tmp_me->lock,
tid + 1, 0);
break;
}
else
{
atomicExch(
&local_fqueue_q->avail_idx_bkt[idx_in_queue
% local_fqueue_q->capacity],
-1);
}
atomicExch(&tmp_me->next,
idx_anchor_in_pool);
atomicExch(
&(local_node_q->last[idx_cell]),
idx_anchor_in_pool);
}
atomicCAS(&tmp_me->lock, tid + 1, 0);
}
}
#else
atomicAdd(&cnt_over_seg_query, 1);
break;
#endif
}
else
{
if (idx_tmp > local_node_q->LEN)
printf("idx_tmp error\n");
local_node_q->pool[idx_last * local_node_q->LEN
+ idx_tmp] = anchor_idx;
break;
}
}
}
}
buffer_block_query[anchor_idx] = req_query;
check_dist_query_local++;
#if IGNORE_CNT==0
atomicAdd(&cnt_queries, 1);
#endif
__threadfence_system();
anchor += WRAPSIZE;
}
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0)
{
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence)
;
}
__syncthreads();
#else
//lwh
MemElementCollection<int> *local_node_q = node_enqueue_query->mtx_query_nodes;
CircularQueue * local_fqueue_q = node_enqueue_query->fqueue_query;
int anchor_idx;
anchor = tid;
while (anchor < buffer_block_size_query) {
req_query = p_buffer_block_query[anchor];
//lwh
anchor_idx = d_qd_anchor_local[anchor];
xmin = req_query.minX;
ymin = req_query.minY;
xmax = req_query.maxX;
ymax = req_query.maxY;
p_cell_lb = p_grid->getNearestCellByXY(xmin, ymin);
p_cell_rt = p_grid->getNearestCellByXY(xmax, ymax);
left_bottom = p_cell_lb->idx;
right_top = p_cell_rt->idx;
cell_num = p_grid->cell_num;
row_start = right_top / cell_num;
col_end = right_top % cell_num;
row_end = left_bottom / cell_num;
col_start = left_bottom % cell_num;
anchor_idx = atomicAdd(&cnt_distribute_wrap, 1);
int cellNum = (row_end - row_start + 1) * (col_end - col_start + 1);
int count = cellNum / QUEUE_SEG_LEN + 1;
if (count >= MQUEUE_SIZE) {
printf("count over MQUEUE_SIZE\n");
continue;
}
#if IGNORE_CNT == 0
atomicAdd(multiqueue + count, 1);
#endif
#if USE_DPPROCESS == 1
DpProcess<<<1, cellNum>>>(place_holder_query_dispatch_local, anchor, row_start, row_end, col_start, col_end, dev_p_gconfig, node_enqueue_query);
cudaDeviceSynchronize();
#else
for (int k = 0; k < cellNum; k++) {
int i = row_start + k / (col_end - col_start + 1);
int j = col_start + k % (col_end - col_start + 1);
idx_cell = i * EDGE_CELL_NUM + j;
idx_tmp = atomicAdd(&cnt_queries_per_cell[idx_cell], 1);
#if IGNORE_CNT == 0
atomicAdd(&check_tot_covered, 1);
#endif
#if SEG_CACHE == 0
if(idx_tmp >= local_node_q->LEN)
{
atomicAdd(&cnt_queries_per_cell[idx_cell], -1);
atomicAdd(&cnt_over_seg_query, 1);
continue;
}
#endif
if (atomicCAS(&flag_cells_covered[idx_cell], 0, 1) == 0) {
//atomicExch(&flag_cells_covered[idx_cell], 1);
idx_tmp_0 = atomicAdd(&node_enqueue_query->tot_cells_covered, 1);
idx_cells_covered[idx_tmp_0] = idx_cell;
}
//queries_per_cell[idx_cell * LEN_SEG_CACHE_QUERY + idx_tmp] = anchor;
while (true) {
idx_last = local_node_q->last[idx_cell];
if (local_node_q->cnt[idx_last] < local_node_q->LEN) {
idx_tmp = atomicAdd(&(local_node_q->cnt[idx_last]), 1);
if (idx_tmp + 1 >= local_node_q->LEN) {
atomicAdd(&(local_node_q->cnt[idx_last]), -1);
#if SEG_CACHE == 0
tmp_me = &(local_node_q->mes[idx_last]);
if(tmp_me->next == -1)
{
if(atomicCAS(&tmp_me->lock,0,tid+1) == 0)
{
if(atomicCAS(&tmp_me->next,-1,-1) == -1)
{
int idx_anchor_in_pool;
int cnt_elem = atomicAdd(&local_fqueue_q->cnt_elem, -1);
int idx_in_queue = atomicAdd(&local_fqueue_q->head, 1);
idx_anchor_in_pool = local_fqueue_q->avail_idx_bkt[idx_in_queue % local_fqueue_q->capacity];
if(idx_anchor_in_pool == -1)
{
printf("local_fqueue_q empty error!");
atomicAdd(&cnt_over_seg_query, 1);
atomicCAS(&tmp_me->lock,tid+1,0);
break;
}
else
{
atomicExch(&local_fqueue_q->avail_idx_bkt[idx_in_queue % local_fqueue_q->capacity], -1);
}
atomicExch(&tmp_me->next, idx_anchor_in_pool);
atomicExch(&(local_node_q->last[idx_cell]), idx_anchor_in_pool);
}
atomicCAS(&tmp_me->lock,tid+1,0);
}
}
#else
atomicAdd(&cnt_over_seg_query, 1);
break;
#endif
} else {
if (idx_tmp > local_node_q->LEN)
printf("idx_tmp error\n");
local_node_q->pool[idx_last * local_node_q->LEN + idx_tmp] = anchor_idx;
break;
}
}
}
}
#endif
buffer_block_query[anchor] = req_query;
check_dist_query_local++;
#if IGNORE_CNT == 0
atomicAdd(&cnt_queries, 1);
#endif
anchor += blockDim.x * gridDim.x;
}
#endif
// if(tid == 0)
// {
// for(int k = 0; k<MQUEUE_SIZE; k++){
// printf("%d ", multiqueue[k]);
// }
// printf("\n");
// }
#else //QUERY PATTHERN ELSE
MemElementCollection<int>* local_node_q = node_enqueue_query->mtx_query_nodes;
atomicExch(&local_node_q->globalCnt, 0);
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0)
{
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
anchor = tid;
while (anchor < buffer_block_size_query)
{
req_query = p_buffer_block_query[anchor];
atomicAdd(&local_node_q->globalCnt,1);
buffer_block_query[anchor] = req_query;
check_dist_query_local++;
#if IGNORE_CNT==0
atomicAdd(&cnt_queries,1);
#endif
anchor += blockDim.x * gridDim.x;
}
#endif
check_cnt_enqueue++;
__threadfence_system();
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
// anchor = tid;
// while(anchor < tot_cells_covered)
// {
// flag_cells_covered[ idx_cells_covered[anchor] ] = 0;
// anchor += blockDim.x * gridDim.x;
// }
//
// __syncthreads();
// barrier_fence += barrier_step;
// if (tid_b == 0)
// {
// atomicAdd(&barrier_dist, 1);
// while (barrier_dist < barrier_fence);
// }
// __syncthreads();
offset_buffer_query += buffer_block_size_query;
if (tid == 0) {
end = clock64();
long long int temp_sum_time = (end - start);
atomicAdd(&dis_time_per_period, (int) (end - start));
distribute_sumtime += temp_sum_time;
printf("d2 %.4f ms\n",
(double) temp_sum_time / p_config->clockRate);
atomicExch(&offset_buffer_query_rec, offset_buffer_query);
if (flag_switch_dist == 0) {
atomicExch(&req_cache_query->cnt0, cnt_enqueue_query);
atomicExch(&req_cache_query->token0, 1);
}
if (flag_switch_dist == 1) {
atomicExch(&req_cache_query->cnt1, cnt_enqueue_query);
atomicExch(&req_cache_query->token1, 1);
}
node_enqueue_query = NULL;
atomicAdd(&cnt_enqueue_query, 1);
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
//end = clock64();
if (tid == 0) {
*dev_cnt_update = offset_buffer_update;
*dev_cnt_query = offset_buffer_query;
}
if (tid == 0 && offset_buffer_update >= TOTAL_UPDATE) {
atomicExch(&p_config->terminalFlag, 1);
atomicExch(&buffer_exhausted, 1);
if (start != -1) {
printf("\nDistributorKernel clock: %f ms\n",
((double) distribute_sumtime) / p_config->clockRate);
}
printf("\n");
printf("len_seg_cache_update_local: %d\n", len_seg_cache_update_local);
printf("cnt_enqueue_update: %d\n", cnt_enqueue_update);
printf("cnt_enqueue_query: %d\n", cnt_enqueue_query);
printf("exp_hunger_dist0: %d\n", exp_hunger_dist0);
printf("exp_hunger_dist1: %d\n", exp_hunger_dist1);
printf("offset_buffer_update: %d\n", offset_buffer_update);
printf("dev_cnt_update: %d\n", *dev_cnt_update);
printf("offset_buffer_query: %d\n", offset_buffer_query);
printf("dev_cnt_query: %d\n", *dev_cnt_query);
printf("len_seg_cache_query in Distribute Kernel: %d\n",
LEN_SEG_CACHE_QUERY);
printf("buffer_block_size_update: %d\n", buffer_block_size_update);
printf("buffer_block_size_query: %d\n", buffer_block_size_query);
printf("exp_new_cell_null: %d\n", exp_new_cell_null);
printf("exp_update_in_spec_cell: %d\n", exp_update_in_spec_cell);
printf("exp_old_cell_null: %d\n", exp_old_cell_null);
printf("check_tot_covered: %d %d\n",
check_tot_covered / cnt_enqueue_query, cnt_enqueue_query);
printf("check_dist_query_local: %d\n", check_dist_query_local);
printf("check_cnt_enqueue: %d\n", check_cnt_enqueue);
printf("check_lb_null: %d\n", check_lb_null);
printf("check_rt_null: %d\n", check_rt_null);
printf("cnt_over_seg_update: %d\n", cnt_over_seg_update);
printf("cnt_over_seg_query: %d\n", cnt_over_seg_query);
printf("cnt_queries: %d\n", cnt_queries);
// printf("Sizeof UpdateType: %d\n", sizeof(UpdateType));
// printf("Sizeof QueryType: %d\n", sizeof(QueryType));
// printf("Sizeof ObjBox: %d\n", sizeof(ObjBox));
// printf("Sizeof QueryQNode: %d\n", sizeof(QueryQNode));
// printf("Sizeof QueryType *: %d\n", sizeof(QueryType *));
// printf("Sizeof int *: %d\n", sizeof(int *));
// printf("Sizeof SIEntry: %d\n", sizeof(SIEntry));
}
__syncthreads();
barrier_fence += barrier_step;
if (tid_b == 0) {
atomicAdd(&barrier_dist, 1);
while (barrier_dist < barrier_fence);
}
__syncthreads();
if (tid == 0) {
atomicExch(&barrier_dist, 0);
}
}
|
9566f40096dcdb81b3376b3d3ac79cf3991c6cb0.hip | // !!! This is a file automatically generated by hipify!!!
#include "SmoothHistogramPdf.hh"
MEM_CONSTANT fptype* dev_base_histograms[100]; // Multiple histograms for the case of multiple PDFs
MEM_CONSTANT fptype* dev_smoothed_histograms[100];
unsigned int SmoothHistogramPdf::totalHistograms = 0;
// Integer power helper for device code: returns base raised to the given
// exponent via repeated multiplication. A non-positive exponent yields 1
// (the loop body never executes), matching the original behavior.
EXEC_TARGET int dev_powi (int base, int exp) {
  int result = 1;
  while (exp-- > 0) result *= base;
  return result;
}
// Device evaluator for a smoothed multidimensional histogram PDF.
// Parameter-index layout (packed by the host-side constructor):
//   nP smoothingIndex histogramIndex (limit1 step1 bins1) (limit2 step2 bins2) ... nO o1 o2 ...
// where the limit and step entries are offsets into functorConstants.
// Each observable of the event is converted to a per-axis bin number and the
// per-axis bins are folded into one flat index into the smoothed histogram.
EXEC_TARGET fptype device_EvalHistogram (fptype* evt, fptype* p, unsigned int* indices) {
  int dimCount = indices[indices[0] + 1];   // number of observables (nO)
  int histIdx  = indices[2];                // which smoothed histogram this PDF owns
  int flatBin  = 0;                         // accumulated flat bin index
  int stride   = 1;                         // product of bin counts of earlier axes

  for (int d = 0; d < dimCount; ++d) {
    int obsIdx  = indices[indices[0] + 2 + d]; // event slot holding observable d
    int cfgBase = 3*(d+1);                     // start of the (limit, step, bins) triple for axis d

    fptype x     = evt[obsIdx];
    fptype lo    = functorConstants[indices[cfgBase + 0]];
    fptype width = functorConstants[indices[cfgBase + 1]];

    // Per-axis bin number: floor((x - lo) / width). No range clamping is
    // performed here; out-of-range observables index out of bounds.
    int axisBin = (int) FLOOR((x - lo) / width);

    flatBin += stride * axisBin;
    stride  *= indices[cfgBase + 2];           // bins along axis d
  }

  return dev_smoothed_histograms[histIdx][flatBin];
}
// Device functor that smooths one bin of the base histogram. For bin
// 'globalBin' it adds in the contents of every immediately adjacent bin
// (the 3^numVars neighbourhood, excluding the centre bin itself and any
// neighbour that falls off an axis), weighted by the smoothing parameter:
//   result = (centre + smoothing * sum(neighbours)) / (1 + smoothing * numNeighbours)
struct Smoother {
// Offset into paramIndices for the owning PDF's packed index block.
int parameters;
EXEC_TARGET fptype operator () (int globalBin) {
unsigned int* indices = paramIndices + parameters;
int numVars = indices[indices[0] + 1];
// Smoothing strength; hipArray holds the current parameter values.
fptype smoothing = hipArray[indices[1]];
int histIndex = indices[2];
fptype* myHistogram = dev_base_histograms[histIndex];
fptype centralValue = myHistogram[globalBin];
fptype otherBinsTotal = 0;
int numSurroundingBins = 0;
// Enumerate the full 3^numVars neighbourhood, centre included; the centre
// and off-axis entries are filtered out inside the loop.
int otherBins = dev_powi(3, numVars);
for (int i = 0; i < otherBins; ++i) {
int currBin = globalBin;
int localPrevious = 1;
int trackingBin = globalBin;
bool offSomeAxis = false;
for (int v = 0; v < numVars; ++v) {
//int lowerBoundIdx = 3*(i+1);
//int localNumBins = indices[6 + v*4];
// Bin count along axis v, read from the (limit, step, bins) triple.
int localNumBins = indices[3*(v+1) + 2];
// Decode i in base 3: offset is -1, 0, or +1 along axis v.
int offset = ((i / dev_powi(3, v)) % 3) - 1;
currBin += offset * localPrevious;
localPrevious *= localNumBins;
// Recover this bin's coordinate along axis v to detect edge overflow.
int currVarBin = trackingBin % localNumBins;
trackingBin /= localNumBins;
if (currVarBin + offset < 0) offSomeAxis = true;
if (currVarBin + offset >= localNumBins) offSomeAxis = true;
}
// Skip the centre bin (all offsets zero) ...
if (currBin == globalBin) continue;
// ... and any neighbour that stepped off an axis boundary.
if (offSomeAxis) continue; // Out of bounds
numSurroundingBins++;
otherBinsTotal += myHistogram[currBin];
}
// Weighted average of centre and surviving neighbours.
centralValue += otherBinsTotal*smoothing;
centralValue /= (1 + numSurroundingBins * smoothing);
//if (7010 == globalBin) printf("Smoothing: %f %f %f %i %f\n", myHistogram[globalBin], otherBinsTotal, smoothing, numSurroundingBins, centralValue);
return centralValue;
}
};
MEM_DEVICE device_function_ptr ptr_to_EvalHistogram = device_EvalHistogram;
// Constructs a smoothed-histogram PDF from a binned data set plus a
// smoothing parameter. Registers per-axis (lower limit, bin width) pairs as
// functor constants, packs the device-side index block
//   smoothingIndex histogramIndex (limit step bins)... observables...
// and copies the raw histogram contents to the device (unless empty).
__host__ SmoothHistogramPdf::SmoothHistogramPdf (std::string n, BinnedDataSet* hist, Variable* smoothing)
: GooPdf(0, n)
{
int numVars = hist->numVariables();
// Two constants per axis: lower limit and bin width.
int numConstants = 2*numVars;
registerConstants(numConstants);
// Host-side cache of the constants; bin widths are reused by normalise().
host_constants = new fptype[numConstants];
totalEvents = 0;
std::vector<unsigned int> pindices;
// Index block starts with the smoothing parameter, then the slot this
// PDF occupies in the device histogram pointer tables.
pindices.push_back(registerParameter(smoothing));
pindices.push_back(totalHistograms);
int varIndex = 0;
for (varConstIt var = hist->varsBegin(); var != hist->varsEnd(); ++var) {
registerObservable(*var);
//pindices.push_back((*var)->index);
// Per axis: indices of the (limit, step) constants, then the bin count.
pindices.push_back(cIndex + 2*varIndex + 0);
pindices.push_back(cIndex + 2*varIndex + 1);
pindices.push_back((*var)->numbins);
host_constants[2*varIndex + 0] = (*var)->lowerlimit; // NB, do not put cIndex here, it is accounted for by the offset in MEMCPY_TO_SYMBOL below.
host_constants[2*varIndex + 1] = ((*var)->upperlimit - (*var)->lowerlimit) / (*var)->numbins;
varIndex++;
}
// Flatten the histogram contents and accumulate the total event count.
unsigned int numbins = hist->getNumBins();
thrust::host_vector<fptype> host_histogram;
for (unsigned int i = 0; i < numbins; ++i) {
fptype curr = hist->getBinContent(i);
host_histogram.push_back(curr);
totalEvents += curr;
}
// Publish the per-axis constants at this PDF's offset in functorConstants.
MEMCPY_TO_SYMBOL(functorConstants, host_constants, numConstants*sizeof(fptype), cIndex*sizeof(fptype), hipMemcpyHostToDevice);
// An empty histogram is allowed; the caller is then expected to invoke
// copyHistogramToDevice later with real contents.
if (totalEvents > 0) copyHistogramToDevice(host_histogram);
else std::cout << "Warning: Empty histogram supplied to " << getName() << " not copied to device. Expect copyHistogramToDevice call later.\n";
GET_FUNCTION_ADDR(ptr_to_EvalHistogram);
initialise(pindices);
}
// Return a raw device pointer to the first element of a device_vector.
// Indexing yields a thrust::device_reference; taking its address gives a
// thrust::device_ptr, whose get() exposes the underlying raw pointer.
fptype* pointerToFirst (thrust::device_vector<fptype>* hist) {
  thrust::device_vector<fptype>& vec = *hist;
  return (&vec[0]).get();
}
// Return a plain pointer to the first element of a host_vector.
// host_vector keeps its storage in ordinary host memory, so the address of
// the first element is already a usable fptype* — no device_ptr involved.
fptype* pointerToFirst (thrust::host_vector<fptype>* hist) {
  return &(hist->front());
}
__host__ void SmoothHistogramPdf::copyHistogramToDevice (thrust::host_vector<fptype>& host_histogram) {
dev_base_histogram = new DEVICE_VECTOR<fptype>(host_histogram);
dev_smoothed_histogram = new DEVICE_VECTOR<fptype>(host_histogram);
static fptype* dev_address[1];
dev_address[0] = pointerToFirst(dev_base_histogram);
MEMCPY_TO_SYMBOL(dev_base_histograms, dev_address, sizeof(fptype*), totalHistograms*sizeof(fptype*), hipMemcpyHostToDevice);
dev_address[0] = pointerToFirst(dev_smoothed_histogram);
MEMCPY_TO_SYMBOL(dev_smoothed_histograms, dev_address, sizeof(fptype*), totalHistograms*sizeof(fptype*), hipMemcpyHostToDevice);
totalHistograms++;
int expectedBins = 1;
for (unsigned int varIndex = 0; varIndex < observables.size(); ++varIndex) {
expectedBins *= observables[varIndex]->numbins;
}
if (expectedBins != host_histogram.size()) {
std::cout << "Warning: Histogram supplied to " << getName() << " has " << host_histogram.size() << " bins, expected " << expectedBins << " - may indicate a problem.\n";
}
}
__host__ fptype SmoothHistogramPdf::normalise () const {
Smoother smoother;
smoother.parameters = parameters;
thrust::counting_iterator<int> binIndex(0);
thrust::transform(binIndex,
binIndex + dev_base_histogram->size(),
dev_smoothed_histogram->begin(),
smoother);
//return totalEvents;
fptype ret = thrust::reduce(dev_smoothed_histogram->begin(), dev_smoothed_histogram->end());
for (unsigned int varIndex = 0; varIndex < observables.size(); ++varIndex) {
ret *= host_constants[2*varIndex + 1]; // Bin size cached by constructor.
}
//if (cpuDebug & 1) std::cout << "Normalising " << getName() << " " << host_params[host_indices[parameters + 1]] << " " << ret << std::endl;
host_normalisation[parameters] = 1.0/ret;
return ret;
}
| 9566f40096dcdb81b3376b3d3ac79cf3991c6cb0.cu | #include "SmoothHistogramPdf.hh"
MEM_CONSTANT fptype* dev_base_histograms[100]; // Multiple histograms for the case of multiple PDFs
MEM_CONSTANT fptype* dev_smoothed_histograms[100];
unsigned int SmoothHistogramPdf::totalHistograms = 0;
EXEC_TARGET int dev_powi (int base, int exp) {
int ret = 1;
for (int i = 0; i < exp; ++i) ret *= base;
return ret;
}
EXEC_TARGET fptype device_EvalHistogram (fptype* evt, fptype* p, unsigned int* indices) {
// Structure is
// nP smoothingIndex totalHistograms (limit1 step1 bins1) (limit2 step2 bins2) nO o1 o2
// where limit and step are indices into functorConstants.
int numVars = indices[indices[0] + 1];
int globalBinNumber = 0;
int previous = 1;
int myHistogramIndex = indices[2]; // 1 only used for smoothing
for (int i = 0; i < numVars; ++i) {
int varIndex = indices[indices[0] + 2 + i];
int lowerBoundIdx = 3*(i+1);
//if (gpuDebug & 1) printf("[%i, %i] Smoothed: %i %i %i\n", BLOCKIDX, THREADIDX, i, varIndex, indices[varIndex]);
fptype currVariable = evt[varIndex];
fptype lowerBound = functorConstants[indices[lowerBoundIdx + 0]];
fptype step = functorConstants[indices[lowerBoundIdx + 1]];
currVariable -= lowerBound;
currVariable /= step;
//if (gpuDebug & 1) printf("[%i, %i] Smoothed: %i %i %f %f %f %f\n", BLOCKIDX, THREADIDX, i, varIndex, currVariable, lowerBound, step, evt[varIndex]);
int localBinNumber = (int) FLOOR(currVariable);
globalBinNumber += previous * localBinNumber;
previous *= indices[lowerBoundIdx + 2];
}
fptype* myHistogram = dev_smoothed_histograms[myHistogramIndex];
fptype ret = myHistogram[globalBinNumber];
//if ((gpuDebug & 1) && (evt[8] < 0.5) && (paramIndices + debugParamIndex == indices)) printf("Smoothed: %f %f %f %i %f\n", evt[6], evt[7], myHistogram[globalBinNumber], globalBinNumber, dev_base_histograms[myHistogramIndex][globalBinNumber]);
//if (gpuDebug & 1) printf("Smoothed: %f %f %f %i %f\n", evt[0], evt[1], myHistogram[globalBinNumber], globalBinNumber, dev_base_histograms[myHistogramIndex][globalBinNumber]);
//if (gpuDebug & 1) printf("Smoothed: %f %f %f %i %f %f\n", evt[0], evt[1], ret, globalBinNumber, dev_base_histograms[myHistogramIndex][globalBinNumber], p[indices[1]]);
return ret;
}
struct Smoother {
int parameters;
EXEC_TARGET fptype operator () (int globalBin) {
unsigned int* indices = paramIndices + parameters;
int numVars = indices[indices[0] + 1];
fptype smoothing = cudaArray[indices[1]];
int histIndex = indices[2];
fptype* myHistogram = dev_base_histograms[histIndex];
fptype centralValue = myHistogram[globalBin];
fptype otherBinsTotal = 0;
int numSurroundingBins = 0;
int otherBins = dev_powi(3, numVars);
for (int i = 0; i < otherBins; ++i) {
int currBin = globalBin;
int localPrevious = 1;
int trackingBin = globalBin;
bool offSomeAxis = false;
for (int v = 0; v < numVars; ++v) {
//int lowerBoundIdx = 3*(i+1);
//int localNumBins = indices[6 + v*4];
int localNumBins = indices[3*(v+1) + 2];
int offset = ((i / dev_powi(3, v)) % 3) - 1;
currBin += offset * localPrevious;
localPrevious *= localNumBins;
int currVarBin = trackingBin % localNumBins;
trackingBin /= localNumBins;
if (currVarBin + offset < 0) offSomeAxis = true;
if (currVarBin + offset >= localNumBins) offSomeAxis = true;
}
if (currBin == globalBin) continue;
if (offSomeAxis) continue; // Out of bounds
numSurroundingBins++;
otherBinsTotal += myHistogram[currBin];
}
centralValue += otherBinsTotal*smoothing;
centralValue /= (1 + numSurroundingBins * smoothing);
//if (7010 == globalBin) printf("Smoothing: %f %f %f %i %f\n", myHistogram[globalBin], otherBinsTotal, smoothing, numSurroundingBins, centralValue);
return centralValue;
}
};
MEM_DEVICE device_function_ptr ptr_to_EvalHistogram = device_EvalHistogram;
__host__ SmoothHistogramPdf::SmoothHistogramPdf (std::string n, BinnedDataSet* hist, Variable* smoothing)
: GooPdf(0, n)
{
int numVars = hist->numVariables();
int numConstants = 2*numVars;
registerConstants(numConstants);
host_constants = new fptype[numConstants];
totalEvents = 0;
std::vector<unsigned int> pindices;
pindices.push_back(registerParameter(smoothing));
pindices.push_back(totalHistograms);
int varIndex = 0;
for (varConstIt var = hist->varsBegin(); var != hist->varsEnd(); ++var) {
registerObservable(*var);
//pindices.push_back((*var)->index);
pindices.push_back(cIndex + 2*varIndex + 0);
pindices.push_back(cIndex + 2*varIndex + 1);
pindices.push_back((*var)->numbins);
host_constants[2*varIndex + 0] = (*var)->lowerlimit; // NB, do not put cIndex here, it is accounted for by the offset in MEMCPY_TO_SYMBOL below.
host_constants[2*varIndex + 1] = ((*var)->upperlimit - (*var)->lowerlimit) / (*var)->numbins;
varIndex++;
}
unsigned int numbins = hist->getNumBins();
thrust::host_vector<fptype> host_histogram;
for (unsigned int i = 0; i < numbins; ++i) {
fptype curr = hist->getBinContent(i);
host_histogram.push_back(curr);
totalEvents += curr;
}
MEMCPY_TO_SYMBOL(functorConstants, host_constants, numConstants*sizeof(fptype), cIndex*sizeof(fptype), cudaMemcpyHostToDevice);
if (totalEvents > 0) copyHistogramToDevice(host_histogram);
else std::cout << "Warning: Empty histogram supplied to " << getName() << " not copied to device. Expect copyHistogramToDevice call later.\n";
GET_FUNCTION_ADDR(ptr_to_EvalHistogram);
initialise(pindices);
}
fptype* pointerToFirst (thrust::device_vector<fptype>* hist) {
return (&((*hist)[0])).get();
}
fptype* pointerToFirst (thrust::host_vector<fptype>* hist) {
// (*hist) is the host_vector.
// (*hist)[0] is a 'reference' - Thrust class, not ordinary C++ reference -
// to the first element of the vector.
// &((*hist)[0]) is a 'Pointer', as defined by the host_vector, to the location
// of the 'reference'. Fortunately this is by default fptype*!
return &((*hist)[0]);
}
__host__ void SmoothHistogramPdf::copyHistogramToDevice (thrust::host_vector<fptype>& host_histogram) {
dev_base_histogram = new DEVICE_VECTOR<fptype>(host_histogram);
dev_smoothed_histogram = new DEVICE_VECTOR<fptype>(host_histogram);
static fptype* dev_address[1];
dev_address[0] = pointerToFirst(dev_base_histogram);
MEMCPY_TO_SYMBOL(dev_base_histograms, dev_address, sizeof(fptype*), totalHistograms*sizeof(fptype*), cudaMemcpyHostToDevice);
dev_address[0] = pointerToFirst(dev_smoothed_histogram);
MEMCPY_TO_SYMBOL(dev_smoothed_histograms, dev_address, sizeof(fptype*), totalHistograms*sizeof(fptype*), cudaMemcpyHostToDevice);
totalHistograms++;
int expectedBins = 1;
for (unsigned int varIndex = 0; varIndex < observables.size(); ++varIndex) {
expectedBins *= observables[varIndex]->numbins;
}
if (expectedBins != host_histogram.size()) {
std::cout << "Warning: Histogram supplied to " << getName() << " has " << host_histogram.size() << " bins, expected " << expectedBins << " - may indicate a problem.\n";
}
}
__host__ fptype SmoothHistogramPdf::normalise () const {
Smoother smoother;
smoother.parameters = parameters;
thrust::counting_iterator<int> binIndex(0);
thrust::transform(binIndex,
binIndex + dev_base_histogram->size(),
dev_smoothed_histogram->begin(),
smoother);
//return totalEvents;
fptype ret = thrust::reduce(dev_smoothed_histogram->begin(), dev_smoothed_histogram->end());
for (unsigned int varIndex = 0; varIndex < observables.size(); ++varIndex) {
ret *= host_constants[2*varIndex + 1]; // Bin size cached by constructor.
}
//if (cpuDebug & 1) std::cout << "Normalising " << getName() << " " << host_params[host_indices[parameters + 1]] << " " << ret << std::endl;
host_normalisation[parameters] = 1.0/ret;
return ret;
}
|
39882e7511d7bdb703ba7caa13c2a499db5b00ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define N 16
__global__ void k(int* in)
{
if(threadIdx.x < N)
if(threadIdx.x % 2 == 0)
in[0] = 0;
else
in[0] = 0;
}
int main()
{
int* din;
hipMalloc((void**) &din, N*sizeof(int));
hipLaunchKernelGGL(( k), dim3(1),dim3(N), 0, 0, din);
} | 39882e7511d7bdb703ba7caa13c2a499db5b00ab.cu | #define N 16
__global__ void k(int* in)
{
if(threadIdx.x < N)
if(threadIdx.x % 2 == 0)
in[0] = 0;
else
in[0] = 0;
}
int main()
{
int* din;
cudaMalloc((void**) &din, N*sizeof(int));
k<<<1,N>>>(din);
} |
d20e5cf348129c327e786606605e480a04f16bb9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/boolean_unmask_ops.h"
namespace caffe2 {
namespace {
__global__ void ComputeIndicesKernel(
const int numMasks,
const int maskSize,
int* indices,
bool* const masks[]) {
CUDA_1D_KERNEL_LOOP(i, maskSize) {
for (int j = 0; j < numMasks; ++j) {
if (masks[j][i]) {
indices[i] = j;
return;
}
}
CUDA_KERNEL_ASSERT(false);
}
}
__global__ void FillValuesKernel(
const int numMasks,
const int maskSize,
const size_t itemSize,
const int* indices,
char* const values[],
int* valueSizes,
char* dest) {
CUDA_1D_KERNEL_LOOP(j, numMasks) {
int k = 0;
for (int i = 0; i < maskSize; ++i) {
if (indices[i] == j) {
for (int h = 0; h < itemSize; ++h) {
dest[i * itemSize + h] = values[j][k * itemSize + h];
}
++k;
}
}
CUDA_KERNEL_ASSERT(valueSizes[j] == k);
}
}
} // namespace
template <>
class BooleanUnmaskOp<CUDAContext> final : public Operator<CUDAContext> {
public:
BooleanUnmaskOp(const OperatorDef& def, Workspace* ws)
: Operator<CUDAContext>(def, ws) {}
bool RunOnDevice() override {
int maskSize = Input(0).size();
int numMasks = InputSize() / 2;
const auto& meta = Input(1).meta();
auto* out = Output(0);
out->Resize(maskSize);
auto* dest = (char*)out->raw_mutable_data(meta);
hostMasks_.Resize(numMasks);
auto* hostMasksData = hostMasks_.mutable_data<bool*>();
hostValues_.Resize(numMasks);
auto* hostValuesData = hostValues_.mutable_data<char*>();
hostValueSizes_.Resize(numMasks);
auto* hostValueSizesData = hostValueSizes_.mutable_data<int>();
for (int i = 0; i < numMasks; ++i) {
auto& mask = Input(i * 2);
CAFFE_ENFORCE_EQ(mask.ndim(), 1);
CAFFE_ENFORCE_EQ(mask.size(), maskSize);
hostMasksData[i] = const_cast<bool*>(mask.data<bool>());
const auto& value = Input(i * 2 + 1);
CAFFE_ENFORCE_EQ(value.ndim(), 1);
hostValuesData[i] = (char*)value.raw_data();
hostValueSizesData[i] = value.size();
}
masks_.CopyFrom(hostMasks_);
values_.CopyFrom(hostValues_);
valueSizes_.CopyFrom(hostValueSizes_);
indices_.Resize(maskSize);
auto* indicesData = indices_.mutable_data<int>();
hipLaunchKernelGGL(( ComputeIndicesKernel),
dim3(min(maskSize, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
numMasks, maskSize, indicesData, masks_.data<bool*>());
auto* valueSizesData = valueSizes_.mutable_data<int>();
hipLaunchKernelGGL(( FillValuesKernel),
dim3(min(numMasks, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
numMasks,
maskSize,
meta.itemsize(),
indicesData,
values_.data<char*>(),
valueSizesData,
dest);
return true;
}
private:
Tensor indices_{CUDA};
Tensor masks_{CUDA};
Tensor values_{CUDA};
Tensor valueSizes_{CUDA};
Tensor hostMasks_{CPU};
Tensor hostValues_{CPU};
Tensor hostValueSizes_{CPU};
};
REGISTER_CUDA_OPERATOR(BooleanUnmask, BooleanUnmaskOp<CUDAContext>);
} // caffe2
| d20e5cf348129c327e786606605e480a04f16bb9.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/boolean_unmask_ops.h"
namespace caffe2 {
namespace {
__global__ void ComputeIndicesKernel(
const int numMasks,
const int maskSize,
int* indices,
bool* const masks[]) {
CUDA_1D_KERNEL_LOOP(i, maskSize) {
for (int j = 0; j < numMasks; ++j) {
if (masks[j][i]) {
indices[i] = j;
return;
}
}
CUDA_KERNEL_ASSERT(false);
}
}
__global__ void FillValuesKernel(
const int numMasks,
const int maskSize,
const size_t itemSize,
const int* indices,
char* const values[],
int* valueSizes,
char* dest) {
CUDA_1D_KERNEL_LOOP(j, numMasks) {
int k = 0;
for (int i = 0; i < maskSize; ++i) {
if (indices[i] == j) {
for (int h = 0; h < itemSize; ++h) {
dest[i * itemSize + h] = values[j][k * itemSize + h];
}
++k;
}
}
CUDA_KERNEL_ASSERT(valueSizes[j] == k);
}
}
} // namespace
template <>
class BooleanUnmaskOp<CUDAContext> final : public Operator<CUDAContext> {
public:
BooleanUnmaskOp(const OperatorDef& def, Workspace* ws)
: Operator<CUDAContext>(def, ws) {}
bool RunOnDevice() override {
int maskSize = Input(0).size();
int numMasks = InputSize() / 2;
const auto& meta = Input(1).meta();
auto* out = Output(0);
out->Resize(maskSize);
auto* dest = (char*)out->raw_mutable_data(meta);
hostMasks_.Resize(numMasks);
auto* hostMasksData = hostMasks_.mutable_data<bool*>();
hostValues_.Resize(numMasks);
auto* hostValuesData = hostValues_.mutable_data<char*>();
hostValueSizes_.Resize(numMasks);
auto* hostValueSizesData = hostValueSizes_.mutable_data<int>();
for (int i = 0; i < numMasks; ++i) {
auto& mask = Input(i * 2);
CAFFE_ENFORCE_EQ(mask.ndim(), 1);
CAFFE_ENFORCE_EQ(mask.size(), maskSize);
hostMasksData[i] = const_cast<bool*>(mask.data<bool>());
const auto& value = Input(i * 2 + 1);
CAFFE_ENFORCE_EQ(value.ndim(), 1);
hostValuesData[i] = (char*)value.raw_data();
hostValueSizesData[i] = value.size();
}
masks_.CopyFrom(hostMasks_);
values_.CopyFrom(hostValues_);
valueSizes_.CopyFrom(hostValueSizes_);
indices_.Resize(maskSize);
auto* indicesData = indices_.mutable_data<int>();
ComputeIndicesKernel<<<
min(maskSize, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
numMasks, maskSize, indicesData, masks_.data<bool*>());
auto* valueSizesData = valueSizes_.mutable_data<int>();
FillValuesKernel<<<
min(numMasks, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
numMasks,
maskSize,
meta.itemsize(),
indicesData,
values_.data<char*>(),
valueSizesData,
dest);
return true;
}
private:
Tensor indices_{CUDA};
Tensor masks_{CUDA};
Tensor values_{CUDA};
Tensor valueSizes_{CUDA};
Tensor hostMasks_{CPU};
Tensor hostValues_{CPU};
Tensor hostValueSizes_{CPU};
};
REGISTER_CUDA_OPERATOR(BooleanUnmask, BooleanUnmaskOp<CUDAContext>);
} // caffe2
|
94d96806d426b5f50bb084eb92a92dbb545b28d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8) {
comp += powf(-1.3388E-15f + expf((+1.3778E-36f / (var_1 - -1.9198E-12f))), +1.0623E-43f);
if (comp >= (+1.3155E-37f / -1.8941E-19f * (var_2 - (var_3 + (var_4 / var_5))))) {
float tmp_1 = +0.0f / (var_6 * +0.0f + (-1.5949E-42f * (+1.7799E-43f * +1.1814E2f)));
comp += tmp_1 * expf(var_7 - (+0.0f - var_8));
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9);
hipDeviceSynchronize();
return 0;
}
| 94d96806d426b5f50bb084eb92a92dbb545b28d0.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8) {
comp += powf(-1.3388E-15f + expf((+1.3778E-36f / (var_1 - -1.9198E-12f))), +1.0623E-43f);
if (comp >= (+1.3155E-37f / -1.8941E-19f * (var_2 - (var_3 + (var_4 / var_5))))) {
float tmp_1 = +0.0f / (var_6 * +0.0f + (-1.5949E-42f * (+1.7799E-43f * +1.1814E2f)));
comp += tmp_1 * expf(var_7 - (+0.0f - var_8));
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9);
cudaDeviceSynchronize();
return 0;
}
|
6a0af042cd2c126088e035aee4a26b1c6cd0aba4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <petsc/private/cudavecimpl.h>
#include <petsccublas.h>
#include <../src/vec/is/sf/impls/basic/sfpack.h>
#include <mpi.h>
#include <nvshmem.h>
#include <nvshmemx.h>
PetscErrorCode PetscNvshmemInitializeCheck(void)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (!PetscNvshmemInitialized) { /* Note NVSHMEM does not provide a routine to check whether it is initialized */
nvshmemx_init_attr_t attr;
attr.mpi_comm = &PETSC_COMM_WORLD;
ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr);
ierr = nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM,&attr);CHKERRQ(ierr);
PetscNvshmemInitialized = PETSC_TRUE;
PetscBeganNvshmem = PETSC_TRUE;
}
PetscFunctionReturn(0);
}
PetscErrorCode PetscNvshmemMalloc(size_t size, void** ptr)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscNvshmemInitializeCheck();CHKERRQ(ierr);
*ptr = nvshmem_malloc(size);
if (!*ptr) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"nvshmem_malloc() failed to allocate %zu bytes",size);
PetscFunctionReturn(0);
}
PetscErrorCode PetscNvshmemCalloc(size_t size, void**ptr)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscNvshmemInitializeCheck();CHKERRQ(ierr);
*ptr = nvshmem_calloc(size,1);
if (!*ptr) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"nvshmem_calloc() failed to allocate %zu bytes",size);
PetscFunctionReturn(0);
}
PetscErrorCode PetscNvshmemFree_Private(void* ptr)
{
PetscFunctionBegin;
nvshmem_free(ptr);
PetscFunctionReturn(0);
}
PetscErrorCode PetscNvshmemFinalize(void)
{
PetscFunctionBegin;
nvshmem_finalize();
PetscFunctionReturn(0);
}
/* Free nvshmem related fields in the SF */
PetscErrorCode PetscSFReset_Basic_NVSHMEM(PetscSF sf)
{
PetscErrorCode ierr;
PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
PetscFunctionBegin;
ierr = PetscFree2(bas->leafsigdisp,bas->leafbufdisp);CHKERRQ(ierr);
ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,bas->leafbufdisp_d);CHKERRQ(ierr);
ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,bas->leafsigdisp_d);CHKERRQ(ierr);
ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,bas->iranks_d);CHKERRQ(ierr);
ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,bas->ioffset_d);CHKERRQ(ierr);
ierr = PetscFree2(sf->rootsigdisp,sf->rootbufdisp);CHKERRQ(ierr);
ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,sf->rootbufdisp_d);CHKERRQ(ierr);
ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,sf->rootsigdisp_d);CHKERRQ(ierr);
ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,sf->ranks_d);CHKERRQ(ierr);
ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,sf->roffset_d);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* Set up NVSHMEM related fields for an SF of type SFBASIC (only after PetscSFSetup_Basic() already set up dependant fields */
static PetscErrorCode PetscSFSetUp_Basic_NVSHMEM(PetscSF sf)
{
PetscErrorCode ierr;
hipError_t cerr;
PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
PetscInt i,nRemoteRootRanks,nRemoteLeafRanks;
PetscMPIInt tag;
MPI_Comm comm;
MPI_Request *rootreqs,*leafreqs;
PetscInt tmp,stmp[4],rtmp[4]; /* tmps for send/recv buffers */
PetscFunctionBegin;
ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr);
ierr = PetscObjectGetNewTag((PetscObject)sf,&tag);CHKERRQ(ierr);
nRemoteRootRanks = sf->nranks-sf->ndranks;
nRemoteLeafRanks = bas->niranks-bas->ndiranks;
sf->nRemoteRootRanks = nRemoteRootRanks;
bas->nRemoteLeafRanks = nRemoteLeafRanks;
ierr = PetscMalloc2(nRemoteLeafRanks,&rootreqs,nRemoteRootRanks,&leafreqs);CHKERRQ(ierr);
stmp[0] = nRemoteRootRanks;
stmp[1] = sf->leafbuflen[PETSCSF_REMOTE];
stmp[2] = nRemoteLeafRanks;
stmp[3] = bas->rootbuflen[PETSCSF_REMOTE];
ierr = MPIU_Allreduce(stmp,rtmp,4,MPIU_INT,MPI_MAX,comm);CHKERRMPI(ierr);
sf->nRemoteRootRanksMax = rtmp[0];
sf->leafbuflen_rmax = rtmp[1];
bas->nRemoteLeafRanksMax = rtmp[2];
bas->rootbuflen_rmax = rtmp[3];
/* Total four rounds of MPI communications to set up the nvshmem fields */
/* Root ranks to leaf ranks: send info about rootsigdisp[] and rootbufdisp[] */
ierr = PetscMalloc2(nRemoteRootRanks,&sf->rootsigdisp,nRemoteRootRanks,&sf->rootbufdisp);CHKERRQ(ierr);
for (i=0; i<nRemoteRootRanks; i++) {ierr = MPI_Irecv(&sf->rootsigdisp[i],1,MPIU_INT,sf->ranks[i+sf->ndranks],tag,comm,&leafreqs[i]);CHKERRMPI(ierr);} /* Leaves recv */
for (i=0; i<nRemoteLeafRanks; i++) {ierr = MPI_Send(&i,1,MPIU_INT,bas->iranks[i+bas->ndiranks],tag,comm);CHKERRMPI(ierr);} /* Roots send. Note i changes, so we use MPI_Send. */
ierr = MPI_Waitall(nRemoteRootRanks,leafreqs,MPI_STATUSES_IGNORE);CHKERRMPI(ierr);
for (i=0; i<nRemoteRootRanks; i++) {ierr = MPI_Irecv(&sf->rootbufdisp[i],1,MPIU_INT,sf->ranks[i+sf->ndranks],tag,comm,&leafreqs[i]);CHKERRMPI(ierr);} /* Leaves recv */
for (i=0; i<nRemoteLeafRanks; i++) {
tmp = bas->ioffset[i+bas->ndiranks] - bas->ioffset[bas->ndiranks];
ierr = MPI_Send(&tmp,1,MPIU_INT,bas->iranks[i+bas->ndiranks],tag,comm);CHKERRMPI(ierr); /* Roots send. Note tmp changes, so we use MPI_Send. */
}
ierr = MPI_Waitall(nRemoteRootRanks,leafreqs,MPI_STATUSES_IGNORE);CHKERRMPI(ierr);
cerr = hipMalloc((void**)&sf->rootbufdisp_d,nRemoteRootRanks*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = hipMalloc((void**)&sf->rootsigdisp_d,nRemoteRootRanks*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = hipMalloc((void**)&sf->ranks_d,nRemoteRootRanks*sizeof(PetscMPIInt));CHKERRCUDA(cerr);
cerr = hipMalloc((void**)&sf->roffset_d,(nRemoteRootRanks+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = hipMemcpyAsync(sf->rootbufdisp_d,sf->rootbufdisp,nRemoteRootRanks*sizeof(PetscInt),hipMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr);
cerr = hipMemcpyAsync(sf->rootsigdisp_d,sf->rootsigdisp,nRemoteRootRanks*sizeof(PetscInt),hipMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr);
cerr = hipMemcpyAsync(sf->ranks_d,sf->ranks+sf->ndranks,nRemoteRootRanks*sizeof(PetscMPIInt),hipMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr);
cerr = hipMemcpyAsync(sf->roffset_d,sf->roffset+sf->ndranks,(nRemoteRootRanks+1)*sizeof(PetscInt),hipMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr);
/* Leaf ranks to root ranks: send info about leafsigdisp[] and leafbufdisp[] */
ierr = PetscMalloc2(nRemoteLeafRanks,&bas->leafsigdisp,nRemoteLeafRanks,&bas->leafbufdisp);CHKERRQ(ierr);
for (i=0; i<nRemoteLeafRanks; i++) {ierr = MPI_Irecv(&bas->leafsigdisp[i],1,MPIU_INT,bas->iranks[i+bas->ndiranks],tag,comm,&rootreqs[i]);CHKERRMPI(ierr);}
for (i=0; i<nRemoteRootRanks; i++) {ierr = MPI_Send(&i,1,MPIU_INT,sf->ranks[i+sf->ndranks],tag,comm);CHKERRMPI(ierr);}
ierr = MPI_Waitall(nRemoteLeafRanks,rootreqs,MPI_STATUSES_IGNORE);CHKERRMPI(ierr);
for (i=0; i<nRemoteLeafRanks; i++) {ierr = MPI_Irecv(&bas->leafbufdisp[i],1,MPIU_INT,bas->iranks[i+bas->ndiranks],tag,comm,&rootreqs[i]);CHKERRMPI(ierr);}
for (i=0; i<nRemoteRootRanks; i++) {
tmp = sf->roffset[i+sf->ndranks] - sf->roffset[sf->ndranks];
ierr = MPI_Send(&tmp,1,MPIU_INT,sf->ranks[i+sf->ndranks],tag,comm);CHKERRMPI(ierr);
}
ierr = MPI_Waitall(nRemoteLeafRanks,rootreqs,MPI_STATUSES_IGNORE);CHKERRMPI(ierr);
cerr = hipMalloc((void**)&bas->leafbufdisp_d,nRemoteLeafRanks*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = hipMalloc((void**)&bas->leafsigdisp_d,nRemoteLeafRanks*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = hipMalloc((void**)&bas->iranks_d,nRemoteLeafRanks*sizeof(PetscMPIInt));CHKERRCUDA(cerr);
cerr = hipMalloc((void**)&bas->ioffset_d,(nRemoteLeafRanks+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = hipMemcpyAsync(bas->leafbufdisp_d,bas->leafbufdisp,nRemoteLeafRanks*sizeof(PetscInt),hipMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr);
cerr = hipMemcpyAsync(bas->leafsigdisp_d,bas->leafsigdisp,nRemoteLeafRanks*sizeof(PetscInt),hipMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr);
cerr = hipMemcpyAsync(bas->iranks_d,bas->iranks+bas->ndiranks,nRemoteLeafRanks*sizeof(PetscMPIInt),hipMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr);
cerr = hipMemcpyAsync(bas->ioffset_d,bas->ioffset+bas->ndiranks,(nRemoteLeafRanks+1)*sizeof(PetscInt),hipMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr);
ierr = PetscFree2(rootreqs,leafreqs);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode PetscSFLinkNvshmemCheck(PetscSF sf,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,const void *leafdata,PetscBool *use_nvshmem)
{
PetscErrorCode ierr;
MPI_Comm comm;
PetscBool isBasic;
PetscMPIInt result = MPI_UNEQUAL;
PetscFunctionBegin;
ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr);
/* Check if the sf is eligible for NVSHMEM, if we have not checked yet.
Note the check result <use_nvshmem> must be the same over comm, since an SFLink must be collectively either NVSHMEM or MPI.
*/
sf->checked_nvshmem_eligibility = PETSC_TRUE;
if (sf->use_nvshmem && !sf->checked_nvshmem_eligibility) {
/* Only use NVSHMEM for SFBASIC on PETSC_COMM_WORLD */
ierr = PetscObjectTypeCompare((PetscObject)sf,PETSCSFBASIC,&isBasic);CHKERRQ(ierr);
if (isBasic) {ierr = MPI_Comm_compare(PETSC_COMM_WORLD,comm,&result);CHKERRMPI(ierr);}
if (!isBasic || (result != MPI_IDENT && result != MPI_CONGRUENT)) sf->use_nvshmem = PETSC_FALSE; /* If not eligible, clear the flag so that we don't try again */
/* Do further check: If on a rank, both rootdata and leafdata are NULL, we might think they are PETSC_MEMTYPE_CUDA (or HOST)
and then use NVSHMEM. But if root/leafmtypes on other ranks are PETSC_MEMTYPE_HOST (or DEVICE), this would lead to
inconsistency on the return value <use_nvshmem>. To be safe, we simply disable nvshmem on these rare SFs.
*/
if (sf->use_nvshmem) {
PetscInt hasNullRank = (!rootdata && !leafdata) ? 1 : 0;
ierr = MPI_Allreduce(MPI_IN_PLACE,&hasNullRank,1,MPIU_INT,MPI_LOR,comm);CHKERRMPI(ierr);
if (hasNullRank) sf->use_nvshmem = PETSC_FALSE;
}
sf->checked_nvshmem_eligibility = PETSC_TRUE; /* If eligible, don't do above check again */
}
/* Check if rootmtype and leafmtype collectively are PETSC_MEMTYPE_CUDA */
if (sf->use_nvshmem) {
PetscInt oneCuda = (!rootdata || PetscMemTypeCUDA(rootmtype)) && (!leafdata || PetscMemTypeCUDA(leafmtype)) ? 1 : 0; /* Do I use cuda for both root&leafmtype? */
PetscInt allCuda = oneCuda; /* Assume the same for all ranks. But if not, in opt mode, return value <use_nvshmem> won't be collective! */
#if defined(PETSC_USE_DEBUG) /* Check in debug mode. Note MPI_Allreduce is expensive, so only in debug mode */
ierr = MPI_Allreduce(&oneCuda,&allCuda,1,MPIU_INT,MPI_LAND,comm);CHKERRMPI(ierr);
if (allCuda != oneCuda) SETERRQ(comm,PETSC_ERR_SUP,"root/leaf mtypes are inconsistent among ranks, which may lead to SF nvshmem failure in opt mode. Add -use_nvshmem 0 to disable it.");
#endif
if (allCuda) {
ierr = PetscNvshmemInitializeCheck();CHKERRQ(ierr);
if (!sf->setup_nvshmem) { /* Set up nvshmem related fields on this SF on-demand */
ierr = PetscSFSetUp_Basic_NVSHMEM(sf);CHKERRQ(ierr);
sf->setup_nvshmem = PETSC_TRUE;
}
*use_nvshmem = PETSC_TRUE;
} else {
*use_nvshmem = PETSC_FALSE;
}
} else {
*use_nvshmem = PETSC_FALSE;
}
PetscFunctionReturn(0);
}
/* Build dependence between <stream> and <remoteCommStream> at the entry of NVSHMEM communication */
static PetscErrorCode PetscSFLinkBuildDependenceBegin(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
{
hipError_t cerr;
PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
PetscInt buflen = (direction == PETSCSF_ROOT2LEAF)? bas->rootbuflen[PETSCSF_REMOTE] : sf->leafbuflen[PETSCSF_REMOTE];
PetscFunctionBegin;
if (buflen) {
cerr = hipEventRecord(link->dataReady,link->stream);CHKERRCUDA(cerr);
cerr = hipStreamWaitEvent(link->remoteCommStream,link->dataReady,0);CHKERRCUDA(cerr);
}
PetscFunctionReturn(0);
}
/* Build dependence between <stream> and <remoteCommStream> at the exit of NVSHMEM communication */
static PetscErrorCode PetscSFLinkBuildDependenceEnd(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
{
hipError_t cerr;
PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
PetscInt buflen = (direction == PETSCSF_ROOT2LEAF)? sf->leafbuflen[PETSCSF_REMOTE] : bas->rootbuflen[PETSCSF_REMOTE];
PetscFunctionBegin;
/* If unpack to non-null device buffer, build the endRemoteComm dependance */
if (buflen) {
cerr = hipEventRecord(link->endRemoteComm,link->remoteCommStream);CHKERRCUDA(cerr);
cerr = hipStreamWaitEvent(link->stream,link->endRemoteComm,0);CHKERRCUDA(cerr);
}
PetscFunctionReturn(0);
}
/* Send/Put signals to remote ranks
Input parameters:
+ n - Number of remote ranks
. sig - Signal address in symmetric heap
. sigdisp - To i-th rank, use its signal at offset sigdisp[i]
. ranks - remote ranks
- newval - Set signals to this value
*/
__global__ static void NvshmemSendSignals(PetscInt n,uint64_t *sig,PetscInt *sigdisp,PetscMPIInt *ranks,uint64_t newval)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
/* Each thread puts one remote signal */
if (i < n) nvshmemx_uint64_signal(sig+sigdisp[i],newval,ranks[i]);
}
/* Wait until local signals equal to the expected value and then set them to a new value
Input parameters:
+ n - Number of signals
. sig - Local signal address
. expval - expected value
- newval - Set signals to this new value
*/
/* Block until all n local signals equal expval, then overwrite each with newval.
   Intended to be launched with a single thread (see the #else branch below). */
__global__ static void NvshmemWaitSignals(PetscInt n,uint64_t *sig,uint64_t expval,uint64_t newval)
{
#if 0
/* Akhil Langer@NVIDIA said using 1 thread and nvshmem_uint64_wait_until_all is better */
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) {
nvshmem_signal_wait_until(sig+i,NVSHMEM_CMP_EQ,expval);
sig[i] = newval;
}
#else
/* Single thread waits on all signals at once, then resets them */
nvshmem_uint64_wait_until_all(sig,n,NULL/*no mask*/,NVSHMEM_CMP_EQ,expval);
for (int i=0; i<n; i++) sig[i] = newval;
#endif
}
/* ===========================================================================================================
A set of routines to support receiver initiated communication using the get method
The getting protocol is:
Sender has a send buf (sbuf) and a signal variable (ssig); Receiver has a recv buf (rbuf) and a signal variable (rsig);
All signal variables have an initial value 0.
Sender: | Receiver:
1. Wait ssig be 0, then set it to 1
2. Pack data into stand alone sbuf |
3. Put 1 to receiver's rsig | 1. Wait rsig to be 1, then set it 0
| 2. Get data from remote sbuf to local rbuf
| 3. Put 1 to sender's ssig
| 4. Unpack data from local rbuf
===========================================================================================================*/
/* PrePack operation -- since sender will overwrite the send buffer which the receiver might be getting data from.
Sender waits for signals (from receivers) indicating receivers have finished getting data
*/
/* PrePack hook for the get-based protocol: before the sender overwrites its send buffer,
   wait for receivers' signals indicating they have finished getting data out of it.
   Waits for local send signals to become 0, then sets them to 1 (claiming the buffer). */
PetscErrorCode PetscSFLinkWaitSignalsOfCompletionOfGettingData_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
{
PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
uint64_t *sig;
PetscInt n;
PetscFunctionBegin;
if (direction == PETSCSF_ROOT2LEAF) { /* leaf ranks are getting data */
sig = link->rootSendSig; /* leaf ranks set my rootSendsig */
n = bas->nRemoteLeafRanks;
} else { /* LEAF2ROOT */
sig = link->leafSendSig;
n = sf->nRemoteRootRanks;
}
if (n) {
hipLaunchKernelGGL(( NvshmemWaitSignals), dim3(1),dim3(1),0,link->remoteCommStream, n,sig,0,1); /* wait the signals to be 0, then set them to 1 */
hipError_t cerr = hipGetLastError();CHKERRCUDA(cerr);
}
PetscFunctionReturn(0);
}
/* n thread blocks. Each takes in charge one remote rank */
/* n thread blocks. Each takes in charge one remote rank.
   Block <bid> issues one non-blocking nvshmem get from rank srcranks[bid], but only if that
   rank is NOT locally accessible (locally accessible peers are handled by the host-side
   nvshmemx_getmem_nbi_on_stream path, which uses copy engines and is faster). */
__global__ static void GetDataFromRemotelyAccessible(PetscInt nsrcranks,PetscMPIInt *srcranks,const char *src,PetscInt *srcdisp,char *dst,PetscInt *dstdisp,PetscInt unitbytes)
{
int bid = blockIdx.x;
PetscMPIInt pe = srcranks[bid];
if (!nvshmem_ptr(src,pe)) {
/* dstdisp[] entries are prefix offsets, so adjacent differences give per-rank counts;
   dstdisp[0] is not necessarily 0, hence the -dstdisp[0] shift into the local recv buf */
PetscInt nelems = (dstdisp[bid+1]-dstdisp[bid])*unitbytes;
nvshmem_getmem_nbi(dst+(dstdisp[bid]-dstdisp[0])*unitbytes,src+srcdisp[bid]*unitbytes,nelems,pe);
}
}
/* Start communication -- Get data in the given direction */
/* Start communication for the get-based protocol: receivers pull data from senders.
   Steps (all on link->remoteCommStream):
   1) sender signals receivers that its send buffer is packed (dst ranks may get);
   2) receiver waits for those signals on its own recv signals;
   3) receiver issues non-blocking gets -- a device kernel for remotely accessible peers,
      host-side nvshmemx_getmem_nbi_on_stream for locally accessible (e.g. NVLink) peers.
   The gets are completed later in PetscSFLinkGetDataEnd_NVSHMEM(). */
PetscErrorCode PetscSFLinkGetDataBegin_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
{
PetscErrorCode ierr;
hipError_t cerr;
PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
PetscInt nsrcranks,ndstranks,nLocallyAccessible = 0;
char *src,*dst;
PetscInt *srcdisp_h,*dstdisp_h;
PetscInt *srcdisp_d,*dstdisp_d;
PetscMPIInt *srcranks_h;
PetscMPIInt *srcranks_d,*dstranks_d;
uint64_t *dstsig;
PetscInt *dstsigdisp_d;
PetscFunctionBegin;
ierr = PetscSFLinkBuildDependenceBegin(sf,link,direction);CHKERRQ(ierr);
if (direction == PETSCSF_ROOT2LEAF) { /* src is root, dst is leaf; we will move data from src to dst */
nsrcranks = sf->nRemoteRootRanks;
src = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* root buf is the send buf; it is in symmetric heap */
srcdisp_h = sf->rootbufdisp; /* for my i-th remote root rank, I will access its buf at offset rootbufdisp[i] */
srcdisp_d = sf->rootbufdisp_d;
srcranks_h = sf->ranks+sf->ndranks; /* my (remote) root ranks */
srcranks_d = sf->ranks_d;
ndstranks = bas->nRemoteLeafRanks;
dst = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* recv buf is the local leaf buf, also in symmetric heap */
dstdisp_h = sf->roffset+sf->ndranks; /* offsets of the local leaf buf. Note dstdisp[0] is not necessarily 0 */
dstdisp_d = sf->roffset_d;
dstranks_d = bas->iranks_d; /* my (remote) leaf ranks */
dstsig = link->leafRecvSig;
dstsigdisp_d = bas->leafsigdisp_d;
} else { /* src is leaf, dst is root; we will move data from src to dst */
nsrcranks = bas->nRemoteLeafRanks;
src = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* leaf buf is the send buf */
srcdisp_h = bas->leafbufdisp; /* for my i-th remote leaf rank, I will access its buf at offset leafbufdisp[i] */
srcdisp_d = bas->leafbufdisp_d;
srcranks_h = bas->iranks+bas->ndiranks; /* my (remote) leaf ranks */
srcranks_d = bas->iranks_d;
ndstranks = sf->nRemoteRootRanks;
dst = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* the local root buf is the recv buf */
dstdisp_h = bas->ioffset+bas->ndiranks; /* offsets of the local root buf. Note dstdisp[0] is not necessarily 0 */
dstdisp_d = bas->ioffset_d;
dstranks_d = sf->ranks_d; /* my (remote) root ranks */
dstsig = link->rootRecvSig;
dstsigdisp_d = sf->rootsigdisp_d;
}
/* After Pack operation -- src tells dst ranks that they are allowed to get data */
if (ndstranks) {
hipLaunchKernelGGL(( NvshmemSendSignals), dim3((ndstranks+255)/256),dim3(256),0,link->remoteCommStream, ndstranks,dstsig,dstsigdisp_d,dstranks_d,1); /* set signals to 1 */
cerr = hipGetLastError();CHKERRCUDA(cerr);
}
/* dst waits for signals (permissions) from src ranks to start getting data */
if (nsrcranks) {
hipLaunchKernelGGL(( NvshmemWaitSignals), dim3(1),dim3(1),0,link->remoteCommStream, nsrcranks,dstsig,1,0); /* wait the signals to be 1, then set them to 0 */
cerr = hipGetLastError();CHKERRCUDA(cerr);
}
/* dst gets data from src ranks using non-blocking nvshmem_gets, which are finished in PetscSFLinkGetDataEnd_NVSHMEM() */
/* Count number of locally accessible src ranks, which should be a small number */
for (int i=0; i<nsrcranks; i++) {if (nvshmem_ptr(src,srcranks_h[i])) nLocallyAccessible++;}
/* Get data from remotely accessible PEs */
if (nLocallyAccessible < nsrcranks) {
hipLaunchKernelGGL(( GetDataFromRemotelyAccessible), dim3(nsrcranks),dim3(1),0,link->remoteCommStream, nsrcranks,srcranks_d,src,srcdisp_d,dst,dstdisp_d,link->unitbytes);
cerr = hipGetLastError();CHKERRCUDA(cerr);
}
/* Get data from locally accessible PEs (host API uses copy engines, faster than device API) */
if (nLocallyAccessible) {
for (int i=0; i<nsrcranks; i++) {
int pe = srcranks_h[i];
if (nvshmem_ptr(src,pe)) {
size_t nelems = (dstdisp_h[i+1]-dstdisp_h[i])*link->unitbytes;
nvshmemx_getmem_nbi_on_stream(dst+(dstdisp_h[i]-dstdisp_h[0])*link->unitbytes,src+srcdisp_h[i]*link->unitbytes,nelems,pe,link->remoteCommStream);
}
}
}
PetscFunctionReturn(0);
}
/* Finish the communication (can be done before Unpack)
Receiver tells its senders that they are allowed to reuse their send buffer (since receiver has got data from their send buffer)
*/
/* Finish the communication (can be done before Unpack)
   Receiver tells its senders that they are allowed to reuse their send buffer (since receiver has got data from their send buffer)
*/
PetscErrorCode PetscSFLinkGetDataEnd_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
{
PetscErrorCode ierr;
hipError_t cerr;
PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
uint64_t *srcsig;
PetscInt nsrcranks,*srcsigdisp;
PetscMPIInt *srcranks;
PetscFunctionBegin;
if (direction == PETSCSF_ROOT2LEAF) { /* leaf ranks are getting data */
nsrcranks = sf->nRemoteRootRanks;
srcsig = link->rootSendSig; /* I want to set their root signal */
srcsigdisp = sf->rootsigdisp_d; /* offset of each root signal */
srcranks = sf->ranks_d; /* ranks of the n root ranks */
} else { /* LEAF2ROOT, root ranks are getting data */
nsrcranks = bas->nRemoteLeafRanks;
srcsig = link->leafSendSig;
srcsigdisp = bas->leafsigdisp_d;
srcranks = bas->iranks_d;
}
if (nsrcranks) {
/* quiet completes the non-blocking gets issued in GetDataBegin; only after that may we
   tell the senders (by resetting their send signals to 0) that their buffers are reusable */
nvshmemx_quiet_on_stream(link->remoteCommStream); /* Finish the nonblocking get, so that we can unpack afterwards */
cerr = hipGetLastError();CHKERRCUDA(cerr);
hipLaunchKernelGGL(( NvshmemSendSignals), dim3((nsrcranks+511)/512),dim3(512),0,link->remoteCommStream, nsrcranks,srcsig,srcsigdisp,srcranks,0); /* set signals to 0 */
cerr = hipGetLastError();CHKERRCUDA(cerr);
}
ierr = PetscSFLinkBuildDependenceEnd(sf,link,direction);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* ===========================================================================================================
A set of routines to support sender initiated communication using the put-based method (the default)
The putting protocol is:
Sender has a send buf (sbuf) and a send signal var (ssig); Receiver has a stand-alone recv buf (rbuf)
and a recv signal var (rsig); All signal variables have an initial value 0. rbuf is allocated by SF and
is in nvshmem space.
Sender: | Receiver:
|
1. Pack data into sbuf |
2. Wait ssig be 0, then set it to 1 |
3. Put data to remote stand-alone rbuf |
4. Fence // make sure 5 happens after 3 |
5. Put 1 to receiver's rsig | 1. Wait rsig to be 1, then set it 0
| 2. Unpack data from local rbuf
| 3. Put 0 to sender's ssig
===========================================================================================================*/
/* n thread blocks. Each takes in charge one remote rank */
/* n thread blocks. Each takes in charge one remote rank.
   Block <bid> serves dst rank dstranks[bid]: if that rank is NOT locally accessible, wait until
   our send signal for it drops to 0 (receiver done with previous data), claim it (set to 1),
   then issue a non-blocking put of this rank's portion of the send buffer. */
__global__ static void WaitAndPutDataToRemotelyAccessible(PetscInt ndstranks,PetscMPIInt *dstranks,char *dst,PetscInt *dstdisp,const char *src,PetscInt *srcdisp,uint64_t *srcsig,PetscInt unitbytes)
{
int bid = blockIdx.x;
PetscMPIInt pe = dstranks[bid];
if (!nvshmem_ptr(dst,pe)) {
/* srcdisp[] are prefix offsets; srcdisp[0] is not necessarily 0, hence the shift */
PetscInt nelems = (srcdisp[bid+1]-srcdisp[bid])*unitbytes;
nvshmem_uint64_wait_until(srcsig+bid,NVSHMEM_CMP_EQ,0); /* Wait until the sig = 0 */
srcsig[bid] = 1;
nvshmem_putmem_nbi(dst+dstdisp[bid]*unitbytes,src+(srcdisp[bid]-srcdisp[0])*unitbytes,nelems,pe);
}
}
/* one-thread kernel, which handles all locally accessible destination ranks */
/* Single-thread kernel: for each locally accessible destination rank, wait for our send
   signal for that rank to drop to 0 (receiver finished with the buffer), then set it to 1
   to claim the buffer. Remotely accessible ranks are handled by
   WaitAndPutDataToRemotelyAccessible() instead. */
__global__ static void WaitSignalsFromLocallyAccessible(PetscInt ndstranks,PetscMPIInt *dstranks,uint64_t *srcsig,const char *dst)
{
for (PetscInt r=0; r<ndstranks; r++) {
const PetscMPIInt peer = dstranks[r];
if (!nvshmem_ptr(dst,peer)) continue; /* not locally accessible; skip */
nvshmem_uint64_wait_until(&srcsig[r],NVSHMEM_CMP_EQ,0); /* wait until the sig = 0 */
srcsig[r] = 1;
}
}
/* Put data in the given direction */
/* Put data in the given direction (put-based protocol, sender initiated).
   On link->remoteCommStream: wait for receivers' permission signals, then issue non-blocking
   puts -- one device kernel covering all remotely accessible peers, and host-side
   nvshmemx_putmem_nbi_on_stream per locally accessible peer (uses copy engines, faster).
   Puts are finished in PetscSFLinkPutDataEnd_NVSHMEM(). */
PetscErrorCode PetscSFLinkPutDataBegin_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
{
PetscErrorCode ierr;
hipError_t cerr;
PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
PetscInt ndstranks,nLocallyAccessible = 0;
char *src,*dst;
PetscInt *srcdisp_h,*dstdisp_h;
PetscInt *srcdisp_d,*dstdisp_d;
PetscMPIInt *dstranks_h;
PetscMPIInt *dstranks_d;
uint64_t *srcsig;
PetscFunctionBegin;
ierr = PetscSFLinkBuildDependenceBegin(sf,link,direction);CHKERRQ(ierr);
if (direction == PETSCSF_ROOT2LEAF) { /* put data in rootbuf to leafbuf */
ndstranks = bas->nRemoteLeafRanks; /* number of (remote) leaf ranks */
src = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* Both src & dst must be symmetric */
dst = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
srcdisp_h = bas->ioffset+bas->ndiranks; /* offsets of rootbuf. srcdisp[0] is not necessarily zero */
srcdisp_d = bas->ioffset_d;
srcsig = link->rootSendSig;
dstdisp_h = bas->leafbufdisp; /* for my i-th remote leaf rank, I will access its leaf buf at offset leafbufdisp[i] */
dstdisp_d = bas->leafbufdisp_d;
dstranks_h = bas->iranks+bas->ndiranks; /* remote leaf ranks */
dstranks_d = bas->iranks_d;
} else { /* put data in leafbuf to rootbuf */
ndstranks = sf->nRemoteRootRanks;
src = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
dst = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
srcdisp_h = sf->roffset+sf->ndranks; /* offsets of leafbuf */
srcdisp_d = sf->roffset_d;
srcsig = link->leafSendSig;
dstdisp_h = sf->rootbufdisp; /* for my i-th remote root rank, I will access its root buf at offset rootbufdisp[i] */
dstdisp_d = sf->rootbufdisp_d;
dstranks_h = sf->ranks+sf->ndranks; /* remote root ranks */
dstranks_d = sf->ranks_d;
}
/* Wait for signals and then put data to dst ranks using non-blocking nvshmem_put, which are finished in PetscSFLinkPutDataEnd_NVSHMEM */
/* Count number of locally accessible neighbors, which should be a small number */
for (int i=0; i<ndstranks; i++) {if (nvshmem_ptr(dst,dstranks_h[i])) nLocallyAccessible++;}
/* For remotely accessible PEs, send data to them in one kernel call */
if (nLocallyAccessible < ndstranks) {
hipLaunchKernelGGL(( WaitAndPutDataToRemotelyAccessible), dim3(ndstranks),dim3(1),0,link->remoteCommStream, ndstranks,dstranks_d,dst,dstdisp_d,src,srcdisp_d,srcsig,link->unitbytes);
cerr = hipGetLastError();CHKERRCUDA(cerr);
}
/* For locally accessible PEs, use host API, which uses CUDA copy-engines and is much faster than device API */
if (nLocallyAccessible) {
hipLaunchKernelGGL(( WaitSignalsFromLocallyAccessible), dim3(1),dim3(1),0,link->remoteCommStream, ndstranks,dstranks_d,srcsig,dst);
cerr = hipGetLastError();CHKERRCUDA(cerr); /* check the launch, as done for every other kernel launch in this file */
for (int i=0; i<ndstranks; i++) {
int pe = dstranks_h[i];
if (nvshmem_ptr(dst,pe)) { /* If return a non-null pointer, then <pe> is locally accessible */
size_t nelems = (srcdisp_h[i+1]-srcdisp_h[i])*link->unitbytes;
/* Initiate the nonblocking communication */
nvshmemx_putmem_nbi_on_stream(dst+dstdisp_h[i]*link->unitbytes,src+(srcdisp_h[i]-srcdisp_h[0])*link->unitbytes,nelems,pe,link->remoteCommStream);
}
}
}
if (nLocallyAccessible) {
nvshmemx_quiet_on_stream(link->remoteCommStream); /* Calling nvshmem_fence/quiet() does not fence the above nvshmemx_putmem_nbi_on_stream! */
}
PetscFunctionReturn(0);
}
/* A one-thread kernel. The thread takes in charge all remote PEs */
/* A one-thread kernel. The thread takes in charge all remote PEs.
   Sends "data has arrived" signals (value 1) to all destination ranks, then waits until this
   rank's own recv signals from its source ranks become 1, resetting them to 0 afterwards. */
__global__ static void PutDataEnd(PetscInt nsrcranks,PetscInt ndstranks,PetscMPIInt *dstranks,uint64_t *dstsig,PetscInt *dstsigdisp)
{
/* TODO: Shall we finish the non-blocking remote puts here? */
/* 1. Send a signal to each dst rank */
/* According to Akhil@NVIDIA, IB is orderred, so no fence is needed for remote PEs.
For local PEs, we already called nvshmemx_quiet_on_stream(). Therefore, we are good to send signals to all dst ranks now.
*/
for (int i=0; i<ndstranks; i++) {nvshmemx_uint64_signal(dstsig+dstsigdisp[i],1,dstranks[i]);} /* set sig to 1 */
/* 2. Wait for signals from src ranks (if any) */
if (nsrcranks) {
nvshmem_uint64_wait_until_all(dstsig,nsrcranks,NULL/*no mask*/,NVSHMEM_CMP_EQ,1); /* wait sigs to be 1, then set them to 0 */
for (int i=0; i<nsrcranks; i++) dstsig[i] = 0;
}
}
/* Finish the communication -- A receiver waits until it can access its receive buffer */
/* Finish the communication -- A receiver waits until it can access its receive buffer */
PetscErrorCode PetscSFLinkPutDataEnd_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
{
PetscErrorCode ierr;
hipError_t cerr;
PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
PetscMPIInt *dstranks;
uint64_t *dstsig;
PetscInt nsrcranks,ndstranks,*dstsigdisp;
PetscFunctionBegin;
if (direction == PETSCSF_ROOT2LEAF) { /* put root data to leaf */
nsrcranks = sf->nRemoteRootRanks;
ndstranks = bas->nRemoteLeafRanks;
dstranks = bas->iranks_d; /* leaf ranks */
dstsig = link->leafRecvSig; /* I will set my leaf ranks's RecvSig */
dstsigdisp = bas->leafsigdisp_d; /* for my i-th remote leaf rank, I will access its signal at offset leafsigdisp[i] */
} else { /* LEAF2ROOT */
nsrcranks = bas->nRemoteLeafRanks;
ndstranks = sf->nRemoteRootRanks;
dstranks = sf->ranks_d;
dstsig = link->rootRecvSig;
dstsigdisp = sf->rootsigdisp_d;
}
if (nsrcranks || ndstranks) {
/* one-thread kernel: signal all dst ranks, then wait on my own recv signals */
hipLaunchKernelGGL(( PutDataEnd), dim3(1),dim3(1),0,link->remoteCommStream, nsrcranks,ndstranks,dstranks,dstsig,dstsigdisp);
cerr = hipGetLastError();CHKERRCUDA(cerr);
}
ierr = PetscSFLinkBuildDependenceEnd(sf,link,direction);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* PostUnpack operation -- A receiver tells its senders that they are allowed to put data to here (it implies recv buf is free to take new data) */
/* PostUnpack operation -- A receiver tells its senders that they are allowed to put data to here (it implies recv buf is free to take new data) */
PetscErrorCode PetscSFLinkSendSignalsToAllowPuttingData_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
{
PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
uint64_t *srcsig;
PetscInt nsrcranks,*srcsigdisp_d;
PetscMPIInt *srcranks_d;
PetscFunctionBegin;
if (direction == PETSCSF_ROOT2LEAF) { /* I allow my root ranks to put data to me */
nsrcranks = sf->nRemoteRootRanks;
srcsig = link->rootSendSig; /* I want to set their send signals */
srcsigdisp_d = sf->rootsigdisp_d; /* offset of each root signal */
srcranks_d = sf->ranks_d; /* ranks of the n root ranks */
} else { /* LEAF2ROOT */
nsrcranks = bas->nRemoteLeafRanks;
srcsig = link->leafSendSig;
srcsigdisp_d = bas->leafsigdisp_d;
srcranks_d = bas->iranks_d;
}
if (nsrcranks) {
/* Reset the senders' send signals to 0, i.e., "your previous data has been consumed; buffer is free" */
hipLaunchKernelGGL(( NvshmemSendSignals), dim3((nsrcranks+255)/256),dim3(256),0,link->remoteCommStream, nsrcranks,srcsig,srcsigdisp_d,srcranks_d,0); /* Set remote signals to 0 */
hipError_t cerr = hipGetLastError();CHKERRCUDA(cerr);
}
PetscFunctionReturn(0);
}
/* Destructor when the link uses nvshmem for communication */
/* Destructor when the link uses nvshmem for communication.
   Releases the CUDA/HIP events and stream created in PetscSFLinkCreate_NVSHMEM, then the
   symmetric-heap buffers and signal arrays allocated via PetscNvshmemMalloc/Calloc. */
static PetscErrorCode PetscSFLinkDestroy_NVSHMEM(PetscSF sf,PetscSFLink link)
{
PetscErrorCode ierr;
hipError_t cerr;
PetscFunctionBegin;
cerr = hipEventDestroy(link->dataReady);CHKERRCUDA(cerr);
cerr = hipEventDestroy(link->endRemoteComm);CHKERRCUDA(cerr);
cerr = hipStreamDestroy(link->remoteCommStream);CHKERRCUDA(cerr);
/* nvshmem does not need buffers on host, which should be NULL */
ierr = PetscNvshmemFree(link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]);CHKERRQ(ierr);
ierr = PetscNvshmemFree(link->leafSendSig);CHKERRQ(ierr);
ierr = PetscNvshmemFree(link->leafRecvSig);CHKERRQ(ierr);
ierr = PetscNvshmemFree(link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]);CHKERRQ(ierr);
ierr = PetscNvshmemFree(link->rootSendSig);CHKERRQ(ierr);
ierr = PetscNvshmemFree(link->rootRecvSig);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* Create (or fetch from cache) an SF link configured for NVSHMEM communication.
   Decides whether root/leaf data can be used directly as send/recv buffers, sets up
   signal arrays, the high-priority remote-communication stream, events, and the
   protocol-specific callbacks (get-based vs put-based). Returns the link in *mylink. */
PetscErrorCode PetscSFLinkCreate_NVSHMEM(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,const void *leafdata,MPI_Op op,PetscSFOperation sfop,PetscSFLink *mylink)
{
PetscErrorCode ierr;
hipError_t cerr;
PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
PetscSFLink *p,link;
PetscBool match,rootdirect[2],leafdirect[2];
int greatestPriority;
PetscFunctionBegin;
/* Check to see if we can directly send/recv root/leafdata with the given sf, sfop and op.
We only care root/leafdirect[PETSCSF_REMOTE], since we never need intermediate buffers in local communication with NVSHMEM.
*/
if (sfop == PETSCSF_BCAST) { /* Move data from rootbuf to leafbuf */
if (sf->use_nvshmem_get) {
rootdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* send buffer has to be stand-alone (can't be rootdata) */
leafdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(leafmtype) && sf->leafcontig[PETSCSF_REMOTE] && op == MPI_REPLACE) ? PETSC_TRUE : PETSC_FALSE;
} else {
rootdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(rootmtype) && bas->rootcontig[PETSCSF_REMOTE]) ? PETSC_TRUE : PETSC_FALSE;
leafdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* Our put-protocol always needs a nvshmem alloc'ed recv buffer */
}
} else if (sfop == PETSCSF_REDUCE) { /* Move data from leafbuf to rootbuf */
if (sf->use_nvshmem_get) {
rootdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(rootmtype) && bas->rootcontig[PETSCSF_REMOTE] && op == MPI_REPLACE) ? PETSC_TRUE : PETSC_FALSE;
leafdirect[PETSCSF_REMOTE] = PETSC_FALSE;
} else {
rootdirect[PETSCSF_REMOTE] = PETSC_FALSE;
leafdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(leafmtype) && sf->leafcontig[PETSCSF_REMOTE]) ? PETSC_TRUE : PETSC_FALSE;
}
} else { /* PETSCSF_FETCH */
rootdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* FETCH always need a separate rootbuf */
leafdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* We also force allocating a separate leafbuf so that leafdata and leafupdate can share mpi requests */
}
/* Look for free nvshmem links in cache */
for (p=&bas->avail; (link=*p); p=&link->next) {
if (link->use_nvshmem) {
ierr = MPIPetsc_Type_compare(unit,link->unit,&match);CHKERRQ(ierr);
if (match) {
*p = link->next; /* Remove from available list */
goto found;
}
}
}
/* No cached link: build a new one from scratch */
ierr = PetscNew(&link);CHKERRQ(ierr);
ierr = PetscSFLinkSetUp_Host(sf,link,unit);CHKERRQ(ierr); /* Compute link->unitbytes, dup link->unit etc. */
if (sf->backend == PETSCSF_BACKEND_CUDA) {ierr = PetscSFLinkSetUp_CUDA(sf,link,unit);CHKERRQ(ierr);} /* Setup pack routines, streams etc */
#if defined(PETSC_HAVE_KOKKOS)
else if (sf->backend == PETSCSF_BACKEND_KOKKOS) {ierr = PetscSFLinkSetUp_Kokkos(sf,link,unit);CHKERRQ(ierr);}
#endif
link->rootdirect[PETSCSF_LOCAL] = PETSC_TRUE; /* For the local part we directly use root/leafdata */
link->leafdirect[PETSCSF_LOCAL] = PETSC_TRUE;
/* Init signals to zero */
if (!link->rootSendSig) {ierr = PetscNvshmemCalloc(bas->nRemoteLeafRanksMax*sizeof(uint64_t),(void**)&link->rootSendSig);CHKERRQ(ierr);}
if (!link->rootRecvSig) {ierr = PetscNvshmemCalloc(bas->nRemoteLeafRanksMax*sizeof(uint64_t),(void**)&link->rootRecvSig);CHKERRQ(ierr);}
if (!link->leafSendSig) {ierr = PetscNvshmemCalloc(sf->nRemoteRootRanksMax*sizeof(uint64_t),(void**)&link->leafSendSig);CHKERRQ(ierr);}
if (!link->leafRecvSig) {ierr = PetscNvshmemCalloc(sf->nRemoteRootRanksMax*sizeof(uint64_t),(void**)&link->leafRecvSig);CHKERRQ(ierr);}
link->use_nvshmem = PETSC_TRUE;
link->rootmtype = PETSC_MEMTYPE_DEVICE; /* Only need 0/1-based mtype from now on */
link->leafmtype = PETSC_MEMTYPE_DEVICE;
/* Overwrite some function pointers set by PetscSFLinkSetUp_CUDA */
link->Destroy = PetscSFLinkDestroy_NVSHMEM;
if (sf->use_nvshmem_get) { /* get-based protocol */
link->PrePack = PetscSFLinkWaitSignalsOfCompletionOfGettingData_NVSHMEM;
link->StartCommunication = PetscSFLinkGetDataBegin_NVSHMEM;
link->FinishCommunication = PetscSFLinkGetDataEnd_NVSHMEM;
} else { /* put-based protocol */
link->StartCommunication = PetscSFLinkPutDataBegin_NVSHMEM;
link->FinishCommunication = PetscSFLinkPutDataEnd_NVSHMEM;
link->PostUnpack = PetscSFLinkSendSignalsToAllowPuttingData_NVSHMEM;
}
/* Communication runs on its own non-blocking, highest-priority stream so it can overlap packing */
cerr = hipDeviceGetStreamPriorityRange(NULL,&greatestPriority);CHKERRCUDA(cerr);
cerr = hipStreamCreateWithPriority(&link->remoteCommStream,hipStreamNonBlocking,greatestPriority);CHKERRCUDA(cerr);
cerr = hipEventCreateWithFlags(&link->dataReady,hipEventDisableTiming);CHKERRCUDA(cerr);
cerr = hipEventCreateWithFlags(&link->endRemoteComm,hipEventDisableTiming);CHKERRCUDA(cerr);
found:
if (rootdirect[PETSCSF_REMOTE]) {
link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = (char*)rootdata + bas->rootstart[PETSCSF_REMOTE]*link->unitbytes;
} else {
if (!link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]) {
ierr = PetscNvshmemMalloc(bas->rootbuflen_rmax*link->unitbytes,(void**)&link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]);CHKERRQ(ierr);
}
link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
}
if (leafdirect[PETSCSF_REMOTE]) {
link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = (char*)leafdata + sf->leafstart[PETSCSF_REMOTE]*link->unitbytes;
} else {
if (!link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]) {
ierr = PetscNvshmemMalloc(sf->leafbuflen_rmax*link->unitbytes,(void**)&link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]);CHKERRQ(ierr);
}
link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
}
link->rootdirect[PETSCSF_REMOTE] = rootdirect[PETSCSF_REMOTE];
link->leafdirect[PETSCSF_REMOTE] = leafdirect[PETSCSF_REMOTE];
link->rootdata = rootdata; /* root/leafdata are keys to look up links in PetscSFXxxEnd */
link->leafdata = leafdata;
link->next = bas->inuse;
bas->inuse = link;
*mylink = link;
PetscFunctionReturn(0);
}
#if defined(PETSC_USE_REAL_SINGLE)
/* Sum-reduce <count> floats from src into dst over NVSHMEM_TEAM_WORLD, on PetscDefaultCudaStream */
PetscErrorCode PetscNvshmemSum(PetscInt count,float *dst,const float *src)
{
PetscErrorCode ierr;
PetscMPIInt num; /* Assume nvshmem's int is MPI's int */
PetscFunctionBegin;
ierr = PetscMPIIntCast(count,&num);CHKERRQ(ierr);
nvshmemx_float_sum_reduce_on_stream(NVSHMEM_TEAM_WORLD,dst,src,num,PetscDefaultCudaStream);
PetscFunctionReturn(0);
}
/* Max-reduce <count> floats from src into dst over NVSHMEM_TEAM_WORLD, on PetscDefaultCudaStream */
PetscErrorCode PetscNvshmemMax(PetscInt count,float *dst,const float *src)
{
PetscErrorCode ierr;
PetscMPIInt num; /* Assume nvshmem's int is MPI's int */
PetscFunctionBegin;
ierr = PetscMPIIntCast(count,&num);CHKERRQ(ierr);
nvshmemx_float_max_reduce_on_stream(NVSHMEM_TEAM_WORLD,dst,src,num,PetscDefaultCudaStream);
PetscFunctionReturn(0);
}
#elif defined(PETSC_USE_REAL_DOUBLE)
/* Sum-reduce <count> doubles from src into dst over NVSHMEM_TEAM_WORLD, on PetscDefaultCudaStream */
PetscErrorCode PetscNvshmemSum(PetscInt count,double *dst,const double *src)
{
PetscErrorCode ierr;
PetscMPIInt num; /* Assume nvshmem's int is MPI's int */
PetscFunctionBegin;
ierr = PetscMPIIntCast(count,&num);CHKERRQ(ierr);
nvshmemx_double_sum_reduce_on_stream(NVSHMEM_TEAM_WORLD,dst,src,num,PetscDefaultCudaStream);
PetscFunctionReturn(0);
}
/* Max-reduce <count> doubles from src into dst over NVSHMEM_TEAM_WORLD, on PetscDefaultCudaStream */
PetscErrorCode PetscNvshmemMax(PetscInt count,double *dst,const double *src)
{
PetscErrorCode ierr;
PetscMPIInt num; /* Assume nvshmem's int is MPI's int */
PetscFunctionBegin;
ierr = PetscMPIIntCast(count,&num);CHKERRQ(ierr);
nvshmemx_double_max_reduce_on_stream(NVSHMEM_TEAM_WORLD,dst,src,num,PetscDefaultCudaStream);
PetscFunctionReturn(0);
}
#endif
| 6a0af042cd2c126088e035aee4a26b1c6cd0aba4.cu | #include <petsc/private/cudavecimpl.h>
#include <petsccublas.h>
#include <../src/vec/is/sf/impls/basic/sfpack.h>
#include <mpi.h>
#include <nvshmem.h>
#include <nvshmemx.h>
/* Lazily initialize NVSHMEM on top of PETSC_COMM_WORLD (idempotent).
   Also ensures CUDA is initialized first. Records that PETSc began NVSHMEM so that
   PETSc knows to finalize it. */
PetscErrorCode PetscNvshmemInitializeCheck(void)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (!PetscNvshmemInitialized) { /* Note NVSHMEM does not provide a routine to check whether it is initialized */
nvshmemx_init_attr_t attr;
attr.mpi_comm = &PETSC_COMM_WORLD;
ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr);
ierr = nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM,&attr);CHKERRQ(ierr);
PetscNvshmemInitialized = PETSC_TRUE;
PetscBeganNvshmem = PETSC_TRUE;
}
PetscFunctionReturn(0);
}
/* Allocate <size> bytes on the NVSHMEM symmetric heap; errors if the allocation fails.
   Collective: nvshmem_malloc must be called by all PEs with the same size. */
PetscErrorCode PetscNvshmemMalloc(size_t size, void** ptr)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscNvshmemInitializeCheck();CHKERRQ(ierr);
*ptr = nvshmem_malloc(size);
if (!*ptr) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"nvshmem_malloc() failed to allocate %zu bytes",size);
PetscFunctionReturn(0);
}
/* Allocate <size> zero-initialized bytes on the NVSHMEM symmetric heap; errors on failure. */
PetscErrorCode PetscNvshmemCalloc(size_t size, void**ptr)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscNvshmemInitializeCheck();CHKERRQ(ierr);
*ptr = nvshmem_calloc(size,1);
if (!*ptr) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"nvshmem_calloc() failed to allocate %zu bytes",size);
PetscFunctionReturn(0);
}
/* Return a symmetric-heap allocation to NVSHMEM (collective, like nvshmem_free itself) */
PetscErrorCode PetscNvshmemFree_Private(void* ptr)
{
PetscFunctionBegin;
nvshmem_free(ptr);
PetscFunctionReturn(0);
}
/* Shut down the NVSHMEM library (called by PETSc at finalization when PetscBeganNvshmem) */
PetscErrorCode PetscNvshmemFinalize(void)
{
PetscFunctionBegin;
nvshmem_finalize();
PetscFunctionReturn(0);
}
/* Free nvshmem related fields in the SF */
/* Free nvshmem related fields in the SF: the host-side displacement arrays and their
   device mirrors created in PetscSFSetUp_Basic_NVSHMEM() */
PetscErrorCode PetscSFReset_Basic_NVSHMEM(PetscSF sf)
{
PetscErrorCode ierr;
PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
PetscFunctionBegin;
ierr = PetscFree2(bas->leafsigdisp,bas->leafbufdisp);CHKERRQ(ierr);
ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,bas->leafbufdisp_d);CHKERRQ(ierr);
ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,bas->leafsigdisp_d);CHKERRQ(ierr);
ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,bas->iranks_d);CHKERRQ(ierr);
ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,bas->ioffset_d);CHKERRQ(ierr);
ierr = PetscFree2(sf->rootsigdisp,sf->rootbufdisp);CHKERRQ(ierr);
ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,sf->rootbufdisp_d);CHKERRQ(ierr);
ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,sf->rootsigdisp_d);CHKERRQ(ierr);
ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,sf->ranks_d);CHKERRQ(ierr);
ierr = PetscSFFree(sf,PETSC_MEMTYPE_CUDA,sf->roffset_d);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* Set up NVSHMEM related fields for an SF of type SFBASIC (only after PetscSFSetup_Basic() already set up dependant fields */
/* Set up NVSHMEM related fields for an SF of type SFBASIC (only after PetscSFSetup_Basic() already set up dependant fields).
   Exchanges (via plain MPI) the per-neighbor signal and buffer displacements each side
   needs to address the other's symmetric-heap buffers, computes comm-wide maxima for
   symmetric allocation sizes, and mirrors the host arrays to the device. */
static PetscErrorCode PetscSFSetUp_Basic_NVSHMEM(PetscSF sf)
{
PetscErrorCode ierr;
cudaError_t cerr;
PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
PetscInt i,nRemoteRootRanks,nRemoteLeafRanks;
PetscMPIInt tag;
MPI_Comm comm;
MPI_Request *rootreqs,*leafreqs;
PetscInt tmp,stmp[4],rtmp[4]; /* tmps for send/recv buffers */
PetscFunctionBegin;
ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr);
ierr = PetscObjectGetNewTag((PetscObject)sf,&tag);CHKERRQ(ierr);
nRemoteRootRanks = sf->nranks-sf->ndranks;
nRemoteLeafRanks = bas->niranks-bas->ndiranks;
sf->nRemoteRootRanks = nRemoteRootRanks;
bas->nRemoteLeafRanks = nRemoteLeafRanks;
ierr = PetscMalloc2(nRemoteLeafRanks,&rootreqs,nRemoteRootRanks,&leafreqs);CHKERRQ(ierr);
/* Symmetric allocations must be the same size on every PE, so take comm-wide maxima */
stmp[0] = nRemoteRootRanks;
stmp[1] = sf->leafbuflen[PETSCSF_REMOTE];
stmp[2] = nRemoteLeafRanks;
stmp[3] = bas->rootbuflen[PETSCSF_REMOTE];
ierr = MPIU_Allreduce(stmp,rtmp,4,MPIU_INT,MPI_MAX,comm);CHKERRMPI(ierr);
sf->nRemoteRootRanksMax = rtmp[0];
sf->leafbuflen_rmax = rtmp[1];
bas->nRemoteLeafRanksMax = rtmp[2];
bas->rootbuflen_rmax = rtmp[3];
/* Total four rounds of MPI communications to set up the nvshmem fields */
/* Root ranks to leaf ranks: send info about rootsigdisp[] and rootbufdisp[] */
ierr = PetscMalloc2(nRemoteRootRanks,&sf->rootsigdisp,nRemoteRootRanks,&sf->rootbufdisp);CHKERRQ(ierr);
for (i=0; i<nRemoteRootRanks; i++) {ierr = MPI_Irecv(&sf->rootsigdisp[i],1,MPIU_INT,sf->ranks[i+sf->ndranks],tag,comm,&leafreqs[i]);CHKERRMPI(ierr);} /* Leaves recv */
for (i=0; i<nRemoteLeafRanks; i++) {ierr = MPI_Send(&i,1,MPIU_INT,bas->iranks[i+bas->ndiranks],tag,comm);CHKERRMPI(ierr);} /* Roots send. Note i changes, so we use MPI_Send. */
ierr = MPI_Waitall(nRemoteRootRanks,leafreqs,MPI_STATUSES_IGNORE);CHKERRMPI(ierr);
for (i=0; i<nRemoteRootRanks; i++) {ierr = MPI_Irecv(&sf->rootbufdisp[i],1,MPIU_INT,sf->ranks[i+sf->ndranks],tag,comm,&leafreqs[i]);CHKERRMPI(ierr);} /* Leaves recv */
for (i=0; i<nRemoteLeafRanks; i++) {
tmp = bas->ioffset[i+bas->ndiranks] - bas->ioffset[bas->ndiranks];
ierr = MPI_Send(&tmp,1,MPIU_INT,bas->iranks[i+bas->ndiranks],tag,comm);CHKERRMPI(ierr); /* Roots send. Note tmp changes, so we use MPI_Send. */
}
ierr = MPI_Waitall(nRemoteRootRanks,leafreqs,MPI_STATUSES_IGNORE);CHKERRMPI(ierr);
/* Mirror the root-side displacement/rank/offset arrays to the device */
cerr = cudaMalloc((void**)&sf->rootbufdisp_d,nRemoteRootRanks*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = cudaMalloc((void**)&sf->rootsigdisp_d,nRemoteRootRanks*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = cudaMalloc((void**)&sf->ranks_d,nRemoteRootRanks*sizeof(PetscMPIInt));CHKERRCUDA(cerr);
cerr = cudaMalloc((void**)&sf->roffset_d,(nRemoteRootRanks+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = cudaMemcpyAsync(sf->rootbufdisp_d,sf->rootbufdisp,nRemoteRootRanks*sizeof(PetscInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr);
cerr = cudaMemcpyAsync(sf->rootsigdisp_d,sf->rootsigdisp,nRemoteRootRanks*sizeof(PetscInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr);
cerr = cudaMemcpyAsync(sf->ranks_d,sf->ranks+sf->ndranks,nRemoteRootRanks*sizeof(PetscMPIInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr);
cerr = cudaMemcpyAsync(sf->roffset_d,sf->roffset+sf->ndranks,(nRemoteRootRanks+1)*sizeof(PetscInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr);
/* Leaf ranks to root ranks: send info about leafsigdisp[] and leafbufdisp[] */
ierr = PetscMalloc2(nRemoteLeafRanks,&bas->leafsigdisp,nRemoteLeafRanks,&bas->leafbufdisp);CHKERRQ(ierr);
for (i=0; i<nRemoteLeafRanks; i++) {ierr = MPI_Irecv(&bas->leafsigdisp[i],1,MPIU_INT,bas->iranks[i+bas->ndiranks],tag,comm,&rootreqs[i]);CHKERRMPI(ierr);}
for (i=0; i<nRemoteRootRanks; i++) {ierr = MPI_Send(&i,1,MPIU_INT,sf->ranks[i+sf->ndranks],tag,comm);CHKERRMPI(ierr);}
ierr = MPI_Waitall(nRemoteLeafRanks,rootreqs,MPI_STATUSES_IGNORE);CHKERRMPI(ierr);
for (i=0; i<nRemoteLeafRanks; i++) {ierr = MPI_Irecv(&bas->leafbufdisp[i],1,MPIU_INT,bas->iranks[i+bas->ndiranks],tag,comm,&rootreqs[i]);CHKERRMPI(ierr);}
for (i=0; i<nRemoteRootRanks; i++) {
tmp = sf->roffset[i+sf->ndranks] - sf->roffset[sf->ndranks];
ierr = MPI_Send(&tmp,1,MPIU_INT,sf->ranks[i+sf->ndranks],tag,comm);CHKERRMPI(ierr);
}
ierr = MPI_Waitall(nRemoteLeafRanks,rootreqs,MPI_STATUSES_IGNORE);CHKERRMPI(ierr);
/* Mirror the leaf-side displacement/rank/offset arrays to the device */
cerr = cudaMalloc((void**)&bas->leafbufdisp_d,nRemoteLeafRanks*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = cudaMalloc((void**)&bas->leafsigdisp_d,nRemoteLeafRanks*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = cudaMalloc((void**)&bas->iranks_d,nRemoteLeafRanks*sizeof(PetscMPIInt));CHKERRCUDA(cerr);
cerr = cudaMalloc((void**)&bas->ioffset_d,(nRemoteLeafRanks+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = cudaMemcpyAsync(bas->leafbufdisp_d,bas->leafbufdisp,nRemoteLeafRanks*sizeof(PetscInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr);
cerr = cudaMemcpyAsync(bas->leafsigdisp_d,bas->leafsigdisp,nRemoteLeafRanks*sizeof(PetscInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr);
cerr = cudaMemcpyAsync(bas->iranks_d,bas->iranks+bas->ndiranks,nRemoteLeafRanks*sizeof(PetscMPIInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr);
cerr = cudaMemcpyAsync(bas->ioffset_d,bas->ioffset+bas->ndiranks,(nRemoteLeafRanks+1)*sizeof(PetscInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);CHKERRCUDA(cerr);
ierr = PetscFree2(rootreqs,leafreqs);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* Decide whether this SF with the given root/leaf data can use NVSHMEM for communication.

   Collective on sf; *use_nvshmem must come out the same on all ranks since a link is
   collectively either NVSHMEM or MPI. The SF-level eligibility check (SFBASIC type,
   PETSC_COMM_WORLD-congruent communicator, no rank with both data pointers NULL) runs
   once and is cached via sf->checked_nvshmem_eligibility.

   Bug fix: the original code set sf->checked_nvshmem_eligibility = PETSC_TRUE *before*
   testing "!sf->checked_nvshmem_eligibility", which made the whole eligibility check dead
   code. The premature assignment is removed; the flag is set only after the check runs. */
PetscErrorCode PetscSFLinkNvshmemCheck(PetscSF sf,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,const void *leafdata,PetscBool *use_nvshmem)
{
PetscErrorCode ierr;
MPI_Comm comm;
PetscBool isBasic;
PetscMPIInt result = MPI_UNEQUAL;
PetscFunctionBegin;
ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr);
/* Check if the sf is eligible for NVSHMEM, if we have not checked yet.
Note the check result <use_nvshmem> must be the same over comm, since an SFLink must be collectively either NVSHMEM or MPI.
*/
if (sf->use_nvshmem && !sf->checked_nvshmem_eligibility) {
/* Only use NVSHMEM for SFBASIC on PETSC_COMM_WORLD */
ierr = PetscObjectTypeCompare((PetscObject)sf,PETSCSFBASIC,&isBasic);CHKERRQ(ierr);
if (isBasic) {ierr = MPI_Comm_compare(PETSC_COMM_WORLD,comm,&result);CHKERRMPI(ierr);}
if (!isBasic || (result != MPI_IDENT && result != MPI_CONGRUENT)) sf->use_nvshmem = PETSC_FALSE; /* If not eligible, clear the flag so that we don't try again */
/* Do further check: If on a rank, both rootdata and leafdata are NULL, we might think they are PETSC_MEMTYPE_CUDA (or HOST)
and then use NVSHMEM. But if root/leafmtypes on other ranks are PETSC_MEMTYPE_HOST (or DEVICE), this would lead to
inconsistency on the return value <use_nvshmem>. To be safe, we simply disable nvshmem on these rare SFs.
*/
if (sf->use_nvshmem) {
PetscInt hasNullRank = (!rootdata && !leafdata) ? 1 : 0;
ierr = MPI_Allreduce(MPI_IN_PLACE,&hasNullRank,1,MPIU_INT,MPI_LOR,comm);CHKERRMPI(ierr);
if (hasNullRank) sf->use_nvshmem = PETSC_FALSE;
}
sf->checked_nvshmem_eligibility = PETSC_TRUE; /* If eligible, don't do above check again */
}
/* Check if rootmtype and leafmtype collectively are PETSC_MEMTYPE_CUDA */
if (sf->use_nvshmem) {
PetscInt oneCuda = (!rootdata || PetscMemTypeCUDA(rootmtype)) && (!leafdata || PetscMemTypeCUDA(leafmtype)) ? 1 : 0; /* Do I use cuda for both root&leafmtype? */
PetscInt allCuda = oneCuda; /* Assume the same for all ranks. But if not, in opt mode, return value <use_nvshmem> won't be collective! */
#if defined(PETSC_USE_DEBUG) /* Check in debug mode. Note MPI_Allreduce is expensive, so only in debug mode */
ierr = MPI_Allreduce(&oneCuda,&allCuda,1,MPIU_INT,MPI_LAND,comm);CHKERRMPI(ierr);
if (allCuda != oneCuda) SETERRQ(comm,PETSC_ERR_SUP,"root/leaf mtypes are inconsistent among ranks, which may lead to SF nvshmem failure in opt mode. Add -use_nvshmem 0 to disable it.");
#endif
if (allCuda) {
ierr = PetscNvshmemInitializeCheck();CHKERRQ(ierr);
if (!sf->setup_nvshmem) { /* Set up nvshmem related fields on this SF on-demand */
ierr = PetscSFSetUp_Basic_NVSHMEM(sf);CHKERRQ(ierr);
sf->setup_nvshmem = PETSC_TRUE;
}
*use_nvshmem = PETSC_TRUE;
} else {
*use_nvshmem = PETSC_FALSE;
}
} else {
*use_nvshmem = PETSC_FALSE;
}
PetscFunctionReturn(0);
}
/* Make <remoteCommStream> wait for work already queued on <stream> (e.g. packing kernels),
   so NVSHMEM communication launched on <remoteCommStream> observes fully packed buffers. */
static PetscErrorCode PetscSFLinkBuildDependenceBegin(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
{
  cudaError_t    err;
  PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
  PetscInt       len;

  PetscFunctionBegin;
  /* Length of the send-side remote buffer decides whether any remote communication happens */
  if (direction == PETSCSF_ROOT2LEAF) len = bas->rootbuflen[PETSCSF_REMOTE];
  else                                len = sf->leafbuflen[PETSCSF_REMOTE];
  if (len) {
    err = cudaEventRecord(link->dataReady,link->stream);CHKERRCUDA(err);
    err = cudaStreamWaitEvent(link->remoteCommStream,link->dataReady,0);CHKERRCUDA(err);
  }
  PetscFunctionReturn(0);
}
/* Make <stream> wait for the NVSHMEM communication queued on <remoteCommStream>,
   so unpacking on <stream> observes fully received buffers. */
static PetscErrorCode PetscSFLinkBuildDependenceEnd(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
{
  cudaError_t    err;
  PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
  PetscInt       len;

  PetscFunctionBegin;
  /* Length of the recv-side remote buffer decides whether anything was communicated */
  if (direction == PETSCSF_ROOT2LEAF) len = sf->leafbuflen[PETSCSF_REMOTE];
  else                                len = bas->rootbuflen[PETSCSF_REMOTE];
  /* If we will unpack a non-null device buffer, build the endRemoteComm dependence */
  if (len) {
    err = cudaEventRecord(link->endRemoteComm,link->remoteCommStream);CHKERRCUDA(err);
    err = cudaStreamWaitEvent(link->stream,link->endRemoteComm,0);CHKERRCUDA(err);
  }
  PetscFunctionReturn(0);
}
/* Send/Put signals to remote ranks.
Launched with at least n threads (one thread per remote rank); excess threads exit via the guard.
Input parameters:
+ n - Number of remote ranks
. sig - Signal address in symmetric heap (same symbol on every PE)
. sigdisp - To i-th rank, use its signal at offset sigdisp[i]
. ranks - remote ranks
- newval - Set signals to this value
*/
__global__ static void NvshmemSendSignals(PetscInt n,uint64_t *sig,PetscInt *sigdisp,PetscMPIInt *ranks,uint64_t newval)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
/* Each thread puts one remote signal; the signal write is atomic on the target PE */
if (i < n) nvshmemx_uint64_signal(sig+sigdisp[i],newval,ranks[i]);
}
/* Wait until all n local signals equal the expected value and then set them to a new value.
Intended to be launched with a single thread (<<<1,1>>>); see the note below.
Input parameters:
+ n - Number of signals
. sig - Local signal address
. expval - expected value
- newval - Set signals to this new value
*/
__global__ static void NvshmemWaitSignals(PetscInt n,uint64_t *sig,uint64_t expval,uint64_t newval)
{
#if 0
/* Per-thread variant: one thread waits on and resets one signal.
   Akhil Langer@NVIDIA said using 1 thread and nvshmem_uint64_wait_until_all is better */
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) {
nvshmem_signal_wait_until(sig+i,NVSHMEM_CMP_EQ,expval);
sig[i] = newval;
}
#else
/* Single thread blocks until every signal equals expval, then overwrites them all */
nvshmem_uint64_wait_until_all(sig,n,NULL/*no mask*/,NVSHMEM_CMP_EQ,expval);
for (int i=0; i<n; i++) sig[i] = newval;
#endif
}
/* ===========================================================================================================
A set of routines to support receiver initiated communication using the get method
The getting protocol is:
Sender has a send buf (sbuf) and a signal variable (ssig); Receiver has a recv buf (rbuf) and a signal variable (rsig);
All signal variables have an initial value 0.
Sender: | Receiver:
1. Wait ssig be 0, then set it to 1
2. Pack data into stand alone sbuf |
3. Put 1 to receiver's rsig | 1. Wait rsig to be 1, then set it 0
| 2. Get data from remote sbuf to local rbuf
| 3. Put 1 to sender's ssig
| 4. Unpack data from local rbuf
===========================================================================================================*/
/* PrePack operation for the get protocol.

   A sender is about to overwrite its send buffer, which receivers might still be getting data
   from, so it first waits for the signals with which receivers acknowledge they are done getting.
*/
PetscErrorCode PetscSFLinkWaitSignalsOfCompletionOfGettingData_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
{
  PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
  uint64_t      *sig;
  PetscInt       n;

  PetscFunctionBegin;
  if (direction == PETSCSF_ROOT2LEAF) {
    /* Leaf ranks are getting my root data; they reset my rootSendSig when done */
    n   = bas->nRemoteLeafRanks;
    sig = link->rootSendSig;
  } else { /* LEAF2ROOT */
    /* Root ranks are getting my leaf data */
    n   = sf->nRemoteRootRanks;
    sig = link->leafSendSig;
  }
  if (n) {
    cudaError_t cerr;
    /* Wait for the signals to become 0, then set them to 1 (claim the send buffer) */
    NvshmemWaitSignals<<<1,1,0,link->remoteCommStream>>>(n,sig,0,1);
    cerr = cudaGetLastError();CHKERRCUDA(cerr);
  }
  PetscFunctionReturn(0);
}
/* Launched with nsrcranks thread blocks of 1 thread each; block bid handles the bid-th source rank.
   Only PEs that are NOT locally accessible (nvshmem_ptr returns NULL) are handled here; locally
   accessible PEs are served on the host with nvshmemx_getmem_nbi_on_stream (see the caller). */
__global__ static void GetDataFromRemotelyAccessible(PetscInt nsrcranks,PetscMPIInt *srcranks,const char *src,PetscInt *srcdisp,char *dst,PetscInt *dstdisp,PetscInt unitbytes)
{
int bid = blockIdx.x;
PetscMPIInt pe = srcranks[bid];
if (!nvshmem_ptr(src,pe)) {
/* NOTE: despite the name, nelems is a byte count (entry count times unitbytes) */
PetscInt nelems = (dstdisp[bid+1]-dstdisp[bid])*unitbytes;
/* dstdisp[0] is not necessarily 0, hence the subtraction; srcdisp is an offset into the remote symmetric buffer */
nvshmem_getmem_nbi(dst+(dstdisp[bid]-dstdisp[0])*unitbytes,src+srcdisp[bid]*unitbytes,nelems,pe);
}
}
/* Start communication -- Get data in the given direction (see the getting protocol above).
   All work is enqueued on link->remoteCommStream; the nonblocking gets issued here are
   completed later in PetscSFLinkGetDataEnd_NVSHMEM(). */
PetscErrorCode PetscSFLinkGetDataBegin_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
{
PetscErrorCode ierr;
cudaError_t cerr;
PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
PetscInt nsrcranks,ndstranks,nLocallyAccessible = 0;
char *src,*dst;
/* _h / _d suffixes: host / device copies of the same arrays */
PetscInt *srcdisp_h,*dstdisp_h;
PetscInt *srcdisp_d,*dstdisp_d;
PetscMPIInt *srcranks_h;
PetscMPIInt *srcranks_d,*dstranks_d;
uint64_t *dstsig;
PetscInt *dstsigdisp_d;
PetscFunctionBegin;
ierr = PetscSFLinkBuildDependenceBegin(sf,link,direction);CHKERRQ(ierr);
if (direction == PETSCSF_ROOT2LEAF) { /* src is root, dst is leaf; we will move data from src to dst */
nsrcranks = sf->nRemoteRootRanks;
src = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* root buf is the send buf; it is in symmetric heap */
srcdisp_h = sf->rootbufdisp; /* for my i-th remote root rank, I will access its buf at offset rootbufdisp[i] */
srcdisp_d = sf->rootbufdisp_d;
srcranks_h = sf->ranks+sf->ndranks; /* my (remote) root ranks */
srcranks_d = sf->ranks_d;

ndstranks = bas->nRemoteLeafRanks;
dst = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* recv buf is the local leaf buf, also in symmetric heap */
dstdisp_h = sf->roffset+sf->ndranks; /* offsets of the local leaf buf. Note dstdisp[0] is not necessarily 0 */
dstdisp_d = sf->roffset_d;
dstranks_d = bas->iranks_d; /* my (remote) leaf ranks */

dstsig = link->leafRecvSig;
dstsigdisp_d = bas->leafsigdisp_d;
} else { /* src is leaf, dst is root; we will move data from src to dst */
nsrcranks = bas->nRemoteLeafRanks;
src = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* leaf buf is the send buf */
srcdisp_h = bas->leafbufdisp; /* for my i-th remote leaf rank, I will access its buf at offset leafbufdisp[i] */
srcdisp_d = bas->leafbufdisp_d;
srcranks_h = bas->iranks+bas->ndiranks; /* my (remote) leaf ranks */
srcranks_d = bas->iranks_d;

ndstranks = sf->nRemoteRootRanks;
dst = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* the local root buf is the recv buf */
dstdisp_h = bas->ioffset+bas->ndiranks; /* offsets of the local root buf. Note dstdisp[0] is not necessarily 0 */
dstdisp_d = bas->ioffset_d;
dstranks_d = sf->ranks_d; /* my (remote) root ranks */

dstsig = link->rootRecvSig;
dstsigdisp_d = sf->rootsigdisp_d;
}

/* After Pack operation -- src tells dst ranks that they are allowed to get data */
if (ndstranks) {
NvshmemSendSignals<<<(ndstranks+255)/256,256,0,link->remoteCommStream>>>(ndstranks,dstsig,dstsigdisp_d,dstranks_d,1); /* set signals to 1 */
cerr = cudaGetLastError();CHKERRCUDA(cerr);
}

/* dst waits for signals (permissions) from src ranks to start getting data.
   dstsig is a symmetric address: remote copies were set above; locally we wait on our own copy. */
if (nsrcranks) {
NvshmemWaitSignals<<<1,1,0,link->remoteCommStream>>>(nsrcranks,dstsig,1,0); /* wait the signals to be 1, then set them to 0 */
cerr = cudaGetLastError();CHKERRCUDA(cerr);
}

/* dst gets data from src ranks using non-blocking nvshmem_gets, which are finished in PetscSFLinkGetDataEnd_NVSHMEM() */

/* Count number of locally accessible src ranks, which should be a small number */
for (int i=0; i<nsrcranks; i++) {if (nvshmem_ptr(src,srcranks_h[i])) nLocallyAccessible++;}

/* Get data from remotely accessible PEs (one thread block per src rank; see the kernel above) */
if (nLocallyAccessible < nsrcranks) {
GetDataFromRemotelyAccessible<<<nsrcranks,1,0,link->remoteCommStream>>>(nsrcranks,srcranks_d,src,srcdisp_d,dst,dstdisp_d,link->unitbytes);
cerr = cudaGetLastError();CHKERRCUDA(cerr);
}

/* Get data from locally accessible PEs using the host API, issued on the same stream */
if (nLocallyAccessible) {
for (int i=0; i<nsrcranks; i++) {
int pe = srcranks_h[i];
if (nvshmem_ptr(src,pe)) {
size_t nelems = (dstdisp_h[i+1]-dstdisp_h[i])*link->unitbytes; /* byte count, despite the name */
nvshmemx_getmem_nbi_on_stream(dst+(dstdisp_h[i]-dstdisp_h[0])*link->unitbytes,src+srcdisp_h[i]*link->unitbytes,nelems,pe,link->remoteCommStream);
}
}
}
PetscFunctionReturn(0);
}
/* Finish get-based communication (can be done before Unpack).
   The receiver completes its outstanding nonblocking gets and then signals every sender
   that the sender's send buffer may be reused (its data has been fetched). */
PetscErrorCode PetscSFLinkGetDataEnd_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
{
  PetscErrorCode ierr;
  cudaError_t    cerr;
  PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
  uint64_t      *sendsig;
  PetscMPIInt   *peers;
  PetscInt       npeers,*sigdisp;

  PetscFunctionBegin;
  if (direction == PETSCSF_ROOT2LEAF) { /* I am a leaf rank that got root data */
    npeers  = sf->nRemoteRootRanks;
    sendsig = link->rootSendSig;  /* symmetric address of the roots' send signals */
    sigdisp = sf->rootsigdisp_d;  /* offset of each remote root's signal */
    peers   = sf->ranks_d;        /* the remote root ranks */
  } else { /* LEAF2ROOT: I am a root rank that got leaf data */
    npeers  = bas->nRemoteLeafRanks;
    sendsig = link->leafSendSig;
    sigdisp = bas->leafsigdisp_d;
    peers   = bas->iranks_d;
  }
  if (npeers) {
    /* Complete the nonblocking gets so that unpacking afterwards reads valid data */
    nvshmemx_quiet_on_stream(link->remoteCommStream);
    cerr = cudaGetLastError();CHKERRCUDA(cerr);
    /* Reset each sender's signal to 0, allowing it to reuse (repack) its send buffer */
    NvshmemSendSignals<<<(npeers+511)/512,512,0,link->remoteCommStream>>>(npeers,sendsig,sigdisp,peers,0);
    cerr = cudaGetLastError();CHKERRCUDA(cerr);
  }
  ierr = PetscSFLinkBuildDependenceEnd(sf,link,direction);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* ===========================================================================================================
A set of routines to support sender initiated communication using the put-based method (the default)
The putting protocol is:
Sender has a send buf (sbuf) and a send signal var (ssig); Receiver has a stand-alone recv buf (rbuf)
and a recv signal var (rsig); All signal variables have an initial value 0. rbuf is allocated by SF and
is in nvshmem space.
Sender: | Receiver:
|
1. Pack data into sbuf |
2. Wait ssig be 0, then set it to 1 |
3. Put data to remote stand-alone rbuf |
4. Fence // make sure 5 happens after 3 |
5. Put 1 to receiver's rsig | 1. Wait rsig to be 1, then set it 0
| 2. Unpack data from local rbuf
| 3. Put 0 to sender's ssig
===========================================================================================================*/
/* Launched with ndstranks thread blocks of 1 thread each; block bid handles the bid-th destination rank.
   Only PEs that are NOT locally accessible are handled here (locally accessible PEs are served on the
   host; see the caller). Each block first waits for its slot of the sender-side signal to become 0
   (receiver finished with the previous data), claims it by writing 1, then puts the data. */
__global__ static void WaitAndPutDataToRemotelyAccessible(PetscInt ndstranks,PetscMPIInt *dstranks,char *dst,PetscInt *dstdisp,const char *src,PetscInt *srcdisp,uint64_t *srcsig,PetscInt unitbytes)
{
int bid = blockIdx.x;
PetscMPIInt pe = dstranks[bid];
if (!nvshmem_ptr(dst,pe)) {
/* NOTE: despite the name, nelems is a byte count (entry count times unitbytes) */
PetscInt nelems = (srcdisp[bid+1]-srcdisp[bid])*unitbytes;
nvshmem_uint64_wait_until(srcsig+bid,NVSHMEM_CMP_EQ,0); /* Wait until the sig = 0 */
srcsig[bid] = 1;
/* srcdisp[0] is not necessarily 0, hence the subtraction; dstdisp is an offset into the remote symmetric buffer */
nvshmem_putmem_nbi(dst+dstdisp[bid]*unitbytes,src+(srcdisp[bid]-srcdisp[0])*unitbytes,nelems,pe);
}
}
/* One-thread kernel that takes charge of all locally accessible destination PEs:
   for each one, wait until the corresponding sender-side signal becomes 0 (receiver done
   with the previous data) and claim it by writing 1. The actual puts for these PEs are
   issued afterwards on the host with nvshmemx_putmem_nbi_on_stream (see the caller). */
__global__ static void WaitSignalsFromLocallyAccessible(PetscInt ndstranks,PetscMPIInt *dstranks,uint64_t *srcsig,const char *dst)
{
for (int i=0; i<ndstranks; i++) {
int pe = dstranks[i];
if (nvshmem_ptr(dst,pe)) {
nvshmem_uint64_wait_until(srcsig+i,NVSHMEM_CMP_EQ,0); /* Wait until the sig = 0 */
srcsig[i] = 1;
}
}
}
/* Put data in the given direction (see the putting protocol above).
   All work is enqueued on link->remoteCommStream; the nonblocking puts issued here are
   finished in PetscSFLinkPutDataEnd_NVSHMEM(). */
PetscErrorCode PetscSFLinkPutDataBegin_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
{
PetscErrorCode ierr;
cudaError_t cerr;
PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
PetscInt ndstranks,nLocallyAccessible = 0;
char *src,*dst;
/* _h / _d suffixes: host / device copies of the same arrays */
PetscInt *srcdisp_h,*dstdisp_h;
PetscInt *srcdisp_d,*dstdisp_d;
PetscMPIInt *dstranks_h;
PetscMPIInt *dstranks_d;
uint64_t *srcsig;
PetscFunctionBegin;
ierr = PetscSFLinkBuildDependenceBegin(sf,link,direction);CHKERRQ(ierr);
if (direction == PETSCSF_ROOT2LEAF) { /* put data in rootbuf to leafbuf */
ndstranks = bas->nRemoteLeafRanks; /* number of (remote) leaf ranks */
src = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* Both src & dst must be symmetric */
dst = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
srcdisp_h = bas->ioffset+bas->ndiranks; /* offsets of rootbuf. srcdisp[0] is not necessarily zero */
srcdisp_d = bas->ioffset_d;
srcsig = link->rootSendSig;

dstdisp_h = bas->leafbufdisp; /* for my i-th remote leaf rank, I will access its leaf buf at offset leafbufdisp[i] */
dstdisp_d = bas->leafbufdisp_d;
dstranks_h = bas->iranks+bas->ndiranks; /* remote leaf ranks */
dstranks_d = bas->iranks_d;
} else { /* put data in leafbuf to rootbuf */
ndstranks = sf->nRemoteRootRanks;
src = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
dst = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
srcdisp_h = sf->roffset+sf->ndranks; /* offsets of leafbuf */
srcdisp_d = sf->roffset_d;
srcsig = link->leafSendSig;

dstdisp_h = sf->rootbufdisp; /* for my i-th remote root rank, I will access its root buf at offset rootbufdisp[i] */
dstdisp_d = sf->rootbufdisp_d;
dstranks_h = sf->ranks+sf->ndranks; /* remote root ranks */
dstranks_d = sf->ranks_d;
}

/* Wait for signals and then put data to dst ranks using non-blocking nvshmem_put, which are finished in PetscSFLinkPutDataEnd_NVSHMEM */

/* Count number of locally accessible neighbors, which should be a small number */
for (int i=0; i<ndstranks; i++) {if (nvshmem_ptr(dst,dstranks_h[i])) nLocallyAccessible++;}

/* For remotely accessible PEs, send data to them in one kernel call (one block per dst rank) */
if (nLocallyAccessible < ndstranks) {
WaitAndPutDataToRemotelyAccessible<<<ndstranks,1,0,link->remoteCommStream>>>(ndstranks,dstranks_d,dst,dstdisp_d,src,srcdisp_d,srcsig,link->unitbytes);
cerr = cudaGetLastError();CHKERRCUDA(cerr);
}

/* For locally accessible PEs, use host API, which uses CUDA copy-engines and is much faster than device API */
if (nLocallyAccessible) {
/* First claim the signals on the stream, so the puts below are ordered after the wait */
WaitSignalsFromLocallyAccessible<<<1,1,0,link->remoteCommStream>>>(ndstranks,dstranks_d,srcsig,dst);
for (int i=0; i<ndstranks; i++) {
int pe = dstranks_h[i];
if (nvshmem_ptr(dst,pe)) { /* If return a non-null pointer, then <pe> is locally accessible */
size_t nelems = (srcdisp_h[i+1]-srcdisp_h[i])*link->unitbytes; /* byte count, despite the name */
/* Initiate the nonblocking communication */
nvshmemx_putmem_nbi_on_stream(dst+dstdisp_h[i]*link->unitbytes,src+(srcdisp_h[i]-srcdisp_h[0])*link->unitbytes,nelems,pe,link->remoteCommStream);
}
}
}

if (nLocallyAccessible) {
nvshmemx_quiet_on_stream(link->remoteCommStream); /* Calling nvshmem_fence/quiet() does not fence the above nvshmemx_putmem_nbi_on_stream! */
}
PetscFunctionReturn(0);
}
/* A one-thread kernel that finishes the put protocol on the device.
   The single thread first notifies every destination rank that its recv buffer has been filled,
   then waits until all of this rank's own sources have done the same for us. */
__global__ static void PutDataEnd(PetscInt nsrcranks,PetscInt ndstranks,PetscMPIInt *dstranks,uint64_t *dstsig,PetscInt *dstsigdisp)
{
/* TODO: Shall we finish the non-blocking remote puts first? */

/* 1. Send a signal to each dst rank */

/* According to Akhil@NVIDIA, IB is orderred, so no fence is needed for remote PEs.
   For local PEs, we already called nvshmemx_quiet_on_stream(). Therefore, we are good to send signals to all dst ranks now.
*/
for (int i=0; i<ndstranks; i++) {nvshmemx_uint64_signal(dstsig+dstsigdisp[i],1,dstranks[i]);} /* set sig to 1 */

/* 2. Wait for signals from src ranks (if any). dstsig is symmetric, so locally it holds my own recv signals. */
if (nsrcranks) {
nvshmem_uint64_wait_until_all(dstsig,nsrcranks,NULL/*no mask*/,NVSHMEM_CMP_EQ,1); /* wait sigs to be 1, then set them to 0 */
for (int i=0; i<nsrcranks; i++) dstsig[i] = 0;
}
}
/* Finish put-based communication -- signal every destination rank that its recv buffer has
   been filled, and wait until all of my own sources have signaled me, so my recv buffer is
   safe to unpack. */
PetscErrorCode PetscSFLinkPutDataEnd_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
{
  PetscErrorCode ierr;
  cudaError_t    cerr;
  PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
  PetscMPIInt   *peers;
  uint64_t      *recvsig;
  PetscInt       nsrc,ndst,*sigdisp;

  PetscFunctionBegin;
  if (direction == PETSCSF_ROOT2LEAF) { /* root data was put to leaf ranks */
    nsrc    = sf->nRemoteRootRanks;
    ndst    = bas->nRemoteLeafRanks;
    peers   = bas->iranks_d;       /* the remote leaf ranks */
    recvsig = link->leafRecvSig;   /* symmetric: remotely theirs to set, locally mine to wait on */
    sigdisp = bas->leafsigdisp_d;  /* signal offset for my i-th remote leaf rank */
  } else { /* LEAF2ROOT: leaf data was put to root ranks */
    nsrc    = bas->nRemoteLeafRanks;
    ndst    = sf->nRemoteRootRanks;
    peers   = sf->ranks_d;
    recvsig = link->rootRecvSig;
    sigdisp = sf->rootsigdisp_d;
  }
  if (nsrc || ndst) {
    PutDataEnd<<<1,1,0,link->remoteCommStream>>>(nsrc,ndst,peers,recvsig,sigdisp);
    cerr = cudaGetLastError();CHKERRCUDA(cerr);
  }
  ierr = PetscSFLinkBuildDependenceEnd(sf,link,direction);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* PostUnpack operation for the put protocol.
   Having consumed its recv buffer, a receiver resets each sender's send signal to 0,
   which grants the sender permission to put fresh data here. */
PetscErrorCode PetscSFLinkSendSignalsToAllowPuttingData_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
{
  PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
  uint64_t      *sendsig;
  PetscMPIInt   *peers;
  PetscInt       npeers,*sigdisp;

  PetscFunctionBegin;
  if (direction == PETSCSF_ROOT2LEAF) { /* I allow my root ranks to put data to me */
    npeers  = sf->nRemoteRootRanks;
    sendsig = link->rootSendSig;  /* symmetric address of the roots' send signals */
    sigdisp = sf->rootsigdisp_d;  /* offset of each root's signal */
    peers   = sf->ranks_d;        /* the remote root ranks */
  } else { /* LEAF2ROOT: I allow my leaf ranks to put data to me */
    npeers  = bas->nRemoteLeafRanks;
    sendsig = link->leafSendSig;
    sigdisp = bas->leafsigdisp_d;
    peers   = bas->iranks_d;
  }
  if (npeers) {
    cudaError_t cerr;
    NvshmemSendSignals<<<(npeers+255)/256,256,0,link->remoteCommStream>>>(npeers,sendsig,sigdisp,peers,0); /* Set remote signals to 0 */
    cerr = cudaGetLastError();CHKERRCUDA(cerr);
  }
  PetscFunctionReturn(0);
}
/* Destructor for a link that communicates through NVSHMEM.
   Releases the CUDA event/stream resources and the symmetric-heap buffers/signals owned by the link. */
static PetscErrorCode PetscSFLinkDestroy_NVSHMEM(PetscSF sf,PetscSFLink link)
{
  PetscErrorCode ierr;
  cudaError_t    cstat;

  PetscFunctionBegin;
  /* CUDA resources used to order <stream> against <remoteCommStream> */
  cstat = cudaEventDestroy(link->dataReady);CHKERRCUDA(cstat);
  cstat = cudaEventDestroy(link->endRemoteComm);CHKERRCUDA(cstat);
  cstat = cudaStreamDestroy(link->remoteCommStream);CHKERRCUDA(cstat);
  /* nvshmem does not need buffers on host, which should be NULL; free the symmetric-heap allocations */
  ierr = PetscNvshmemFree(link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]);CHKERRQ(ierr);
  ierr = PetscNvshmemFree(link->leafSendSig);CHKERRQ(ierr);
  ierr = PetscNvshmemFree(link->leafRecvSig);CHKERRQ(ierr);
  ierr = PetscNvshmemFree(link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]);CHKERRQ(ierr);
  ierr = PetscNvshmemFree(link->rootSendSig);CHKERRQ(ierr);
  ierr = PetscNvshmemFree(link->rootRecvSig);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* Create (or fetch from the cache) an SFLink configured for NVSHMEM communication.
   Decides whether root/leafdata can be used directly as send/recv buffers, sets up the
   remote-communication stream/events and symmetric-heap buffers/signals on first use,
   and registers the link in the SF's in-use list. */
PetscErrorCode PetscSFLinkCreate_NVSHMEM(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,const void *leafdata,MPI_Op op,PetscSFOperation sfop,PetscSFLink *mylink)
{
PetscErrorCode ierr;
cudaError_t cerr;
PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
PetscSFLink *p,link;
PetscBool match,rootdirect[2],leafdirect[2];
int greatestPriority;

PetscFunctionBegin;
/* Check to see if we can directly send/recv root/leafdata with the given sf, sfop and op.
   We only care root/leafdirect[PETSCSF_REMOTE], since we never need intermediate buffers in local communication with NVSHMEM.
*/
if (sfop == PETSCSF_BCAST) { /* Move data from rootbuf to leafbuf */
if (sf->use_nvshmem_get) {
rootdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* send buffer has to be stand-alone (can't be rootdata) */
leafdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(leafmtype) && sf->leafcontig[PETSCSF_REMOTE] && op == MPI_REPLACE) ? PETSC_TRUE : PETSC_FALSE;
} else {
rootdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(rootmtype) && bas->rootcontig[PETSCSF_REMOTE]) ? PETSC_TRUE : PETSC_FALSE;
leafdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* Our put-protocol always needs a nvshmem alloc'ed recv buffer */
}
} else if (sfop == PETSCSF_REDUCE) { /* Move data from leafbuf to rootbuf */
if (sf->use_nvshmem_get) {
rootdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(rootmtype) && bas->rootcontig[PETSCSF_REMOTE] && op == MPI_REPLACE) ? PETSC_TRUE : PETSC_FALSE;
leafdirect[PETSCSF_REMOTE] = PETSC_FALSE;
} else {
rootdirect[PETSCSF_REMOTE] = PETSC_FALSE;
leafdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(leafmtype) && sf->leafcontig[PETSCSF_REMOTE]) ? PETSC_TRUE : PETSC_FALSE;
}
} else { /* PETSCSF_FETCH */
rootdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* FETCH always need a separate rootbuf */
leafdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* We also force allocating a separate leafbuf so that leafdata and leafupdate can share mpi requests */
}

/* Look for free nvshmem links in cache */
for (p=&bas->avail; (link=*p); p=&link->next) {
if (link->use_nvshmem) {
ierr = MPIPetsc_Type_compare(unit,link->unit,&match);CHKERRQ(ierr);
if (match) {
*p = link->next; /* Remove from available list */
goto found;
}
}
}

/* No cached link matched: build a fresh one */
ierr = PetscNew(&link);CHKERRQ(ierr);
ierr = PetscSFLinkSetUp_Host(sf,link,unit);CHKERRQ(ierr); /* Compute link->unitbytes, dup link->unit etc. */
if (sf->backend == PETSCSF_BACKEND_CUDA) {ierr = PetscSFLinkSetUp_CUDA(sf,link,unit);CHKERRQ(ierr);} /* Setup pack routines, streams etc */
#if defined(PETSC_HAVE_KOKKOS)
else if (sf->backend == PETSCSF_BACKEND_KOKKOS) {ierr = PetscSFLinkSetUp_Kokkos(sf,link,unit);CHKERRQ(ierr);}
#endif

link->rootdirect[PETSCSF_LOCAL] = PETSC_TRUE; /* For the local part we directly use root/leafdata */
link->leafdirect[PETSCSF_LOCAL] = PETSC_TRUE;

/* Init signals to zero (symmetric-heap allocations, sized by the max over the comm so they are symmetric) */
if (!link->rootSendSig) {ierr = PetscNvshmemCalloc(bas->nRemoteLeafRanksMax*sizeof(uint64_t),(void**)&link->rootSendSig);CHKERRQ(ierr);}
if (!link->rootRecvSig) {ierr = PetscNvshmemCalloc(bas->nRemoteLeafRanksMax*sizeof(uint64_t),(void**)&link->rootRecvSig);CHKERRQ(ierr);}
if (!link->leafSendSig) {ierr = PetscNvshmemCalloc(sf->nRemoteRootRanksMax*sizeof(uint64_t),(void**)&link->leafSendSig);CHKERRQ(ierr);}
if (!link->leafRecvSig) {ierr = PetscNvshmemCalloc(sf->nRemoteRootRanksMax*sizeof(uint64_t),(void**)&link->leafRecvSig);CHKERRQ(ierr);}

link->use_nvshmem = PETSC_TRUE;
link->rootmtype = PETSC_MEMTYPE_DEVICE; /* Only need 0/1-based mtype from now on */
link->leafmtype = PETSC_MEMTYPE_DEVICE;
/* Overwrite some function pointers set by PetscSFLinkSetUp_CUDA */
link->Destroy = PetscSFLinkDestroy_NVSHMEM;
if (sf->use_nvshmem_get) { /* get-based protocol */
link->PrePack = PetscSFLinkWaitSignalsOfCompletionOfGettingData_NVSHMEM;
link->StartCommunication = PetscSFLinkGetDataBegin_NVSHMEM;
link->FinishCommunication = PetscSFLinkGetDataEnd_NVSHMEM;
} else { /* put-based protocol */
link->StartCommunication = PetscSFLinkPutDataBegin_NVSHMEM;
link->FinishCommunication = PetscSFLinkPutDataEnd_NVSHMEM;
link->PostUnpack = PetscSFLinkSendSignalsToAllowPuttingData_NVSHMEM;
}
/* Use a high-priority stream so remote communication can overtake local compute work */
cerr = cudaDeviceGetStreamPriorityRange(NULL,&greatestPriority);CHKERRCUDA(cerr);
cerr = cudaStreamCreateWithPriority(&link->remoteCommStream,cudaStreamNonBlocking,greatestPriority);CHKERRCUDA(cerr);

cerr = cudaEventCreateWithFlags(&link->dataReady,cudaEventDisableTiming);CHKERRCUDA(cerr);
cerr = cudaEventCreateWithFlags(&link->endRemoteComm,cudaEventDisableTiming);CHKERRCUDA(cerr);

found:
/* Bind buffers: either alias the user data directly, or lazily allocate symmetric-heap buffers */
if (rootdirect[PETSCSF_REMOTE]) {
link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = (char*)rootdata + bas->rootstart[PETSCSF_REMOTE]*link->unitbytes;
} else {
if (!link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]) {
ierr = PetscNvshmemMalloc(bas->rootbuflen_rmax*link->unitbytes,(void**)&link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]);CHKERRQ(ierr);
}
link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
}

if (leafdirect[PETSCSF_REMOTE]) {
link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = (char*)leafdata + sf->leafstart[PETSCSF_REMOTE]*link->unitbytes;
} else {
if (!link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]) {
ierr = PetscNvshmemMalloc(sf->leafbuflen_rmax*link->unitbytes,(void**)&link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]);CHKERRQ(ierr);
}
link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
}

link->rootdirect[PETSCSF_REMOTE] = rootdirect[PETSCSF_REMOTE];
link->leafdirect[PETSCSF_REMOTE] = leafdirect[PETSCSF_REMOTE];
link->rootdata = rootdata; /* root/leafdata are keys to look up links in PetscSFXxxEnd */
link->leafdata = leafdata;

link->next = bas->inuse;
bas->inuse = link;
*mylink = link;
PetscFunctionReturn(0);
}
#if defined(PETSC_USE_REAL_SINGLE)
/* Element-wise sum-reduce <src> over NVSHMEM_TEAM_WORLD into <dst> (float build).
   Enqueued on PetscDefaultCudaStream; the caller is responsible for any synchronization. */
PetscErrorCode PetscNvshmemSum(PetscInt count,float *dst,const float *src)
{
PetscErrorCode ierr;
PetscMPIInt num; /* Assume nvshmem's int is MPI's int */

PetscFunctionBegin;
ierr = PetscMPIIntCast(count,&num);CHKERRQ(ierr);
nvshmemx_float_sum_reduce_on_stream(NVSHMEM_TEAM_WORLD,dst,src,num,PetscDefaultCudaStream);
PetscFunctionReturn(0);
}

/* Element-wise max-reduce <src> over NVSHMEM_TEAM_WORLD into <dst> (float build). */
PetscErrorCode PetscNvshmemMax(PetscInt count,float *dst,const float *src)
{
PetscErrorCode ierr;
PetscMPIInt num;

PetscFunctionBegin;
ierr = PetscMPIIntCast(count,&num);CHKERRQ(ierr);
nvshmemx_float_max_reduce_on_stream(NVSHMEM_TEAM_WORLD,dst,src,num,PetscDefaultCudaStream);
PetscFunctionReturn(0);
}
#elif defined(PETSC_USE_REAL_DOUBLE)
/* Element-wise sum-reduce <src> over NVSHMEM_TEAM_WORLD into <dst> (double build). */
PetscErrorCode PetscNvshmemSum(PetscInt count,double *dst,const double *src)
{
PetscErrorCode ierr;
PetscMPIInt num;

PetscFunctionBegin;
ierr = PetscMPIIntCast(count,&num);CHKERRQ(ierr);
nvshmemx_double_sum_reduce_on_stream(NVSHMEM_TEAM_WORLD,dst,src,num,PetscDefaultCudaStream);
PetscFunctionReturn(0);
}

/* Element-wise max-reduce <src> over NVSHMEM_TEAM_WORLD into <dst> (double build). */
PetscErrorCode PetscNvshmemMax(PetscInt count,double *dst,const double *src)
{
PetscErrorCode ierr;
PetscMPIInt num;

PetscFunctionBegin;
ierr = PetscMPIIntCast(count,&num);CHKERRQ(ierr);
nvshmemx_double_max_reduce_on_stream(NVSHMEM_TEAM_WORLD,dst,src,num,PetscDefaultCudaStream);
PetscFunctionReturn(0);
}
#endif
|
4f071d7ffae6601a0dd9d9d18f587f019224b0d4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <iostream>
#include <vector>
//includes de CUDA
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include "hip/device_functions.h"
//Clases necesarias
#include "BinaryRestriction.h"
#include "VectorBR.h"
#include "BinaryMixer.h"
//__device__ int cont = 0;
int block_width = 1024;
/* Error-checking wrapper: use as gpuErrchk(hipSomeCall(...)) to report the call site on failure. */
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
/* Prints the HIP error name and description together with file/line, and (by default) exits the process. */
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: \"%s\": %s %s %d\n", hipGetErrorName(code), hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
//Kernel that intersects each thread's solution table with every solution table of the new
//restriction, appending valid combined tables to d_out and counting them through d_cont.
//Each thread owns one solution table of d_temp (numStations rows, one bitmask per station).
__global__ void ParallelMix(unsigned long long int * d_cont ,unsigned int number_threads, int d_rest_size ,int numStations, unsigned int * d_temp, unsigned int * d_rest, unsigned int * d_out) {
//Global thread ID
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
//Condition that verifies the usefulness of the thread.
if (tid < number_threads) {
//VARIABLES
//Auxiliary variable: becomes >0 when some activity is assigned twice.
unsigned int aux_v = 0;
//Bitmask accumulating which activities have been assigned so far.
unsigned int cum_v = 0;
//Flag raised when an activity has been assigned twice (invalid table).
bool flag = false;
//Fixed-size scratch row buffer; 7 is an upper bound on the number of stations of a problem.
//For now leave it at 7. (review later with the advisor)
unsigned int aux[7] = { 0,0,0,0,0,0,0 };
//Print the thread
//printf("Thread Num: %d \n", number_threads);
//printf("Thread ID: %d \n", tid);
//Loop over all solution tables in Ri.
//Each thread intersects its own solution table (d_temp) with every table of the new restriction (d_rest).
for (int i = 0; i <d_rest_size ; i++) {
//Perform OR operations on every line (STATION) of the solution table.
//This identifies whether an activity is assigned to more than one station.
for (int k = 0; k<numStations; k++) {
// Pseudo code of the boolean intersection algorithm:
//1. Perform OR operation on table row (k) from tid solution table (d_temp) and new constraint (d_rest).
// The result is stored in aux[k], declared above.
// 1.1. Perform AND operation with aux[k] and cum_v to detect double assignments.
///////////////////////////////////////////////////////////////////////////////
aux[k] = *(d_temp + (tid*numStations + k)) | *(d_rest + k + (i*numStations));
//Update the accumulator with the AND result as long as the flag
//has not been raised.
if (!flag) {
aux_v = cum_v & aux[k];
if (aux_v > 0) {
flag = true;
}
cum_v = cum_v^aux[k]; //XOR equals OR here: the AND above just ruled out overlapping bits
}
}
if (!flag) {
//VALID table: atomically reserve an output slot and copy the combined rows out.
//NOTE(review): atomicAdd returns unsigned long long; truncation to unsigned int is
//safe only while the number of valid tables fits in 32 bits.
aux_v = atomicAdd(d_cont, 1);
for (int j = 0; j < numStations; j++) {
*(d_out + (aux_v*numStations) + j) = aux[j];
}
printf("VALIDA %d, %d\n", tid, i);
flag = false;
aux_v = 0;
cum_v = 0;
}
else {
//INVALID table: discard and reset per-table state (aux[] is fully rewritten next iteration).
printf("INVALIDA %d, %d\n", tid, i);
flag = false;
aux_v = 0;
cum_v = 0;
}
}
//End of for loop
//free memory from the variables
}
}
extern "C" bool
runTest(int argc, const char **argv, unsigned long long int * h_cont, int numStations, unsigned int * h_temp, unsigned long long int h_temp_size , unsigned int * h_rest, int h_rest_size, unsigned int * h_out) {
//use command-line specified CUDA device, otherwise use device with highest Gflops
//esta linea tal vez no sea necesaria, al fin slo tomar la GPU del servidor.
findCudaDevice(argc, (const char **)argv);
//Calcula los threads y los bloques.
unsigned int num_threads = h_temp_size / numStations;
unsigned int num_blocks = 1 + (num_threads / block_width);
cout << "Number of threads: " << num_threads << endl;
cout << "Number of Blocks: " << num_blocks << endl;
//GPU memory status variables
size_t total_mem, free_mem;
//Memory requirement for restriction
const size_t mem_size_rest = sizeof(int) * h_rest_size;
//Create Device Variables
unsigned int * d_rest;
unsigned int * d_out;
unsigned long long int * d_cont;
unsigned long long int * internal_cont = new unsigned long long int[1];
*(internal_cont) = 0;
//Se hacen las reservas de memoria en la gpu.
checkCudaErrors(hipMalloc((void **)&d_rest, mem_size_rest));
checkCudaErrors(hipMalloc((void **)&d_cont, sizeof(unsigned long long int)));
//se copia de lamemoria el host al device.
checkCudaErrors(hipMemcpy(d_rest, h_rest, mem_size_rest, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_cont, h_cont, sizeof(unsigned long long int), hipMemcpyHostToDevice));
//reporte de la memoria.
hipMemGetInfo(&free_mem, &total_mem);
printf("Free Memory: %s \tTotal Memory: %s \n", to_string(free_mem), to_string(total_mem));
//cout << "Out vector byte size: " << to_string(mem_size_out) << endl;
//cout << "Rest vector size: " << h_rest_size << endl;
//printf("Out vector size: %s\n", to_string((h_temp_size * (h_rest_size / numStations))));
//Declare a 1 MB constant
const size_t MB = 1 << 20;
//Buffer Conffiguration: Servir para administrar los batches en la GPU.
hipError_t status;
unsigned int * buffer;
//Allocate at mmost 500MB of memory for the buffer size.
//This limitation will allow us to allocate enough memory to store the resulting solutions vector.
size_t buffer_size = ::min((long long)4294967296 / (h_rest_size / numStations), (long long)536870912);
//Make sure the allocation is succesful
for (; buffer_size > MB; buffer_size -= MB) {
//Allocate memory for buffer
status = hipMalloc((void **)&buffer, buffer_size);
if (status == hipSuccess)
break;
}
//Bytes reservados en la GPU
cout << "Allocated " << buffer_size << " bytes on GPU. " << endl;
cout << h_temp_size << " int items require processing. " << endl;
//PRINT: Batch size.
//Tamao del buffer auxiliar, ayua a preservar la integridad del vector de soluciones.
size_t aux_buff_size = (buffer_size / sizeof(int)) % numStations;
//Numero de elementos enteros que sern procesados en este batch.
size_t batchN = (buffer_size - aux_buff_size) / sizeof(int);
//elementos a la espera de procesarse.
size_t remainN = h_temp_size;
int elements_out = batchN * (h_rest_size / numStations);
const size_t mem_size_out = elements_out * sizeof(int);
checkCudaErrors(hipMalloc((void **)&d_out, mem_size_out));
cout << "Number of int proccessed in this batch: " << batchN << endl;
//Loop sobre los datos del host, en batches prdefinidos arriba.
for (; remainN > 0; remainN -= batchN) {
cout << "\n BATCH \n\n";
//Si remain es menor que el tamano del batch,
//ajusto el tamano del batch a lo que queda de datos
//y en caso contrario lo dejo igual
batchN = (remainN<batchN) ? remainN : batchN;
size_t worksize = batchN * sizeof(int);
cout << "Processing " << batchN << "/" << remainN << " items remaining." << endl;
//COPY values from host temporal solution to buffer.
//Process the items in buffer.
hipMemcpy(buffer, h_temp, worksize, hipMemcpyHostToDevice);
//La cantidad de threads se determina por el nmero de tablas de solucion en el vector.
num_threads = batchN / numStations;
num_blocks = 1 + (num_threads / block_width);
//Prints
cout << "Number of threads: " << num_threads << endl;
cout << "Number of Blocks: " << num_blocks << endl;
//Lanzamos el kernel.
ParallelMix << < num_blocks, block_width >> >(d_cont, num_threads, h_rest_size / numStations, numStations, buffer, d_rest, d_out);
cout << "Succesful execution\n";
//copiamos el contador de tablas de lgpu al cpu.
gpuErrchk(hipMemcpy(h_cont, d_cont, sizeof(unsigned long long int), hipMemcpyDeviceToHost));
*(internal_cont) += *(h_cont);
cout << "Succesful execution\n";
//Se copin las soluciones validas al host.
//d_cont es el numero de soluciones de tablas vlidas a partir ed que se corri el kernel.
cout << *(h_cont) << endl;
cout << "ACUM: " << *(internal_cont) << endl;
gpuErrchk(hipMemcpy(h_out, d_out,*(h_cont)*numStations*sizeof(int), hipMemcpyDeviceToHost));
//Se actualizan apuntadores en la magnitud del batch que se proces.
h_temp += batchN;
//Se actualiza el apuntado h_put en la magnitud del numero de soluciones validas del batch procesado.
h_out = h_out + (*(h_cont)*numStations);
gpuErrchk(hipMemset(d_cont, (unsigned long long int)0, sizeof(unsigned long long int)));
gpuErrchk(hipMemset(d_out, 0,mem_size_out));
}
//GPU memory status REPORT.
hipMemGetInfo(&free_mem, &total_mem);
printf("Free Memory: %s \tTotal Memory: %s \n", to_string(free_mem), to_string(total_mem));
*(h_cont) = *(internal_cont);
//cleanup device memory
//Reset Device
hipDeviceReset();
return 0;
}
| 4f071d7ffae6601a0dd9d9d18f587f019224b0d4.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <iostream>
#include <vector>
//includes de CUDA
#include <cuda.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include "device_functions.h"
//Clases necesarias
#include "BinaryRestriction.h"
#include "VectorBR.h"
#include "BinaryMixer.h"
//__device__ int cont = 0;
int block_width = 1024;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: \"%s\": %s %s %d\n", cudaGetErrorName(code), cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
//funcion que me regrese el contador
__global__ void ParallelMix(unsigned long long int * d_cont ,unsigned int number_threads, int d_rest_size ,int numStations, unsigned int * d_temp, unsigned int * d_rest, unsigned int * d_out) {
//ID del thread
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
//Condition that verifies the usefulness of the thread.
if (tid < number_threads) {
//VARIABLES
//Esta variable auxiliar denota la doble asignación de una actividad si es >0.
unsigned int aux_v = 0;
//Esta otra variable denota que actividades se hana asignado.
unsigned int cum_v = 0;
//Bandera que se dispara si una actividade se ha asigando dos veces.
bool flag = false;
//Este vector auxiliar es una cota, segun el enumero de estaciones de un problema.
//Por el momento dejarlo en 7. (revisar despues con el doc)
unsigned int aux[7] = { 0,0,0,0,0,0,0 };
//Imprimimos el thread
//printf("Thread Num: %d \n", number_threads);
//printf("Thread ID: %d \n", tid);
//for donde se itern sobre todas las tablas de solución en Ri.
//Cada thread intersectará cada tabla de solución en la nueva restriccion d_temp.
for (int i = 0; i <d_rest_size ; i++) {
//Perform OR operations on every line (STATION) of the solution table.
//Se hace un or en cada linea (osea cad estación) de la tabla de solución.
//Esto para identificar si una actividad esta asignada a más de una estación.
for (int k = 0; k<numStations; k++) {
// El pseudo código del algoritmo de intersección booleana es el siguiente.
//1. Perform OR operation on table row (k) from tid solution table (d_temp) and new constraint (d_rest).
//1. Primero haz un or sobre la fila k (osea estación) de la tabla de solución d_temp y la nueva restriccioń d_rest.
// Se guarda el resultado en aux[k].cx, que declaramos arriba.
// 1.1. Realizar operación AND con aux[k] y cum_v.
///////////////////////////////////////////////////////////////////////////////
aux[k] = *(d_temp + (tid*numStations + k)) | *(d_rest + k + (i*numStations));
//actualización de vector aux con el AND siempre y cuando la bandera no se
//haya disparado.
if (!flag) {
aux_v = cum_v & aux[k];
if (aux_v > 0) {
flag = true;
}
cum_v = cum_v^aux[k];
}
}
if (!flag) {
//Tabla VALIDA
aux_v = atomicAdd(d_cont, 1);
for (int j = 0; j < numStations; j++) {
*(d_out + (aux_v*numStations) + j) = aux[j];
}
printf("VALIDA %d, %d\n", tid, i);
flag = false;
aux_v = 0;
cum_v = 0;
}
else {
//Tabla INVALIDA
printf("INVALIDA %d, %d\n", tid, i);
flag = false;
aux_v = 0;
cum_v = 0;
}
}
//Termina forloop
//free memory from the variables
}
}
extern "C" bool
runTest(int argc, const char **argv, unsigned long long int * h_cont, int numStations, unsigned int * h_temp, unsigned long long int h_temp_size , unsigned int * h_rest, int h_rest_size, unsigned int * h_out) {
//use command-line specified CUDA device, otherwise use device with highest Gflops
//esta linea tal vez no sea necesaria, al fin sólo tomará la GPU del servidor.
findCudaDevice(argc, (const char **)argv);
//Calcula los threads y los bloques.
unsigned int num_threads = h_temp_size / numStations;
unsigned int num_blocks = 1 + (num_threads / block_width);
cout << "Number of threads: " << num_threads << endl;
cout << "Number of Blocks: " << num_blocks << endl;
//GPU memory status variables
size_t total_mem, free_mem;
//Memory requirement for restriction
const size_t mem_size_rest = sizeof(int) * h_rest_size;
//Create Device Variables
unsigned int * d_rest;
unsigned int * d_out;
unsigned long long int * d_cont;
unsigned long long int * internal_cont = new unsigned long long int[1];
*(internal_cont) = 0;
//Se hacen las reservas de memoria en la gpu.
checkCudaErrors(cudaMalloc((void **)&d_rest, mem_size_rest));
checkCudaErrors(cudaMalloc((void **)&d_cont, sizeof(unsigned long long int)));
//se copia de lamemoria el host al device.
checkCudaErrors(cudaMemcpy(d_rest, h_rest, mem_size_rest, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_cont, h_cont, sizeof(unsigned long long int), cudaMemcpyHostToDevice));
//reporte de la memoria.
cudaMemGetInfo(&free_mem, &total_mem);
printf("Free Memory: %s \tTotal Memory: %s \n", to_string(free_mem), to_string(total_mem));
//cout << "Out vector byte size: " << to_string(mem_size_out) << endl;
//cout << "Rest vector size: " << h_rest_size << endl;
//printf("Out vector size: %s\n", to_string((h_temp_size * (h_rest_size / numStations))));
//Declare a 1 MB constant
const size_t MB = 1 << 20;
//Buffer Conffiguration: Servirá para administrar los batches en la GPU.
cudaError_t status;
unsigned int * buffer;
//Allocate at mmost 500MB of memory for the buffer size.
//This limitation will allow us to allocate enough memory to store the resulting solutions vector.
size_t buffer_size = std::min((long long)4294967296 / (h_rest_size / numStations), (long long)536870912);
//Make sure the allocation is succesful
for (; buffer_size > MB; buffer_size -= MB) {
//Allocate memory for buffer
status = cudaMalloc((void **)&buffer, buffer_size);
if (status == cudaSuccess)
break;
}
//Bytes reservados en la GPU
cout << "Allocated " << buffer_size << " bytes on GPU. " << endl;
cout << h_temp_size << " int items require processing. " << endl;
//PRINT: Batch size.
//Tamaño del buffer auxiliar, ayua a preservar la integridad del vector de soluciones.
size_t aux_buff_size = (buffer_size / sizeof(int)) % numStations;
//Numero de elementos enteros que serán procesados en este batch.
size_t batchN = (buffer_size - aux_buff_size) / sizeof(int);
//elementos a la espera de procesarse.
size_t remainN = h_temp_size;
int elements_out = batchN * (h_rest_size / numStations);
const size_t mem_size_out = elements_out * sizeof(int);
checkCudaErrors(cudaMalloc((void **)&d_out, mem_size_out));
cout << "Number of int proccessed in this batch: " << batchN << endl;
//Loop sobre los datos del host, en batches prdefinidos arriba.
for (; remainN > 0; remainN -= batchN) {
cout << "\n BATCH \n\n";
//Si remain es menor que el tamano del batch,
//ajusto el tamano del batch a lo que queda de datos
//y en caso contrario lo dejo igual
batchN = (remainN<batchN) ? remainN : batchN;
size_t worksize = batchN * sizeof(int);
cout << "Processing " << batchN << "/" << remainN << " items remaining." << endl;
//COPY values from host temporal solution to buffer.
//Process the items in buffer.
cudaMemcpy(buffer, h_temp, worksize, cudaMemcpyHostToDevice);
//La cantidad de threads se determina por el número de tablas de solucion en el vector.
num_threads = batchN / numStations;
num_blocks = 1 + (num_threads / block_width);
//Prints
cout << "Number of threads: " << num_threads << endl;
cout << "Number of Blocks: " << num_blocks << endl;
//Lanzamos el kernel.
ParallelMix << < num_blocks, block_width >> >(d_cont, num_threads, h_rest_size / numStations, numStations, buffer, d_rest, d_out);
cout << "Succesful execution\n";
//copiamos el contador de tablas de lgpu al cpu.
gpuErrchk(cudaMemcpy(h_cont, d_cont, sizeof(unsigned long long int), cudaMemcpyDeviceToHost));
*(internal_cont) += *(h_cont);
cout << "Succesful execution\n";
//Se copin las soluciones validas al host.
//d_cont es el numero de soluciones de tablas válidas a partir ed que se corrió el kernel.
cout << *(h_cont) << endl;
cout << "ACUM: " << *(internal_cont) << endl;
gpuErrchk(cudaMemcpy(h_out, d_out,*(h_cont)*numStations*sizeof(int), cudaMemcpyDeviceToHost));
//Se actualizan apuntadores en la magnitud del batch que se procesó.
h_temp += batchN;
//Se actualiza el apuntado h_put en la magnitud del numero de soluciones validas del batch procesado.
h_out = h_out + (*(h_cont)*numStations);
gpuErrchk(cudaMemset(d_cont, (unsigned long long int)0, sizeof(unsigned long long int)));
gpuErrchk(cudaMemset(d_out, 0,mem_size_out));
}
//GPU memory status REPORT.
cudaMemGetInfo(&free_mem, &total_mem);
printf("Free Memory: %s \tTotal Memory: %s \n", to_string(free_mem), to_string(total_mem));
*(h_cont) = *(internal_cont);
//cleanup device memory
//Reset Device
cudaDeviceReset();
return 0;
}
|
04275ca2ff0fedba9f5d28ed2c44c0f3b4b93c4e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 0.7
* copyright (c) 2020, Universidad Politcnica de Valencia (UPV), PRHLT Research Centre
* Date: April 2020
* Author: PRHLT Research Centre, UPV, (rparedes@prhlt.upv.es), (jon@prhlt.upv.es)
* All rights reserved
*/
#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <hip/hip_runtime.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include "eddl/hardware/gpu/gpu_kernels.h"
__global__ void gpu_max_d(float *D, float *PD, float *map, int size, int reduction_size, bool argmax){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int offset = thread_id_x*reduction_size;
// Choose if we're getting the maximum value or the position
if(argmax) {
int argmax_addr = map[thread_id_x];
PD[offset+argmax_addr] += D[thread_id_x];
}else{
PD[offset+thread_id_x] += D[thread_id_x];;
}
}
}
__global__ void gpu_max(float *A, float *B, int *map, int size, int size_reduction, bool argmax){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp_max = A[*base_addr];
int tmp_argmax = 0;
float val;
for(int i=1; i<size_reduction; i++){
val = A[*(base_addr+i)];
if(val > tmp_max){
tmp_max = val;
tmp_argmax = i;
}
}
// Choose if we're getting the maximum value or the position
if(argmax) {
B[thread_id_x] = (float)tmp_argmax;
}else{
B[thread_id_x] = tmp_max;
}
}
}
__global__ void gpu_min(float *A, float *B, int *map, int size, int size_reduction, bool argmin){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp_min = A[*base_addr];
int tmp_argmin = 0;
float val;
for(int i=1; i<size_reduction; i++){
val = A[*(base_addr+i)];
if(val < tmp_min){
tmp_min = val;
tmp_argmin = i;
}
}
// Choose if we're getting the minimum value or the position
if(argmin) {
B[thread_id_x] = (float)tmp_argmin;
}else{
B[thread_id_x] = tmp_min;
}
}
}
__global__ void gpu_sum(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp = 0.0f;
for(int i=0; i<size_reduction; i++){
tmp += A[*(base_addr+i)];
}
B[thread_id_x] = tmp;
}
}
__global__ void gpu_sum_abs(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp = 0.0f;
for(int i=0; i<size_reduction; i++){
tmp += abs(A[*(base_addr+i)]);
}
B[thread_id_x] = tmp;
}
}
__global__ void gpu_prod(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp = 1.0f;
for(int i=0; i<size_reduction; i++){
tmp *= A[*(base_addr+i)];
}
B[thread_id_x] = tmp;
}
}
__global__ void gpu_mean(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp = 0.0f;
for(int i=0; i<size_reduction; i++){
tmp += A[*(base_addr+i)];
}
B[thread_id_x] = tmp/(float)size_reduction;
}
}
__global__ void gpu_median(float *A, float *B, int *map, int size, int size_reduction, float *aux){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
// Copy values
long int offset = thread_id_x*size_reduction;
for(int i=0; i<size_reduction; i++){
aux[offset+i] = A[map[offset+i]];
}
// Sort data
thrust::sort(thrust::device, aux + offset, aux + offset + size_reduction);
// Get median
int midpoint = (int)offset + size_reduction/ 2;
if(size_reduction % 2==1 && size_reduction>1) {
B[thread_id_x] = aux[midpoint];
}else{
B[thread_id_x] = (aux[midpoint-1]+aux[midpoint])/2.0f;
}
}
}
__global__ void gpu_var(float *A, float *B, int *map, int size, int size_reduction, bool unbiased){
// IMPORTANT TRICK: B ALREADY CONTAINS THE MEAN!!!!!!!
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp;
float sum = 0.0f;
for(int i=0; i<size_reduction; i++){
tmp = A[*(base_addr+i)] - B[thread_id_x];
sum += tmp*tmp;
}
if(unbiased){
B[thread_id_x] = sum/((float)size_reduction-1.0f);
} else {
B[thread_id_x] = sum/(float)size_reduction;
}
}
}
__global__ void gpu_norm_fro(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp = 0.0f;
float val;
for(int i=0; i<size_reduction; i++){
val = A[*(base_addr+i)];
tmp += val*val;
}
B[thread_id_x] = sqrt(tmp);
}
}
__global__ void gpu_mode(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
// Copy values
int *values = new int[size_reduction]; // Dynamic allocation is not the best approach
for(int i=0; i<size_reduction; i++){
values[i] = (int)A[*(base_addr+i)];
}
// Sort data
thrust::sort(thrust::seq, values, values + size_reduction);
// Get most frequent element
int most_frequent_val;
int most_frequent_times = 0;
int val = values[0];
int frequency = 1;
for(int i=1; i<size_reduction; i++){
// Check if the value has change
if(val==values[i]){
frequency++;
}else{
val = values[i];
frequency = 1;
}
// Check frequency
if(frequency>most_frequent_times){
most_frequent_val = val;
most_frequent_times = frequency;
}
}
// Assign most frequent value
B[thread_id_x] = (float)most_frequent_val;
// Delete temp array
delete[] values;
}
}
/* PREVIOUS REDUCES ***********************************/
__global__ void reduce_mean(float *A,float *B,int *map,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
atomicAdd(&(B[map[thread_id_x]]),A[thread_id_x]);
}
}
__global__ void reduce_op_sum(float *A,float *B,int *map,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
A[thread_id_x]+=B[map[thread_id_x]];
}
}
__global__ void reduce_op_diff(float *A,float *B,int *map,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
A[thread_id_x]-=B[map[thread_id_x]];
}
}
__global__ void reduce_op_mult(float *A,float *B,int *map,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
A[thread_id_x]*=B[map[thread_id_x]];
}
}
__global__ void reduce_op_div(float *A,float *B,int *map,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
A[thread_id_x]/=B[map[thread_id_x]];
}
}
//dim3 dimGrid(RD->index.size());
//dim3 dimBlock(1);
__global__ void reduction_kernel(float *I,float *O,float *S,int m, int keepdims,int d,int *ind,int rs)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
int j;
float sum=0;
float v,val;
int i;
int p=rs*blockIdx.x;
for(j=0;j<rs;j++,p++) {
v=I[ind[p]];
if (m==2) {
if (j==0) {val=v;i=p;}
else if (v>val) {
val=v;
i=p;
}
}
else if (m==3) {
if (j==0) {val=v;i=p;}
else if (v<val) {
val=v;
i=p;
}
}
else sum+=v;
}
p=rs*blockIdx.x;
// set in Output
if (m<2) { // mean or sum
if (m==0) sum/=d;
if (keepdims) {
for(j=0;j<rs;j++,p++)
O[ind[p]]=sum;
}
else O[thread_id_x]=sum;
}
else { // rs or min
if (keepdims) {
for(j=0;j<rs;j++,p++) {
O[ind[p]]=val;
S[ind[p]]=i;
}
}
else {
O[thread_id_x]=val;
S[thread_id_x]=i;
}
}
}
//dim3 dimGrid(RD->index.size());
//dim3 dimBlock(1);
__global__ void reduction_back_kernel(float *I,float *O,float *S,int m, int keepdims,int d,int *ind,int rs)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
int j;
float val=0;
int p;
// set in Delta
if (m>=2) {
int p=S[thread_id_x];
O[p]+=I[thread_id_x];
}
else {
p=rs*blockIdx.x;
if(keepdims) {
for(j=0;j<rs;j++,p++)
val+=I[ind[p]];
}
else val=I[thread_id_x];
if (m==0) val/=d;
p=rs*blockIdx.x;
for(j=0;j<rs;j++,p++)
O[ind[p]]+=val;
}
}
////////////////////
// FOR SUM and MEAN
// Faster in Conv
///////////////////
//dim3 dimGrid(red_size);
//dim3 dimBlock(RD->index.size());
__global__ void reduction_permute(float *I,float *O,int *ind,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size)
O[thread_id_x]=I[ind[thread_id_x]];
}
__global__ void reduction_kernel_keep(float *red, float *O, int *ind, int size, int rsize)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size*rsize) {
O[ind[thread_id_x]]=red[thread_id_x/rsize];
}
}
__global__ void reduction_kernel_keep_inc(float *red, float *O, int *ind, int size, int rsize)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size*rsize) {
O[ind[thread_id_x]]+=red[thread_id_x/rsize];
}
}
| 04275ca2ff0fedba9f5d28ed2c44c0f3b4b93c4e.cu | /*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 0.7
* copyright (c) 2020, Universidad Politécnica de Valencia (UPV), PRHLT Research Centre
* Date: April 2020
* Author: PRHLT Research Centre, UPV, (rparedes@prhlt.upv.es), (jon@prhlt.upv.es)
* All rights reserved
*/
#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cuda.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include "eddl/hardware/gpu/gpu_kernels.h"
__global__ void gpu_max_d(float *D, float *PD, float *map, int size, int reduction_size, bool argmax){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int offset = thread_id_x*reduction_size;
// Choose if we're getting the maximum value or the position
if(argmax) {
int argmax_addr = map[thread_id_x];
PD[offset+argmax_addr] += D[thread_id_x];
}else{
PD[offset+thread_id_x] += D[thread_id_x];;
}
}
}
__global__ void gpu_max(float *A, float *B, int *map, int size, int size_reduction, bool argmax){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp_max = A[*base_addr];
int tmp_argmax = 0;
float val;
for(int i=1; i<size_reduction; i++){
val = A[*(base_addr+i)];
if(val > tmp_max){
tmp_max = val;
tmp_argmax = i;
}
}
// Choose if we're getting the maximum value or the position
if(argmax) {
B[thread_id_x] = (float)tmp_argmax;
}else{
B[thread_id_x] = tmp_max;
}
}
}
__global__ void gpu_min(float *A, float *B, int *map, int size, int size_reduction, bool argmin){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp_min = A[*base_addr];
int tmp_argmin = 0;
float val;
for(int i=1; i<size_reduction; i++){
val = A[*(base_addr+i)];
if(val < tmp_min){
tmp_min = val;
tmp_argmin = i;
}
}
// Choose if we're getting the minimum value or the position
if(argmin) {
B[thread_id_x] = (float)tmp_argmin;
}else{
B[thread_id_x] = tmp_min;
}
}
}
__global__ void gpu_sum(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp = 0.0f;
for(int i=0; i<size_reduction; i++){
tmp += A[*(base_addr+i)];
}
B[thread_id_x] = tmp;
}
}
__global__ void gpu_sum_abs(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp = 0.0f;
for(int i=0; i<size_reduction; i++){
tmp += abs(A[*(base_addr+i)]);
}
B[thread_id_x] = tmp;
}
}
__global__ void gpu_prod(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp = 1.0f;
for(int i=0; i<size_reduction; i++){
tmp *= A[*(base_addr+i)];
}
B[thread_id_x] = tmp;
}
}
__global__ void gpu_mean(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp = 0.0f;
for(int i=0; i<size_reduction; i++){
tmp += A[*(base_addr+i)];
}
B[thread_id_x] = tmp/(float)size_reduction;
}
}
__global__ void gpu_median(float *A, float *B, int *map, int size, int size_reduction, float *aux){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
// Copy values
long int offset = thread_id_x*size_reduction;
for(int i=0; i<size_reduction; i++){
aux[offset+i] = A[map[offset+i]];
}
// Sort data
thrust::sort(thrust::device, aux + offset, aux + offset + size_reduction);
// Get median
int midpoint = (int)offset + size_reduction/ 2;
if(size_reduction % 2==1 && size_reduction>1) {
B[thread_id_x] = aux[midpoint];
}else{
B[thread_id_x] = (aux[midpoint-1]+aux[midpoint])/2.0f;
}
}
}
__global__ void gpu_var(float *A, float *B, int *map, int size, int size_reduction, bool unbiased){
// IMPORTANT TRICK: B ALREADY CONTAINS THE MEAN!!!!!!!
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp;
float sum = 0.0f;
for(int i=0; i<size_reduction; i++){
tmp = A[*(base_addr+i)] - B[thread_id_x];
sum += tmp*tmp;
}
if(unbiased){
B[thread_id_x] = sum/((float)size_reduction-1.0f);
} else {
B[thread_id_x] = sum/(float)size_reduction;
}
}
}
__global__ void gpu_norm_fro(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
float tmp = 0.0f;
float val;
for(int i=0; i<size_reduction; i++){
val = A[*(base_addr+i)];
tmp += val*val;
}
B[thread_id_x] = sqrt(tmp);
}
}
__global__ void gpu_mode(float *A, float *B, int *map, int size, int size_reduction){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
int *base_addr = &map[thread_id_x*size_reduction+0];
// Copy values
int *values = new int[size_reduction]; // Dynamic allocation is not the best approach
for(int i=0; i<size_reduction; i++){
values[i] = (int)A[*(base_addr+i)];
}
// Sort data
thrust::sort(thrust::seq, values, values + size_reduction);
// Get most frequent element
int most_frequent_val;
int most_frequent_times = 0;
int val = values[0];
int frequency = 1;
for(int i=1; i<size_reduction; i++){
// Check if the value has change
if(val==values[i]){
frequency++;
}else{
val = values[i];
frequency = 1;
}
// Check frequency
if(frequency>most_frequent_times){
most_frequent_val = val;
most_frequent_times = frequency;
}
}
// Assign most frequent value
B[thread_id_x] = (float)most_frequent_val;
// Delete temp array
delete[] values;
}
}
/* PREVIOUS REDUCES ***********************************/
__global__ void reduce_mean(float *A,float *B,int *map,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
atomicAdd(&(B[map[thread_id_x]]),A[thread_id_x]);
}
}
__global__ void reduce_op_sum(float *A,float *B,int *map,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
A[thread_id_x]+=B[map[thread_id_x]];
}
}
__global__ void reduce_op_diff(float *A,float *B,int *map,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
A[thread_id_x]-=B[map[thread_id_x]];
}
}
__global__ void reduce_op_mult(float *A,float *B,int *map,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
A[thread_id_x]*=B[map[thread_id_x]];
}
}
__global__ void reduce_op_div(float *A,float *B,int *map,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size) {
A[thread_id_x]/=B[map[thread_id_x]];
}
}
//dim3 dimGrid(RD->index.size());
//dim3 dimBlock(1);
__global__ void reduction_kernel(float *I,float *O,float *S,int m, int keepdims,int d,int *ind,int rs)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
int j;
float sum=0;
float v,val;
int i;
int p=rs*blockIdx.x;
for(j=0;j<rs;j++,p++) {
v=I[ind[p]];
if (m==2) {
if (j==0) {val=v;i=p;}
else if (v>val) {
val=v;
i=p;
}
}
else if (m==3) {
if (j==0) {val=v;i=p;}
else if (v<val) {
val=v;
i=p;
}
}
else sum+=v;
}
p=rs*blockIdx.x;
// set in Output
if (m<2) { // mean or sum
if (m==0) sum/=d;
if (keepdims) {
for(j=0;j<rs;j++,p++)
O[ind[p]]=sum;
}
else O[thread_id_x]=sum;
}
else { // rs or min
if (keepdims) {
for(j=0;j<rs;j++,p++) {
O[ind[p]]=val;
S[ind[p]]=i;
}
}
else {
O[thread_id_x]=val;
S[thread_id_x]=i;
}
}
}
//dim3 dimGrid(RD->index.size());
//dim3 dimBlock(1);
__global__ void reduction_back_kernel(float *I,float *O,float *S,int m, int keepdims,int d,int *ind,int rs)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
int j;
float val=0;
int p;
// set in Delta
if (m>=2) {
int p=S[thread_id_x];
O[p]+=I[thread_id_x];
}
else {
p=rs*blockIdx.x;
if(keepdims) {
for(j=0;j<rs;j++,p++)
val+=I[ind[p]];
}
else val=I[thread_id_x];
if (m==0) val/=d;
p=rs*blockIdx.x;
for(j=0;j<rs;j++,p++)
O[ind[p]]+=val;
}
}
////////////////////
// FOR SUM and MEAN
// Faster in Conv
///////////////////
//dim3 dimGrid(red_size);
//dim3 dimBlock(RD->index.size());
__global__ void reduction_permute(float *I,float *O,int *ind,int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size)
O[thread_id_x]=I[ind[thread_id_x]];
}
__global__ void reduction_kernel_keep(float *red, float *O, int *ind, int size, int rsize)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size*rsize) {
O[ind[thread_id_x]]=red[thread_id_x/rsize];
}
}
__global__ void reduction_kernel_keep_inc(float *red, float *O, int *ind, int size, int rsize)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x<size*rsize) {
O[ind[thread_id_x]]+=red[thread_id_x/rsize];
}
}
|
32ef67842b2ceee5d9586875657f28b45351974d.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#ifndef HAVE_OPENCV_CUDEV
#error "opencv_cudev is required"
#else
#include "opencv2/cudaarithm.hpp"
#include "opencv2/cudev.hpp"
#include "opencv2/core/private.cuda.hpp"
using namespace cv;
using namespace cv::cuda;
using namespace cv::cudev;
namespace
{
texture<uchar, hipTextureType1D, hipReadModeElementType> texLutTable;
    // GPU implementation of cv::cuda::LookUpTable backed by texture lookups.
    class LookUpTableImpl : public LookUpTable
    {
    public:
        LookUpTableImpl(InputArray lut);
        ~LookUpTableImpl();
        // Maps every pixel of src through the 256-entry table into dst.
        void transform(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
    private:
        GpuMat d_lut;                      // device copy of the table (1 x 256)
        hipTextureObject_t texLutTableObj; // valid only on compute capability >= 3.0
        bool cc30;                         // device supports texture objects
    };
    // Build the device-side LUT. Accepts either a GpuMat (used directly) or a
    // host Mat (uploaded as a 1x256 row). The table is then exposed to device
    // code either through a texture object (compute capability >= 3.0) or the
    // legacy global texture reference texLutTable.
    LookUpTableImpl::LookUpTableImpl(InputArray _lut)
    {
        if (_lut.kind() == _InputArray::CUDA_GPU_MAT)
        {
            d_lut = _lut.getGpuMat();
        }
        else
        {
            Mat h_lut = _lut.getMat();
            // Reshape whatever the caller passed into a single 256-entry row.
            d_lut.upload(Mat(1, 256, h_lut.type(), h_lut.data));
        }
        // The LUT must be exactly 256 8-bit entries (per channel).
        CV_Assert( d_lut.depth() == CV_8U );
        CV_Assert( d_lut.rows == 1 && d_lut.cols == 256 );
        cc30 = deviceSupports(FEATURE_SET_COMPUTE_30);
        if (cc30)
        {
            // Use the texture object: describe the LUT as linear device memory.
            hipResourceDesc texRes;
            std::memset(&texRes, 0, sizeof(texRes));
            texRes.resType = hipResourceTypeLinear;
            texRes.res.linear.devPtr = d_lut.data;
            texRes.res.linear.desc = hipCreateChannelDesc<uchar>();
            texRes.res.linear.sizeInBytes = 256 * d_lut.channels() * sizeof(uchar);
            hipTextureDesc texDescr;
            std::memset(&texDescr, 0, sizeof(texDescr));
            CV_CUDEV_SAFE_CALL( hipCreateTextureObject(&texLutTableObj, &texRes, &texDescr, 0) );
        }
        else
        {
            // Use the texture reference (pre-3.0 fallback).
            hipChannelFormatDesc desc = hipCreateChannelDesc<uchar>();
            CV_CUDEV_SAFE_CALL( hipBindTexture(0, &texLutTable, d_lut.data, &desc) );
        }
    }
LookUpTableImpl::~LookUpTableImpl()
{
if (cc30)
{
// Use the texture object
hipDestroyTextureObject(texLutTableObj);
}
else
{
// Use the texture reference
hipUnbindTexture(texLutTable);
}
}
    // Functor mapping a single-channel uchar pixel through the LUT.
    // The first (ignored) argument matches the cv::cudev::lut_ interface.
    struct LutTablePtrC1
    {
        typedef uchar value_type;
        typedef uchar index_type;
        hipTextureObject_t texLutTableObj; // only populated/used on CC >= 3.0
        __device__ __forceinline__ uchar operator ()(uchar, uchar x) const
        {
    #if CV_CUDEV_ARCH < 300
            // Use the texture reference
            return tex1Dfetch(texLutTable, x);
    #else
            // Use the texture object
            return tex1Dfetch<uchar>(texLutTableObj, x);
    #endif
        }
    };
    // Functor mapping a 3-channel uchar pixel through an interleaved 3-channel
    // LUT: channel c of table entry e lives at linear index e*3 + c.
    struct LutTablePtrC3
    {
        typedef uchar3 value_type;
        typedef uchar3 index_type;
        hipTextureObject_t texLutTableObj; // only populated/used on CC >= 3.0
        __device__ __forceinline__ uchar3 operator ()(const uchar3&, const uchar3& x) const
        {
    #if CV_CUDEV_ARCH < 300
            // Use the texture reference
            return make_uchar3(tex1Dfetch(texLutTable, x.x * 3), tex1Dfetch(texLutTable, x.y * 3 + 1), tex1Dfetch(texLutTable, x.z * 3 + 2));
    #else
            // Use the texture object
            return make_uchar3(tex1Dfetch<uchar>(texLutTableObj, x.x * 3), tex1Dfetch<uchar>(texLutTableObj, x.y * 3 + 1), tex1Dfetch<uchar>(texLutTableObj, x.z * 3 + 2));
    #endif
        }
    };
    // Apply the LUT to src. Supports 8UC1 and 8UC3 sources; the LUT itself may
    // be single-channel (applied to every channel) or match the source channel
    // count.
    void LookUpTableImpl::transform(InputArray _src, OutputArray _dst, Stream& stream)
    {
        GpuMat src = getInputMat(_src, stream);
        const int cn = src.channels();
        const int lut_cn = d_lut.channels();
        CV_Assert( src.type() == CV_8UC1 || src.type() == CV_8UC3 );
        CV_Assert( lut_cn == 1 || lut_cn == cn );
        GpuMat dst = getOutputMat(_dst, src.size(), src.type(), stream);
        if (lut_cn == 1)
        {
            // Single-channel LUT: flatten to one channel so a C3 source is
            // processed element-wise with the same table.
            GpuMat_<uchar> src1(src.reshape(1));
            GpuMat_<uchar> dst1(dst.reshape(1));
            LutTablePtrC1 tbl;
            tbl.texLutTableObj = texLutTableObj;
            dst1.assign(lut_(src1, tbl), stream);
        }
        else if (lut_cn == 3)
        {
            // 3-channel LUT: reinterpret the mats as uchar3 element grids.
            GpuMat_<uchar3>& src3 = (GpuMat_<uchar3>&) src;
            GpuMat_<uchar3>& dst3 = (GpuMat_<uchar3>&) dst;
            LutTablePtrC3 tbl;
            tbl.texLutTableObj = texLutTableObj;
            dst3.assign(lut_(src3, tbl), stream);
        }
        syncOutput(dst, _dst, stream);
    }
}
// Factory: wraps the given LUT (host Mat or GpuMat) in a GPU LookUpTable.
Ptr<LookUpTable> cv::cuda::createLookUpTable(InputArray lut)
{
    return makePtr<LookUpTableImpl>(lut);
}
#endif
| 32ef67842b2ceee5d9586875657f28b45351974d.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#ifndef HAVE_OPENCV_CUDEV
#error "opencv_cudev is required"
#else
#include "opencv2/cudaarithm.hpp"
#include "opencv2/cudev.hpp"
#include "opencv2/core/private.cuda.hpp"
using namespace cv;
using namespace cv::cuda;
using namespace cv::cudev;
namespace
{
texture<uchar, cudaTextureType1D, cudaReadModeElementType> texLutTable;
class LookUpTableImpl : public LookUpTable
{
public:
LookUpTableImpl(InputArray lut);
~LookUpTableImpl();
void transform(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
private:
GpuMat d_lut;
cudaTextureObject_t texLutTableObj;
bool cc30;
};
LookUpTableImpl::LookUpTableImpl(InputArray _lut)
{
if (_lut.kind() == _InputArray::CUDA_GPU_MAT)
{
d_lut = _lut.getGpuMat();
}
else
{
Mat h_lut = _lut.getMat();
d_lut.upload(Mat(1, 256, h_lut.type(), h_lut.data));
}
CV_Assert( d_lut.depth() == CV_8U );
CV_Assert( d_lut.rows == 1 && d_lut.cols == 256 );
cc30 = deviceSupports(FEATURE_SET_COMPUTE_30);
if (cc30)
{
// Use the texture object
cudaResourceDesc texRes;
std::memset(&texRes, 0, sizeof(texRes));
texRes.resType = cudaResourceTypeLinear;
texRes.res.linear.devPtr = d_lut.data;
texRes.res.linear.desc = cudaCreateChannelDesc<uchar>();
texRes.res.linear.sizeInBytes = 256 * d_lut.channels() * sizeof(uchar);
cudaTextureDesc texDescr;
std::memset(&texDescr, 0, sizeof(texDescr));
CV_CUDEV_SAFE_CALL( cudaCreateTextureObject(&texLutTableObj, &texRes, &texDescr, 0) );
}
else
{
// Use the texture reference
cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar>();
CV_CUDEV_SAFE_CALL( cudaBindTexture(0, &texLutTable, d_lut.data, &desc) );
}
}
LookUpTableImpl::~LookUpTableImpl()
{
if (cc30)
{
// Use the texture object
cudaDestroyTextureObject(texLutTableObj);
}
else
{
// Use the texture reference
cudaUnbindTexture(texLutTable);
}
}
struct LutTablePtrC1
{
typedef uchar value_type;
typedef uchar index_type;
cudaTextureObject_t texLutTableObj;
__device__ __forceinline__ uchar operator ()(uchar, uchar x) const
{
#if CV_CUDEV_ARCH < 300
// Use the texture reference
return tex1Dfetch(texLutTable, x);
#else
// Use the texture object
return tex1Dfetch<uchar>(texLutTableObj, x);
#endif
}
};
struct LutTablePtrC3
{
typedef uchar3 value_type;
typedef uchar3 index_type;
cudaTextureObject_t texLutTableObj;
__device__ __forceinline__ uchar3 operator ()(const uchar3&, const uchar3& x) const
{
#if CV_CUDEV_ARCH < 300
// Use the texture reference
return make_uchar3(tex1Dfetch(texLutTable, x.x * 3), tex1Dfetch(texLutTable, x.y * 3 + 1), tex1Dfetch(texLutTable, x.z * 3 + 2));
#else
// Use the texture object
return make_uchar3(tex1Dfetch<uchar>(texLutTableObj, x.x * 3), tex1Dfetch<uchar>(texLutTableObj, x.y * 3 + 1), tex1Dfetch<uchar>(texLutTableObj, x.z * 3 + 2));
#endif
}
};
void LookUpTableImpl::transform(InputArray _src, OutputArray _dst, Stream& stream)
{
GpuMat src = getInputMat(_src, stream);
const int cn = src.channels();
const int lut_cn = d_lut.channels();
CV_Assert( src.type() == CV_8UC1 || src.type() == CV_8UC3 );
CV_Assert( lut_cn == 1 || lut_cn == cn );
GpuMat dst = getOutputMat(_dst, src.size(), src.type(), stream);
if (lut_cn == 1)
{
GpuMat_<uchar> src1(src.reshape(1));
GpuMat_<uchar> dst1(dst.reshape(1));
LutTablePtrC1 tbl;
tbl.texLutTableObj = texLutTableObj;
dst1.assign(lut_(src1, tbl), stream);
}
else if (lut_cn == 3)
{
GpuMat_<uchar3>& src3 = (GpuMat_<uchar3>&) src;
GpuMat_<uchar3>& dst3 = (GpuMat_<uchar3>&) dst;
LutTablePtrC3 tbl;
tbl.texLutTableObj = texLutTableObj;
dst3.assign(lut_(src3, tbl), stream);
}
syncOutput(dst, _dst, stream);
}
}
// Factory: wraps the given LUT (host Mat or GpuMat) in a GPU LookUpTable.
Ptr<LookUpTable> cv::cuda::createLookUpTable(InputArray lut)
{
    return makePtr<LookUpTableImpl>(lut);
}
#endif
|
29155f4b10358a2ddfd372cdd6e8c4299e58fe41.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * Compute the weighted center of each particle group.
 *
 * Launch layout: one thread block per group (grid-stride over groups). Each
 * thread accumulates a partial weighted sum over its slice of the group's
 * atoms; partials are then combined with a shared-memory tree reduction.
 *
 * NOTE(review): temp has exactly 64 slots and the reduction reads
 * temp[thread+32], so this assumes blockDim.x == 64 — confirm at the launch
 * site. The sub-warp steps rely on volatile + implicit warp synchrony (no
 * __syncwarp), and there is no __syncthreads() at the end of the group loop
 * before temp is rewritten by the next iteration — verify against the launch
 * configuration and target architecture.
 */
extern "C" __global__ void computeGroupCenters(const real4* __restrict__ posq, const int* __restrict__ groupParticles,
        const real* __restrict__ groupWeights, const int* __restrict__ groupOffsets, real4* __restrict__ centerPositions) {
    __shared__ volatile real3 temp[64];
    for (int group = blockIdx.x; group < NUM_GROUPS; group += gridDim.x) {
        // The threads in this block cooperate to compute the center of one group.
        int firstIndex = groupOffsets[group];
        int lastIndex = groupOffsets[group+1];
        real3 center = make_real3(0, 0, 0);
        for (int index = threadIdx.x; index < lastIndex-firstIndex; index += blockDim.x) {
            int atom = groupParticles[firstIndex+index];
            real weight = groupWeights[firstIndex+index];
            real4 pos = posq[atom];
            center.x += weight*pos.x;
            center.y += weight*pos.y;
            center.z += weight*pos.z;
        }
        // Tree-reduce the per-thread partial sums in shared memory.
        int thread = threadIdx.x;
        temp[thread].x = center.x;
        temp[thread].y = center.y;
        temp[thread].z = center.z;
        __syncthreads();
        if (thread < 32) {
            temp[thread].x += temp[thread+32].x;
            temp[thread].y += temp[thread+32].y;
            temp[thread].z += temp[thread+32].z;
            if (thread < 16) {
                temp[thread].x += temp[thread+16].x;
                temp[thread].y += temp[thread+16].y;
                temp[thread].z += temp[thread+16].z;
            }
            if (thread < 8) {
                temp[thread].x += temp[thread+8].x;
                temp[thread].y += temp[thread+8].y;
                temp[thread].z += temp[thread+8].z;
            }
            if (thread < 4) {
                temp[thread].x += temp[thread+4].x;
                temp[thread].y += temp[thread+4].y;
                temp[thread].z += temp[thread+4].z;
            }
            if (thread < 2) {
                temp[thread].x += temp[thread+2].x;
                temp[thread].y += temp[thread+2].y;
                temp[thread].z += temp[thread+2].z;
            }
        }
        // Final pairwise sum of the two surviving partials; w is unused (0).
        if (thread == 0)
            centerPositions[group] = make_real4(temp[0].x+temp[1].x, temp[0].y+temp[1].y, temp[0].z+temp[1].z, 0);
    }
}
/**
 * Drop the w component of a real4, yielding a real3.
 */
inline __device__ real3 trim(real4 v) {
    real3 r;
    r.x = v.x;
    r.y = v.y;
    r.z = v.z;
    return r;
}
/**
 * Difference vec1 - vec2; the w component receives the squared magnitude
 * of the difference.
 */
inline __device__ real4 delta(real4 vec1, real4 vec2) {
    const real dx = vec1.x - vec2.x;
    const real dy = vec1.y - vec2.y;
    const real dz = vec1.z - vec2.z;
    return make_real4(dx, dy, dz, dx*dx + dy*dy + dz*dz);
}
/**
 * Compute the angle between two vectors. The w component of each vector
 * should contain the squared magnitude (as produced by delta()).
 */
__device__ real computeAngle(real4 vec1, real4 vec2) {
    real dotProduct = vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z;
    // cos(theta) = (v1 . v2) / (|v1| |v2|); RSQRT avoids two square roots.
    real cosine = dotProduct*RSQRT(vec1.w*vec2.w);
    real angle;
    if (cosine > 0.99f || cosine < -0.99f) {
        // We're close to the singularity in acos(), so take the cross product
        // and use asin() instead (|v1 x v2| = |v1||v2| sin(theta)).
        real3 crossProduct = cross(vec1, vec2);
        real scale = vec1.w*vec2.w;
        angle = ASIN(SQRT(dot(crossProduct, crossProduct)/scale));
        if (cosine < 0.0f)
            angle = M_PI-angle;  // asin only resolves angles in [0, pi/2]
    }
    else
        angle = ACOS(cosine);
    return angle;
}
/**
 * Cross product of two vectors; the w component receives its squared
 * magnitude.
 */
inline __device__ real4 computeCross(real4 vec1, real4 vec2) {
    const real3 c = cross(vec1, vec2);
    const real mag2 = c.x*c.x + c.y*c.y + c.z*c.z;
    return make_real4(c.x, c.y, c.z, mag2);
}
/**
 * Compute the forces on groups based on the bonds.
 *
 * The actual per-bond force/energy computation is injected through the
 * COMPUTE_FORCE macro; EXTRA_ARGS supplies whatever additional parameters
 * that macro expansion needs. Each thread accumulates its bonds' energy
 * locally and adds it to its slot of energyBuffer.
 */
extern "C" __global__ void computeGroupForces(unsigned long long* __restrict__ groupForce, mixed* __restrict__ energyBuffer, const real4* __restrict__ centerPositions,
        const int* __restrict__ bondGroups
        EXTRA_ARGS) {
    mixed energy = 0;
    // Grid-stride loop over bonds.
    for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < NUM_BONDS; index += blockDim.x*gridDim.x) {
        COMPUTE_FORCE
    }
    energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
}
/**
 * Apply the forces from the group centers to the individual atoms.
 *
 * Launch layout: one block per group (grid-stride over groups). Each member
 * atom receives the group's force scaled by that atom's weight. Forces are
 * stored as 64-bit integers (presumably fixed-point accumulators — the
 * scaling happens elsewhere; verify) with x, y and z components in three
 * PADDED_NUM_ATOMS-strided slabs of atomForce.
 */
extern "C" __global__ void applyForcesToAtoms(const int* __restrict__ groupParticles, const real* __restrict__ groupWeights, const int* __restrict__ groupOffsets,
        const long long* __restrict__ groupForce, unsigned long long* __restrict__ atomForce) {
    for (int group = blockIdx.x; group < NUM_GROUPS; group += gridDim.x) {
        long long fx = groupForce[group];
        long long fy = groupForce[group+NUM_GROUPS];
        long long fz = groupForce[group+NUM_GROUPS*2];
        int firstIndex = groupOffsets[group];
        int lastIndex = groupOffsets[group+1];
        for (int index = threadIdx.x; index < lastIndex-firstIndex; index += blockDim.x) {
            int atom = groupParticles[firstIndex+index];
            real weight = groupWeights[firstIndex+index];
            // fx*weight promotes to floating point before truncating back to a
            // 64-bit integer; atomicAdd makes concurrent atom updates safe.
            atomicAdd(&atomForce[atom], static_cast<unsigned long long>((long long) (fx*weight)));
            atomicAdd(&atomForce[atom+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (fy*weight)));
            atomicAdd(&atomForce[atom+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (fz*weight)));
        }
    }
}
| 29155f4b10358a2ddfd372cdd6e8c4299e58fe41.cu | /**
* Compute the center of each group.
*/
extern "C" __global__ void computeGroupCenters(const real4* __restrict__ posq, const int* __restrict__ groupParticles,
const real* __restrict__ groupWeights, const int* __restrict__ groupOffsets, real4* __restrict__ centerPositions) {
__shared__ volatile real3 temp[64];
for (int group = blockIdx.x; group < NUM_GROUPS; group += gridDim.x) {
// The threads in this block work together to compute the center one group.
int firstIndex = groupOffsets[group];
int lastIndex = groupOffsets[group+1];
real3 center = make_real3(0, 0, 0);
for (int index = threadIdx.x; index < lastIndex-firstIndex; index += blockDim.x) {
int atom = groupParticles[firstIndex+index];
real weight = groupWeights[firstIndex+index];
real4 pos = posq[atom];
center.x += weight*pos.x;
center.y += weight*pos.y;
center.z += weight*pos.z;
}
// Sum the values.
int thread = threadIdx.x;
temp[thread].x = center.x;
temp[thread].y = center.y;
temp[thread].z = center.z;
__syncthreads();
if (thread < 32) {
temp[thread].x += temp[thread+32].x;
temp[thread].y += temp[thread+32].y;
temp[thread].z += temp[thread+32].z;
if (thread < 16) {
temp[thread].x += temp[thread+16].x;
temp[thread].y += temp[thread+16].y;
temp[thread].z += temp[thread+16].z;
}
if (thread < 8) {
temp[thread].x += temp[thread+8].x;
temp[thread].y += temp[thread+8].y;
temp[thread].z += temp[thread+8].z;
}
if (thread < 4) {
temp[thread].x += temp[thread+4].x;
temp[thread].y += temp[thread+4].y;
temp[thread].z += temp[thread+4].z;
}
if (thread < 2) {
temp[thread].x += temp[thread+2].x;
temp[thread].y += temp[thread+2].y;
temp[thread].z += temp[thread+2].z;
}
}
if (thread == 0)
centerPositions[group] = make_real4(temp[0].x+temp[1].x, temp[0].y+temp[1].y, temp[0].z+temp[1].z, 0);
}
}
/**
* Convert a real4 to a real3 by removing its last element.
*/
inline __device__ real3 trim(real4 v) {
return make_real3(v.x, v.y, v.z);
}
/**
* Compute the difference between two vectors, setting the fourth component to the squared magnitude.
*/
inline __device__ real4 delta(real4 vec1, real4 vec2) {
real4 result = make_real4(vec1.x-vec2.x, vec1.y-vec2.y, vec1.z-vec2.z, 0);
result.w = result.x*result.x + result.y*result.y + result.z*result.z;
return result;
}
/**
* Compute the angle between two vectors. The w component of each vector should contain the squared magnitude.
*/
__device__ real computeAngle(real4 vec1, real4 vec2) {
real dotProduct = vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z;
real cosine = dotProduct*RSQRT(vec1.w*vec2.w);
real angle;
if (cosine > 0.99f || cosine < -0.99f) {
// We're close to the singularity in acos(), so take the cross product and use asin() instead.
real3 crossProduct = cross(vec1, vec2);
real scale = vec1.w*vec2.w;
angle = ASIN(SQRT(dot(crossProduct, crossProduct)/scale));
if (cosine < 0.0f)
angle = M_PI-angle;
}
else
angle = ACOS(cosine);
return angle;
}
/**
* Compute the cross product of two vectors, setting the fourth component to the squared magnitude.
*/
inline __device__ real4 computeCross(real4 vec1, real4 vec2) {
real3 cp = cross(vec1, vec2);
return make_real4(cp.x, cp.y, cp.z, cp.x*cp.x+cp.y*cp.y+cp.z*cp.z);
}
/**
* Compute the forces on groups based on the bonds.
*/
extern "C" __global__ void computeGroupForces(unsigned long long* __restrict__ groupForce, mixed* __restrict__ energyBuffer, const real4* __restrict__ centerPositions,
const int* __restrict__ bondGroups
EXTRA_ARGS) {
mixed energy = 0;
for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < NUM_BONDS; index += blockDim.x*gridDim.x) {
COMPUTE_FORCE
}
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
}
/**
* Apply the forces from the group centers to the individual atoms.
*/
extern "C" __global__ void applyForcesToAtoms(const int* __restrict__ groupParticles, const real* __restrict__ groupWeights, const int* __restrict__ groupOffsets,
const long long* __restrict__ groupForce, unsigned long long* __restrict__ atomForce) {
for (int group = blockIdx.x; group < NUM_GROUPS; group += gridDim.x) {
long long fx = groupForce[group];
long long fy = groupForce[group+NUM_GROUPS];
long long fz = groupForce[group+NUM_GROUPS*2];
int firstIndex = groupOffsets[group];
int lastIndex = groupOffsets[group+1];
for (int index = threadIdx.x; index < lastIndex-firstIndex; index += blockDim.x) {
int atom = groupParticles[firstIndex+index];
real weight = groupWeights[firstIndex+index];
atomicAdd(&atomForce[atom], static_cast<unsigned long long>((long long) (fx*weight)));
atomicAdd(&atomForce[atom+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (fy*weight)));
atomicAdd(&atomForce[atom+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (fz*weight)));
}
}
}
|
f80de425b6ff43d49fb048a0973573ed0e62cd87.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define __min(x,y) ((x)<(y)?(x):(y))
;
#define N 1024
extern int hipMemcpy(); //Memcopy
extern int hipFree();
extern void __syncthreads();
extern int hipMemcpyToSymbol(); //Memcopy
extern __global__ void mv_GPU(float *, float (*)[N], float *); //Kernal
/*
 * Element-wise comparison of two float arrays.
 * Returns 1 if |a[i] - b[i]| <= threshold for every i in [0, size), else 0.
 *
 * BUG FIX: the original called abs() on the float difference; with no math
 * header in scope that resolves to the integer abs(), truncating the
 * difference toward zero, so e.g. a 0.9 mismatch passed a 0.001 threshold.
 * Compute the magnitude in float arithmetic instead.
 */
int compare(float *a, float *b, int size, double threshold) {
   printf("CHECK2\n");
   int i;
   for (i=0; i<size; i++) {
      float diff = a[i] - b[i];
      if (diff < 0.0f) diff = -diff;     /* |a[i] - b[i]| without abs() */
      if (diff > threshold) return 0;
   }
   return 1;
}
/*
 * CPU reference: sparse matrix-vector product in CSR form.
 * Row i accumulates data[j] * b[indices[j]] for j in [ptr[i], ptr[i+1]),
 * adding onto whatever t already holds.
 */
void normalMV(int *ptr, int nr, float *t, float *data, int *indices, float *b){
   int row;
   for (row = 0; row < nr; row++) {
      int k;
      for (k = ptr[row]; k < ptr[row+1]; k++) {
         t[row] = t[row] + data[k] * b[indices[k]];
      }
   }
}
/*
 * GPU CSR sparse matrix-vector product: t[i] += sum_j data[j]*b[indices[j]]
 * over row i's nonzeros.
 *
 * BUG FIX: the original body ran the full sequential row loop in EVERY
 * launched thread, so all threads raced on the non-atomic read-modify-write
 * of t[i]. Assign one row per thread instead, with a grid-stride loop so any
 * launch configuration covers all nr rows.
 */
extern __global__ void mv_GPU(int *ptr, int nr, float *t, float *data, int *indices, float *b)
{
   int stride = blockDim.x * gridDim.x;
   int i;
   for (i = blockIdx.x * blockDim.x + threadIdx.x; i < nr; i += stride) {
      /* Accumulate the row in a register, then store once. */
      float acc = t[i];
      int j;
      for (j = ptr[i]; j < ptr[i+1]; j++) {
         acc += data[j] * b[indices[j]];
      }
      t[i] = acc;
   }
}
/*
 * Reads a Matrix Market file, builds a CSR matrix, runs the CPU reference
 * SpMV and the GPU kernel, and compares the two results.
 *
 * BUG FIXES vs. original:
 *  - d_a was NULL when used as the device-to-host copy destination;
 *  - float uploads were sized with sizeof(int) instead of sizeof(float);
 *  - the device result vector was seeded with the already-computed CPU
 *    result (t) instead of zeros, so the kernel accumulated on top of it;
 *  - the input file was never closed and host buffers never freed.
 */
main (int argc, char **argv) {
  FILE *fp;
  char line[1024];
  int *ptr, *indices;
  float *data, *bb, *t;
  int i;
  int n;  // number of nonzero elements in data
  int nr; // number of rows in matrix
  int nc; // number of columns in matrix
  // Open input file and read to end of comments
  if (argc !=2) abort();
  if ((fp = fopen(argv[1], "r")) == NULL) {
    abort();
  }
  fgets(line, 128, fp);
  while (line[0] == '%') {
    fgets(line, 128, fp);
  }
  // Read number of rows (nr), number of columns (nc) and
  // number of elements and allocate memory for ptr, indices, data, bb and t.
  sscanf(line,"%d %d %d\n", &nr, &nc, &n);
  ptr = (int *) malloc ((nr+1)*sizeof(int));
  indices = (int *) malloc(n*sizeof(int));
  data = (float *) malloc(n*sizeof(float));
  bb = (float *) malloc(nc*sizeof(float));
  t = (float *) malloc(nr*sizeof(float));
  // Read data in coordinate format and initialize sparse matrix
  int lastr=0;
  for (i=0; i<n; i++) {
    int r;
    fscanf(fp,"%d %d %f\n", &r, &(indices[i]), &(data[i]));
    indices[i]--; // start numbering at 0
    if (r!=lastr) {
      ptr[r-1] = i;
      lastr = r;
    }
  }
  ptr[nr] = n;
  fclose(fp);
  // initialize t to 0 and b with random data
  for (i=0; i<nr; i++) {
    t[i] = 0.0;
  }
  for (i=0; i<nc; i++) {
    bb[i] = (float) rand()/1111111111;
  }
  // create CUDA event handles for timing purposes
  hipEvent_t start_event, stop_event;
  float elapsed_time_seq, elapsed_time_gpu;
  hipEventCreate(&start_event);
  hipEventCreate(&stop_event);
  hipEventRecord(start_event, 0);
  normalMV(ptr,nr,t,data,indices, bb);   // CPU reference; result now in t
  hipEventRecord(stop_event, 0);
  hipEventSynchronize(stop_event);
  hipEventElapsedTime(&elapsed_time_seq,start_event, stop_event);
  int *devO1Ptr;    // device CSR row offsets
  int *devI1Ptr;    // device CSR column indices
  float *devI2Ptr;  // device CSR values
  float *devI3Ptr;  // device dense input vector
  float *devI4Ptr;  // device result vector
  hipMalloc((void **)&devO1Ptr, (nr+1)*sizeof(int));
  hipMemcpy(devO1Ptr, ptr, (nr+1)*sizeof(int), hipMemcpyHostToDevice);
  hipMalloc((void **)&devI1Ptr, n*sizeof(int));
  hipMemcpy(devI1Ptr,indices,n*sizeof(int), hipMemcpyHostToDevice);
  hipMalloc((void **)&devI2Ptr,n*sizeof(float));
  // BUG FIX: these two uploads used sizeof(int) for float buffers.
  hipMemcpy(devI2Ptr,data,n*sizeof(float), hipMemcpyHostToDevice);
  hipMalloc((void **)&devI3Ptr,nc*sizeof(float));
  hipMemcpy(devI3Ptr,bb,nc*sizeof(float), hipMemcpyHostToDevice);
  hipMalloc((void **)&devI4Ptr,nr*sizeof(float));
  // BUG FIX: t already holds the CPU result here; the device result vector
  // must start from zeros or the kernel accumulates on top of the reference.
  float *zeros = (float *) calloc(nr, sizeof(float));
  hipMemcpy(devI4Ptr, zeros, nr*sizeof(float), hipMemcpyHostToDevice);
  free(zeros);
  dim3 dimGrid((N+31)/32, 1);
  dim3 dimBlock(32, 1);
  hipEventCreate(&start_event);
  hipEventCreate(&stop_event);
  hipEventRecord(start_event, 0);
  // int *ptr, int nr, float *t, float *data, int *indices, float *b
  hipLaunchKernelGGL(( mv_GPU), dim3(dimGrid),dim3(dimBlock), 0, 0, devO1Ptr,nr,devI4Ptr,devI2Ptr,devI1Ptr,devI3Ptr);
  hipEventRecord(stop_event, 0);
  hipEventSynchronize(stop_event);
  // BUG FIX: d_a was NULL; allocate a host buffer for the GPU result.
  float *d_a = (float *) malloc(nr*sizeof(float));
  hipMemcpy(d_a,devI4Ptr,nr*sizeof(float), hipMemcpyDeviceToHost);
  hipEventElapsedTime(&elapsed_time_gpu,start_event, stop_event);
  hipFree(devO1Ptr);
  hipFree(devI1Ptr);
  hipFree(devI2Ptr);
  hipFree(devI3Ptr);
  hipFree(devI4Ptr);
  printf("CHECK1\n");
  int res = compare( t, d_a, nr, 0.001);
  if (res == 1) {
    printf("VALID!\n Sequential Time: %.2f msec\n Parallel Time: %.2f msec\n Speedup = %.2f\n",
           elapsed_time_seq, elapsed_time_gpu, elapsed_time_seq/elapsed_time_gpu);
  }
  else printf("INVALID...\n");
  // Release host memory.
  free(d_a); free(ptr); free(indices); free(data); free(bb); free(t);
  return 0;
}
| f80de425b6ff43d49fb048a0973573ed0e62cd87.cu | #include <stdio.h>
#define __min(x,y) ((x)<(y)?(x):(y))
;
#define N 1024
extern int cudaMemcpy(); //Memcopy
extern int cudaFree();
extern void __syncthreads();
extern int cudaMemcpyToSymbol(); //Memcopy
extern __global__ void mv_GPU(float *, float (*)[N], float *); //Kernal
/*
 * Element-wise comparison of two float arrays.
 * Returns 1 if |a[i] - b[i]| <= threshold for every i in [0, size), else 0.
 *
 * BUG FIX: the original called abs() on the float difference; with no math
 * header in scope that resolves to the integer abs(), truncating the
 * difference toward zero, so e.g. a 0.9 mismatch passed a 0.001 threshold.
 * Compute the magnitude in float arithmetic instead.
 */
int compare(float *a, float *b, int size, double threshold) {
   printf("CHECK2\n");
   int i;
   for (i=0; i<size; i++) {
      float diff = a[i] - b[i];
      if (diff < 0.0f) diff = -diff;     /* |a[i] - b[i]| without abs() */
      if (diff > threshold) return 0;
   }
   return 1;
}
/*
 * CPU reference: sparse matrix-vector product in CSR form.
 * Row i accumulates data[j] * b[indices[j]] for j in [ptr[i], ptr[i+1]),
 * adding onto whatever t already holds.
 */
void normalMV(int *ptr, int nr, float *t, float *data, int *indices, float *b){
   int row;
   for (row = 0; row < nr; row++) {
      int k;
      for (k = ptr[row]; k < ptr[row+1]; k++) {
         t[row] = t[row] + data[k] * b[indices[k]];
      }
   }
}
/*
 * GPU CSR sparse matrix-vector product: t[i] += sum_j data[j]*b[indices[j]]
 * over row i's nonzeros.
 *
 * BUG FIX: the original body ran the full sequential row loop in EVERY
 * launched thread, so all threads raced on the non-atomic read-modify-write
 * of t[i]. Assign one row per thread instead, with a grid-stride loop so any
 * launch configuration covers all nr rows.
 */
extern __global__ void mv_GPU(int *ptr, int nr, float *t, float *data, int *indices, float *b)
{
   int stride = blockDim.x * gridDim.x;
   int i;
   for (i = blockIdx.x * blockDim.x + threadIdx.x; i < nr; i += stride) {
      /* Accumulate the row in a register, then store once. */
      float acc = t[i];
      int j;
      for (j = ptr[i]; j < ptr[i+1]; j++) {
         acc += data[j] * b[indices[j]];
      }
      t[i] = acc;
   }
}
/*
 * Reads a Matrix Market file, builds a CSR matrix, runs the CPU reference
 * SpMV and the GPU kernel, and compares the two results.
 *
 * BUG FIXES vs. original:
 *  - d_a was NULL when used as the device-to-host copy destination;
 *  - float uploads were sized with sizeof(int) instead of sizeof(float);
 *  - the device result vector was seeded with the already-computed CPU
 *    result (t) instead of zeros, so the kernel accumulated on top of it;
 *  - the input file was never closed and host buffers never freed.
 */
main (int argc, char **argv) {
  FILE *fp;
  char line[1024];
  int *ptr, *indices;
  float *data, *bb, *t;
  int i;
  int n;  // number of nonzero elements in data
  int nr; // number of rows in matrix
  int nc; // number of columns in matrix
  // Open input file and read to end of comments
  if (argc !=2) abort();
  if ((fp = fopen(argv[1], "r")) == NULL) {
    abort();
  }
  fgets(line, 128, fp);
  while (line[0] == '%') {
    fgets(line, 128, fp);
  }
  // Read number of rows (nr), number of columns (nc) and
  // number of elements and allocate memory for ptr, indices, data, bb and t.
  sscanf(line,"%d %d %d\n", &nr, &nc, &n);
  ptr = (int *) malloc ((nr+1)*sizeof(int));
  indices = (int *) malloc(n*sizeof(int));
  data = (float *) malloc(n*sizeof(float));
  bb = (float *) malloc(nc*sizeof(float));
  t = (float *) malloc(nr*sizeof(float));
  // Read data in coordinate format and initialize sparse matrix
  int lastr=0;
  for (i=0; i<n; i++) {
    int r;
    fscanf(fp,"%d %d %f\n", &r, &(indices[i]), &(data[i]));
    indices[i]--; // start numbering at 0
    if (r!=lastr) {
      ptr[r-1] = i;
      lastr = r;
    }
  }
  ptr[nr] = n;
  fclose(fp);
  // initialize t to 0 and b with random data
  for (i=0; i<nr; i++) {
    t[i] = 0.0;
  }
  for (i=0; i<nc; i++) {
    bb[i] = (float) rand()/1111111111;
  }
  // create CUDA event handles for timing purposes
  cudaEvent_t start_event, stop_event;
  float elapsed_time_seq, elapsed_time_gpu;
  cudaEventCreate(&start_event);
  cudaEventCreate(&stop_event);
  cudaEventRecord(start_event, 0);
  normalMV(ptr,nr,t,data,indices, bb);   // CPU reference; result now in t
  cudaEventRecord(stop_event, 0);
  cudaEventSynchronize(stop_event);
  cudaEventElapsedTime(&elapsed_time_seq,start_event, stop_event);
  int *devO1Ptr;    // device CSR row offsets
  int *devI1Ptr;    // device CSR column indices
  float *devI2Ptr;  // device CSR values
  float *devI3Ptr;  // device dense input vector
  float *devI4Ptr;  // device result vector
  cudaMalloc((void **)&devO1Ptr, (nr+1)*sizeof(int));
  cudaMemcpy(devO1Ptr, ptr, (nr+1)*sizeof(int), cudaMemcpyHostToDevice);
  cudaMalloc((void **)&devI1Ptr, n*sizeof(int));
  cudaMemcpy(devI1Ptr,indices,n*sizeof(int), cudaMemcpyHostToDevice);
  cudaMalloc((void **)&devI2Ptr,n*sizeof(float));
  // BUG FIX: these two uploads used sizeof(int) for float buffers.
  cudaMemcpy(devI2Ptr,data,n*sizeof(float), cudaMemcpyHostToDevice);
  cudaMalloc((void **)&devI3Ptr,nc*sizeof(float));
  cudaMemcpy(devI3Ptr,bb,nc*sizeof(float), cudaMemcpyHostToDevice);
  cudaMalloc((void **)&devI4Ptr,nr*sizeof(float));
  // BUG FIX: t already holds the CPU result here; the device result vector
  // must start from zeros or the kernel accumulates on top of the reference.
  float *zeros = (float *) calloc(nr, sizeof(float));
  cudaMemcpy(devI4Ptr, zeros, nr*sizeof(float), cudaMemcpyHostToDevice);
  free(zeros);
  dim3 dimGrid((N+31)/32, 1);
  dim3 dimBlock(32, 1);
  cudaEventCreate(&start_event);
  cudaEventCreate(&stop_event);
  cudaEventRecord(start_event, 0);
  // int *ptr, int nr, float *t, float *data, int *indices, float *b
  mv_GPU<<<dimGrid,dimBlock>>>(devO1Ptr,nr,devI4Ptr,devI2Ptr,devI1Ptr,devI3Ptr);
  cudaEventRecord(stop_event, 0);
  cudaEventSynchronize(stop_event);
  // BUG FIX: d_a was NULL; allocate a host buffer for the GPU result.
  float *d_a = (float *) malloc(nr*sizeof(float));
  cudaMemcpy(d_a,devI4Ptr,nr*sizeof(float), cudaMemcpyDeviceToHost);
  cudaEventElapsedTime(&elapsed_time_gpu,start_event, stop_event);
  cudaFree(devO1Ptr);
  cudaFree(devI1Ptr);
  cudaFree(devI2Ptr);
  cudaFree(devI3Ptr);
  cudaFree(devI4Ptr);
  printf("CHECK1\n");
  int res = compare( t, d_a, nr, 0.001);
  if (res == 1) {
    printf("VALID!\n Sequential Time: %.2f msec\n Parallel Time: %.2f msec\n Speedup = %.2f\n",
           elapsed_time_seq, elapsed_time_gpu, elapsed_time_seq/elapsed_time_gpu);
  }
  else printf("INVALID...\n");
  // Release host memory.
  free(d_a); free(ptr); free(indices); free(data); free(bb); free(t);
  return 0;
}
|
8a3e14f52bb67d3326137e0effae540f1c60a8d8.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/operators/image/remap/warp_affine_params.h"
#include <hip/hip_runtime.h>
namespace dali {
namespace {
// Computes output[i] = input[i].inv() for i in [0, count) using a
// grid-stride loop, so any launch configuration covers all elements.
template <int ndims>
__global__ void InvertTransformsKernel(WarpAffineParams<ndims> *output,
                                       const WarpAffineParams<ndims> *input, int count) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x)
    output[i] = input[i].inv();
}
// Launches the inversion kernel on `stream`, one matrix per thread.
// No-op for count <= 0.
template <int ndims>
void InvertTransforms(WarpAffineParams<ndims> *output, const WarpAffineParams<ndims> *input,
                      int count, hipStream_t stream) {
  if (count <= 0)
    return;  // BUG FIX: a 0-block/0-thread launch is an invalid configuration
  int blocks = div_ceil(count, 512);
  int threads = ::min(count, 512);
  hipLaunchKernelGGL(( InvertTransformsKernel<ndims>), dim3(blocks), dim3(threads), 0, stream, output, input, count);
}
} // namespace
// Public entry points: invert `count` 2D / 3D affine transforms on `stream`.
template <>
void InvertTransformsGPU<2>(WarpAffineParams<2> *output, const WarpAffineParams<2> *input,
                            int count, hipStream_t stream) {
  InvertTransforms<2>(output, input, count, stream);
}
template <>
void InvertTransformsGPU<3>(WarpAffineParams<3> *output, const WarpAffineParams<3> *input,
                            int count, hipStream_t stream) {
  InvertTransforms<3>(output, input, count, stream);
}
} // namespace dali
| 8a3e14f52bb67d3326137e0effae540f1c60a8d8.cu | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/operators/image/remap/warp_affine_params.h"
#include <cuda_runtime.h>
namespace dali {
namespace {
template <int ndims>
__global__ void InvertTransformsKernel(WarpAffineParams<ndims> *output,
const WarpAffineParams<ndims> *input, int count) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x)
output[i] = input[i].inv();
}
template <int ndims>
void InvertTransforms(WarpAffineParams<ndims> *output, const WarpAffineParams<ndims> *input,
int count, cudaStream_t stream) {
int blocks = div_ceil(count, 512);
int threads = std::min(count, 512);
InvertTransformsKernel<ndims><<<blocks, threads, 0, stream>>>(output, input, count);
}
} // namespace
template <>
void InvertTransformsGPU<2>(WarpAffineParams<2> *output, const WarpAffineParams<2> *input,
int count, cudaStream_t stream) {
InvertTransforms<2>(output, input, count, stream);
}
template <>
void InvertTransformsGPU<3>(WarpAffineParams<3> *output, const WarpAffineParams<3> *input,
int count, cudaStream_t stream) {
InvertTransforms<3>(output, input, count, stream);
}
} // namespace dali
|
e4056f9ae04cca98ae6f4b5aa919cfdca4d89562.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <cmath>
#include <memory>
#include <iostream>
#include <vector>
#include <utility> // std::pair
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "Assert.cuh"
#include "Agent.hpp"
#include "Constraint.hpp"
#include "Variable.hpp"
#include "Types.hpp"
#include "Utils/Permutation.hpp"
#include "Utils/GpuAllocator.cuh"
#include "BE/GpuTable.cuh"
// Create the CPU Table Associated to the Constraint con
GpuTable::GpuTable(Constraint::ptr con)
: values(nullptr), utils(nullptr), nbEntries(0)
{
std::vector<std::pair<int, int>> prIdxStore;
int arity = con->getArity();
for (int i=0; i<arity; i++) {
auto& agt_i = con->getVariableAt(i)->getAgt();
auto pair = std::make_pair<>(agt_i->getPriority(), i);
prIdxStore.push_back(pair);
}
std::sort(prIdxStore.begin(), prIdxStore.end(),
std::less<std::pair<int,int>>());
// New Ordered Constraint Scope (acending order) - variable to eliminate
// appear as last.
std::vector<int> newScope(arity);
std::vector<value_t> domMax(arity);
GpuTable::scope.resize(arity);
for (int i=0; i<arity; i++) {
newScope[i] = prIdxStore[i].second;
// Get bounds (in the new order)
domMax[i] = con->getVariableAt(newScope[i])->getMax();
GpuTable::scope[i] = con->getVariableAt(newScope[i]);
}
// Gen Permutations
combinatorics::Permutation<value_t> perm(domMax);
// Store Information
GpuTable::nbEntries = perm.size();
auto& permList = perm.getPermutations();
std::vector<util_t> hostUtils(GpuTable::nbEntries, ZERO);
std::vector<value_t> oriOrderValues(arity);
// For each permutation transform back the number and get value
for (int i=0; i<GpuTable::nbEntries; i++) {
for (int j=0; j<arity; j++) {
oriOrderValues[newScope[j]] = permList[i][j];
}
// save value, and util
hostUtils[i] = con->getUtil(oriOrderValues);
}
// Allocate data on Device and save pointer back to GpuTable
GpuTable::utils = Gpu::Allocator::alloc(hostUtils);
}
// Creates an Empty GPU Table Associaed to the set of variables given
// [todo] This step is not needed (do not need to generate all perm to count them!
GpuTable::GpuTable(std::vector<Agent::ptr> agts)
: values(nullptr), utils(nullptr), nbEntries(0)
{
std::vector<std::pair<int, int>> prIdxStore;
// auto& v_i = agts[i]->getVariable();
for (int i=0; i<agts.size(); i++) {
auto pair = std::make_pair<>(agts[i]->getPriority(), i);
prIdxStore.push_back(pair);
}
std::sort(prIdxStore.begin(), prIdxStore.end(),
std::less<std::pair<int,int>>());
// New Ordered Constraint Scope (descending order)
GpuTable::scope.resize(agts.size());
GpuTable::nbEntries = 1;
for (int i=0; i<agts.size(); i++) {
int idx = prIdxStore[i].second;
GpuTable::scope[i] = agts[idx]->getVariable();
// Get bounds (in the new order)
GpuTable::nbEntries *= GpuTable::scope[i]->getDomSize();
}
// Store Information
GpuTable::utils = Gpu::Allocator::alloc(nbEntries, ZERO);
}
GpuTable::~GpuTable() {
if (values)
Gpu::Allocator::free(values);
if (utils)
Gpu::Allocator::free(utils);
}
std::string
GpuTable::to_string() {
std::string res;
res += " scope = [";
for (auto v : scope) res += v->getName() + ",";
res += "]\n";
util_t *h_utils = (util_t*) malloc(nbEntries * sizeof(util_t));
cuCheck(hipMemcpy(h_utils, utils, sizeof(util_t) * nbEntries, hipMemcpyDeviceToHost));
for (int i=0; i<::min((size_t)10, nbEntries); i++) {
res += std::to_string(i) + ": " + std::to_string(h_utils[i]) + "\n";
}
if (h_utils) free(h_utils);
return res;
}
void
GpuTable::update(std::vector<util_t> hostUtils, std::vector<Variable::ptr> vars) {
size_t newSize = hostUtils.size();
if (newSize > nbEntries) {
Gpu::Allocator::free(utils);
utils = Gpu::Allocator::alloc(hostUtils);
} else {
Gpu::Allocator::cpyToDevice(utils, &hostUtils[0], newSize);
}
nbEntries = newSize;
scope = vars;
}
| e4056f9ae04cca98ae6f4b5aa919cfdca4d89562.cu | #include <algorithm>
#include <cmath>
#include <memory>
#include <iostream>
#include <vector>
#include <utility> // std::pair
#include <cuda.h>
#include <cuda_runtime.h>
#include "Assert.cuh"
#include "Agent.hpp"
#include "Constraint.hpp"
#include "Variable.hpp"
#include "Types.hpp"
#include "Utils/Permutation.hpp"
#include "Utils/GpuAllocator.cuh"
#include "BE/GpuTable.cuh"
// Create the CPU Table Associated to the Constraint con
GpuTable::GpuTable(Constraint::ptr con)
: values(nullptr), utils(nullptr), nbEntries(0)
{
std::vector<std::pair<int, int>> prIdxStore;
int arity = con->getArity();
for (int i=0; i<arity; i++) {
auto& agt_i = con->getVariableAt(i)->getAgt();
auto pair = std::make_pair<>(agt_i->getPriority(), i);
prIdxStore.push_back(pair);
}
std::sort(prIdxStore.begin(), prIdxStore.end(),
std::less<std::pair<int,int>>());
// New Ordered Constraint Scope (acending order) - variable to eliminate
// appear as last.
std::vector<int> newScope(arity);
std::vector<value_t> domMax(arity);
GpuTable::scope.resize(arity);
for (int i=0; i<arity; i++) {
newScope[i] = prIdxStore[i].second;
// Get bounds (in the new order)
domMax[i] = con->getVariableAt(newScope[i])->getMax();
GpuTable::scope[i] = con->getVariableAt(newScope[i]);
}
// Gen Permutations
combinatorics::Permutation<value_t> perm(domMax);
// Store Information
GpuTable::nbEntries = perm.size();
auto& permList = perm.getPermutations();
std::vector<util_t> hostUtils(GpuTable::nbEntries, ZERO);
std::vector<value_t> oriOrderValues(arity);
// For each permutation transform back the number and get value
for (int i=0; i<GpuTable::nbEntries; i++) {
for (int j=0; j<arity; j++) {
oriOrderValues[newScope[j]] = permList[i][j];
}
// save value, and util
hostUtils[i] = con->getUtil(oriOrderValues);
}
// Allocate data on Device and save pointer back to GpuTable
GpuTable::utils = Gpu::Allocator::alloc(hostUtils);
}
// Creates an Empty GPU Table Associaed to the set of variables given
// [todo] This step is not needed (do not need to generate all perm to count them!
GpuTable::GpuTable(std::vector<Agent::ptr> agts)
: values(nullptr), utils(nullptr), nbEntries(0)
{
std::vector<std::pair<int, int>> prIdxStore;
// auto& v_i = agts[i]->getVariable();
for (int i=0; i<agts.size(); i++) {
auto pair = std::make_pair<>(agts[i]->getPriority(), i);
prIdxStore.push_back(pair);
}
std::sort(prIdxStore.begin(), prIdxStore.end(),
std::less<std::pair<int,int>>());
// New Ordered Constraint Scope (descending order)
GpuTable::scope.resize(agts.size());
GpuTable::nbEntries = 1;
for (int i=0; i<agts.size(); i++) {
int idx = prIdxStore[i].second;
GpuTable::scope[i] = agts[idx]->getVariable();
// Get bounds (in the new order)
GpuTable::nbEntries *= GpuTable::scope[i]->getDomSize();
}
// Store Information
GpuTable::utils = Gpu::Allocator::alloc(nbEntries, ZERO);
}
GpuTable::~GpuTable() {
if (values)
Gpu::Allocator::free(values);
if (utils)
Gpu::Allocator::free(utils);
}
std::string
GpuTable::to_string() {
std::string res;
res += " scope = [";
for (auto v : scope) res += v->getName() + ",";
res += "]\n";
util_t *h_utils = (util_t*) malloc(nbEntries * sizeof(util_t));
cuCheck(cudaMemcpy(h_utils, utils, sizeof(util_t) * nbEntries, cudaMemcpyDeviceToHost));
for (int i=0; i<std::min((size_t)10, nbEntries); i++) {
res += std::to_string(i) + ": " + std::to_string(h_utils[i]) + "\n";
}
if (h_utils) free(h_utils);
return res;
}
void
GpuTable::update(std::vector<util_t> hostUtils, std::vector<Variable::ptr> vars) {
size_t newSize = hostUtils.size();
if (newSize > nbEntries) {
Gpu::Allocator::free(utils);
utils = Gpu::Allocator::alloc(hostUtils);
} else {
Gpu::Allocator::cpyToDevice(utils, &hostUtils[0], newSize);
}
nbEntries = newSize;
scope = vars;
}
|
bb4b0af522cc836669448a1a2ffb1597b50c065c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void sumArraysZeroCopy(float *A,float *B,float *C,const int N){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<N) C[i] = A[i] + B[i];
}
void sumArraysOnHost(float *A, float *B, float *C,const int N){
for (int idx=0;idx<N;idx++){
C[idx] = A[idx] + B[idx];
}
}
__global__ void sumArraysOnGPU(float *A,float *B,float *C,const int N){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<N) C[i] = A[i] + B[i];
}
void checkResult(float *hostRef, float *gpuRef, const int N){
double epsilon = 1.0e-8;
for (int i=0; i < N; i++){
if(abs(hostRef[i]-gpuRef[i])>epsilon){
printf("Arrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n",hostRef[i],gpuRef[i],i);
break;
}
}
return;
}
void initialData(float *ip, int size){
int i;
for (i=0;i<size;i++){
ip[i] = (float)(rand() & 0xFF)/10.0f;
}
return;
}
int main(int argc,char **argv){
// part 0: set up device and array
// set up device
int dev = 0;
hipSetDevice(dev);
// get device properties
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp,dev);
// check if support mapped memory
if (!deviceProp.canMapHostMemory){
printf("Device %d does not support mapping CPU host memory!\n",dev);
hipDeviceReset();
exit(EXIT_SUCCESS);
}
printf("Using Device %d: %s ",dev,deviceProp.name);
// set up date size of vectors
int ipower = 10;
if (argc > 1) ipower = atoi(argv[1]);
int nElem = 1 << ipower;
size_t nBytes = nElem * sizeof(float);
if (ipower < 18){
printf("Vector size %d power %d nbytes %3.0f KB\n",nElem,
ipower,(float)nBytes/(1024.0f));
}else{
printf("Vector size %d power %d nbytes %3.0f MB\n",nElem,
ipower,(float)nBytes/(1024.0f));
}
// part 1: using device memory
// malloc host memory
float *h_A,*h_B,*hostRef,*gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
initialData(h_A,nElem);
initialData(h_B,nElem);
memset(hostRef,0,nBytes);
memset(gpuRef,0,nBytes);
// add vector at host side for result checks
sumArraysOnHost(h_A,h_B,hostRef,nElem);
// malloc device global memory
float *d_A,*d_B,*d_C;
hipMalloc((float **)&d_A,nBytes);
hipMalloc((float **)&d_B,nBytes);
hipMalloc((float **)&d_C,nBytes);
// transfer data from host to device
hipMemcpy(d_A,h_A,nBytes,hipMemcpyHostToDevice);
hipMemcpy(d_B,h_B,nBytes,hipMemcpyHostToDevice);
// set up execution configuration
int iLen = 512;
dim3 block(iLen);
dim3 grid((nElem+block.x-1)/block.x);
// invoke kernel at host side
hipLaunchKernelGGL(( sumArraysOnGPU), dim3(grid),dim3(block), 0, 0, d_A,d_B,d_C,nElem);
// copy kernel result back to host side
hipMemcpy(gpuRef,d_C,nBytes,hipMemcpyDeviceToHost);
// free device global memory
hipFree(d_A);
hipFree(d_B);
free(h_A);
free(h_B);
// part 2: using zerocopy memory for array A and B
// allocate zerocpy memory
unsigned int flags = hipHostMallocMapped;
hipHostMalloc((void **)&h_A,nBytes,flags);
hipHostMalloc((void **)&h_B,nBytes,flags);
// initialize data at host side
initialData(h_A,nElem);
initialData(h_B,nElem);
memset(hostRef,0,nBytes);
memset(gpuRef,0,nBytes);
/* // To show the results of UVA
* // pass the pointer to device
* hipHostGetDevicePointer((void **)&d_A,(void *)h_A,0);
* hipHostGetDevicePointer((void **)&d_B,(void *)h_B,0);
*/
// add at host side for result checks
sumArraysOnHost(h_A,h_B,hostRef,nElem);
// execute kernel with zero copy memory
hipLaunchKernelGGL(( sumArraysZeroCopy), dim3(grid),dim3(block), 0, 0, h_A,h_B,d_C,nElem);
// copy kernel result back to host side
hipMemcpy(gpuRef,d_C,nBytes,hipMemcpyDeviceToHost);
// check device results
checkResult(hostRef,gpuRef,nElem);
// free memory
hipFree(d_C);
hipHostFree(h_A);
hipHostFree(h_B);
free(hostRef);
free(gpuRef);
// reset device
hipDeviceReset();
return EXIT_SUCCESS;
}
| bb4b0af522cc836669448a1a2ffb1597b50c065c.cu | #include <cuda_runtime.h>
#include <stdio.h>
__global__ void sumArraysZeroCopy(float *A,float *B,float *C,const int N){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<N) C[i] = A[i] + B[i];
}
void sumArraysOnHost(float *A, float *B, float *C,const int N){
for (int idx=0;idx<N;idx++){
C[idx] = A[idx] + B[idx];
}
}
__global__ void sumArraysOnGPU(float *A,float *B,float *C,const int N){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<N) C[i] = A[i] + B[i];
}
void checkResult(float *hostRef, float *gpuRef, const int N){
double epsilon = 1.0e-8;
for (int i=0; i < N; i++){
if(abs(hostRef[i]-gpuRef[i])>epsilon){
printf("Arrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n",hostRef[i],gpuRef[i],i);
break;
}
}
return;
}
void initialData(float *ip, int size){
int i;
for (i=0;i<size;i++){
ip[i] = (float)(rand() & 0xFF)/10.0f;
}
return;
}
int main(int argc,char **argv){
// part 0: set up device and array
// set up device
int dev = 0;
cudaSetDevice(dev);
// get device properties
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp,dev);
// check if support mapped memory
if (!deviceProp.canMapHostMemory){
printf("Device %d does not support mapping CPU host memory!\n",dev);
cudaDeviceReset();
exit(EXIT_SUCCESS);
}
printf("Using Device %d: %s ",dev,deviceProp.name);
// set up date size of vectors
int ipower = 10;
if (argc > 1) ipower = atoi(argv[1]);
int nElem = 1 << ipower;
size_t nBytes = nElem * sizeof(float);
if (ipower < 18){
printf("Vector size %d power %d nbytes %3.0f KB\n",nElem,
ipower,(float)nBytes/(1024.0f));
}else{
printf("Vector size %d power %d nbytes %3.0f MB\n",nElem,
ipower,(float)nBytes/(1024.0f));
}
// part 1: using device memory
// malloc host memory
float *h_A,*h_B,*hostRef,*gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
initialData(h_A,nElem);
initialData(h_B,nElem);
memset(hostRef,0,nBytes);
memset(gpuRef,0,nBytes);
// add vector at host side for result checks
sumArraysOnHost(h_A,h_B,hostRef,nElem);
// malloc device global memory
float *d_A,*d_B,*d_C;
cudaMalloc((float **)&d_A,nBytes);
cudaMalloc((float **)&d_B,nBytes);
cudaMalloc((float **)&d_C,nBytes);
// transfer data from host to device
cudaMemcpy(d_A,h_A,nBytes,cudaMemcpyHostToDevice);
cudaMemcpy(d_B,h_B,nBytes,cudaMemcpyHostToDevice);
// set up execution configuration
int iLen = 512;
dim3 block(iLen);
dim3 grid((nElem+block.x-1)/block.x);
// invoke kernel at host side
sumArraysOnGPU<<<grid,block>>>(d_A,d_B,d_C,nElem);
// copy kernel result back to host side
cudaMemcpy(gpuRef,d_C,nBytes,cudaMemcpyDeviceToHost);
// free device global memory
cudaFree(d_A);
cudaFree(d_B);
free(h_A);
free(h_B);
// part 2: using zerocopy memory for array A and B
// allocate zerocpy memory
unsigned int flags = cudaHostAllocMapped;
cudaHostAlloc((void **)&h_A,nBytes,flags);
cudaHostAlloc((void **)&h_B,nBytes,flags);
// initialize data at host side
initialData(h_A,nElem);
initialData(h_B,nElem);
memset(hostRef,0,nBytes);
memset(gpuRef,0,nBytes);
/* // To show the results of UVA
* // pass the pointer to device
* cudaHostGetDevicePointer((void **)&d_A,(void *)h_A,0);
* cudaHostGetDevicePointer((void **)&d_B,(void *)h_B,0);
*/
// add at host side for result checks
sumArraysOnHost(h_A,h_B,hostRef,nElem);
// execute kernel with zero copy memory
sumArraysZeroCopy<<<grid,block>>>(h_A,h_B,d_C,nElem);
// copy kernel result back to host side
cudaMemcpy(gpuRef,d_C,nBytes,cudaMemcpyDeviceToHost);
// check device results
checkResult(hostRef,gpuRef,nElem);
// free memory
cudaFree(d_C);
cudaFreeHost(h_A);
cudaFreeHost(h_B);
free(hostRef);
free(gpuRef);
// reset device
cudaDeviceReset();
return EXIT_SUCCESS;
}
|
fdc2dba0584b84441b4c45cfa4edda8add44baf4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/sqrt_square_sum_kernel_util.h"
#include "oneflow/core/cuda/atomic.cuh"
#include "oneflow/core/ep/cuda/cuda_stream.h"
#include <hipcub/hipcub.hpp>
namespace oneflow {
namespace {
template<typename T>
__global__ void SqrtSquareSumForOneThreadBlock(int64_t n, const T* x, T* y) {
T t_sum = 0;
CUDA_1D_KERNEL_LOOP(i, n) { t_sum += x[i] * x[i]; }
typedef hipcub::BlockReduce<T, kCudaThreadsNumPerBlock> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
T b_sum = BlockReduce(temp_storage).Sum(t_sum);
if (threadIdx.x == 0) { *y = sqrt(b_sum); }
}
template<typename T>
__global__ void SqrtSumForMultiThreadBlock(int64_t n, const T* x, T* y) {
T t_sum = 0;
CUDA_1D_KERNEL_LOOP(i, n) { t_sum += x[i]; }
typedef hipcub::BlockReduce<T, kCudaThreadsNumPerBlock> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
T b_sum = BlockReduce(temp_storage).Sum(t_sum);
if (threadIdx.x == 0) { *y = sqrt(b_sum); }
}
template<typename T>
__global__ void SquareSumForMultiThreadBlock(int64_t n, const T* x, T* tmp) {
T t_sum = 0;
CUDA_1D_KERNEL_LOOP(i, n) { t_sum += x[i] * x[i]; }
typedef hipcub::BlockReduce<T, kCudaThreadsNumPerBlock> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
T b_sum = BlockReduce(temp_storage).Sum(t_sum);
if (threadIdx.x == 0) { tmp[blockIdx.x] = b_sum; }
}
} // namespace
template<typename T>
struct SqrtSquareSumKernelUtil<DeviceType::kCUDA, T> {
static void SqrtSquareSum(ep::Stream* stream, int64_t n, const T* x, T* y, T* tmp) {
const int32_t num_blocks = BlocksNum4ThreadsNum(n);
CHECK_GE(num_blocks, 0);
if (num_blocks == 1) {
hipLaunchKernelGGL(( SqrtSquareSumForOneThreadBlock<T>)
, dim3(1), dim3(kCudaThreadsNumPerBlock), 0, stream->As<ep::CudaStream>()->cuda_stream(), n, x, y);
} else {
Memset<DeviceType::kCUDA>(stream, y, 0, sizeof(T));
hipLaunchKernelGGL(( SquareSumForMultiThreadBlock<T>)
, dim3(num_blocks), dim3(kCudaThreadsNumPerBlock), 0, stream->As<ep::CudaStream>()->cuda_stream(),
n, x, tmp);
hipLaunchKernelGGL(( SqrtSumForMultiThreadBlock<T>)
, dim3(1), dim3(kCudaThreadsNumPerBlock), 0, stream->As<ep::CudaStream>()->cuda_stream(),
num_blocks, tmp, y);
}
}
};
#define INSTANTIATE_SQRT_SQUARE_SUM_KERNEL_UTIL_CUDA(type_cpp, type_proto) \
template struct SqrtSquareSumKernelUtil<DeviceType::kCUDA, type_cpp>;
OF_PP_FOR_EACH_TUPLE(INSTANTIATE_SQRT_SQUARE_SUM_KERNEL_UTIL_CUDA, FLOATING_DATA_TYPE_SEQ);
#undef INSTANTIATE_SQRT_SQUARE_SUM_KERNEL_UTIL_CUDA
} // namespace oneflow
| fdc2dba0584b84441b4c45cfa4edda8add44baf4.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/sqrt_square_sum_kernel_util.h"
#include "oneflow/core/cuda/atomic.cuh"
#include "oneflow/core/ep/cuda/cuda_stream.h"
#include <cub/cub.cuh>
namespace oneflow {
namespace {
template<typename T>
__global__ void SqrtSquareSumForOneThreadBlock(int64_t n, const T* x, T* y) {
T t_sum = 0;
CUDA_1D_KERNEL_LOOP(i, n) { t_sum += x[i] * x[i]; }
typedef cub::BlockReduce<T, kCudaThreadsNumPerBlock> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
T b_sum = BlockReduce(temp_storage).Sum(t_sum);
if (threadIdx.x == 0) { *y = sqrt(b_sum); }
}
template<typename T>
__global__ void SqrtSumForMultiThreadBlock(int64_t n, const T* x, T* y) {
T t_sum = 0;
CUDA_1D_KERNEL_LOOP(i, n) { t_sum += x[i]; }
typedef cub::BlockReduce<T, kCudaThreadsNumPerBlock> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
T b_sum = BlockReduce(temp_storage).Sum(t_sum);
if (threadIdx.x == 0) { *y = sqrt(b_sum); }
}
template<typename T>
__global__ void SquareSumForMultiThreadBlock(int64_t n, const T* x, T* tmp) {
T t_sum = 0;
CUDA_1D_KERNEL_LOOP(i, n) { t_sum += x[i] * x[i]; }
typedef cub::BlockReduce<T, kCudaThreadsNumPerBlock> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
T b_sum = BlockReduce(temp_storage).Sum(t_sum);
if (threadIdx.x == 0) { tmp[blockIdx.x] = b_sum; }
}
} // namespace
template<typename T>
struct SqrtSquareSumKernelUtil<DeviceType::kCUDA, T> {
static void SqrtSquareSum(ep::Stream* stream, int64_t n, const T* x, T* y, T* tmp) {
const int32_t num_blocks = BlocksNum4ThreadsNum(n);
CHECK_GE(num_blocks, 0);
if (num_blocks == 1) {
SqrtSquareSumForOneThreadBlock<T>
<<<1, kCudaThreadsNumPerBlock, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>(n, x, y);
} else {
Memset<DeviceType::kCUDA>(stream, y, 0, sizeof(T));
SquareSumForMultiThreadBlock<T>
<<<num_blocks, kCudaThreadsNumPerBlock, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>(
n, x, tmp);
SqrtSumForMultiThreadBlock<T>
<<<1, kCudaThreadsNumPerBlock, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>(
num_blocks, tmp, y);
}
}
};
#define INSTANTIATE_SQRT_SQUARE_SUM_KERNEL_UTIL_CUDA(type_cpp, type_proto) \
template struct SqrtSquareSumKernelUtil<DeviceType::kCUDA, type_cpp>;
OF_PP_FOR_EACH_TUPLE(INSTANTIATE_SQRT_SQUARE_SUM_KERNEL_UTIL_CUDA, FLOATING_DATA_TYPE_SEQ);
#undef INSTANTIATE_SQRT_SQUARE_SUM_KERNEL_UTIL_CUDA
} // namespace oneflow
|
5cb0217d072dd8e135583d01eb59b73295755f93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hiprand/hiprand.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include <time.h>
#include <random>
#include <iostream>
#include <math.h>
#include <vector>
#include "MaxSatStructures.h"
#include "MaxSatSolvers.h"
#include "MaxSatTests.h"
#include "sortingNetworksNvidia\sortingNetworks_common.h"
using namespace std;
typedef vector< pair<int, bool> > vib;
typedef vector<bool> vb;
typedef vector<int> vi;
#define L(a) (int)((a).size())
#define all(a) (a).begin(), (a).end()
#define mp make_pair
#define Trace(X) cerr << #X << " = " << X << endl
#define _ << " _ " <<
const vector<string> SatSolver::solvers_list = { "GreedySatSolver", "GreedyDeepSatSolver", "CudaGreedyDeepSatSolver", "TabuSatSolver", "CudaSingleStepTabuSatSolver", "CudaDeepSingleStepTabuSatSolver","CudaMultiStepTabuSatSolver", "SASatSolver","CudaSingleStepSSASatSolver", "CudaMultiStepSASatSolver" };
int main()
{
//sumOrCountFanTest();
//classInCudaTest();
//sortTest();
srand(time(NULL));
hipDeviceReset();
int nbvars, nbclauses;
ifstream inFile;
inFile.open("4-s3v110c1000-2-random-973.cnf");
if (!inFile) {
cout << "Unable to open file";
exit(1); // terminate with error
}
inFile >> nbvars >> nbclauses;
nbvars++;
Cnf* cnf;
hipMallocManaged(&cnf, sizeof(Cnf));
new(cnf) Cnf(nbvars);
while (nbclauses--) {
int tempvar;
Clause *tmpcls = new Clause();
inFile >> tempvar;
while (tempvar) {
int id = abs(tempvar);
bool sgn = tempvar > 0;
tmpcls->addLiteral(id, sgn);
inFile >> tempvar;
}
cnf->addClause(*tmpcls);
}
inFile.close();
cnf->cudable();
for(int s=7; s <9; /*SatSolver::solvers_list.size();*/ s++)
for (int i = 1; i <=20; i++) {
//string type = "CudaGreedyDeepSatSolver";
cout << endl << "------------ " << SatSolver::solvers_list[s] << "---" << i << endl;
SatSolver *solver = SatSolver::factory(SatSolver::solvers_list[s],nbvars, cnf);
//SatSolver *solver = new CudaMultiStepTabuSatSolver(nbvars, cnf);
Recorder *recorder = new Recorder(solver->getName(), "3", "SAcollective1-4", to_string(i), 150, 973);
solver->setRecorder(recorder);
recorder->start();
solver->solve();
}
hipDeviceReset();
return 0;
}
| 5cb0217d072dd8e135583d01eb59b73295755f93.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <curand.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <time.h>
#include <random>
#include <iostream>
#include <math.h>
#include <vector>
#include "MaxSatStructures.h"
#include "MaxSatSolvers.h"
#include "MaxSatTests.h"
#include "sortingNetworksNvidia\sortingNetworks_common.h"
using namespace std;
typedef vector< pair<int, bool> > vib;
typedef vector<bool> vb;
typedef vector<int> vi;
#define L(a) (int)((a).size())
#define all(a) (a).begin(), (a).end()
#define mp make_pair
#define Trace(X) cerr << #X << " = " << X << endl
#define _ << " _ " <<
const vector<string> SatSolver::solvers_list = { "GreedySatSolver", "GreedyDeepSatSolver", "CudaGreedyDeepSatSolver", "TabuSatSolver", "CudaSingleStepTabuSatSolver", "CudaDeepSingleStepTabuSatSolver","CudaMultiStepTabuSatSolver", "SASatSolver","CudaSingleStepSSASatSolver", "CudaMultiStepSASatSolver" };
int main()
{
//sumOrCountFanTest();
//classInCudaTest();
//sortTest();
srand(time(NULL));
cudaDeviceReset();
int nbvars, nbclauses;
ifstream inFile;
inFile.open("4-s3v110c1000-2-random-973.cnf");
if (!inFile) {
cout << "Unable to open file";
exit(1); // terminate with error
}
inFile >> nbvars >> nbclauses;
nbvars++;
Cnf* cnf;
cudaMallocManaged(&cnf, sizeof(Cnf));
new(cnf) Cnf(nbvars);
while (nbclauses--) {
int tempvar;
Clause *tmpcls = new Clause();
inFile >> tempvar;
while (tempvar) {
int id = abs(tempvar);
bool sgn = tempvar > 0;
tmpcls->addLiteral(id, sgn);
inFile >> tempvar;
}
cnf->addClause(*tmpcls);
}
inFile.close();
cnf->cudable();
for(int s=7; s <9; /*SatSolver::solvers_list.size();*/ s++)
for (int i = 1; i <=20; i++) {
//string type = "CudaGreedyDeepSatSolver";
cout << endl << "------------ " << SatSolver::solvers_list[s] << "---" << i << endl;
SatSolver *solver = SatSolver::factory(SatSolver::solvers_list[s],nbvars, cnf);
//SatSolver *solver = new CudaMultiStepTabuSatSolver(nbvars, cnf);
Recorder *recorder = new Recorder(solver->getName(), "3", "SAcollective1-4", to_string(i), 150, 973);
solver->setRecorder(recorder);
recorder->start();
solver->solve();
}
cudaDeviceReset();
return 0;
}
|
b18a5a606c9111af0738084b545cd0ab50701bfe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudakernel/memory/pad.h"
#include "cudakernel/common/divmod_fast.h"
#include "cudakernel/common/memory_utils.h"
#include "cudakernel/math/math.h"
#include "ppl/nn/common/tensor_shape.h"
#include "ppl/common/retcode.h"
#include <hip/hip_fp16.h>
template <typename T>
__global__ void ppl_cukernel_range(
int64_t num_elems,
const T* start,
const T* delta,
T* output)
{
int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
output[index] = start[0] + index * delta[0];
}
template <>
__global__ void ppl_cukernel_range<half>(
int64_t num_elems,
const half *start,
const half *delta,
half* output)
{
typedef Math<half, half, half> OpMath;
int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
output[index] = OpMath::add(start[0], OpMath::mul(delta[0], __ll2half_rn(index)));
}
ppl::common::RetCode PPLCUDARangeForwardImp(
hipStream_t stream,
const void *start,
const void *delta,
ppl::nn::TensorShape* output_shape,
void* output)
{
int block_size = 256;
uint64_t num_elems = output_shape->GetElementsIncludingPadding();
int grid_size = (num_elems + block_size - 1) / block_size;
switch(output_shape->GetDataType()) {
case ppl::common::DATATYPE_FLOAT32 :
hipLaunchKernelGGL(( ppl_cukernel_range<float>), dim3(grid_size), dim3(block_size), 0, stream, num_elems, (float*)start, (float*)delta, (float*)output);
break;
case ppl::common::DATATYPE_FLOAT16 :
hipLaunchKernelGGL(( ppl_cukernel_range<half>), dim3(grid_size), dim3(block_size), 0, stream, num_elems, (half*)start, (half*)delta, (half*)output);
break;
case ppl::common::DATATYPE_INT64 :
hipLaunchKernelGGL(( ppl_cukernel_range<int64_t>), dim3(grid_size), dim3(block_size), 0, stream, num_elems, (int64_t*)start, (int64_t*)delta, (int64_t*)output);
break;
default:
return ppl::common::RC_UNSUPPORTED;
}
// ppl_cukernel_range<<<grid_size, block_size, 0, stream>>>(num_elems, start, delta, (T*)output);
return ppl::common::RC_SUCCESS;
}
| b18a5a606c9111af0738084b545cd0ab50701bfe.cu | #include "cudakernel/memory/pad.h"
#include "cudakernel/common/divmod_fast.h"
#include "cudakernel/common/memory_utils.h"
#include "cudakernel/math/math.h"
#include "ppl/nn/common/tensor_shape.h"
#include "ppl/common/retcode.h"
#include <cuda_fp16.h>
template <typename T>
__global__ void ppl_cukernel_range(
int64_t num_elems,
const T* start,
const T* delta,
T* output)
{
int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
output[index] = start[0] + index * delta[0];
}
template <>
__global__ void ppl_cukernel_range<half>(
int64_t num_elems,
const half *start,
const half *delta,
half* output)
{
typedef Math<half, half, half> OpMath;
int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
output[index] = OpMath::add(start[0], OpMath::mul(delta[0], __ll2half_rn(index)));
}
ppl::common::RetCode PPLCUDARangeForwardImp(
cudaStream_t stream,
const void *start,
const void *delta,
ppl::nn::TensorShape* output_shape,
void* output)
{
int block_size = 256;
uint64_t num_elems = output_shape->GetElementsIncludingPadding();
int grid_size = (num_elems + block_size - 1) / block_size;
switch(output_shape->GetDataType()) {
case ppl::common::DATATYPE_FLOAT32 :
ppl_cukernel_range<float><<<grid_size, block_size, 0, stream>>>(num_elems, (float*)start, (float*)delta, (float*)output);
break;
case ppl::common::DATATYPE_FLOAT16 :
ppl_cukernel_range<half><<<grid_size, block_size, 0, stream>>>(num_elems, (half*)start, (half*)delta, (half*)output);
break;
case ppl::common::DATATYPE_INT64 :
ppl_cukernel_range<int64_t><<<grid_size, block_size, 0, stream>>>(num_elems, (int64_t*)start, (int64_t*)delta, (int64_t*)output);
break;
default:
return ppl::common::RC_UNSUPPORTED;
}
// ppl_cukernel_range<<<grid_size, block_size, 0, stream>>>(num_elems, start, delta, (T*)output);
return ppl::common::RC_SUCCESS;
}
|
78f37fb7a187db8ccdc8b195c0cf519e9886e4c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// REQUIRES: nvptx-registered-target
// Make sure we don't allow dynamic initialization for device
// variables, but accept empty constructors allowed by CUDA.
// RUN: %clang_cc1 -verify %s -triple nvptx64-nvidia-cuda -fcuda-is-device -std=c++11 %s
#ifdef __clang__
#include "Inputs/cuda.h"
#endif
// Use the types we share with CodeGen tests.
#include "Inputs/cuda-initializers.h"
__shared__ int s_v_i = 1;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__device__ int d_v_f = f();
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ int s_v_f = f();
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ int c_v_f = f();
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ T s_t_i = {2};
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__device__ EC d_ec_i(3);
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ EC s_ec_i(3);
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ EC c_ec_i(3);
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ EC d_ec_i2 = {3};
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ EC s_ec_i2 = {3};
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ EC c_ec_i2 = {3};
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ ETC d_etc_i(3);
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ ETC s_etc_i(3);
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ ETC c_etc_i(3);
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ ETC d_etc_i2 = {3};
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ ETC s_etc_i2 = {3};
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ ETC c_etc_i2 = {3};
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ UC d_uc;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ UC s_uc;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ UC c_uc;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ UD d_ud;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ UD s_ud;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ UD c_ud;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ ECI d_eci;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ ECI s_eci;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ ECI c_eci;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ NEC d_nec;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ NEC s_nec;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ NEC c_nec;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ NED d_ned;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ NED s_ned;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ NED c_ned;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ NCV d_ncv;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ NCV s_ncv;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ NCV c_ncv;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ VD d_vd;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ VD s_vd;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ VD c_vd;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ NCF d_ncf;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ NCF s_ncf;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ NCF c_ncf;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ NCFS s_ncfs;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__device__ UTC d_utc;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ UTC s_utc;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ UTC c_utc;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ UTC d_utc_i(3);
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ UTC s_utc_i(3);
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ UTC c_utc_i(3);
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ NETC d_netc;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ NETC s_netc;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ NETC c_netc;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ NETC d_netc_i(3);
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ NETC s_netc_i(3);
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ NETC c_netc_i(3);
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ EC_I_EC1 d_ec_i_ec1;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ EC_I_EC1 s_ec_i_ec1;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ EC_I_EC1 c_ec_i_ec1;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ T_V_T d_t_v_t;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ T_V_T s_t_v_t;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ T_V_T c_t_v_t;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ T_B_NEC d_t_b_nec;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ T_B_NEC s_t_b_nec;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ T_B_NEC c_t_b_nec;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ T_F_NEC d_t_f_nec;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ T_F_NEC s_t_f_nec;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ T_F_NEC c_t_f_nec;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ T_FA_NEC d_t_fa_nec;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ T_FA_NEC s_t_fa_nec;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ T_FA_NEC c_t_fa_nec;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ T_B_NED d_t_b_ned;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ T_B_NED s_t_b_ned;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ T_B_NED c_t_b_ned;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ T_F_NED d_t_f_ned;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ T_F_NED s_t_f_ned;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ T_F_NED c_t_f_ned;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ T_FA_NED d_t_fa_ned;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ T_FA_NED s_t_fa_ned;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ T_FA_NED c_t_fa_ned;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
// Verify that only __shared__ local variables may be static on device
// side and that they are not allowed to be initialized.
__device__ void df_sema() {
  // Static __shared__ locals are the one permitted "static" form in
  // device code, but their implicit initialization (default ctor or
  // in-class initializers from cuda-initializers.h) must still be rejected.
  static __shared__ NCFS s_ncfs;
  // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
  static __shared__ UC s_uc;
  // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
  static __shared__ NED s_ned;
  // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
  // Any other storage combined with "static" inside a device/global
  // function is rejected outright.
  static __device__ int ds;
  // expected-error@-1 {{Within a __device__/__global__ function, only __shared__ variables may be marked "static"}}
  static __constant__ int dc;
  // expected-error@-1 {{Within a __device__/__global__ function, only __shared__ variables may be marked "static"}}
  static int v;
  // expected-error@-1 {{Within a __device__/__global__ function, only __shared__ variables may be marked "static"}}
}
| 78f37fb7a187db8ccdc8b195c0cf519e9886e4c0.cu | // REQUIRES: nvptx-registered-target
// Make sure we don't allow dynamic initialization for device
// variables, but accept empty constructors allowed by CUDA.
// RUN: %clang_cc1 -verify %s -triple nvptx64-nvidia-cuda -fcuda-is-device -std=c++11 %s
#ifdef __clang__
#include "Inputs/cuda.h"
#endif
// Use the types we share with CodeGen tests.
#include "Inputs/cuda-initializers.h"
__shared__ int s_v_i = 1;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__device__ int d_v_f = f();
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ int s_v_f = f();
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ int c_v_f = f();
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ T s_t_i = {2};
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__device__ EC d_ec_i(3);
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ EC s_ec_i(3);
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ EC c_ec_i(3);
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ EC d_ec_i2 = {3};
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ EC s_ec_i2 = {3};
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ EC c_ec_i2 = {3};
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ ETC d_etc_i(3);
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ ETC s_etc_i(3);
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ ETC c_etc_i(3);
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ ETC d_etc_i2 = {3};
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ ETC s_etc_i2 = {3};
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ ETC c_etc_i2 = {3};
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ UC d_uc;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ UC s_uc;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ UC c_uc;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ UD d_ud;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ UD s_ud;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ UD c_ud;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ ECI d_eci;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ ECI s_eci;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ ECI c_eci;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ NEC d_nec;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ NEC s_nec;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ NEC c_nec;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ NED d_ned;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ NED s_ned;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ NED c_ned;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ NCV d_ncv;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ NCV s_ncv;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ NCV c_ncv;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ VD d_vd;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ VD s_vd;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ VD c_vd;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ NCF d_ncf;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ NCF s_ncf;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ NCF c_ncf;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ NCFS s_ncfs;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__device__ UTC d_utc;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ UTC s_utc;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ UTC c_utc;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ UTC d_utc_i(3);
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ UTC s_utc_i(3);
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ UTC c_utc_i(3);
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ NETC d_netc;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ NETC s_netc;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ NETC c_netc;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ NETC d_netc_i(3);
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ NETC s_netc_i(3);
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ NETC c_netc_i(3);
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ EC_I_EC1 d_ec_i_ec1;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ EC_I_EC1 s_ec_i_ec1;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ EC_I_EC1 c_ec_i_ec1;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ T_V_T d_t_v_t;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ T_V_T s_t_v_t;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ T_V_T c_t_v_t;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ T_B_NEC d_t_b_nec;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ T_B_NEC s_t_b_nec;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ T_B_NEC c_t_b_nec;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ T_F_NEC d_t_f_nec;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ T_F_NEC s_t_f_nec;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ T_F_NEC c_t_f_nec;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ T_FA_NEC d_t_fa_nec;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ T_FA_NEC s_t_fa_nec;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ T_FA_NEC c_t_fa_nec;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ T_B_NED d_t_b_ned;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ T_B_NED s_t_b_ned;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ T_B_NED c_t_b_ned;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ T_F_NED d_t_f_ned;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ T_F_NED s_t_f_ned;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ T_F_NED c_t_f_ned;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__device__ T_FA_NED d_t_fa_ned;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
__shared__ T_FA_NED s_t_fa_ned;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
__constant__ T_FA_NED c_t_fa_ned;
// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
// Verify that only __shared__ local variables may be static on device
// side and that they are not allowed to be initialized.
__device__ void df_sema() {
static __shared__ NCFS s_ncfs;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
static __shared__ UC s_uc;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
static __shared__ NED s_ned;
// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
static __device__ int ds;
// expected-error@-1 {{Within a __device__/__global__ function, only __shared__ variables may be marked "static"}}
static __constant__ int dc;
// expected-error@-1 {{Within a __device__/__global__ function, only __shared__ variables may be marked "static"}}
static int v;
// expected-error@-1 {{Within a __device__/__global__ function, only __shared__ variables may be marked "static"}}
}
|
c25c41fa982eb155601e1d1ecb47f805e0e3dae2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <string>
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/fake_quantize_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
// Per-block |max| reduction kernel.
// Multi-block launch (gridDim.x > 1): each thread grid-strides over `in`
// accumulating a per-thread absolute maximum. Single-block launch: one
// element per thread. A shared-memory tree reduction then writes one
// partial maximum per block to out[blockIdx.x].
// Requires: blockDim.x * sizeof(T) dynamic shared memory; the tree
// reduction assumes blockDim.x is a power of two (true for the
// 1024-thread launches in FindAbsMaxFunctor below).
template <typename T>
__global__ void FindAbsMaxKernel(const T* in, const int n, T* out) {
  int bid = threadIdx.x + blockIdx.x * blockDim.x;
  int tid = threadIdx.x;
  // NOTE(review): extern __shared__ with a template-dependent type is
  // only safe while a single instantiation (float) exists — confirm
  // before instantiating for additional types.
  extern __shared__ T shared_max_data[];
  if (gridDim.x > 1) {
    shared_max_data[tid] = T(0);
    for (int i = bid; i < n; i += blockDim.x * gridDim.x) {
      T tmp = fabs(in[i]);
      if (tmp > shared_max_data[tid]) {
        shared_max_data[tid] = tmp;
      }
    }
  } else {
    if (bid < n) {
      shared_max_data[tid] = fabs(in[bid]);
    } else {
      shared_max_data[tid] = T(0);
    }
  }
  __syncthreads();
  // Power-of-two tree reduction over shared memory; barrier each level.
  for (int i = blockDim.x / 2; i > 0; i >>= 1) {
    if (tid < i && (shared_max_data[tid] < shared_max_data[tid + i])) {
      shared_max_data[tid] = shared_max_data[tid + i];
    }
    __syncthreads();
  }
  if (tid == 0) {
    out[blockIdx.x] = shared_max_data[0];
  }
}
// Host-side launcher: computes out[0] = max(|in[i]|) over `num` elements
// with a two-pass reduction: per-block partial maxima, then one block
// reduces the partials.
template <typename T>
struct FindAbsMaxFunctor<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& ctx, const T* in,
                  const int num, T* out) {
    int block = 1024;
    // Ceil-divide, then cap the grid at 1024 blocks so the second pass
    // (a single 1024-thread block) can cover every partial.
    int grid = (block - 1 + num) / block;
    grid = (grid > block) ? block : grid;
    framework::Tensor max;
    // Scratch buffer holding one partial maximum per first-pass block.
    T* max_data =
        max.mutable_data<T>(framework::make_ddim({grid}), ctx.GetPlace());
    // Pass 1: per-block partials (1024*sizeof(T) dynamic shared memory
    // matches the 1024-thread block).
    hipLaunchKernelGGL(( FindAbsMaxKernel<T>), dim3(grid), dim3(block), 1024 * sizeof(T), ctx.stream(),
        in, num, max_data);
    // Pass 2: fold the `grid` partials into out[0].
    hipLaunchKernelGGL(( FindAbsMaxKernel<T>), dim3(1), dim3(block), 1024 * sizeof(T), ctx.stream(),
        max_data, grid, out);
  }
};
template struct FindAbsMaxFunctor<platform::CUDADeviceContext, float>;
// Per-channel |max|: block b reduces its channel's contiguous slice of
// n/c elements into out[b].
// Requires: blockDim.x * sizeof(T) dynamic shared memory; tree reduction
// assumes blockDim.x is a power of two.
template <typename T>
__global__ void FindChannelAbsMaxKernel(const T* in, const int n, const int c,
                                        T* out) {
  int tid = threadIdx.x;
  int channel_size = n / c;
  // Start of this block's channel slice (channels are laid out
  // contiguously, channel_size elements each).
  const T* in_c = in + blockIdx.x * channel_size;
  extern __shared__ T shared_max_data[];
  shared_max_data[tid] = T(0);
  // Block-strided per-thread partial |max| over the channel slice.
  for (int i = tid; i < channel_size; i += blockDim.x) {
    T tmp = fabs(in_c[i]);
    if (tmp > shared_max_data[tid]) {
      shared_max_data[tid] = tmp;
    }
  }
  __syncthreads();
  for (int i = blockDim.x / 2; i > 0; i >>= 1) {
    if (tid < i && (shared_max_data[tid] < shared_max_data[tid + i])) {
      shared_max_data[tid] = shared_max_data[tid + i];
    }
    __syncthreads();
  }
  if (tid == 0) {
    out[blockIdx.x] = shared_max_data[0];
  }
}
// Launcher: one 1024-thread block per channel; writes out[ch] =
// max(|in|) over that channel's num/channel contiguous elements.
template <typename T>
struct FindChannelAbsMaxFunctor<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& ctx, const T* in,
                  const int num, const int channel, T* out) {
    int block = 1024;
    int grid = channel;  // one block per channel
    hipLaunchKernelGGL(( FindChannelAbsMaxKernel<T>), dim3(grid), dim3(block), 1024 * sizeof(T), ctx.stream(),
        in, num, channel, out);
  }
};
template struct FindChannelAbsMaxFunctor<platform::CUDADeviceContext, float>;
// Fake-quantize `in` into `out`: clip each element to [-s, s] where
// s = scale[0], then snap it onto the integer grid via
// round((bin_cnt / s) * x). Grid-stride loop over n elements.
template <typename T>
__global__ void ClipAndQuantKernel(const T* in, const T* scale,
                                   const int bin_cnt, const int n, T* out) {
  int bid = threadIdx.x + blockIdx.x * blockDim.x;
  // (Removed an unused local `tid = threadIdx.x` from the original.)
  T s = scale[0];
  // bin_cnt / s is loop-invariant; hoisting preserves the original
  // left-to-right evaluation ((bin_cnt / s) * v) bit for bit.
  T inv_s = bin_cnt / s;
  for (int i = bid; i < n; i += blockDim.x * gridDim.x) {
    T x = in[i];
    T v = x > s ? s : x;        // clip above
    v = v < -s ? -s : v;        // clip below
    out[i] = round(inv_s * v);  // nearest quantization level
  }
}
// Launcher for ClipAndQuantKernel: fake-quantizes `in` into `out` using
// the single scalar scale (a 1-element device tensor).
template <typename T>
struct ClipAndFakeQuantFunctor<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& ctx,
                  const framework::Tensor& in, const framework::Tensor& scale,
                  const int bin_cnt, framework::Tensor* out) {
    int num = in.numel();
    int block = 1024;
    int grid = (block - 1 + num) / block;  // ceil(num / block)
    const T* in_data = in.data<T>();
    const T* scale_data = scale.data<T>();
    T* out_data = out->mutable_data<T>(ctx.GetPlace());
    hipLaunchKernelGGL(( ClipAndQuantKernel<T>), dim3(grid), dim3(block), 0, ctx.stream(),
        in_data, scale_data, bin_cnt, num, out_data);
  }
};
template struct ClipAndFakeQuantFunctor<platform::CUDADeviceContext, float>;
// Per-channel fake-quantize: block b handles channel b (a contiguous
// slice of n/c elements) with its own scale[b]. Each element is clipped
// to [-s, s] and rounded onto the quantization grid via
// round((bin_cnt / s) * x).
template <typename T>
__global__ void ChannelClipAndQuantKernel(const T* in, const T* scale,
                                          const int bin_cnt, const int n,
                                          const int c, T* out) {
  int tid = threadIdx.x;
  int channel_size = n / c;
  const T* in_c = in + blockIdx.x * channel_size;
  T* out_c = out + blockIdx.x * channel_size;
  T s = scale[blockIdx.x];
  // Hoisted loop-invariant; (bin_cnt / s) * v matches the original
  // left-to-right evaluation exactly.
  T inv_s = bin_cnt / s;
  for (int i = tid; i < channel_size; i += blockDim.x) {
    T x = in_c[i];
    T v = x > s ? s : x;          // clip above
    v = v < -s ? -s : v;          // clip below
    out_c[i] = round(inv_s * v);  // nearest quantization level
  }
}
// Launcher for ChannelClipAndQuantKernel: one 1024-thread block per
// channel; `scale` holds one scalar per channel.
template <typename T>
struct ChannelClipAndFakeQuantFunctor<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& ctx,
                  const framework::Tensor& in, const framework::Tensor& scale,
                  const int bin_cnt, const int channel,
                  framework::Tensor* out) {
    int num = in.numel();
    int block = 1024;
    int grid = channel;  // one block per channel
    const T* in_data = in.data<T>();
    const T* scale_data = scale.data<T>();
    T* out_data = out->mutable_data<T>(ctx.GetPlace());
    hipLaunchKernelGGL(( ChannelClipAndQuantKernel<T>), dim3(grid), dim3(block), 0, ctx.stream(),
        in_data, scale_data, bin_cnt, num, channel, out_data);
  }
};
template struct ChannelClipAndFakeQuantFunctor<platform::CUDADeviceContext,
float>;
// Single-thread bookkeeping kernel (launched with 1 block x 1 thread)
// for the sliding window of recent scales:
//  - overwrites slot (iter % window_size) of scale_arr with cur_scale,
//  - publishes out_scale = max(last_scale, cur_scale),
//  - if the evicted entry equalled the running max (within tolerance),
//    sets need_find_max = 1 and out_size to the number of valid window
//    entries so the host can recompute the max over the whole window.
template <typename T>
__global__ void FindRangeAbsMaxAndFillArray(const T* cur_scale,
                                            const T* last_scale,
                                            const int64_t* iter,
                                            const int window_size, T* scale_arr,
                                            T* out_scale, int* need_find_max,
                                            int* out_size) {
  // NOTE(review): int64_t iteration count truncated to int here —
  // confirm iter stays below INT_MAX in practice.
  int it = iter[0];
  int idx = it % window_size;  // circular-buffer slot to replace
  T removed = scale_arr[idx];
  T cur = cur_scale[0];
  scale_arr[idx] = cur;
  T max = last_scale[0];
  out_scale[0] = max < cur ? cur : max;
  // Tolerance compare: the evicted value matching the current max means
  // the true max may have left the window and must be recomputed.
  if (fabs(removed - max) < 1e-6) {
    need_find_max[0] = 1;
    // Window is only partially filled during the first window_size steps.
    out_size[0] = it > window_size ? window_size : it;
  } else {
    need_find_max[0] = 0;
  }
}
// Host driver for range-abs-max: runs the single-thread bookkeeping
// kernel, then — only when that kernel flagged that the window maximum
// may have been evicted — copies the flag and valid-entry count back to
// the host and recomputes the max over the stored window.
// The Copy + ctx.Wait() pairs are deliberate blocking points: the host
// branch below depends on device-produced values.
template <typename T>
struct FindRangeAbsMaxFunctor<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& ctx,
                  const framework::Tensor& cur_scale,
                  const framework::Tensor& last_scale,
                  const framework::Tensor& iter, const int window_size,
                  framework::Tensor* scales_arr, framework::Tensor* out_scale) {
    const auto gpu_place = boost::get<platform::CUDAPlace>(ctx.GetPlace());
    T* scale_arr = scales_arr->mutable_data<T>(gpu_place);
    T* out_scale_data = out_scale->mutable_data<T>(gpu_place);
    // Device-side scratch: 1-int flag and 1-int window length.
    framework::Tensor need_find_max, out_size;
    int* find_max = need_find_max.mutable_data<int>(gpu_place);
    int* out_size_data = out_size.mutable_data<int>(gpu_place);
    hipLaunchKernelGGL(( FindRangeAbsMaxAndFillArray<T>), dim3(1), dim3(1), 0, ctx.stream(),
        cur_scale.data<T>(), last_scale.data<T>(), iter.data<int64_t>(),
        window_size, scale_arr, out_scale_data, find_max, out_size_data);
    int g_find_max;
    memory::Copy(platform::CPUPlace(), &g_find_max, gpu_place, find_max,
                 sizeof(int), ctx.stream());
    ctx.Wait();  // block: the branch below reads g_find_max
    if (g_find_max) {
      int len;
      memory::Copy(platform::CPUPlace(), &len, gpu_place, out_size_data,
                   sizeof(int), ctx.stream());
      ctx.Wait();  // block: len is needed for the reduction size
      // Recompute out_scale over the `len` valid window entries.
      FindAbsMaxFunctor<platform::CUDADeviceContext, T>()(ctx, scale_arr, len,
                                                          out_scale_data);
    }
  }
};
template struct FindRangeAbsMaxFunctor<platform::CUDADeviceContext, float>;
// Exponential-moving-average scale update, performed on the host:
//   state' = rate * state + 1
//   accum' = rate * accum + cur_scale
//   scale' = accum' / state'
// All three scalars are round-tripped device -> host -> device with
// blocking copies (the ctx.Wait() calls), since the update itself is
// trivial host arithmetic.
template <typename T>
struct FindMovingAverageAbsMaxFunctor<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& ctx,
                  const framework::Tensor& in_accum,
                  const framework::Tensor& in_state, const T* cur_scale,
                  const float rate, framework::Tensor* out_state,
                  framework::Tensor* out_accum, framework::Tensor* out_scale) {
    const auto gpu_place = boost::get<platform::CUDAPlace>(ctx.GetPlace());
    T accum;
    T state;
    T scale;
    // Pull the three device scalars to the host.
    memory::Copy(platform::CPUPlace(), &accum, gpu_place, in_accum.data<T>(),
                 sizeof(T), ctx.stream());
    memory::Copy(platform::CPUPlace(), &state, gpu_place, in_state.data<T>(),
                 sizeof(T), ctx.stream());
    memory::Copy(platform::CPUPlace(), &scale, gpu_place, cur_scale, sizeof(T),
                 ctx.stream());
    ctx.Wait();  // host reads below require the copies to have landed
    // EMA update in host code.
    state = rate * state + 1;
    accum = rate * accum + scale;
    scale = accum / state;
    // Push the updated scalars back to the device outputs.
    memory::Copy(gpu_place, out_accum->mutable_data<T>(gpu_place),
                 platform::CPUPlace(), &accum, sizeof(T), ctx.stream());
    memory::Copy(gpu_place, out_state->mutable_data<T>(gpu_place),
                 platform::CPUPlace(), &state, sizeof(T), ctx.stream());
    memory::Copy(gpu_place, out_scale->mutable_data<T>(gpu_place),
                 platform::CPUPlace(), &scale, sizeof(T), ctx.stream());
    ctx.Wait();  // ensure results are visible before local scalars go out of scope
  }
};
template struct FindMovingAverageAbsMaxFunctor<platform::CUDADeviceContext,
float>;
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
using CUDA = paddle::platform::CUDADeviceContext;
REGISTER_OP_CUDA_KERNEL(fake_quantize_abs_max,
ops::FakeQuantizeAbsMaxKernel<CUDA, float>);
REGISTER_OP_CUDA_KERNEL(fake_channel_wise_quantize_abs_max,
ops::FakeChannelWiseQuantizeAbsMaxKernel<CUDA, float>);
REGISTER_OP_CUDA_KERNEL(fake_quantize_range_abs_max,
ops::FakeQuantizeRangeAbsMaxKernel<CUDA, float>);
REGISTER_OP_CUDA_KERNEL(
fake_quantize_moving_average_abs_max,
ops::FakeQuantizeMovingAverageAbsMaxKernel<CUDA, float>);
REGISTER_OP_CUDA_KERNEL(moving_average_abs_max_scale,
ops::MovingAverageAbsMaxScaleKernel<CUDA, float>);
| c25c41fa982eb155601e1d1ecb47f805e0e3dae2.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <string>
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/fake_quantize_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void FindAbsMaxKernel(const T* in, const int n, T* out) {
int bid = threadIdx.x + blockIdx.x * blockDim.x;
int tid = threadIdx.x;
extern __shared__ T shared_max_data[];
if (gridDim.x > 1) {
shared_max_data[tid] = T(0);
for (int i = bid; i < n; i += blockDim.x * gridDim.x) {
T tmp = fabs(in[i]);
if (tmp > shared_max_data[tid]) {
shared_max_data[tid] = tmp;
}
}
} else {
if (bid < n) {
shared_max_data[tid] = fabs(in[bid]);
} else {
shared_max_data[tid] = T(0);
}
}
__syncthreads();
for (int i = blockDim.x / 2; i > 0; i >>= 1) {
if (tid < i && (shared_max_data[tid] < shared_max_data[tid + i])) {
shared_max_data[tid] = shared_max_data[tid + i];
}
__syncthreads();
}
if (tid == 0) {
out[blockIdx.x] = shared_max_data[0];
}
}
template <typename T>
struct FindAbsMaxFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx, const T* in,
const int num, T* out) {
int block = 1024;
int grid = (block - 1 + num) / block;
grid = (grid > block) ? block : grid;
framework::Tensor max;
T* max_data =
max.mutable_data<T>(framework::make_ddim({grid}), ctx.GetPlace());
FindAbsMaxKernel<T><<<grid, block, 1024 * sizeof(T), ctx.stream()>>>(
in, num, max_data);
FindAbsMaxKernel<T><<<1, block, 1024 * sizeof(T), ctx.stream()>>>(
max_data, grid, out);
}
};
template struct FindAbsMaxFunctor<platform::CUDADeviceContext, float>;
template <typename T>
__global__ void FindChannelAbsMaxKernel(const T* in, const int n, const int c,
T* out) {
int tid = threadIdx.x;
int channel_size = n / c;
const T* in_c = in + blockIdx.x * channel_size;
extern __shared__ T shared_max_data[];
shared_max_data[tid] = T(0);
for (int i = tid; i < channel_size; i += blockDim.x) {
T tmp = fabs(in_c[i]);
if (tmp > shared_max_data[tid]) {
shared_max_data[tid] = tmp;
}
}
__syncthreads();
for (int i = blockDim.x / 2; i > 0; i >>= 1) {
if (tid < i && (shared_max_data[tid] < shared_max_data[tid + i])) {
shared_max_data[tid] = shared_max_data[tid + i];
}
__syncthreads();
}
if (tid == 0) {
out[blockIdx.x] = shared_max_data[0];
}
}
template <typename T>
struct FindChannelAbsMaxFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx, const T* in,
const int num, const int channel, T* out) {
int block = 1024;
int grid = channel;
FindChannelAbsMaxKernel<T><<<grid, block, 1024 * sizeof(T), ctx.stream()>>>(
in, num, channel, out);
}
};
template struct FindChannelAbsMaxFunctor<platform::CUDADeviceContext, float>;
template <typename T>
__global__ void ClipAndQuantKernel(const T* in, const T* scale,
const int bin_cnt, const int n, T* out) {
int bid = threadIdx.x + blockIdx.x * blockDim.x;
int tid = threadIdx.x;
T s = scale[0];
for (int i = bid; i < n; i += blockDim.x * gridDim.x) {
T x = in[i];
T v = x > s ? s : x;
v = v < -s ? -s : v;
v = bin_cnt / s * v;
out[i] = round(v);
}
}
template <typename T>
struct ClipAndFakeQuantFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx,
const framework::Tensor& in, const framework::Tensor& scale,
const int bin_cnt, framework::Tensor* out) {
int num = in.numel();
int block = 1024;
int grid = (block - 1 + num) / block;
const T* in_data = in.data<T>();
const T* scale_data = scale.data<T>();
T* out_data = out->mutable_data<T>(ctx.GetPlace());
ClipAndQuantKernel<T><<<grid, block, 0, ctx.stream()>>>(
in_data, scale_data, bin_cnt, num, out_data);
}
};
template struct ClipAndFakeQuantFunctor<platform::CUDADeviceContext, float>;
template <typename T>
__global__ void ChannelClipAndQuantKernel(const T* in, const T* scale,
const int bin_cnt, const int n,
const int c, T* out) {
int tid = threadIdx.x;
int channel_size = n / c;
const T* in_c = in + blockIdx.x * channel_size;
T* out_c = out + blockIdx.x * channel_size;
T s = scale[blockIdx.x];
for (int i = tid; i < channel_size; i += blockDim.x) {
T x = in_c[i];
T v = x > s ? s : x;
v = v < -s ? -s : v;
v = bin_cnt / s * v;
out_c[i] = round(v);
}
}
template <typename T>
struct ChannelClipAndFakeQuantFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx,
const framework::Tensor& in, const framework::Tensor& scale,
const int bin_cnt, const int channel,
framework::Tensor* out) {
int num = in.numel();
int block = 1024;
int grid = channel;
const T* in_data = in.data<T>();
const T* scale_data = scale.data<T>();
T* out_data = out->mutable_data<T>(ctx.GetPlace());
ChannelClipAndQuantKernel<T><<<grid, block, 0, ctx.stream()>>>(
in_data, scale_data, bin_cnt, num, channel, out_data);
}
};
template struct ChannelClipAndFakeQuantFunctor<platform::CUDADeviceContext,
float>;
template <typename T>
__global__ void FindRangeAbsMaxAndFillArray(const T* cur_scale,
const T* last_scale,
const int64_t* iter,
const int window_size, T* scale_arr,
T* out_scale, int* need_find_max,
int* out_size) {
int it = iter[0];
int idx = it % window_size;
T removed = scale_arr[idx];
T cur = cur_scale[0];
scale_arr[idx] = cur;
T max = last_scale[0];
out_scale[0] = max < cur ? cur : max;
if (fabs(removed - max) < 1e-6) {
need_find_max[0] = 1;
out_size[0] = it > window_size ? window_size : it;
} else {
need_find_max[0] = 0;
}
}
template <typename T>
struct FindRangeAbsMaxFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx,
const framework::Tensor& cur_scale,
const framework::Tensor& last_scale,
const framework::Tensor& iter, const int window_size,
framework::Tensor* scales_arr, framework::Tensor* out_scale) {
const auto gpu_place = boost::get<platform::CUDAPlace>(ctx.GetPlace());
T* scale_arr = scales_arr->mutable_data<T>(gpu_place);
T* out_scale_data = out_scale->mutable_data<T>(gpu_place);
framework::Tensor need_find_max, out_size;
int* find_max = need_find_max.mutable_data<int>(gpu_place);
int* out_size_data = out_size.mutable_data<int>(gpu_place);
FindRangeAbsMaxAndFillArray<T><<<1, 1, 0, ctx.stream()>>>(
cur_scale.data<T>(), last_scale.data<T>(), iter.data<int64_t>(),
window_size, scale_arr, out_scale_data, find_max, out_size_data);
int g_find_max;
memory::Copy(platform::CPUPlace(), &g_find_max, gpu_place, find_max,
sizeof(int), ctx.stream());
ctx.Wait();
if (g_find_max) {
int len;
memory::Copy(platform::CPUPlace(), &len, gpu_place, out_size_data,
sizeof(int), ctx.stream());
ctx.Wait();
FindAbsMaxFunctor<platform::CUDADeviceContext, T>()(ctx, scale_arr, len,
out_scale_data);
}
}
};
template struct FindRangeAbsMaxFunctor<platform::CUDADeviceContext, float>;
template <typename T>
struct FindMovingAverageAbsMaxFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx,
const framework::Tensor& in_accum,
const framework::Tensor& in_state, const T* cur_scale,
const float rate, framework::Tensor* out_state,
framework::Tensor* out_accum, framework::Tensor* out_scale) {
const auto gpu_place = boost::get<platform::CUDAPlace>(ctx.GetPlace());
T accum;
T state;
T scale;
memory::Copy(platform::CPUPlace(), &accum, gpu_place, in_accum.data<T>(),
sizeof(T), ctx.stream());
memory::Copy(platform::CPUPlace(), &state, gpu_place, in_state.data<T>(),
sizeof(T), ctx.stream());
memory::Copy(platform::CPUPlace(), &scale, gpu_place, cur_scale, sizeof(T),
ctx.stream());
ctx.Wait();
state = rate * state + 1;
accum = rate * accum + scale;
scale = accum / state;
memory::Copy(gpu_place, out_accum->mutable_data<T>(gpu_place),
platform::CPUPlace(), &accum, sizeof(T), ctx.stream());
memory::Copy(gpu_place, out_state->mutable_data<T>(gpu_place),
platform::CPUPlace(), &state, sizeof(T), ctx.stream());
memory::Copy(gpu_place, out_scale->mutable_data<T>(gpu_place),
platform::CPUPlace(), &scale, sizeof(T), ctx.stream());
ctx.Wait();
}
};
template struct FindMovingAverageAbsMaxFunctor<platform::CUDADeviceContext,
float>;
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
using CUDA = paddle::platform::CUDADeviceContext;
REGISTER_OP_CUDA_KERNEL(fake_quantize_abs_max,
ops::FakeQuantizeAbsMaxKernel<CUDA, float>);
REGISTER_OP_CUDA_KERNEL(fake_channel_wise_quantize_abs_max,
ops::FakeChannelWiseQuantizeAbsMaxKernel<CUDA, float>);
REGISTER_OP_CUDA_KERNEL(fake_quantize_range_abs_max,
ops::FakeQuantizeRangeAbsMaxKernel<CUDA, float>);
REGISTER_OP_CUDA_KERNEL(
fake_quantize_moving_average_abs_max,
ops::FakeQuantizeMovingAverageAbsMaxKernel<CUDA, float>);
REGISTER_OP_CUDA_KERNEL(moving_average_abs_max_scale,
ops::MovingAverageAbsMaxScaleKernel<CUDA, float>);
|
83cd8191cc79f6cb0ed0b3d67e08e5b424add1ac.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Test for checking the routine for finding the maximum element in an array of RNS numbers
*/
#include <stdio.h>
#include <iostream>
#include "../src/dpp/rnsmax.cuh"
#include "tsthelper.cuh"
#include "logger.cuh"
#define RNS_MAX_NUM_BLOCKS_1 32
#define RNS_MAX_BLOCK_SIZE_1 32
#define RNS_MAX_NUM_BLOCKS_2 32
#define RNS_MAX_BLOCK_SIZE_2 32
static void printResult(int * result){
mpz_t binary;
mpz_init(binary);
rns_to_binary(binary, result);
printf("\nresult: %s", mpz_get_str(NULL, 10, binary));
mpz_clear(binary);
}
void run_test(int array_size){
mpz_t * hx = new mpz_t[array_size];
mpz_t hmax;
//Host data
int * hrx = new int[array_size * RNS_MODULI_SIZE];
int hrmax[RNS_MODULI_SIZE];
//GPU data
int * drx;
int * drmax;
xinterval_t * dbuf;
//Memory allocation
for(int i = 0; i < array_size; i++){
mpz_init(hx[i]);
}
mpz_init(hmax);
hipMalloc(&drx, sizeof(int) * array_size * RNS_MODULI_SIZE);
hipMalloc(&drmax, sizeof(int) * RNS_MODULI_SIZE);
hipMalloc(&dbuf, sizeof(xinterval_t) * array_size);
checkDeviceHasErrors(hipDeviceSynchronize());
cudaCheckErrors();
//Generate inputs
fill_random_array(hx, array_size, BND_RNS_MODULI_PRODUCT, false);
//Convert to the RNS
for(int i = 0; i < array_size; i++){
rns_from_binary(&hrx[i*RNS_MODULI_SIZE], hx[i]);
}
// Copying to the GPU
hipMemcpy(drx, hrx, sizeof(int) * array_size * RNS_MODULI_SIZE, hipMemcpyHostToDevice);
checkDeviceHasErrors(hipDeviceSynchronize());
cudaCheckErrors();
//Compute exact result
//---------------------------------------------------------
Logger::printDash();
mpz_set_ui(hmax, 0);
for(int i = 0; i < array_size; i++){
if(mpz_cmp(hx[i], hmax) > 0){
mpz_set(hmax, hx[i]);
}
}
printf("[GNU MP] max: \nresult: %s", mpz_get_str(NULL, 10, hmax));
Logger::printSpace();
//---------------------------------------------------------
Logger::printDash();
printf("[CUDA] rns_max:");
cuda::rns_max<
RNS_MAX_NUM_BLOCKS_1,
RNS_MAX_BLOCK_SIZE_1,
RNS_MAX_NUM_BLOCKS_2,
RNS_MAX_BLOCK_SIZE_2>(drmax, drx, array_size, dbuf);
hipMemcpy(hrmax, drmax, sizeof(int) * RNS_MODULI_SIZE, hipMemcpyDeviceToHost);
printResult(hrmax);
Logger::printSpace();
checkDeviceHasErrors(hipDeviceSynchronize());
cudaCheckErrors();
//---------------------------------------------------------
Logger::printSpace();
//Cleanup
for(int i = 0; i < array_size; i++){
mpz_clear(hx[i]);
}
mpz_clear(hmax);
delete [] hx;
delete [] hrx;
hipFree(drx);
hipFree(drmax);
hipFree(dbuf);
}
int main() {
rns_const_init();
Logger::beginTestDescription(Logger::TEST_VERIFY_RNSMAX);
rns_const_print(true);
Logger::printDash();
rns_eval_const_print();
Logger::endSection(true);
Logger::printSpace();
//Launch
run_test(10000);
//End logging
Logger::printSpace();
Logger::endTestDescription();
return 1;
} | 83cd8191cc79f6cb0ed0b3d67e08e5b424add1ac.cu | /*
* Test for checking the routine for finding the maximum element in an array of RNS numbers
*/
#include <stdio.h>
#include <iostream>
#include "../src/dpp/rnsmax.cuh"
#include "tsthelper.cuh"
#include "logger.cuh"
#define RNS_MAX_NUM_BLOCKS_1 32
#define RNS_MAX_BLOCK_SIZE_1 32
#define RNS_MAX_NUM_BLOCKS_2 32
#define RNS_MAX_BLOCK_SIZE_2 32
static void printResult(int * result){
mpz_t binary;
mpz_init(binary);
rns_to_binary(binary, result);
printf("\nresult: %s", mpz_get_str(NULL, 10, binary));
mpz_clear(binary);
}
void run_test(int array_size){
mpz_t * hx = new mpz_t[array_size];
mpz_t hmax;
//Host data
int * hrx = new int[array_size * RNS_MODULI_SIZE];
int hrmax[RNS_MODULI_SIZE];
//GPU data
int * drx;
int * drmax;
xinterval_t * dbuf;
//Memory allocation
for(int i = 0; i < array_size; i++){
mpz_init(hx[i]);
}
mpz_init(hmax);
cudaMalloc(&drx, sizeof(int) * array_size * RNS_MODULI_SIZE);
cudaMalloc(&drmax, sizeof(int) * RNS_MODULI_SIZE);
cudaMalloc(&dbuf, sizeof(xinterval_t) * array_size);
checkDeviceHasErrors(cudaDeviceSynchronize());
cudaCheckErrors();
//Generate inputs
fill_random_array(hx, array_size, BND_RNS_MODULI_PRODUCT, false);
//Convert to the RNS
for(int i = 0; i < array_size; i++){
rns_from_binary(&hrx[i*RNS_MODULI_SIZE], hx[i]);
}
// Copying to the GPU
cudaMemcpy(drx, hrx, sizeof(int) * array_size * RNS_MODULI_SIZE, cudaMemcpyHostToDevice);
checkDeviceHasErrors(cudaDeviceSynchronize());
cudaCheckErrors();
//Compute exact result
//---------------------------------------------------------
Logger::printDash();
mpz_set_ui(hmax, 0);
for(int i = 0; i < array_size; i++){
if(mpz_cmp(hx[i], hmax) > 0){
mpz_set(hmax, hx[i]);
}
}
printf("[GNU MP] max: \nresult: %s", mpz_get_str(NULL, 10, hmax));
Logger::printSpace();
//---------------------------------------------------------
Logger::printDash();
printf("[CUDA] rns_max:");
cuda::rns_max<
RNS_MAX_NUM_BLOCKS_1,
RNS_MAX_BLOCK_SIZE_1,
RNS_MAX_NUM_BLOCKS_2,
RNS_MAX_BLOCK_SIZE_2>(drmax, drx, array_size, dbuf);
cudaMemcpy(hrmax, drmax, sizeof(int) * RNS_MODULI_SIZE, cudaMemcpyDeviceToHost);
printResult(hrmax);
Logger::printSpace();
checkDeviceHasErrors(cudaDeviceSynchronize());
cudaCheckErrors();
//---------------------------------------------------------
Logger::printSpace();
//Cleanup
for(int i = 0; i < array_size; i++){
mpz_clear(hx[i]);
}
mpz_clear(hmax);
delete [] hx;
delete [] hrx;
cudaFree(drx);
cudaFree(drmax);
cudaFree(dbuf);
}
int main() {
rns_const_init();
Logger::beginTestDescription(Logger::TEST_VERIFY_RNSMAX);
rns_const_print(true);
Logger::printDash();
rns_eval_const_print();
Logger::endSection(true);
Logger::printSpace();
//Launch
run_test(10000);
//End logging
Logger::printSpace();
Logger::endTestDescription();
return 1;
} |
ee66aa6ef03ae2e446dbd06c41deebf01c1ff1d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kdtree.h"
#include "bits/stdc++.h"
#include <device_launch_parameters.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#define BLK_SZ 256
using namespace thrust;
__global__ void copy_dim(float *X, int *idArr, float *auxArr, int *starts, int n, int d, int dim)
{
for(int tid=blockIdx.x*BLK_SZ + threadIdx.x; tid<n; tid+=BLK_SZ*gridDim.x)
{
int start=starts[tid], idx=idArr[tid];
if(tid<start) continue;
auxArr[tid] = X[idx*d + dim];
}
}
__global__ static void make_segments(int *segments, int *starts, int *ends, int n)
{
for(int tid=blockIdx.x*BLK_SZ + threadIdx.x; tid<n; tid+=BLK_SZ*gridDim.x)
{
int start=starts[tid], end=ends[tid];
segments[tid] = (tid<=start) ? 1 : 0;
}
}
__global__ static void update_arrays(int *starts, int *ends, int n)
{
for(int tid=blockIdx.x*BLK_SZ + threadIdx.x; tid<n; tid+=BLK_SZ*gridDim.x)
{
int start=starts[tid], end=ends[tid];
starts[tid] = (tid<(start+end)/2) ? start : (start+end)/2 + 1;
ends [tid] = (tid<(start+end)/2) ? (start+end)/2 - 1 : end;
}
}
void recursiveBuildTree(kdtree *node, float *X, int n, int d, float *mdArr, int *idArr, int start, int end, int depth)
{
float(*dataArr)[d] = (float(*)[d])X;
node->axis = depth%d;
if(start==end)
{
node->idx = idArr[start];
node->p = dataArr[ idArr[start] ];
node->mc = 0.0;
node->left = node->right = NULL;
return;
}
node->idx = idArr[(start+end)/2];
node->p = dataArr[ idArr[(start+end)/2] ];
node->mc = mdArr[(start+end)/2];
node->right = (kdtree *)malloc(sizeof(kdtree));
// Recursion
recursiveBuildTree(node->right, X, n, d, mdArr, idArr, (start+end)/2+1, end, depth+1);
if(start<(start+end)/2)
{
node->left = (kdtree *)malloc(sizeof(kdtree));
recursiveBuildTree(node->left, X, n, d, mdArr, idArr, start, (start+end)/2 - 1, depth+1);
}
else node->left = NULL;
}
kdtree *buildkd(float *X, int n, int d)
{
int *idArr = (int *)malloc(n*sizeof(int));
float *mdArr = (float *)malloc(n*sizeof(int));
// Allocate Memory on Device
device_vector<float> dev_auxArr(n), dev_X(n*d);
device_vector<int> dev_idArr(n), dev_segments(n), dev_starts(n), dev_ends(n);
// Initialize Device Variables
copy(X, X+n*d, dev_X.begin());
sequence(dev_idArr.begin(), dev_idArr.end());
fill(dev_starts.begin(), dev_starts.end(), 0);
fill(dev_ends.begin(), dev_ends.end(), n-1);
// Build tree in GPU (level by level in parallel)
int numSMs;
hipDeviceGetAttribute(&numSMs, hipDeviceAttributeMultiprocessorCount, 0);
for(int i=0; i<floor(log2(n)); i++)
{
// Parallel Copy of Dim
hipLaunchKernelGGL(( copy_dim), dim3(32*numSMs), dim3(BLK_SZ), 0, 0, raw_pointer_cast(&dev_X[0]), raw_pointer_cast(&dev_idArr[0]), raw_pointer_cast(&dev_auxArr[0]), raw_pointer_cast(&dev_starts[0]), n, d, i%d );
hipLaunchKernelGGL(( make_segments), dim3(32*numSMs), dim3(BLK_SZ), 0, 0, raw_pointer_cast(&dev_segments[0]), raw_pointer_cast(&dev_starts[0]), raw_pointer_cast(&dev_ends[0]), n);
// Parallel Sorting of segments [start, end]
inclusive_scan(dev_segments.begin(), dev_segments.end(), dev_segments.begin());
stable_sort_by_key(dev_auxArr.begin(), dev_auxArr.end(), make_zip_iterator(make_tuple(dev_idArr.begin(), dev_segments.begin())));
stable_sort_by_key(dev_segments.begin(), dev_segments.end(), make_zip_iterator(make_tuple(dev_auxArr.begin(), dev_idArr.begin())));
// Update Arrays that show for each array position in which segment [start, end] it belongs to
hipLaunchKernelGGL(( update_arrays), dim3(32*numSMs), dim3(BLK_SZ), 0, 0, raw_pointer_cast(&dev_starts[0]), raw_pointer_cast(&dev_ends[0]), n);
}
// Copy the result back to host
copy(dev_auxArr.begin(), dev_auxArr.end(), mdArr);
copy(dev_idArr.begin(), dev_idArr.end(), idArr);
// Tree build
kdtree *root = (kdtree *)malloc(sizeof(kdtree));
recursiveBuildTree(root, X, n, d, mdArr, idArr, 0, n-1, 0);
// Clean-up
free(idArr); free(mdArr);
return root;
}
float* getPoint(kdtree *node) {return node->p;}
float getMC(kdtree *node) {return node->mc;}
int getIdx(kdtree *node) {return node->idx;}
int getAxis(kdtree *node) {return node->axis;}
kdtree *getLeft(kdtree *node) {return node->left;}
kdtree *getRight(kdtree *node) {return node->right;} | ee66aa6ef03ae2e446dbd06c41deebf01c1ff1d3.cu | #include "kdtree.h"
#include "bits/stdc++.h"
#include <device_launch_parameters.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#define BLK_SZ 256
using namespace thrust;
__global__ void copy_dim(float *X, int *idArr, float *auxArr, int *starts, int n, int d, int dim)
{
for(int tid=blockIdx.x*BLK_SZ + threadIdx.x; tid<n; tid+=BLK_SZ*gridDim.x)
{
int start=starts[tid], idx=idArr[tid];
if(tid<start) continue;
auxArr[tid] = X[idx*d + dim];
}
}
__global__ static void make_segments(int *segments, int *starts, int *ends, int n)
{
for(int tid=blockIdx.x*BLK_SZ + threadIdx.x; tid<n; tid+=BLK_SZ*gridDim.x)
{
int start=starts[tid], end=ends[tid];
segments[tid] = (tid<=start) ? 1 : 0;
}
}
__global__ static void update_arrays(int *starts, int *ends, int n)
{
for(int tid=blockIdx.x*BLK_SZ + threadIdx.x; tid<n; tid+=BLK_SZ*gridDim.x)
{
int start=starts[tid], end=ends[tid];
starts[tid] = (tid<(start+end)/2) ? start : (start+end)/2 + 1;
ends [tid] = (tid<(start+end)/2) ? (start+end)/2 - 1 : end;
}
}
void recursiveBuildTree(kdtree *node, float *X, int n, int d, float *mdArr, int *idArr, int start, int end, int depth)
{
float(*dataArr)[d] = (float(*)[d])X;
node->axis = depth%d;
if(start==end)
{
node->idx = idArr[start];
node->p = dataArr[ idArr[start] ];
node->mc = 0.0;
node->left = node->right = NULL;
return;
}
node->idx = idArr[(start+end)/2];
node->p = dataArr[ idArr[(start+end)/2] ];
node->mc = mdArr[(start+end)/2];
node->right = (kdtree *)malloc(sizeof(kdtree));
// Recursion
recursiveBuildTree(node->right, X, n, d, mdArr, idArr, (start+end)/2+1, end, depth+1);
if(start<(start+end)/2)
{
node->left = (kdtree *)malloc(sizeof(kdtree));
recursiveBuildTree(node->left, X, n, d, mdArr, idArr, start, (start+end)/2 - 1, depth+1);
}
else node->left = NULL;
}
kdtree *buildkd(float *X, int n, int d)
{
int *idArr = (int *)malloc(n*sizeof(int));
float *mdArr = (float *)malloc(n*sizeof(int));
// Allocate Memory on Device
device_vector<float> dev_auxArr(n), dev_X(n*d);
device_vector<int> dev_idArr(n), dev_segments(n), dev_starts(n), dev_ends(n);
// Initialize Device Variables
copy(X, X+n*d, dev_X.begin());
sequence(dev_idArr.begin(), dev_idArr.end());
fill(dev_starts.begin(), dev_starts.end(), 0);
fill(dev_ends.begin(), dev_ends.end(), n-1);
// Build tree in GPU (level by level in parallel)
int numSMs;
cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, 0);
for(int i=0; i<floor(log2(n)); i++)
{
// Parallel Copy of Dim
copy_dim<<<32*numSMs, BLK_SZ>>>(raw_pointer_cast(&dev_X[0]), raw_pointer_cast(&dev_idArr[0]), raw_pointer_cast(&dev_auxArr[0]), raw_pointer_cast(&dev_starts[0]), n, d, i%d );
make_segments<<<32*numSMs, BLK_SZ>>>(raw_pointer_cast(&dev_segments[0]), raw_pointer_cast(&dev_starts[0]), raw_pointer_cast(&dev_ends[0]), n);
// Parallel Sorting of segments [start, end]
inclusive_scan(dev_segments.begin(), dev_segments.end(), dev_segments.begin());
stable_sort_by_key(dev_auxArr.begin(), dev_auxArr.end(), make_zip_iterator(make_tuple(dev_idArr.begin(), dev_segments.begin())));
stable_sort_by_key(dev_segments.begin(), dev_segments.end(), make_zip_iterator(make_tuple(dev_auxArr.begin(), dev_idArr.begin())));
// Update Arrays that show for each array position in which segment [start, end] it belongs to
update_arrays<<<32*numSMs, BLK_SZ>>>(raw_pointer_cast(&dev_starts[0]), raw_pointer_cast(&dev_ends[0]), n);
}
// Copy the result back to host
copy(dev_auxArr.begin(), dev_auxArr.end(), mdArr);
copy(dev_idArr.begin(), dev_idArr.end(), idArr);
// Tree build
kdtree *root = (kdtree *)malloc(sizeof(kdtree));
recursiveBuildTree(root, X, n, d, mdArr, idArr, 0, n-1, 0);
// Clean-up
free(idArr); free(mdArr);
return root;
}
float* getPoint(kdtree *node) {return node->p;}
float getMC(kdtree *node) {return node->mc;}
int getIdx(kdtree *node) {return node->idx;}
int getAxis(kdtree *node) {return node->axis;}
kdtree *getLeft(kdtree *node) {return node->left;}
kdtree *getRight(kdtree *node) {return node->right;} |
c0954c19c1b76c19b9dab7308dcf652afeeb5d28.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include <THH/THHApply.cuh>
#include "common.h"
template <typename Dtype, typename Acctype>
struct gatedLinearCSigMul_functor
{
__device__ void operator()(Dtype *target, const Dtype *sigTensor, const Dtype *mulTensor) const
{
const Acctype sigNum = Acctype(1)/(Acctype(1)+ exp(ScalarConvert<Dtype, Acctype>::to(-*sigTensor)));
const Dtype mulNum = *mulTensor;
*target = ScalarConvert<Acctype, Dtype>::to(sigNum * mulNum);
}
};
template<typename Dtype, typename Acctype>
struct gatedLinearDerivative
{
const int64_t stride_i_;
const int64_t stride_gI_;
gatedLinearDerivative(int64_t stride_i, int64_t stride_gI)
:stride_i_(stride_i), stride_gI_(stride_gI){}
__device__ void operator()(Dtype * gI, const Dtype * gO, const Dtype * input) const
{
const Dtype * sigTensor = input + stride_i_;
const Acctype sigNum = Acctype(1)/(Acctype(1)+ exp(ScalarConvert<Dtype, Acctype>::to(-*sigTensor)));
*gI = ScalarConvert<Acctype, Dtype>::to(sigNum * *gO);
Dtype * gIsecond = gI + stride_gI_;
*gIsecond = ScalarConvert<Acctype, Dtype>::to((Acctype(1) - sigNum) * sigNum * *gO * *input);
}
};
#include "generic/GatedLinearUnit.cu"
#include "THHGenerateFloatTypes.h"
| c0954c19c1b76c19b9dab7308dcf652afeeb5d28.cu | #include "THCUNN.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include <THC/THCApply.cuh>
#include "common.h"
template <typename Dtype, typename Acctype>
struct gatedLinearCSigMul_functor
{
__device__ void operator()(Dtype *target, const Dtype *sigTensor, const Dtype *mulTensor) const
{
const Acctype sigNum = Acctype(1)/(Acctype(1)+ exp(ScalarConvert<Dtype, Acctype>::to(-*sigTensor)));
const Dtype mulNum = *mulTensor;
*target = ScalarConvert<Acctype, Dtype>::to(sigNum * mulNum);
}
};
template<typename Dtype, typename Acctype>
struct gatedLinearDerivative
{
const int64_t stride_i_;
const int64_t stride_gI_;
gatedLinearDerivative(int64_t stride_i, int64_t stride_gI)
:stride_i_(stride_i), stride_gI_(stride_gI){}
__device__ void operator()(Dtype * gI, const Dtype * gO, const Dtype * input) const
{
const Dtype * sigTensor = input + stride_i_;
const Acctype sigNum = Acctype(1)/(Acctype(1)+ exp(ScalarConvert<Dtype, Acctype>::to(-*sigTensor)));
*gI = ScalarConvert<Acctype, Dtype>::to(sigNum * *gO);
Dtype * gIsecond = gI + stride_gI_;
*gIsecond = ScalarConvert<Acctype, Dtype>::to((Acctype(1) - sigNum) * sigNum * *gO * *input);
}
};
#include "generic/GatedLinearUnit.cu"
#include "THCGenerateFloatTypes.h"
|
d5c4e4ce3b619bd7642cfe127eaf86efefbf94b4.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017 XGBoost contributors
*/
#include <thrust/device_vector.h>
#include <xgboost/base.h>
#include "../../../src/common/device_helpers.cuh"
#include "../helpers.h"
#include "gtest/gtest.h"
using xgboost::common::Span;
void CreateTestData(xgboost::bst_uint num_rows, int max_row_size,
thrust::host_vector<int> *row_ptr,
thrust::host_vector<xgboost::bst_uint> *rows) {
row_ptr->resize(num_rows + 1);
int sum = 0;
for (xgboost::bst_uint i = 0; i <= num_rows; i++) {
(*row_ptr)[i] = sum;
sum += rand() % max_row_size; // NOLINT
if (i < num_rows) {
for (int j = (*row_ptr)[i]; j < sum; j++) {
(*rows).push_back(i);
}
}
}
}
void TestLbs() {
srand(17);
dh::CubMemory temp_memory;
std::vector<int> test_rows = {4, 100, 1000};
std::vector<int> test_max_row_sizes = {4, 100, 1300};
for (auto num_rows : test_rows) {
for (auto max_row_size : test_max_row_sizes) {
thrust::host_vector<int> h_row_ptr;
thrust::host_vector<xgboost::bst_uint> h_rows;
CreateTestData(num_rows, max_row_size, &h_row_ptr, &h_rows);
thrust::device_vector<size_t> row_ptr = h_row_ptr;
thrust::device_vector<int> output_row(h_rows.size());
auto d_output_row = output_row.data();
dh::TransformLbs(0, &temp_memory, h_rows.size(), dh::Raw(row_ptr),
row_ptr.size() - 1, false,
[=] __device__(size_t idx, size_t ridx) {
d_output_row[idx] = ridx;
});
dh::safe_cuda(hipDeviceSynchronize());
ASSERT_TRUE(h_rows == output_row);
}
}
}
TEST(cub_lbs, Test) {
TestLbs();
}
TEST(sumReduce, Test) {
thrust::device_vector<float> data(100, 1.0f);
dh::CubMemory temp;
auto sum = dh::SumReduction(temp, dh::Raw(data), data.size());
ASSERT_NEAR(sum, 100.0f, 1e-5);
}
void TestAllocator() {
int n = 10;
Span<float> a;
Span<int> b;
Span<size_t> c;
dh::BulkAllocator ba;
ba.Allocate(0, &a, n, &b, n, &c, n);
// Should be no illegal memory accesses
dh::LaunchN(0, n, [=] __device__(size_t idx) { c[idx] = a[idx] + b[idx]; });
dh::safe_cuda(hipDeviceSynchronize());
}
// Define the test in a function so we can use device lambda
TEST(bulkAllocator, Test) {
TestAllocator();
}
| d5c4e4ce3b619bd7642cfe127eaf86efefbf94b4.cu |
/*!
* Copyright 2017 XGBoost contributors
*/
#include <thrust/device_vector.h>
#include <xgboost/base.h>
#include "../../../src/common/device_helpers.cuh"
#include "../helpers.h"
#include "gtest/gtest.h"
using xgboost::common::Span;
void CreateTestData(xgboost::bst_uint num_rows, int max_row_size,
thrust::host_vector<int> *row_ptr,
thrust::host_vector<xgboost::bst_uint> *rows) {
row_ptr->resize(num_rows + 1);
int sum = 0;
for (xgboost::bst_uint i = 0; i <= num_rows; i++) {
(*row_ptr)[i] = sum;
sum += rand() % max_row_size; // NOLINT
if (i < num_rows) {
for (int j = (*row_ptr)[i]; j < sum; j++) {
(*rows).push_back(i);
}
}
}
}
void TestLbs() {
srand(17);
dh::CubMemory temp_memory;
std::vector<int> test_rows = {4, 100, 1000};
std::vector<int> test_max_row_sizes = {4, 100, 1300};
for (auto num_rows : test_rows) {
for (auto max_row_size : test_max_row_sizes) {
thrust::host_vector<int> h_row_ptr;
thrust::host_vector<xgboost::bst_uint> h_rows;
CreateTestData(num_rows, max_row_size, &h_row_ptr, &h_rows);
thrust::device_vector<size_t> row_ptr = h_row_ptr;
thrust::device_vector<int> output_row(h_rows.size());
auto d_output_row = output_row.data();
dh::TransformLbs(0, &temp_memory, h_rows.size(), dh::Raw(row_ptr),
row_ptr.size() - 1, false,
[=] __device__(size_t idx, size_t ridx) {
d_output_row[idx] = ridx;
});
dh::safe_cuda(cudaDeviceSynchronize());
ASSERT_TRUE(h_rows == output_row);
}
}
}
TEST(cub_lbs, Test) {
TestLbs();
}
TEST(sumReduce, Test) {
thrust::device_vector<float> data(100, 1.0f);
dh::CubMemory temp;
auto sum = dh::SumReduction(temp, dh::Raw(data), data.size());
ASSERT_NEAR(sum, 100.0f, 1e-5);
}
void TestAllocator() {
int n = 10;
Span<float> a;
Span<int> b;
Span<size_t> c;
dh::BulkAllocator ba;
ba.Allocate(0, &a, n, &b, n, &c, n);
// Should be no illegal memory accesses
dh::LaunchN(0, n, [=] __device__(size_t idx) { c[idx] = a[idx] + b[idx]; });
dh::safe_cuda(cudaDeviceSynchronize());
}
// Define the test in a function so we can use device lambda
TEST(bulkAllocator, Test) {
TestAllocator();
}
|
5e4612149676da50afa59afc32edfa4b8f5e8954.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
__global__ void vecAddKernel(float* A, float* B, float* C, int n) {
// Calculate global thread index based on the block and thread indices ----
//INSERT KERNEL CODE HERE
int i = blockDim.x*blockIdx.x+threadIdx.x;
// Use global index to determine which elements to read, add, and write ---
//INSERT KERNEL CODE HERE
if (i<n) C[i] = A[i] + B[i];
}
| 5e4612149676da50afa59afc32edfa4b8f5e8954.cu | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
__global__ void vecAddKernel(float* A, float* B, float* C, int n) {
  // C = A + B element-wise; the launch maps one thread to one element.
  int gid = threadIdx.x + blockIdx.x * blockDim.x;
  // Bounds check: the grid is usually rounded up past n.
  if (gid < n) {
    C[gid] = A[gid] + B[gid];
  }
}
|
776b43ca53715d846e5949cae60f64b991e1cafd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <launch_kernel.cuh>
#include <comm_quda.h>
#include <pgauge_monte.h>
#include <gauge_tools.h>
#include <random_quda.h>
#include <index_helper.cuh>
#include <atomic.cuh>
#include <hipcub/hipcub.hpp>
#ifndef PI
#define PI 3.1415926535897932384626433832795 // pi
#endif
#ifndef PII
#define PII 6.2831853071795864769252867665590 // 2 * pi
#endif
namespace quda {
#ifdef GPU_GAUGE_ALG
/**
@brief Calculate the SU(2) index block in the SU(Nc) matrix
@param block number to calculate the index's, the total number of blocks is NCOLORS * ( NCOLORS - 1) / 2.
@return Returns two index's in int2 type, accessed by .x and .y.
*/
template<int NCOLORS>
__host__ __device__ static inline int2 IndexBlock(int block){
  // Pairs (p, p + d) are enumerated with the index distance d as the outer
  // loop and the first index p as the inner loop; walk that ordering until
  // the running counter reaches the requested block number.
  int2 id;
  int first = 0;   // first row/column index of the SU(2) block
  int dist = 0;    // distance between the two indices
  int counter = -1;
  bool located = false;
  while ( dist < (NCOLORS - 1) && !located ) {
    dist++;
    for ( first = 0; first < (NCOLORS - dist); first++ ) {
      counter++;
      if ( counter == block ) {
        located = true;
        break;
      }
    }
  }
  id.x = first;
  id.y = first + dist;
  return id;
}
/**
@brief Calculate the SU(2) index block in the SU(Nc) matrix
@param block number to calculate de index's, the total number of blocks is NCOLORS * ( NCOLORS - 1) / 2.
@param p store the first index
@param q store the second index
*/
template<int NCOLORS>
__host__ __device__ static inline void IndexBlock(int block, int &p, int &q){
  if ( NCOLORS == 3 ) {
    // SU(3): only three blocks, resolved from a hard-coded table.
    switch ( block ) {
    case 0: p = 0; q = 1; break;
    case 1: p = 1; q = 2; break;
    default: p = 0; q = 2; break;
    }
  }
  else if ( NCOLORS > 3 ) {
    // General SU(Nc): enumerate pairs (p, p + d), distance-major, and stop
    // when the running counter hits the requested block number.
    int first = 0;
    int dist = 0;
    int counter = -1;
    bool located = false;
    while ( dist < (NCOLORS - 1) && !located ) {
      dist++;
      for ( first = 0; first < (NCOLORS - dist); first++ ) {
        counter++;
        if ( counter == block ) {
          located = true;
          break;
        }
      }
    }
    p = first;
    q = first + dist;
  }
}
/**
@brief Generate full SU(2) matrix (four real numbers instead of 2x2 complex matrix) and update link matrix.
Get from MILC code.
@param al weight
@param localstate CURAND rng state
*/
template <class T>
__device__ static inline Matrix<T,2> generate_su2_matrix_milc(T al, cuRNGState& localState){
// NOTE(review): every Random<T>() call advances the shared RNG state, so the
// order and number of draws below must not be reordered.
T xr1, xr2, xr3, xr4, d, r;
int k;
// First trial sample: two log-uniform draws, one angle, one accept test.
xr1 = Random<T>(localState);
xr1 = (log((xr1 + 1.e-10))); // small offset guards against log(0)
xr2 = Random<T>(localState);
xr2 = (log((xr2 + 1.e-10)));
xr3 = Random<T>(localState);
xr4 = Random<T>(localState);
xr3 = cos(PII * xr3);
d = -(xr2 + xr1 * xr3 * xr3 ) / al;
//now beat each site into submission
int nacd = 0; // accept flag for the trial above
if ((1.00 - 0.5 * d) > xr4 * xr4 ) nacd = 1;
if ( nacd == 0 && al > 2.0 ) { //k-p algorithm
// Kennedy-Pendleton-style rejection loop, capped at 20 retries; if every
// retry is rejected, the last trial d is used as-is.
for ( k = 0; k < 20; k++ ) {
//get four random numbers (add a small increment to prevent taking log(0.)
xr1 = Random<T>(localState);
xr1 = (log((xr1 + 1.e-10)));
xr2 = Random<T>(localState);
xr2 = (log((xr2 + 1.e-10)));
xr3 = Random<T>(localState);
xr4 = Random<T>(localState);
xr3 = cos(PII * xr3);
d = -(xr2 + xr1 * xr3 * xr3) / al;
if ((1.00 - 0.5 * d) > xr4 * xr4 ) break;
}
} //endif nacd
Matrix<T,2> a;
if ( nacd == 0 && al <= 2.0 ) { //creutz algorithm
// Creutz rejection loop used for small weights, also capped at 20 retries.
xr3 = exp(-2.0 * al);
xr4 = 1.0 - xr3;
for ( k = 0; k < 20; k++ ) {
//get two random numbers
xr1 = Random<T>(localState);
xr2 = Random<T>(localState);
r = xr3 + xr4 * xr1;
a(0,0) = 1.00 + log(r) / al;
if ((1.0 - a(0,0) * a(0,0)) > xr2 * xr2 ) break;
}
d = 1.0 - a(0,0);
} //endif nacd
//generate the four su(2) elements
//find a0 = 1 - d
a(0,0) = 1.0 - d;
//compute r
xr3 = 1.0 - a(0,0) * a(0,0);
xr3 = abs(xr3); // abs guards against tiny negative round-off before sqrt
r = sqrt(xr3);
//compute a3
a(1,1) = (2.0 * Random<T>(localState) - 1.0) * r;
//compute a1 and a2
xr1 = xr3 - a(1,1) * a(1,1);
xr1 = abs(xr1);
xr1 = sqrt(xr1);
//xr2 is a random number between 0 and 2*pi
xr2 = PII * Random<T>(localState);
a(0,1) = xr1 * cos(xr2);
a(1,0) = xr1 * sin(xr2);
return a;
}
/**
@brief Return SU(2) subgroup (4 real numbers) from SU(3) matrix
@param tmp1 input SU(3) matrix
@param block to retrieve from 0 to 2.
@return 4 real numbers
*/
template < class T>
__host__ __device__ static inline Matrix<T,2> get_block_su2( Matrix<typename ComplexTypeId<T>::Type,3> tmp1, int block ){
  // Extract the SU(2) subgroup of an SU(3) matrix as four real numbers.
  // Each block id maps to a pair of rows/columns (p, q); the four reals are
  // then built from the real/imaginary parts of that 2x2 complex sub-block.
  Matrix<T,2> r;
  int p, q;
  switch ( block ) {
  case 0: p = 0; q = 1; break;
  case 1: p = 1; q = 2; break;
  case 2: p = 0; q = 2; break;
  default: return r; // invalid block: result left untouched, as before
  }
  r(0,0) = tmp1(p,p).x + tmp1(q,q).x;
  r(0,1) = tmp1(p,q).y + tmp1(q,p).y;
  r(1,0) = tmp1(p,q).x - tmp1(q,p).x;
  r(1,1) = tmp1(p,p).y - tmp1(q,q).y;
  return r;
}
/**
@brief Return SU(2) subgroup (4 real numbers) from SU(Nc) matrix
@param tmp1 input SU(Nc) matrix
@param id the two indices to retrieve SU(2) block
@return 4 real numbers
*/
template <class T, int NCOLORS>
__host__ __device__ static inline Matrix<T,2> get_block_su2( Matrix<typename ComplexTypeId<T>::Type,NCOLORS> tmp1, int2 id ){
  // Four-real SU(2) projection of the (id.x, id.y) sub-block of an SU(Nc)
  // matrix, built from the real/imaginary parts of the 2x2 complex block.
  const int p = id.x;
  const int q = id.y;
  Matrix<T,2> r;
  r(0,0) = tmp1(p,p).x + tmp1(q,q).x;
  r(0,1) = tmp1(p,q).y + tmp1(q,p).y;
  r(1,0) = tmp1(p,q).x - tmp1(q,p).x;
  r(1,1) = tmp1(p,p).y - tmp1(q,q).y;
  return r;
}
/**
@brief Create a SU(Nc) identity matrix and fills with the SU(2) block
@param rr SU(2) matrix represented only by four real numbers
@param id the two indices to fill in the SU(3) matrix
@return SU(Nc) matrix
*/
template <class T, int NCOLORS>
__host__ __device__ static inline Matrix<typename ComplexTypeId<T>::Type,NCOLORS> block_su2_to_sun( Matrix<T,2> rr, int2 id ){
  // Start from the SU(Nc) identity and embed the 2x2 complex form of the
  // four-real SU(2) matrix rr at rows/columns (id.x, id.y).
  Matrix<typename ComplexTypeId<T>::Type,NCOLORS> out;
  setIdentity(&out);
  const int p = id.x;
  const int q = id.y;
  out(p,p) = makeComplex( rr(0,0), rr(1,1) );
  out(p,q) = makeComplex( rr(1,0), rr(0,1) );
  out(q,p) = makeComplex(-rr(1,0), rr(0,1) );
  out(q,q) = makeComplex( rr(0,0),-rr(1,1) );
  return out;
}
/**
@brief Update the SU(Nc) link with the new SU(2) matrix, link <- u * link
@param u SU(2) matrix represented by four real numbers
@param link SU(Nc) matrix
@param id indices
*/
template <class T, int NCOLORS>
__host__ __device__ static inline void mul_block_sun( Matrix<T,2> u, Matrix<typename ComplexTypeId<T>::Type,NCOLORS> &link, int2 id ){
  // Left-multiply rows (id.x, id.y) of link by the SU(2) matrix u.  The 2x2
  // complex coefficients are loop invariant, so build them once up front.
  typedef typename ComplexTypeId<T>::Type Cmplx;
  const Cmplx m00 = makeComplex( u(0,0), u(1,1) );
  const Cmplx m01 = makeComplex( u(1,0), u(0,1) );
  const Cmplx m10 = makeComplex(-u(1,0), u(0,1) );
  const Cmplx m11 = makeComplex( u(0,0),-u(1,1) );
  for ( int j = 0; j < NCOLORS; j++ ) {
    Cmplx upper = m00 * link(id.x, j) + m01 * link(id.y, j);
    link(id.y, j) = m10 * link(id.x, j) + m11 * link(id.y, j);
    link(id.x, j) = upper;
  }
}
/**
@brief Update the SU(3) link with the new SU(2) matrix, link <- u * link
@param U SU(3) matrix
@param a00 element (0,0) of the SU(2) matrix
@param a01 element (0,1) of the SU(2) matrix
@param a10 element (1,0) of the SU(2) matrix
@param a11 element (1,1) of the SU(2) matrix
@param block of the SU(3) matrix, 0,1 or 2
*/
template <class Cmplx>
__host__ __device__ static inline void block_su2_to_su3( Matrix<Cmplx,3> &U, Cmplx a00, Cmplx a01, Cmplx a10, Cmplx a11, int block ){
  // Map the block id to the pair of rows the 2x2 matrix acts on, then
  // left-multiply those two rows of U column by column.
  int r0, r1;
  switch ( block ) {
  case 0: r0 = 0; r1 = 1; break;
  case 1: r0 = 1; r1 = 2; break;
  case 2: r0 = 0; r1 = 2; break;
  default: return; // unknown block: leave U untouched, as the original did
  }
  for ( int j = 0; j < 3; j++ ) {
    Cmplx upper = a00 * U(r0,j) + a01 * U(r1,j);
    U(r1,j) = a10 * U(r0,j) + a11 * U(r1,j);
    U(r0,j) = upper;
  }
}
// v * u^dagger
template <class Float>
__host__ __device__ static inline Matrix<Float,2> mulsu2UVDagger(Matrix<Float,2> v, Matrix<Float,2> u){
  // Product v * u^dagger of two SU(2) matrices held in the four-real
  // representation; load the components into scalars once for readability.
  const Float v0 = v(0,0), v1 = v(0,1), v2 = v(1,0), v3 = v(1,1);
  const Float u0 = u(0,0), u1 = u(0,1), u2 = u(1,0), u3 = u(1,1);
  Matrix<Float,2> b;
  b(0,0) = v0 * u0 + v1 * u1 + v2 * u2 + v3 * u3;
  b(0,1) = v1 * u0 - v0 * u1 + v2 * u3 - v3 * u2;
  b(1,0) = v2 * u0 - v0 * u2 + v3 * u1 - v1 * u3;
  b(1,1) = v3 * u0 - v0 * u3 + v1 * u2 - v2 * u1;
  return b;
}
/**
@brief Link update by pseudo-heatbath
@param U link to be updated
@param F staple
@param localstate CURAND rng state
*/
template <class Float, int NCOLORS>
__device__ inline void heatBathSUN( Matrix<typename ComplexTypeId<Float>::Type,NCOLORS>& U, Matrix<typename ComplexTypeId<Float>::Type,NCOLORS> F, \
cuRNGState& localState, Float BetaOverNc ){
// Pseudo-heatbath update of one link: U is modified in place by sweeping
// over SU(2) subgroup blocks; F is the staple (callers in this file pass
// conj(staple)).  Consumes RNG draws via generate_su2_matrix_milc, so the
// block order and statement order are part of the RNG stream.
typedef typename ComplexTypeId<Float>::Type Cmplx;
if ( NCOLORS == 3 ) {
//////////////////////////////////////////////////////////////////
/*
for( int block = 0; block < NCOLORS; block++ ) {
Matrix<typename ComplexTypeId<T>::Type,3> tmp1 = U * F;
Matrix<T,2> r = get_block_su2<T>(tmp1, block);
T k = sqrt(r(0,0)*r(0,0)+r(0,1)*r(0,1)+r(1,0)*r(1,0)+r(1,1)*r(1,1));
T ap = BetaOverNc * k;
k = (T)1.0 / k;
r *= k;
//Matrix<T,2> a = generate_su2_matrix<T4, T>(ap, localState);
Matrix<T,2> a = generate_su2_matrix_milc<T>(ap, localState);
r = mulsu2UVDagger_4<T>( a, r);
///////////////////////////////////////
block_su2_to_su3<T>( U, complex( r(0,0), r(1,1) ), complex( r(1,0), r(0,1) ), complex(-r(1,0), r(0,1) ), complex( r(0,0),-r(1,1) ), block );
//FLOP_min = (198 + 4 + 15 + 28 + 28 + 84) * 3 = 1071
}*/
//////////////////////////////////////////////////////////////////
// SU(3) path: instead of forming the full product U * F per block (the
// commented-out variant above), accumulate only the two rows the current
// SU(2) block touches.
for ( int block = 0; block < NCOLORS; block++ ) {
int p,q;
IndexBlock<NCOLORS>(block, p, q);
Cmplx a0 = makeComplex((Float)0.0, (Float)0.0);
Cmplx a1 = a0;
Cmplx a2 = a0;
Cmplx a3 = a0;
// a0..a3 = the (p,p), (p,q), (q,p), (q,q) entries of U * F
for ( int j = 0; j < NCOLORS; j++ ) {
a0 += U(p,j) * F(j,p);
a1 += U(p,j) * F(j,q);
a2 += U(q,j) * F(j,p);
a3 += U(q,j) * F(j,q);
}
Matrix<Float,2> r;
r(0,0) = a0.x + a3.x;
r(0,1) = a1.y + a2.y;
r(1,0) = a1.x - a2.x;
r(1,1) = a0.y - a3.y;
// k normalizes r to SU(2); ap = BetaOverNc * k is the heatbath weight
Float k = sqrt(r(0,0) * r(0,0) + r(0,1) * r(0,1) + r(1,0) * r(1,0) + r(1,1) * r(1,1));;
Float ap = BetaOverNc * k;
k = 1.0 / k;
r *= k;
Matrix<Float,2> a = generate_su2_matrix_milc<Float>(ap, localState);
r = mulsu2UVDagger<Float>( a, r);
///////////////////////////////////////
// Apply the accepted SU(2) element to rows p and q of U.
a0 = makeComplex( r(0,0), r(1,1) );
a1 = makeComplex( r(1,0), r(0,1) );
a2 = makeComplex(-r(1,0), r(0,1) );
a3 = makeComplex( r(0,0),-r(1,1) );
Cmplx tmp0;
for ( int j = 0; j < NCOLORS; j++ ) {
tmp0 = a0 * U(p,j) + a1 * U(q,j);
U(q,j) = a2 * U(p,j) + a3 * U(q,j);
U(p,j) = tmp0;
}
//FLOP_min = (NCOLORS * 64 + 19 + 28 + 28) * 3 = NCOLORS * 192 + 225
}
//////////////////////////////////////////////////////////////////
}
else if ( NCOLORS > 3 ) {
//////////////////////////////////////////////////////////////////
//TESTED IN SU(4) SP THIS IS WORST
// General SU(Nc) path: keep M = U * F and update both U and M per block so
// later blocks see the already-updated product.
Matrix<typename ComplexTypeId<Float>::Type,NCOLORS> M = U * F;
for ( int block = 0; block < NCOLORS * ( NCOLORS - 1) / 2; block++ ) {
int2 id = IndexBlock<NCOLORS>( block );
Matrix<Float,2> r = get_block_su2<Float>(M, id);
Float k = sqrt(r(0,0) * r(0,0) + r(0,1) * r(0,1) + r(1,0) * r(1,0) + r(1,1) * r(1,1));
Float ap = BetaOverNc * k;
k = 1.0 / k;
r *= k;
Matrix<Float,2> a = generate_su2_matrix_milc<Float>(ap, localState);
Matrix<Float,2> rr = mulsu2UVDagger<Float>( a, r);
///////////////////////////////////////
mul_block_sun<Float, NCOLORS>( rr, U, id);
mul_block_sun<Float, NCOLORS>( rr, M, id);
///////////////////////////////////////
}
/* / TESTED IN SU(4) SP THIS IS FASTER
for ( int block = 0; block < NCOLORS * ( NCOLORS - 1) / 2; block++ ) {
int2 id = IndexBlock<NCOLORS>( block );
complex a0 = complex::zero();
complex a1 = complex::zero();
complex a2 = complex::zero();
complex a3 = complex::zero();
for ( int j = 0; j < NCOLORS; j++ ) {
a0 += U(id.x, j) * F.e[j][id.x];
a1 += U(id.x, j) * F.e[j][id.y];
a2 += U(id.y, j) * F.e[j][id.x];
a3 += U(id.y, j) * F.e[j][id.y];
}
Matrix<T,2> r;
r(0,0) = a0.x + a3.x;
r(0,1) = a1.y + a2.y;
r(1,0) = a1.x - a2.x;
r(1,1) = a0.y - a3.y;
T k = sqrt(r(0,0) * r(0,0) + r(0,1) * r(0,1) + r(1,0) * r(1,0) + r(1,1) * r(1,1));
T ap = BetaOverNc * k;
k = (T)1.0 / k;
r *= k;
//Matrix<T,2> a = generate_su2_matrix<T4, T>(ap, localState);
Matrix<T,2> a = generate_su2_matrix_milc<T>(ap, localState);
r = mulsu2UVDagger<T>( a, r);
mul_block_sun<T>( r, U, id); * /
/*
a0 = complex( r(0,0), r(1,1) );
a1 = complex( r(1,0), r(0,1) );
a2 = complex(-r(1,0), r(0,1) );
a3 = complex( r(0,0),-r(1,1) );
complex tmp0;
for ( int j = 0; j < NCOLORS; j++ ) {
tmp0 = a0 * U(id.x, j) + a1 * U(id.y, j);
U(id.y, j) = a2 * U(id.x, j) + a3 * U(id.y, j);
U(id.x, j) = tmp0;
} */
// }
}
//////////////////////////////////////////////////////////////////
}
//////////////////////////////////////////////////////////////////////////
/**
@brief Link update by overrelaxation
@param U link to be updated
@param F staple
*/
template <class Float, int NCOLORS>
__device__ inline void overrelaxationSUN( Matrix<typename ComplexTypeId<Float>::Type,NCOLORS>& U, Matrix<typename ComplexTypeId<Float>::Type,NCOLORS> F ){
// Overrelaxation update of one link: per SU(2) block the normalized,
// conjugated block of U*F is applied to U twice (the code below applies the
// a0..a3 coefficients two times), which reflects the link without consuming
// any RNG state.  F is the staple; callers in this file pass conj(staple).
typedef typename ComplexTypeId<Float>::Type Cmplx;
if ( NCOLORS == 3 ) {
//////////////////////////////////////////////////////////////////
/*
for( int block = 0; block < 3; block++ ) {
Matrix<typename ComplexTypeId<T>::Type,3> tmp1 = U * F;
Matrix<T,2> r = get_block_su2<T>(tmp1, block);
//normalize and conjugate
Float norm = 1.0 / sqrt(r(0,0)*r(0,0)+r(0,1)*r(0,1)+r(1,0)*r(1,0)+r(1,1)*r(1,1));;
r(0,0) *= norm;
r(0,1) *= -norm;
r(1,0) *= -norm;
r(1,1) *= -norm;
///////////////////////////////////////
complex a00 = complex( r(0,0), r(1,1) );
complex a01 = complex( r(1,0), r(0,1) );
complex a10 = complex(-r(1,0), r(0,1) );
complex a11 = complex( r(0,0),-r(1,1) );
block_su2_to_su3<T>( U, a00, a01, a10, a11, block );
block_su2_to_su3<T>( U, a00, a01, a10, a11, block );
//FLOP = (198 + 17 + 84 * 2) * 3 = 1149
}*/
///////////////////////////////////////////////////////////////////
//This version does not need to multiply all matrix at each block: tmp1 = U * F;
//////////////////////////////////////////////////////////////////
for ( int block = 0; block < 3; block++ ) {
int p,q;
IndexBlock<NCOLORS>(block, p, q);
Cmplx a0 = makeComplex((Float)0., (Float)0.);
Cmplx a1 = a0;
Cmplx a2 = a0;
Cmplx a3 = a0;
// a0..a3 = the (p,p), (p,q), (q,p), (q,q) entries of U * F
for ( int j = 0; j < NCOLORS; j++ ) {
a0 += U(p,j) * F(j,p);
a1 += U(p,j) * F(j,q);
a2 += U(q,j) * F(j,p);
a3 += U(q,j) * F(j,q);
}
Matrix<Float,2> r;
r(0,0) = a0.x + a3.x;
r(0,1) = a1.y + a2.y;
r(1,0) = a1.x - a2.x;
r(1,1) = a0.y - a3.y;
//normalize and conjugate
//r = r.conj_normalize();
Float norm = 1.0 / sqrt(r(0,0) * r(0,0) + r(0,1) * r(0,1) + r(1,0) * r(1,0) + r(1,1) * r(1,1));;
r(0,0) *= norm;
r(0,1) *= -norm;
r(1,0) *= -norm;
r(1,1) *= -norm;
///////////////////////////////////////
// Apply the SU(2) element to rows p and q of U twice in one pass:
// tmp0/tmp1 hold the once-multiplied rows, the final writes multiply again.
a0 = makeComplex( r(0,0), r(1,1) );
a1 = makeComplex( r(1,0), r(0,1) );
a2 = makeComplex(-r(1,0), r(0,1) );
a3 = makeComplex( r(0,0),-r(1,1) );
Cmplx tmp0, tmp1;
for ( int j = 0; j < NCOLORS; j++ ) {
tmp0 = a0 * U(p,j) + a1 * U(q,j);
tmp1 = a2 * U(p,j) + a3 * U(q,j);
U(p,j) = a0 * tmp0 + a1 * tmp1;
U(q,j) = a2 * tmp0 + a3 * tmp1;
}
//FLOP = (NCOLORS * 88 + 17) * 3
}
///////////////////////////////////////////////////////////////////
}
else if ( NCOLORS > 3 ) {
///////////////////////////////////////////////////////////////////
// General SU(Nc) path: keep M = U * F and apply each block twice to both
// U and M so later blocks see the updated product.
Matrix<typename ComplexTypeId<Float>::Type,NCOLORS> M = U * F;
for ( int block = 0; block < NCOLORS * ( NCOLORS - 1) / 2; block++ ) {
int2 id = IndexBlock<NCOLORS>( block );
Matrix<Float,2> r = get_block_su2<Float, NCOLORS>(M, id);
//normalize and conjugate
Float norm = 1.0 / sqrt(r(0,0) * r(0,0) + r(0,1) * r(0,1) + r(1,0) * r(1,0) + r(1,1) * r(1,1));;
r(0,0) *= norm;
r(0,1) *= -norm;
r(1,0) *= -norm;
r(1,1) *= -norm;
mul_block_sun<Float, NCOLORS>( r, U, id);
mul_block_sun<Float, NCOLORS>( r, U, id);
mul_block_sun<Float, NCOLORS>( r, M, id);
mul_block_sun<Float, NCOLORS>( r, M, id);
///////////////////////////////////////
}
/* //TESTED IN SU(4) SP THIS IS WORST
for( int block = 0; block < NCOLORS * ( NCOLORS - 1) / 2; block++ ) {
int2 id = IndexBlock<NCOLORS>( block );
complex a0 = complex::zero();
complex a1 = complex::zero();
complex a2 = complex::zero();
complex a3 = complex::zero();
for(int j = 0; j < NCOLORS; j++){
a0 += U(id.x, j) * F.e[j][id.x];
a1 += U(id.x, j) * F.e[j][id.y];
a2 += U(id.y, j) * F.e[j][id.x];
a3 += U(id.y, j) * F.e[j][id.y];
}
Matrix<T,2> r;
r(0,0) = a0.x + a3.x;
r(0,1) = a1.y + a2.y;
r(1,0) = a1.x - a2.x;
r(1,1) = a0.y - a3.y;
//normalize and conjugate
Float norm = 1.0 / sqrt(r(0,0)*r(0,0)+r(0,1)*r(0,1)+r(1,0)*r(1,0)+r(1,1)*r(1,1));;
r(0,0) *= norm;
r(0,1) *= -norm;
r(1,0) *= -norm;
r(1,1) *= -norm;
//mul_block_sun<T>( r, U, id);
//mul_block_sun<T>( r, U, id);
///////////////////////////////////////
a0 = complex( r(0,0), r(1,1) );
a1 = complex( r(1,0), r(0,1) );
a2 = complex(-r(1,0), r(0,1) );
a3 = complex( r(0,0),-r(1,1) );
complex tmp0, tmp1;
for(int j = 0; j < NCOLORS; j++){
tmp0 = a0 * U(id.x, j) + a1 * U(id.y, j);
tmp1 = a2 * U(id.x, j) + a3 * U(id.y, j);
U(id.x, j) = a0 * tmp0 + a1 * tmp1;
U(id.y, j) = a2 * tmp0 + a3 * tmp1;
}
}
*/
}
}
// Kernel argument bundle for the heatbath/overrelaxation kernels.
template <typename Gauge, typename Float, int NCOLORS>
struct MonteArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
#ifdef MULTI_GPU
int border[4]; // halo widths (data.R()) stripped from each dimension
#endif
Gauge dataOr; // gauge field accessor used inside the kernels
cudaGaugeField &data; // underlying field, kept for backup/restore in tuning
Float BetaOverNc; // Beta / Nc, the heatbath weight prefactor
RNG rngstate; // per-thread RNG states (heatbath only)
// Derives the local lattice extents (minus the halo when MULTI_GPU is set)
// and the thread count: one thread per site of a single parity, hence the
// halving of the volume with >> 1.
MonteArg(const Gauge &dataOr, cudaGaugeField & data, Float Beta, RNG &rngstate)
: dataOr(dataOr), data(data), rngstate(rngstate) {
BetaOverNc = Beta / (Float)NCOLORS;
#ifdef MULTI_GPU
for ( int dir = 0; dir < 4; ++dir ) {
border[dir] = data.R()[dir];
X[dir] = data.X()[dir] - border[dir] * 2;
}
#else
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
#endif
threads = X[0] * X[1] * X[2] * X[3] >> 1;
}
};
// One thread per lattice site of the given parity: builds the staple around
// the mu-link at that site and updates the link, either by heatbath
// (HeatbathOrRelax == true, consumes RNG state) or by overrelaxation.
template<typename Float, typename Gauge, int NCOLORS, bool HeatbathOrRelax>
__global__ void compute_heatBath(MonteArg<Gauge, Float, NCOLORS> arg, int mu, int parity){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if ( idx >= arg.threads ) return; // tail guard for the last block
typedef typename ComplexTypeId<Float>::Type Cmplx;
int id = idx; // keep the un-shifted index: it addresses the RNG state array
int X[4];
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr];
int x[4];
getCoords(x, idx, X, parity);
#ifdef MULTI_GPU
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) {
x[dr] += arg.border[dr]; // shift coordinates past the halo region
X[dr] += 2 * arg.border[dr]; // and extend the local dims accordingly
}
idx = linkIndex(x,X); // re-linearize with the halo-extended extents
#endif
Matrix<Cmplx,NCOLORS> staple;
setZero(&staple);
Matrix<Cmplx,NCOLORS> U;
// Accumulate the six staples: for every nu != mu, the "up" path
// U_nu(x) U_mu(x+nu) U_nu(x+mu)^dag and the "down" path
// U_nu(x-nu)^dag U_mu(x-nu) U_nu(x-nu+mu).
for ( int nu = 0; nu < 4; nu++ ) if ( mu != nu ) {
int dx[4] = { 0, 0, 0, 0 };
Matrix<Cmplx,NCOLORS> link;
arg.dataOr.load((Float*)(link.data), idx, nu, parity);
dx[nu]++;
arg.dataOr.load((Float*)(U.data), linkIndexShift(x,dx,X), mu, 1 - parity);
link *= U;
dx[nu]--;
dx[mu]++;
arg.dataOr.load((Float*)(U.data), linkIndexShift(x,dx,X), nu, 1 - parity);
link *= conj(U);
staple += link;
dx[mu]--;
dx[nu]--;
arg.dataOr.load((Float*)(link.data), linkIndexShift(x,dx,X), nu, 1 - parity);
arg.dataOr.load((Float*)(U.data), linkIndexShift(x,dx,X), mu, 1 - parity);
link = conj(link) * U;
dx[mu]++;
arg.dataOr.load((Float*)(U.data), linkIndexShift(x,dx,X), nu, parity);
link *= U;
staple += link;
}
arg.dataOr.load((Float*)(U.data), idx, mu, parity);
if ( HeatbathOrRelax ) {
// Heatbath path: read-modify-write this thread's RNG state.
cuRNGState localState = arg.rngstate.State()[ id ];
heatBathSUN<Float, NCOLORS>( U, conj(staple), localState, arg.BetaOverNc );
arg.rngstate.State()[ id ] = localState;
}
else{
overrelaxationSUN<Float, NCOLORS>( U, conj(staple) );
}
arg.dataOr.save((Float*)(U.data), idx, mu, parity);
}
// Autotuned launcher for compute_heatBath.  HeatbathOrRelax selects the
// heatbath (true) or overrelaxation (false) update; NElems is the number of
// real elements stored per link by the Gauge accessor.
// Fixes: repaired the mojibake "&para;m" back to "&param" in two method
// signatures, normalized the kernel launch token to <<<...>>>, and cast
// sizeof(Float) to int to match the %d format specifier (passing size_t to
// %d is undefined behavior).
template<typename Float, typename Gauge, int NCOLORS, int NElems, bool HeatbathOrRelax>
class GaugeHB : Tunable {
  MonteArg<Gauge, Float, NCOLORS> arg;
  int mu;     // direction updated by the next apply()
  int parity; // checkerboard parity updated by the next apply()
  mutable char aux_string[128]; // used as a label in the autotuner
  private:
  // No shared memory is used by the kernel.
  unsigned int sharedBytesPerThread() const {
    return 0;
  }
  unsigned int sharedBytesPerBlock(const TuneParam &param) const {
    return 0;
  }
  //bool tuneSharedBytes() const { return false; } // Don't tune shared memory
  bool tuneGridDim() const {
    return false;
  } // Don't tune the grid dimensions.
  unsigned int minThreads() const {
    return arg.threads;
  }
  public:
  GaugeHB(MonteArg<Gauge, Float, NCOLORS> &arg)
    : arg(arg), mu(0), parity(0) {
  }
  ~GaugeHB () {
  }
  // Select which direction/parity the next apply() updates.
  void SetParam(int _mu, int _parity){
    mu = _mu;
    parity = _parity;
  }
  // Launch the kernel with the tuned (or tuning) launch configuration.
  void apply(const hipStream_t &stream){
    TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
    compute_heatBath<Float, Gauge, NCOLORS, HeatbathOrRelax><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg, mu, parity);
  }
  // Key identifying this kernel instance in the autotuning cache.
  TuneKey tuneKey() const {
    std::stringstream vol;
    vol << arg.X[0] << "x";
    vol << arg.X[1] << "x";
    vol << arg.X[2] << "x";
    vol << arg.X[3];
    // cast: sizeof() yields size_t, which must not be passed to %d
    sprintf(aux_string, "threads=%d,prec=%d", arg.threads, (int)sizeof(Float));
    return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
  }
  std::string paramString(const TuneParam &param) const {
    std::stringstream ps;
    ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
    ps << "shared=" << param.shared_bytes;
    return ps.str();
  }
  // Tuning trial runs mutate the field (and the RNG state for heatbath), so
  // save and restore them around each trial.
  void preTune() {
    arg.data.backup();
    if(HeatbathOrRelax) arg.rngstate.backup();
  }
  void postTune() {
    arg.data.restore();
    if(HeatbathOrRelax) arg.rngstate.restore();
  }
  long long flops() const {
    //NEED TO CHECK THIS!!!!!!
    if ( NCOLORS == 3 ) {
      long long flop = 2268LL; // staple construction
      if ( HeatbathOrRelax ) {
        flop += 801LL;
      }
      else{
        flop += 843LL;
      }
      flop *= arg.threads;
      return flop;
    }
    else{
      long long flop = NCOLORS * NCOLORS * NCOLORS * 84LL;
      if ( HeatbathOrRelax ) {
        flop += NCOLORS * NCOLORS * NCOLORS + (NCOLORS * ( NCOLORS - 1) / 2) * (46LL + 48LL + 56LL * NCOLORS);
      }
      else{
        flop += NCOLORS * NCOLORS * NCOLORS + (NCOLORS * ( NCOLORS - 1) / 2) * (17LL + 112LL * NCOLORS);
      }
      flop *= arg.threads;
      return flop;
    }
  }
  long long bytes() const {
    //NEED TO CHECK THIS!!!!!!
    if ( NCOLORS == 3 ) {
      long long byte = 20LL * NElems * sizeof(Float); // link loads/stores
      if ( HeatbathOrRelax ) byte += 2LL * sizeof(cuRNGState); // RNG state r/w
      byte *= arg.threads;
      return byte;
    }
    else{
      long long byte = 20LL * NCOLORS * NCOLORS * 2 * sizeof(Float);
      if ( HeatbathOrRelax ) byte += 2LL * sizeof(cuRNGState);
      byte *= arg.threads;
      return byte;
    }
  }
};
// Worker: runs nhb heatbath sweeps followed by nover overrelaxation sweeps.
// Each sweep updates all 4 directions of both parities (8 kernel launches),
// exchanging halos after every launch when MULTI_GPU is set, and optionally
// reports timing/performance at QUDA_SUMMARIZE verbosity.
template<typename Float, int NElems, int NCOLORS, typename Gauge>
void Monte( Gauge dataOr, cudaGaugeField& data, RNG &rngstate, Float Beta, unsigned int nhb, unsigned int nover) {
TimeProfile profileHBOVR("HeatBath_OR_Relax", false);
MonteArg<Gauge, Float, NCOLORS> montearg(dataOr, data, Beta, rngstate);
if ( getVerbosity() >= QUDA_SUMMARIZE ) profileHBOVR.TPSTART(QUDA_PROFILE_COMPUTE);
GaugeHB<Float, Gauge, NCOLORS, NElems, true> hb(montearg); // heatbath functor
for ( int step = 0; step < nhb; ++step ) {
for ( int parity = 0; parity < 2; ++parity ) {
for ( int mu = 0; mu < 4; ++mu ) {
hb.SetParam(mu, parity);
hb.apply(0); // default stream
#ifdef MULTI_GPU
PGaugeExchange( data, mu, parity);
#endif
}
}
}
if ( getVerbosity() >= QUDA_SUMMARIZE ) {
hipDeviceSynchronize(); // launches are async: sync before stopping the timer
profileHBOVR.TPSTOP(QUDA_PROFILE_COMPUTE);
double secs = profileHBOVR.Last(QUDA_PROFILE_COMPUTE);
// factor 8 = 4 directions x 2 parities per sweep
double gflops = (hb.flops() * 8 * nhb * 1e-9) / (secs);
double gbytes = hb.bytes() * 8 * nhb / (secs * 1e9);
#ifdef MULTI_GPU
printfQuda("HB: Time = %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops * comm_size(), gbytes * comm_size());
#else
printfQuda("HB: Time = %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops, gbytes);
#endif
}
if ( getVerbosity() >= QUDA_SUMMARIZE ) profileHBOVR.TPSTART(QUDA_PROFILE_COMPUTE);
GaugeHB<Float, Gauge, NCOLORS, NElems, false> relax(montearg); // overrelaxation functor
for ( int step = 0; step < nover; ++step ) {
for ( int parity = 0; parity < 2; ++parity ) {
for ( int mu = 0; mu < 4; ++mu ) {
relax.SetParam(mu, parity);
relax.apply(0);
#ifdef MULTI_GPU
PGaugeExchange( data, mu, parity);
#endif
}
}
}
if ( getVerbosity() >= QUDA_SUMMARIZE ) {
hipDeviceSynchronize();
profileHBOVR.TPSTOP(QUDA_PROFILE_COMPUTE);
double secs = profileHBOVR.Last(QUDA_PROFILE_COMPUTE);
double gflops = (relax.flops() * 8 * nover * 1e-9) / (secs);
double gbytes = relax.bytes() * 8 * nover / (secs * 1e9);
#ifdef MULTI_GPU
printfQuda("OVR: Time = %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops * comm_size(), gbytes * comm_size());
#else
printfQuda("OVR: Time = %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops, gbytes);
#endif
}
}
// Dispatch on the gauge-field reconstruction type.  The integer template
// argument is the number of real elements stored per link (18 = full
// matrix; 12 and 8 correspond to the reconstruct-12/8 compressed formats).
template<typename Float>
void Monte( cudaGaugeField& data, RNG &rngstate, Float Beta, unsigned int nhb, unsigned int nover) {
if ( data.isNative() ) {
if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge;
Monte<Float, 18, 3>(Gauge(data), data, rngstate, Beta, nhb, nover);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge;
Monte<Float, 12, 3>(Gauge(data), data, rngstate, Beta, nhb, nover);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge;
Monte<Float, 8, 3>(Gauge(data), data, rngstate, Beta, nhb, nover);
} else {
errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct());
}
} else {
errorQuda("Invalid Gauge Order\n");
}
}
#endif // GPU_GAUGE_ALG
/** @brief Perform heatbath and overrelaxation. Performs nhb heatbath steps followed by nover overrelaxation steps.
*
* @param[in,out] data Gauge field
* @param[in,out] rngstate state of the CURAND random number generator
* @param[in] Beta inverse of the gauge coupling, beta = 2 Nc / g_0^2
* @param[in] nhb number of heatbath steps
* @param[in] nover number of overrelaxation steps
*/
void Monte( cudaGaugeField& data, RNG &rngstate, double Beta, unsigned int nhb, unsigned int nover) {
#ifdef GPU_GAUGE_ALG
  // Dispatch on the field's precision; all other cases are rejected.
  switch ( data.Precision() ) {
  case QUDA_SINGLE_PRECISION:
    Monte<float>(data, rngstate, (float)Beta, nhb, nover);
    break;
  case QUDA_DOUBLE_PRECISION:
    Monte<double>(data, rngstate, Beta, nhb, nover);
    break;
  default:
    errorQuda("Precision %d not supported", data.Precision());
  }
#else
  errorQuda("Pure gauge code has not been built");
#endif // GPU_GAUGE_ALG
}
}
| 776b43ca53715d846e5949cae60f64b991e1cafd.cu | #include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <launch_kernel.cuh>
#include <comm_quda.h>
#include <pgauge_monte.h>
#include <gauge_tools.h>
#include <random_quda.h>
#include <index_helper.cuh>
#include <atomic.cuh>
#include <cub/cub.cuh>
#ifndef PI
#define PI 3.1415926535897932384626433832795 // pi
#endif
#ifndef PII
#define PII 6.2831853071795864769252867665590 // 2 * pi
#endif
namespace quda {
#ifdef GPU_GAUGE_ALG
/**
@brief Calculate the SU(2) index block in the SU(Nc) matrix
@param block number to calculate the index's, the total number of blocks is NCOLORS * ( NCOLORS - 1) / 2.
@return Returns two index's in int2 type, accessed by .x and .y.
*/
template<int NCOLORS>
__host__ __device__ static inline int2 IndexBlock(int block){
  // Enumerate pairs (p, p + d) with the index distance d as the outer loop
  // and the first index p as the inner loop, stopping once the running
  // counter equals the requested block number.
  int2 id;
  int first = 0;   // first row/column index of the SU(2) block
  int dist = 0;    // distance between the two indices
  int counter = -1;
  bool located = false;
  while ( dist < (NCOLORS - 1) && !located ) {
    dist++;
    for ( first = 0; first < (NCOLORS - dist); first++ ) {
      counter++;
      if ( counter == block ) {
        located = true;
        break;
      }
    }
  }
  id.x = first;
  id.y = first + dist;
  return id;
}
/**
@brief Calculate the SU(2) index block in the SU(Nc) matrix
@param block number to calculate de index's, the total number of blocks is NCOLORS * ( NCOLORS - 1) / 2.
@param p store the first index
@param q store the second index
*/
template<int NCOLORS>
__host__ __device__ static inline void IndexBlock(int block, int &p, int &q){
  if ( NCOLORS == 3 ) {
    // SU(3): just three blocks, resolved via a hard-coded table.
    switch ( block ) {
    case 0: p = 0; q = 1; break;
    case 1: p = 1; q = 2; break;
    default: p = 0; q = 2; break;
    }
  }
  else if ( NCOLORS > 3 ) {
    // General SU(Nc): walk pairs (p, p + d) distance-major until the
    // running counter reaches the requested block.
    int first = 0;
    int dist = 0;
    int counter = -1;
    bool located = false;
    while ( dist < (NCOLORS - 1) && !located ) {
      dist++;
      for ( first = 0; first < (NCOLORS - dist); first++ ) {
        counter++;
        if ( counter == block ) {
          located = true;
          break;
        }
      }
    }
    p = first;
    q = first + dist;
  }
}
/**
@brief Generate full SU(2) matrix (four real numbers instead of 2x2 complex matrix) and update link matrix.
Get from MILC code.
@param al weight
@param localstate CURAND rng state
*/
template <class T>
__device__ static inline Matrix<T,2> generate_su2_matrix_milc(T al, cuRNGState& localState){
// NOTE(review): every Random<T>() call advances the shared RNG state, so the
// order and number of draws below must not be reordered.
T xr1, xr2, xr3, xr4, d, r;
int k;
// First trial sample: two log-uniform draws, one angle, one accept test.
xr1 = Random<T>(localState);
xr1 = (log((xr1 + 1.e-10))); // small offset guards against log(0)
xr2 = Random<T>(localState);
xr2 = (log((xr2 + 1.e-10)));
xr3 = Random<T>(localState);
xr4 = Random<T>(localState);
xr3 = cos(PII * xr3);
d = -(xr2 + xr1 * xr3 * xr3 ) / al;
//now beat each site into submission
int nacd = 0; // accept flag for the trial above
if ((1.00 - 0.5 * d) > xr4 * xr4 ) nacd = 1;
if ( nacd == 0 && al > 2.0 ) { //k-p algorithm
// Kennedy-Pendleton-style rejection loop, capped at 20 retries; if every
// retry is rejected, the last trial d is used as-is.
for ( k = 0; k < 20; k++ ) {
//get four random numbers (add a small increment to prevent taking log(0.)
xr1 = Random<T>(localState);
xr1 = (log((xr1 + 1.e-10)));
xr2 = Random<T>(localState);
xr2 = (log((xr2 + 1.e-10)));
xr3 = Random<T>(localState);
xr4 = Random<T>(localState);
xr3 = cos(PII * xr3);
d = -(xr2 + xr1 * xr3 * xr3) / al;
if ((1.00 - 0.5 * d) > xr4 * xr4 ) break;
}
} //endif nacd
Matrix<T,2> a;
if ( nacd == 0 && al <= 2.0 ) { //creutz algorithm
// Creutz rejection loop used for small weights, also capped at 20 retries.
xr3 = exp(-2.0 * al);
xr4 = 1.0 - xr3;
for ( k = 0; k < 20; k++ ) {
//get two random numbers
xr1 = Random<T>(localState);
xr2 = Random<T>(localState);
r = xr3 + xr4 * xr1;
a(0,0) = 1.00 + log(r) / al;
if ((1.0 - a(0,0) * a(0,0)) > xr2 * xr2 ) break;
}
d = 1.0 - a(0,0);
} //endif nacd
//generate the four su(2) elements
//find a0 = 1 - d
a(0,0) = 1.0 - d;
//compute r
xr3 = 1.0 - a(0,0) * a(0,0);
xr3 = abs(xr3); // abs guards against tiny negative round-off before sqrt
r = sqrt(xr3);
//compute a3
a(1,1) = (2.0 * Random<T>(localState) - 1.0) * r;
//compute a1 and a2
xr1 = xr3 - a(1,1) * a(1,1);
xr1 = abs(xr1);
xr1 = sqrt(xr1);
//xr2 is a random number between 0 and 2*pi
xr2 = PII * Random<T>(localState);
a(0,1) = xr1 * cos(xr2);
a(1,0) = xr1 * sin(xr2);
return a;
}
/**
@brief Return SU(2) subgroup (4 real numbers) from SU(3) matrix
@param tmp1 input SU(3) matrix
@param block to retrieve from 0 to 2.
@return 4 real numbers
*/
template < class T>
__host__ __device__ static inline Matrix<T,2> get_block_su2( Matrix<typename ComplexTypeId<T>::Type,3> tmp1, int block ){
  // Extract the SU(2) subgroup of an SU(3) matrix as four real numbers.
  // Resolve the block id to a row/column pair (p, q), then build the four
  // reals from the real/imaginary parts of that 2x2 complex sub-block.
  Matrix<T,2> r;
  int p, q;
  switch ( block ) {
  case 0: p = 0; q = 1; break;
  case 1: p = 1; q = 2; break;
  case 2: p = 0; q = 2; break;
  default: return r; // invalid block: result left untouched, as before
  }
  r(0,0) = tmp1(p,p).x + tmp1(q,q).x;
  r(0,1) = tmp1(p,q).y + tmp1(q,p).y;
  r(1,0) = tmp1(p,q).x - tmp1(q,p).x;
  r(1,1) = tmp1(p,p).y - tmp1(q,q).y;
  return r;
}
/**
@brief Return SU(2) subgroup (4 real numbers) from SU(Nc) matrix
@param tmp1 input SU(Nc) matrix
@param id the two indices to retrieve SU(2) block
@return 4 real numbers
*/
template <class T, int NCOLORS>
__host__ __device__ static inline Matrix<T,2> get_block_su2( Matrix<typename ComplexTypeId<T>::Type,NCOLORS> tmp1, int2 id ){
  // Four-real SU(2) projection of the (id.x, id.y) sub-block of an SU(Nc)
  // matrix, built from the real/imaginary parts of the 2x2 complex block.
  const int p = id.x;
  const int q = id.y;
  Matrix<T,2> r;
  r(0,0) = tmp1(p,p).x + tmp1(q,q).x;
  r(0,1) = tmp1(p,q).y + tmp1(q,p).y;
  r(1,0) = tmp1(p,q).x - tmp1(q,p).x;
  r(1,1) = tmp1(p,p).y - tmp1(q,q).y;
  return r;
}
/**
@brief Create a SU(Nc) identity matrix and fills with the SU(2) block
@param rr SU(2) matrix represented only by four real numbers
@param id the two indices to fill in the SU(3) matrix
@return SU(Nc) matrix
*/
template <class T, int NCOLORS>
__host__ __device__ static inline Matrix<typename ComplexTypeId<T>::Type,NCOLORS> block_su2_to_sun( Matrix<T,2> rr, int2 id ){
  // Start from the SU(Nc) identity and embed the 2x2 complex form of the
  // four-real SU(2) matrix rr at rows/columns (id.x, id.y).
  Matrix<typename ComplexTypeId<T>::Type,NCOLORS> out;
  setIdentity(&out);
  const int p = id.x;
  const int q = id.y;
  out(p,p) = makeComplex( rr(0,0), rr(1,1) );
  out(p,q) = makeComplex( rr(1,0), rr(0,1) );
  out(q,p) = makeComplex(-rr(1,0), rr(0,1) );
  out(q,q) = makeComplex( rr(0,0),-rr(1,1) );
  return out;
}
/**
@brief Update the SU(Nc) link with the new SU(2) matrix, link <- u * link
@param u SU(2) matrix represented by four real numbers
@param link SU(Nc) matrix
@param id indices
*/
template <class T, int NCOLORS>
__host__ __device__ static inline void mul_block_sun( Matrix<T,2> u, Matrix<typename ComplexTypeId<T>::Type,NCOLORS> &link, int2 id ){
  // Left-multiply the two rows (id.x, id.y) of link by the SU(2) matrix u,
  // i.e. link <- u * link restricted to that sub-block.
  typedef typename ComplexTypeId<T>::Type Cmplx;
  // Hoist the complex SU(2) entries out of the column loop; they do not
  // depend on j.
  const Cmplx u00 = makeComplex( u(0,0), u(1,1) );
  const Cmplx u01 = makeComplex( u(1,0), u(0,1) );
  const Cmplx u10 = makeComplex(-u(1,0), u(0,1) );
  const Cmplx u11 = makeComplex( u(0,0),-u(1,1) );
  for ( int j = 0; j < NCOLORS; j++ ) {
    Cmplx top = u00 * link(id.x, j) + u01 * link(id.y, j);
    link(id.y, j) = u10 * link(id.x, j) + u11 * link(id.y, j);
    link(id.x, j) = top;
  }
}
/**
@brief Update the SU(3) link with the new SU(2) matrix, link <- u * link
@param U SU(3) matrix
@param a00 element (0,0) of the SU(2) matrix
@param a01 element (0,1) of the SU(2) matrix
@param a10 element (1,0) of the SU(2) matrix
@param a11 element (1,1) of the SU(2) matrix
@param block of the SU(3) matrix, 0,1 or 2
*/
template <class Cmplx>
__host__ __device__ static inline void block_su2_to_su3( Matrix<Cmplx,3> &U, Cmplx a00, Cmplx a01, Cmplx a10, Cmplx a11, int block ){
  // Rows touched by each block: 0 -> (0,1), 1 -> (1,2), 2 -> (0,2).
  const int p = (block == 1) ? 1 : 0;
  const int q = (block == 0) ? 1 : 2;
  // Apply the embedded 2x2 rotation to every column of the two rows:
  // [ U(p,j) ]   [ a00 a01 ] [ U(p,j) ]
  // [ U(q,j) ] = [ a10 a11 ] [ U(q,j) ]
  for ( int j = 0; j < 3; j++ ) {
    Cmplx tmp = a00 * U(p,j) + a01 * U(q,j);
    U(q,j) = a10 * U(p,j) + a11 * U(q,j);
    U(p,j) = tmp;
  }
}
// v * u^dagger
template <class Float>
__host__ __device__ static inline Matrix<Float,2> mulsu2UVDagger(Matrix<Float,2> v, Matrix<Float,2> u){
Matrix<Float,2> b;
b(0,0) = v(0,0) * u(0,0) + v(0,1) * u(0,1) + v(1,0) * u(1,0) + v(1,1) * u(1,1);
b(0,1) = v(0,1) * u(0,0) - v(0,0) * u(0,1) + v(1,0) * u(1,1) - v(1,1) * u(1,0);
b(1,0) = v(1,0) * u(0,0) - v(0,0) * u(1,0) + v(1,1) * u(0,1) - v(0,1) * u(1,1);
b(1,1) = v(1,1) * u(0,0) - v(0,0) * u(1,1) + v(0,1) * u(1,0) - v(1,0) * u(0,1);
return b;
}
/**
@brief Link update by pseudo-heatbath
@param U link to be updated
@param F staple
@param localstate CURAND rng state
*/
template <class Float, int NCOLORS>
__device__ inline void heatBathSUN( Matrix<typename ComplexTypeId<Float>::Type,NCOLORS>& U, Matrix<typename ComplexTypeId<Float>::Type,NCOLORS> F, \
cuRNGState& localState, Float BetaOverNc ){
typedef typename ComplexTypeId<Float>::Type Cmplx;
if ( NCOLORS == 3 ) {
//////////////////////////////////////////////////////////////////
/*
for( int block = 0; block < NCOLORS; block++ ) {
Matrix<typename ComplexTypeId<T>::Type,3> tmp1 = U * F;
Matrix<T,2> r = get_block_su2<T>(tmp1, block);
T k = sqrt(r(0,0)*r(0,0)+r(0,1)*r(0,1)+r(1,0)*r(1,0)+r(1,1)*r(1,1));
T ap = BetaOverNc * k;
k = (T)1.0 / k;
r *= k;
//Matrix<T,2> a = generate_su2_matrix<T4, T>(ap, localState);
Matrix<T,2> a = generate_su2_matrix_milc<T>(ap, localState);
r = mulsu2UVDagger_4<T>( a, r);
///////////////////////////////////////
block_su2_to_su3<T>( U, complex( r(0,0), r(1,1) ), complex( r(1,0), r(0,1) ), complex(-r(1,0), r(0,1) ), complex( r(0,0),-r(1,1) ), block );
//FLOP_min = (198 + 4 + 15 + 28 + 28 + 84) * 3 = 1071
}*/
//////////////////////////////////////////////////////////////////
// SU(3) path: sweep the three SU(2) subgroups of the link.
for ( int block = 0; block < NCOLORS; block++ ) {
int p,q;
IndexBlock<NCOLORS>(block, p, q);
// Accumulate only the (p,q) 2x2 sub-block of U*F instead of forming the
// full matrix product for every subgroup.
Cmplx a0 = makeComplex((Float)0.0, (Float)0.0);
Cmplx a1 = a0;
Cmplx a2 = a0;
Cmplx a3 = a0;
for ( int j = 0; j < NCOLORS; j++ ) {
a0 += U(p,j) * F(j,p);
a1 += U(p,j) * F(j,q);
a2 += U(q,j) * F(j,p);
a3 += U(q,j) * F(j,q);
}
// Project the sub-block onto SU(2) (four real numbers), then normalize it.
Matrix<Float,2> r;
r(0,0) = a0.x + a3.x;
r(0,1) = a1.y + a2.y;
r(1,0) = a1.x - a2.x;
r(1,1) = a0.y - a3.y;
Float k = sqrt(r(0,0) * r(0,0) + r(0,1) * r(0,1) + r(1,0) * r(1,0) + r(1,1) * r(1,1));;
Float ap = BetaOverNc * k;
k = 1.0 / k;
r *= k;
// Draw a new SU(2) element (MILC-style heatbath sampler) and form a * r^dagger.
Matrix<Float,2> a = generate_su2_matrix_milc<Float>(ap, localState);
r = mulsu2UVDagger<Float>( a, r);
///////////////////////////////////////
// Embed the SU(2) update and left-multiply the two affected rows of U.
a0 = makeComplex( r(0,0), r(1,1) );
a1 = makeComplex( r(1,0), r(0,1) );
a2 = makeComplex(-r(1,0), r(0,1) );
a3 = makeComplex( r(0,0),-r(1,1) );
Cmplx tmp0;
for ( int j = 0; j < NCOLORS; j++ ) {
tmp0 = a0 * U(p,j) + a1 * U(q,j);
U(q,j) = a2 * U(p,j) + a3 * U(q,j);
U(p,j) = tmp0;
}
//FLOP_min = (NCOLORS * 64 + 19 + 28 + 28) * 3 = NCOLORS * 192 + 225
}
//////////////////////////////////////////////////////////////////
}
else if ( NCOLORS > 3 ) {
//////////////////////////////////////////////////////////////////
//TESTED IN SU(4) SP THIS IS WORST
// Generic SU(Nc) path: form M = U*F once and keep it consistent with U by
// applying every accepted SU(2) rotation to both matrices.
Matrix<typename ComplexTypeId<Float>::Type,NCOLORS> M = U * F;
for ( int block = 0; block < NCOLORS * ( NCOLORS - 1) / 2; block++ ) {
int2 id = IndexBlock<NCOLORS>( block );
Matrix<Float,2> r = get_block_su2<Float>(M, id);
Float k = sqrt(r(0,0) * r(0,0) + r(0,1) * r(0,1) + r(1,0) * r(1,0) + r(1,1) * r(1,1));
Float ap = BetaOverNc * k;
k = 1.0 / k;
r *= k;
Matrix<Float,2> a = generate_su2_matrix_milc<Float>(ap, localState);
Matrix<Float,2> rr = mulsu2UVDagger<Float>( a, r);
///////////////////////////////////////
mul_block_sun<Float, NCOLORS>( rr, U, id);
mul_block_sun<Float, NCOLORS>( rr, M, id);
///////////////////////////////////////
}
/* / TESTED IN SU(4) SP THIS IS FASTER
for ( int block = 0; block < NCOLORS * ( NCOLORS - 1) / 2; block++ ) {
int2 id = IndexBlock<NCOLORS>( block );
complex a0 = complex::zero();
complex a1 = complex::zero();
complex a2 = complex::zero();
complex a3 = complex::zero();
for ( int j = 0; j < NCOLORS; j++ ) {
a0 += U(id.x, j) * F.e[j][id.x];
a1 += U(id.x, j) * F.e[j][id.y];
a2 += U(id.y, j) * F.e[j][id.x];
a3 += U(id.y, j) * F.e[j][id.y];
}
Matrix<T,2> r;
r(0,0) = a0.x + a3.x;
r(0,1) = a1.y + a2.y;
r(1,0) = a1.x - a2.x;
r(1,1) = a0.y - a3.y;
T k = sqrt(r(0,0) * r(0,0) + r(0,1) * r(0,1) + r(1,0) * r(1,0) + r(1,1) * r(1,1));
T ap = BetaOverNc * k;
k = (T)1.0 / k;
r *= k;
//Matrix<T,2> a = generate_su2_matrix<T4, T>(ap, localState);
Matrix<T,2> a = generate_su2_matrix_milc<T>(ap, localState);
r = mulsu2UVDagger<T>( a, r);
mul_block_sun<T>( r, U, id); * /
/*
a0 = complex( r(0,0), r(1,1) );
a1 = complex( r(1,0), r(0,1) );
a2 = complex(-r(1,0), r(0,1) );
a3 = complex( r(0,0),-r(1,1) );
complex tmp0;
for ( int j = 0; j < NCOLORS; j++ ) {
tmp0 = a0 * U(id.x, j) + a1 * U(id.y, j);
U(id.y, j) = a2 * U(id.x, j) + a3 * U(id.y, j);
U(id.x, j) = tmp0;
} */
// }
}
//////////////////////////////////////////////////////////////////
}
//////////////////////////////////////////////////////////////////////////
/**
@brief Link update by overrelaxation
@param U link to be updated
@param F staple
*/
template <class Float, int NCOLORS>
__device__ inline void overrelaxationSUN( Matrix<typename ComplexTypeId<Float>::Type,NCOLORS>& U, Matrix<typename ComplexTypeId<Float>::Type,NCOLORS> F ){
typedef typename ComplexTypeId<Float>::Type Cmplx;
if ( NCOLORS == 3 ) {
//////////////////////////////////////////////////////////////////
/*
for( int block = 0; block < 3; block++ ) {
Matrix<typename ComplexTypeId<T>::Type,3> tmp1 = U * F;
Matrix<T,2> r = get_block_su2<T>(tmp1, block);
//normalize and conjugate
Float norm = 1.0 / sqrt(r(0,0)*r(0,0)+r(0,1)*r(0,1)+r(1,0)*r(1,0)+r(1,1)*r(1,1));;
r(0,0) *= norm;
r(0,1) *= -norm;
r(1,0) *= -norm;
r(1,1) *= -norm;
///////////////////////////////////////
complex a00 = complex( r(0,0), r(1,1) );
complex a01 = complex( r(1,0), r(0,1) );
complex a10 = complex(-r(1,0), r(0,1) );
complex a11 = complex( r(0,0),-r(1,1) );
block_su2_to_su3<T>( U, a00, a01, a10, a11, block );
block_su2_to_su3<T>( U, a00, a01, a10, a11, block );
//FLOP = (198 + 17 + 84 * 2) * 3 = 1149
}*/
///////////////////////////////////////////////////////////////////
//This version does not need to multiply all matrix at each block: tmp1 = U * F;
//////////////////////////////////////////////////////////////////
// SU(3) path: for each SU(2) subgroup, build the (p,q) sub-block of U*F,
// normalize+conjugate it, and apply the resulting rotation twice
// (the double application is what makes this an overrelaxation step).
for ( int block = 0; block < 3; block++ ) {
int p,q;
IndexBlock<NCOLORS>(block, p, q);
Cmplx a0 = makeComplex((Float)0., (Float)0.);
Cmplx a1 = a0;
Cmplx a2 = a0;
Cmplx a3 = a0;
for ( int j = 0; j < NCOLORS; j++ ) {
a0 += U(p,j) * F(j,p);
a1 += U(p,j) * F(j,q);
a2 += U(q,j) * F(j,p);
a3 += U(q,j) * F(j,q);
}
// Project the sub-block onto four real SU(2) parameters.
Matrix<Float,2> r;
r(0,0) = a0.x + a3.x;
r(0,1) = a1.y + a2.y;
r(1,0) = a1.x - a2.x;
r(1,1) = a0.y - a3.y;
//normalize and conjugate
//r = r.conj_normalize();
Float norm = 1.0 / sqrt(r(0,0) * r(0,0) + r(0,1) * r(0,1) + r(1,0) * r(1,0) + r(1,1) * r(1,1));;
r(0,0) *= norm;
r(0,1) *= -norm;
r(1,0) *= -norm;
r(1,1) *= -norm;
///////////////////////////////////////
a0 = makeComplex( r(0,0), r(1,1) );
a1 = makeComplex( r(1,0), r(0,1) );
a2 = makeComplex(-r(1,0), r(0,1) );
a3 = makeComplex( r(0,0),-r(1,1) );
Cmplx tmp0, tmp1;
// tmp0/tmp1 hold the once-rotated rows; the assignments below rotate a
// second time, so the rows of U receive the squared SU(2) rotation.
for ( int j = 0; j < NCOLORS; j++ ) {
tmp0 = a0 * U(p,j) + a1 * U(q,j);
tmp1 = a2 * U(p,j) + a3 * U(q,j);
U(p,j) = a0 * tmp0 + a1 * tmp1;
U(q,j) = a2 * tmp0 + a3 * tmp1;
}
//FLOP = (NCOLORS * 88 + 17) * 3
}
///////////////////////////////////////////////////////////////////
}
else if ( NCOLORS > 3 ) {
///////////////////////////////////////////////////////////////////
// Generic SU(Nc) path: cache M = U*F and apply each rotation twice to both
// U and M so that M stays consistent with the updated U.
Matrix<typename ComplexTypeId<Float>::Type,NCOLORS> M = U * F;
for ( int block = 0; block < NCOLORS * ( NCOLORS - 1) / 2; block++ ) {
int2 id = IndexBlock<NCOLORS>( block );
Matrix<Float,2> r = get_block_su2<Float, NCOLORS>(M, id);
//normalize and conjugate
Float norm = 1.0 / sqrt(r(0,0) * r(0,0) + r(0,1) * r(0,1) + r(1,0) * r(1,0) + r(1,1) * r(1,1));;
r(0,0) *= norm;
r(0,1) *= -norm;
r(1,0) *= -norm;
r(1,1) *= -norm;
mul_block_sun<Float, NCOLORS>( r, U, id);
mul_block_sun<Float, NCOLORS>( r, U, id);
mul_block_sun<Float, NCOLORS>( r, M, id);
mul_block_sun<Float, NCOLORS>( r, M, id);
///////////////////////////////////////
}
/* //TESTED IN SU(4) SP THIS IS WORST
for( int block = 0; block < NCOLORS * ( NCOLORS - 1) / 2; block++ ) {
int2 id = IndexBlock<NCOLORS>( block );
complex a0 = complex::zero();
complex a1 = complex::zero();
complex a2 = complex::zero();
complex a3 = complex::zero();
for(int j = 0; j < NCOLORS; j++){
a0 += U(id.x, j) * F.e[j][id.x];
a1 += U(id.x, j) * F.e[j][id.y];
a2 += U(id.y, j) * F.e[j][id.x];
a3 += U(id.y, j) * F.e[j][id.y];
}
Matrix<T,2> r;
r(0,0) = a0.x + a3.x;
r(0,1) = a1.y + a2.y;
r(1,0) = a1.x - a2.x;
r(1,1) = a0.y - a3.y;
//normalize and conjugate
Float norm = 1.0 / sqrt(r(0,0)*r(0,0)+r(0,1)*r(0,1)+r(1,0)*r(1,0)+r(1,1)*r(1,1));;
r(0,0) *= norm;
r(0,1) *= -norm;
r(1,0) *= -norm;
r(1,1) *= -norm;
//mul_block_sun<T>( r, U, id);
//mul_block_sun<T>( r, U, id);
///////////////////////////////////////
a0 = complex( r(0,0), r(1,1) );
a1 = complex( r(1,0), r(0,1) );
a2 = complex(-r(1,0), r(0,1) );
a3 = complex( r(0,0),-r(1,1) );
complex tmp0, tmp1;
for(int j = 0; j < NCOLORS; j++){
tmp0 = a0 * U(id.x, j) + a1 * U(id.y, j);
tmp1 = a2 * U(id.x, j) + a3 * U(id.y, j);
U(id.x, j) = a0 * tmp0 + a1 * tmp1;
U(id.y, j) = a2 * tmp0 + a3 * tmp1;
}
}
*/
}
}
// Kernel argument bundle for the heatbath/overrelaxation kernels.
template <typename Gauge, typename Float, int NCOLORS>
struct MonteArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
#ifdef MULTI_GPU
int border[4]; // halo widths stripped from the padded local lattice
#endif
Gauge dataOr; // accessor used by the kernels to load/store links
cudaGaugeField &data; // the gauge field being updated
Float BetaOverNc; // Beta / Nc, precomputed for the heatbath weight
RNG rngstate; // per-site random number generator states
MonteArg(const Gauge &dataOr, cudaGaugeField & data, Float Beta, RNG &rngstate)
: dataOr(dataOr), data(data), rngstate(rngstate) {
BetaOverNc = Beta / (Float)NCOLORS;
#ifdef MULTI_GPU
// With halos, X[] stores the interior (un-padded) local dimensions.
for ( int dir = 0; dir < 4; ++dir ) {
border[dir] = data.R()[dir];
X[dir] = data.X()[dir] - border[dir] * 2;
}
#else
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
#endif
// One thread per site of a single checkerboard parity: volume / 2.
threads = X[0] * X[1] * X[2] * X[3] >> 1;
}
};
// One thread per site of the given parity: build the staple around the
// mu-link at that site, then update the link either by heatbath
// (HeatbathOrRelax = true, consumes RNG state) or by overrelaxation.
template<typename Float, typename Gauge, int NCOLORS, bool HeatbathOrRelax>
__global__ void compute_heatBath(MonteArg<Gauge, Float, NCOLORS> arg, int mu, int parity){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if ( idx >= arg.threads ) return;
typedef typename ComplexTypeId<Float>::Type Cmplx;
// Keep the un-shifted checkerboard index: it indexes the RNG state array
// even when idx is remapped to the halo-padded lattice below.
int id = idx;
int X[4];
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr];
int x[4];
getCoords(x, idx, X, parity);
#ifdef MULTI_GPU
// Shift coordinates and dimensions into the extended (halo-padded) lattice.
#pragma unroll
for ( int dr = 0; dr < 4; ++dr ) {
x[dr] += arg.border[dr];
X[dr] += 2 * arg.border[dr];
}
idx = linkIndex(x,X);
#endif
Matrix<Cmplx,NCOLORS> staple;
setZero(&staple);
Matrix<Cmplx,NCOLORS> U;
// Accumulate the staple: for every nu != mu add the forward (upper) and
// backward (lower) plaquette contributions around the mu-link.
for ( int nu = 0; nu < 4; nu++ ) if ( mu != nu ) {
int dx[4] = { 0, 0, 0, 0 };
Matrix<Cmplx,NCOLORS> link;
// upper staple: U_nu(x) U_mu(x+nu) U_nu(x+mu)^dagger
arg.dataOr.load((Float*)(link.data), idx, nu, parity);
dx[nu]++;
arg.dataOr.load((Float*)(U.data), linkIndexShift(x,dx,X), mu, 1 - parity);
link *= U;
dx[nu]--;
dx[mu]++;
arg.dataOr.load((Float*)(U.data), linkIndexShift(x,dx,X), nu, 1 - parity);
link *= conj(U);
staple += link;
// lower staple: U_nu(x-nu)^dagger U_mu(x-nu) U_nu(x-nu+mu)
dx[mu]--;
dx[nu]--;
arg.dataOr.load((Float*)(link.data), linkIndexShift(x,dx,X), nu, 1 - parity);
arg.dataOr.load((Float*)(U.data), linkIndexShift(x,dx,X), mu, 1 - parity);
link = conj(link) * U;
dx[mu]++;
arg.dataOr.load((Float*)(U.data), linkIndexShift(x,dx,X), nu, parity);
link *= U;
staple += link;
}
arg.dataOr.load((Float*)(U.data), idx, mu, parity);
if ( HeatbathOrRelax ) {
// Heatbath update: read this site's RNG state, use it, and write it back.
cuRNGState localState = arg.rngstate.State()[ id ];
heatBathSUN<Float, NCOLORS>( U, conj(staple), localState, arg.BetaOverNc );
arg.rngstate.State()[ id ] = localState;
}
else{
overrelaxationSUN<Float, NCOLORS>( U, conj(staple) );
}
arg.dataOr.save((Float*)(U.data), idx, mu, parity);
}
/**
   @brief Tunable launcher for the heatbath (HeatbathOrRelax = true) or
   overrelaxation (HeatbathOrRelax = false) kernel.  SetParam() selects the
   link direction and checkerboard parity updated by the next apply() call.
 */
template<typename Float, typename Gauge, int NCOLORS, int NElems, bool HeatbathOrRelax>
class GaugeHB : Tunable {
MonteArg<Gauge, Float, NCOLORS> arg;
int mu; // direction updated by the next apply()
int parity; // checkerboard parity updated by the next apply()
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam &param) const { // BUG FIX: "¶m" was a mangled "&param"
return 0;
}
//bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeHB(MonteArg<Gauge, Float, NCOLORS> &arg)
: arg(arg), mu(0), parity(0) {
}
~GaugeHB () {
}
// Select which direction/parity the next apply() updates.
void SetParam(int _mu, int _parity){
mu = _mu;
parity = _parity;
}
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
compute_heatBath<Float, Gauge, NCOLORS, HeatbathOrRelax ><< < tp.grid,tp.block, tp.shared_bytes, stream >> > (arg, mu, parity);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
// BUG FIX: sizeof(Float) is a size_t; printing it with %d is undefined
// behavior on LP64/LLP64 platforms, so cast it to int explicitly.
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, (int)sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam &param) const { // BUG FIX: "¶m" was a mangled "&param"
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
// Save field (and RNG state for heatbath) so autotuning trial launches do
// not perturb the simulation.
void preTune() {
arg.data.backup();
if(HeatbathOrRelax) arg.rngstate.backup();
}
void postTune() {
arg.data.restore();
if(HeatbathOrRelax) arg.rngstate.restore();
}
long long flops() const {
//NEED TO CHECK THIS!!!!!!
if ( NCOLORS == 3 ) {
long long flop = 2268LL;
if ( HeatbathOrRelax ) {
flop += 801LL;
}
else{
flop += 843LL;
}
flop *= arg.threads;
return flop;
}
else{
long long flop = NCOLORS * NCOLORS * NCOLORS * 84LL;
if ( HeatbathOrRelax ) {
flop += NCOLORS * NCOLORS * NCOLORS + (NCOLORS * ( NCOLORS - 1) / 2) * (46LL + 48LL + 56LL * NCOLORS);
}
else{
flop += NCOLORS * NCOLORS * NCOLORS + (NCOLORS * ( NCOLORS - 1) / 2) * (17LL + 112LL * NCOLORS);
}
flop *= arg.threads;
return flop;
}
}
long long bytes() const {
//NEED TO CHECK THIS!!!!!!
if ( NCOLORS == 3 ) {
long long byte = 20LL * NElems * sizeof(Float);
if ( HeatbathOrRelax ) byte += 2LL * sizeof(cuRNGState);
byte *= arg.threads;
return byte;
}
else{
long long byte = 20LL * NCOLORS * NCOLORS * 2 * sizeof(Float);
if ( HeatbathOrRelax ) byte += 2LL * sizeof(cuRNGState);
byte *= arg.threads;
return byte;
}
}
};
// Run nhb heatbath sweeps followed by nover overrelaxation sweeps; each
// sweep visits both checkerboard parities in all four directions.
template<typename Float, int NElems, int NCOLORS, typename Gauge>
void Monte( Gauge dataOr, cudaGaugeField& data, RNG &rngstate, Float Beta, unsigned int nhb, unsigned int nover) {
TimeProfile profileHBOVR("HeatBath_OR_Relax", false);
MonteArg<Gauge, Float, NCOLORS> montearg(dataOr, data, Beta, rngstate);
if ( getVerbosity() >= QUDA_SUMMARIZE ) profileHBOVR.TPSTART(QUDA_PROFILE_COMPUTE);
GaugeHB<Float, Gauge, NCOLORS, NElems, true> hb(montearg);
for ( int step = 0; step < nhb; ++step ) {
for ( int parity = 0; parity < 2; ++parity ) {
for ( int mu = 0; mu < 4; ++mu ) {
hb.SetParam(mu, parity);
hb.apply(0);
#ifdef MULTI_GPU
// Refresh halo links after each direction/parity update.
PGaugeExchange( data, mu, parity);
#endif
}
}
}
if ( getVerbosity() >= QUDA_SUMMARIZE ) {
cudaDeviceSynchronize();
profileHBOVR.TPSTOP(QUDA_PROFILE_COMPUTE);
double secs = profileHBOVR.Last(QUDA_PROFILE_COMPUTE);
// Factor 8 = 4 directions x 2 parities launched per sweep.
double gflops = (hb.flops() * 8 * nhb * 1e-9) / (secs);
double gbytes = hb.bytes() * 8 * nhb / (secs * 1e9);
#ifdef MULTI_GPU
printfQuda("HB: Time = %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops * comm_size(), gbytes * comm_size());
#else
printfQuda("HB: Time = %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops, gbytes);
#endif
}
if ( getVerbosity() >= QUDA_SUMMARIZE ) profileHBOVR.TPSTART(QUDA_PROFILE_COMPUTE);
// Overrelaxation sweeps use the same traversal order as the heatbath.
GaugeHB<Float, Gauge, NCOLORS, NElems, false> relax(montearg);
for ( int step = 0; step < nover; ++step ) {
for ( int parity = 0; parity < 2; ++parity ) {
for ( int mu = 0; mu < 4; ++mu ) {
relax.SetParam(mu, parity);
relax.apply(0);
#ifdef MULTI_GPU
PGaugeExchange( data, mu, parity);
#endif
}
}
}
if ( getVerbosity() >= QUDA_SUMMARIZE ) {
cudaDeviceSynchronize();
profileHBOVR.TPSTOP(QUDA_PROFILE_COMPUTE);
double secs = profileHBOVR.Last(QUDA_PROFILE_COMPUTE);
double gflops = (relax.flops() * 8 * nover * 1e-9) / (secs);
double gbytes = relax.bytes() * 8 * nover / (secs * 1e9);
#ifdef MULTI_GPU
printfQuda("OVR: Time = %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops * comm_size(), gbytes * comm_size());
#else
printfQuda("OVR: Time = %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops, gbytes);
#endif
}
}
// Dispatch on the field's reconstruction type, instantiating the matching
// gauge accessor; only native field orders are supported.
template<typename Float>
void Monte( cudaGaugeField& data, RNG &rngstate, Float Beta, unsigned int nhb, unsigned int nover) {
  if ( data.isNative() ) {
    switch ( data.Reconstruct() ) {
    case QUDA_RECONSTRUCT_NO: {
      typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge;
      Monte<Float, 18, 3>(Gauge(data), data, rngstate, Beta, nhb, nover);
    } break;
    case QUDA_RECONSTRUCT_12: {
      typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge;
      Monte<Float, 12, 3>(Gauge(data), data, rngstate, Beta, nhb, nover);
    } break;
    case QUDA_RECONSTRUCT_8: {
      typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge;
      Monte<Float, 8, 3>(Gauge(data), data, rngstate, Beta, nhb, nover);
    } break;
    default:
      errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct());
    }
  } else {
    errorQuda("Invalid Gauge Order\n");
  }
}
#endif // GPU_GAUGE_ALG
/** @brief Perform heatbath and overrelaxation. Performs nhb heatbath steps followed by nover overrelaxation steps.
*
* @param[in,out] data Gauge field
* @param[in,out] rngstate state of the CURAND random number generator
* @param[in] Beta inverse of the gauge coupling, beta = 2 Nc / g_0^2
* @param[in] nhb number of heatbath steps
* @param[in] nover number of overrelaxation steps
*/
void Monte( cudaGaugeField& data, RNG &rngstate, double Beta, unsigned int nhb, unsigned int nover) {
#ifdef GPU_GAUGE_ALG
  // Instantiate the precision-specific implementation.
  switch ( data.Precision() ) {
  case QUDA_SINGLE_PRECISION:
    Monte<float> (data, rngstate, (float)Beta, nhb, nover);
    break;
  case QUDA_DOUBLE_PRECISION:
    Monte<double>(data, rngstate, Beta, nhb, nover);
    break;
  default:
    errorQuda("Precision %d not supported", data.Precision());
  }
#else
  errorQuda("Pure gauge code has not been built");
#endif // GPU_GAUGE_ALG
}
}
|
92830467cffe47717ba82a89a1e1ad779143455e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// In-place normalization: x[ind] <- (x[ind] - mean[j]) / sqrt(variance[j] + eps)
// where j is the channel index.  One thread per element of the b*c*wxh array.
__global__ void _norm_forward_kernel(float *x, float *mean, float *variance, int b, int c, int wxh) {
// Flat element index from a (possibly 2D) grid of 1D blocks.
int ind = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
// (ind / wxh) % c selects the channel, i.e. data layout [b][c][wxh].
int j = (ind / wxh) % c;
if (ind >= b * c * wxh) return;
// Small epsilon guards against division by zero for zero-variance channels.
x[ind] = (x[ind] - mean[j]) / (sqrt(variance[j] + 0.000001f));
} | 92830467cffe47717ba82a89a1e1ad779143455e.cu | #include "includes.h"
// In-place normalization: x[ind] <- (x[ind] - mean[j]) / sqrt(variance[j] + eps)
// where j is the channel index.  One thread per element of the b*c*wxh array.
__global__ void _norm_forward_kernel(float *x, float *mean, float *variance, int b, int c, int wxh) {
// Flat element index from a (possibly 2D) grid of 1D blocks.
int ind = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
// (ind / wxh) % c selects the channel, i.e. data layout [b][c][wxh].
int j = (ind / wxh) % c;
if (ind >= b * c * wxh) return;
// Small epsilon guards against division by zero for zero-variance channels.
x[ind] = (x[ind] - mean[j]) / (sqrt(variance[j] + 0.000001f));
} |
d5a76cd337ca8a7450fd1a2f6a52282d88ada97c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// CUDA kernel -- Generate dot product
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include "error_util.h"
using namespace std;
const int THREADS_NUM = 1024;
// Block-level dot product: c[0] = sum_i a[i] * b[i].
// Launch contract (unchanged): a single block, blockDim.x == array_size, and
// array_size * sizeof(float) bytes of dynamic shared memory.
__global__ static void dot_product(float * a, float * b, float * c, int array_size)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    extern __shared__ float c_tmp[];
    // Stage the per-thread products in shared memory.
    if (tid < array_size) c_tmp[tid] = a[tid] * b[tid];
    // BUG FIX: the barriers were previously inside "if (tid < array_size)",
    // which is undefined behavior whenever blockDim.x > array_size; all
    // threads now reach every __syncthreads().
    __syncthreads();
    // Ceil-halving tree reduction; handles array_size that is not a power
    // of two.  Within one iteration writers [0, length-offset) and readers
    // [offset, length) are disjoint, so only the end-of-iteration barrier
    // is required.
    size_t length = array_size;
    size_t offset = (length - 1) / 2 + 1;
    while (length >= 2)
    {
        if ((size_t)tid + offset < length)
        {
            c_tmp[tid] = c_tmp[tid] + c_tmp[tid + offset];
        }
        length = (length - 1) / 2 + 1;
        offset = (offset - 1) / 2 + 1;
        __syncthreads();
    }
    // BUG FIX: previously every thread stored c[0]; a single writer makes
    // the final store race-free.
    if (tid == 0) c[0] = c_tmp[0];
}
// Host driver: builds two constant vectors, computes their dot product on
// the GPU, prints the sum and the elapsed time measured with HIP events.
int main()
{
    int length = THREADS_NUM;
    float * a = new float [length];
    float * b = new float [length];
    float c = 0;
    // Constant operands: expected result is 1.5f * 2.5f * length.
    for (size_t i = 0; i < (size_t)length; i++)
    {
        a[i] = 1.5;
        b[i] = 2.5;
    }
    // Left unchecked as in the original; on a single-GPU box this fails and
    // the work silently runs on device 0.
    hipSetDevice(1);
    // Create CUDA event
    hipEvent_t start, stop;
    checkCudaErrors(hipEventCreate(&start));
    checkCudaErrors(hipEventCreate(&stop));
    checkCudaErrors(hipEventRecord(start, 0));
    // prepare data on GPU
    float * gpu_a;
    float * gpu_b;
    float * gpu_c;
    checkCudaErrors(hipMalloc((void **)&gpu_a, sizeof(float) * length));
    checkCudaErrors(hipMalloc((void **)&gpu_b, sizeof(float) * length));
    // Only a single float is ever written to gpu_c (it previously
    // over-allocated length floats).
    checkCudaErrors(hipMalloc((void **)&gpu_c, sizeof(float)));
    checkCudaErrors(hipMemcpy(gpu_a, a, sizeof(float) * length, hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(gpu_b, b, sizeof(float) * length, hipMemcpyHostToDevice));
    // kernel function: one block, one thread per element, dynamic shared
    // memory sized to hold all per-thread products.
    size_t blockNum = 1;
    size_t threadsNum = THREADS_NUM;
    hipLaunchKernelGGL(( dot_product), dim3(blockNum), dim3(threadsNum), sizeof(float) * length, 0, gpu_a, gpu_b, gpu_c, length);
    // BUG FIX: launch errors were previously never checked.
    checkCudaErrors(hipGetLastError());
    // Copy data back (blocking copy also surfaces any kernel execution error).
    checkCudaErrors(hipMemcpy(&c, gpu_c, sizeof(float), hipMemcpyDeviceToHost));
    // output
    cout<<"Sum = "<<c<<endl;
    checkCudaErrors(hipFree(gpu_a));
    checkCudaErrors(hipFree(gpu_b));
    checkCudaErrors(hipFree(gpu_c));
    delete [] a;
    delete [] b;
    checkCudaErrors(hipEventRecord(stop, 0));
    checkCudaErrors(hipEventSynchronize( stop ));
    float elapsedTime;
    checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
    printf( "Time to generate dot product: %.3f ms\n", elapsedTime );
    // BUG FIX: events were previously leaked.
    checkCudaErrors(hipEventDestroy(start));
    checkCudaErrors(hipEventDestroy(stop));
    return 0;
}
| d5a76cd337ca8a7450fd1a2f6a52282d88ada97c.cu | // CUDA kernel -- Generate dot product
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include "error_util.h"
using namespace std;
const int THREADS_NUM = 1024;
// Block-level dot product: c[0] = sum_i a[i] * b[i].
// Launch contract (unchanged): a single block, blockDim.x == array_size, and
// array_size * sizeof(float) bytes of dynamic shared memory.
__global__ static void dot_product(float * a, float * b, float * c, int array_size)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    extern __shared__ float c_tmp[];
    // Stage the per-thread products in shared memory.
    if (tid < array_size) c_tmp[tid] = a[tid] * b[tid];
    // BUG FIX: the barriers were previously inside "if (tid < array_size)",
    // which is undefined behavior whenever blockDim.x > array_size; all
    // threads now reach every __syncthreads().
    __syncthreads();
    // Ceil-halving tree reduction; handles array_size that is not a power
    // of two.  Within one iteration writers [0, length-offset) and readers
    // [offset, length) are disjoint, so only the end-of-iteration barrier
    // is required.
    size_t length = array_size;
    size_t offset = (length - 1) / 2 + 1;
    while (length >= 2)
    {
        if ((size_t)tid + offset < length)
        {
            c_tmp[tid] = c_tmp[tid] + c_tmp[tid + offset];
        }
        length = (length - 1) / 2 + 1;
        offset = (offset - 1) / 2 + 1;
        __syncthreads();
    }
    // BUG FIX: previously every thread stored c[0]; a single writer makes
    // the final store race-free.
    if (tid == 0) c[0] = c_tmp[0];
}
// Host driver: builds two constant vectors, computes their dot product on
// the GPU, prints the sum and the elapsed time measured with CUDA events.
int main()
{
    int length = THREADS_NUM;
    float * a = new float [length];
    float * b = new float [length];
    float c = 0;
    // Constant operands: expected result is 1.5f * 2.5f * length.
    for (size_t i = 0; i < (size_t)length; i++)
    {
        a[i] = 1.5;
        b[i] = 2.5;
    }
    // Left unchecked as in the original; on a single-GPU box this fails and
    // the work silently runs on device 0.
    cudaSetDevice(1);
    // Create CUDA event
    cudaEvent_t start, stop;
    checkCudaErrors(cudaEventCreate(&start));
    checkCudaErrors(cudaEventCreate(&stop));
    checkCudaErrors(cudaEventRecord(start, 0));
    // prepare data on GPU
    float * gpu_a;
    float * gpu_b;
    float * gpu_c;
    checkCudaErrors(cudaMalloc((void **)&gpu_a, sizeof(float) * length));
    checkCudaErrors(cudaMalloc((void **)&gpu_b, sizeof(float) * length));
    // Only a single float is ever written to gpu_c (it previously
    // over-allocated length floats).
    checkCudaErrors(cudaMalloc((void **)&gpu_c, sizeof(float)));
    checkCudaErrors(cudaMemcpy(gpu_a, a, sizeof(float) * length, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(gpu_b, b, sizeof(float) * length, cudaMemcpyHostToDevice));
    // kernel function: one block, one thread per element, dynamic shared
    // memory sized to hold all per-thread products.
    size_t blockNum = 1;
    size_t threadsNum = THREADS_NUM;
    dot_product<<<blockNum, threadsNum, sizeof(float) * length>>>(gpu_a, gpu_b, gpu_c, length);
    // BUG FIX: launch errors were previously never checked.
    checkCudaErrors(cudaGetLastError());
    // Copy data back (blocking copy also surfaces any kernel execution error).
    checkCudaErrors(cudaMemcpy(&c, gpu_c, sizeof(float), cudaMemcpyDeviceToHost));
    // output
    cout<<"Sum = "<<c<<endl;
    checkCudaErrors(cudaFree(gpu_a));
    checkCudaErrors(cudaFree(gpu_b));
    checkCudaErrors(cudaFree(gpu_c));
    delete [] a;
    delete [] b;
    checkCudaErrors(cudaEventRecord(stop, 0));
    checkCudaErrors(cudaEventSynchronize( stop ));
    float elapsedTime;
    checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
    printf( "Time to generate dot product: %.3f ms\n", elapsedTime );
    // BUG FIX: events were previously leaked.
    checkCudaErrors(cudaEventDestroy(start));
    checkCudaErrors(cudaEventDestroy(stop));
    return 0;
}
|
7d3ed16a0609cedf902f0ae8b7e1106f95de7bc6.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>
namespace quda {
namespace twistedclover {
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
#if (__COMPUTE_CAPABILITY__ >= 200) && defined(GPU_TWISTED_CLOVER_DIRAC)
#include <tmc_dslash_def.h> // Twisted Clover kernels
#endif
#ifndef DSLASH_SHARED_FLOATS_PER_THREAD
#define DSLASH_SHARED_FLOATS_PER_THREAD 0
#endif
#include <dslash_quda.cuh>
} // end namespace twisted_clover
// declare the dslash events
#include <dslash_events.cuh>
using namespace twistedclover;
#if (__COMPUTE_CAPABILITY__ >= 200) && defined(GPU_TWISTED_CLOVER_DIRAC)
// Tunable launcher for the twisted-clover dslash kernels.  sFloat/gFloat/
// cFloat are the spinor/gauge/clover storage vector types.
template <typename sFloat, typename gFloat, typename cFloat>
class TwistedCloverDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1; // even/odd halves of the gauge field
const QudaTwistCloverDslashType dslashType;
double a, b, c, d; // kappa, mu, epsilon, k (set in the constructor)
const cFloat *clover; // clover term and its norm array
const float *cNorm;
const cFloat *cloverInv; // inverse clover term and its norm array
const float *cNrm2;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200)
// Fermi and newer: only the interior kernel uses shared memory.
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
#else
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
#endif
}
public:
TwistedCloverDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cFloat *clover, const float *cNorm,
const cFloat *cloverInv, const float *cNrm2, int cl_stride, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const QudaTwistCloverDslashType dslashType, const double kappa,
const double mu, const double epsilon, const double k, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct,dagger),gauge0(gauge0), gauge1(gauge1), clover(clover),
cNorm(cNorm), cloverInv(cloverInv), cNrm2(cNrm2), dslashType(dslashType)
{
bindSpinorTex<sFloat>(in, out, x);
dslashParam.cl_stride = cl_stride;
dslashParam.fl_stride = in->VolumeCB();
a = kappa;
b = mu;
c = epsilon;
d = k;
}
virtual ~TwistedCloverDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
// Distinguish the three kernel variants in the autotuner cache key.
TuneKey key = DslashCuda::tuneKey();
switch(dslashType){
case QUDA_DEG_CLOVER_TWIST_INV_DSLASH:
strcat(key.aux,",CloverTwistInvDslash");
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_INV:
strcat(key.aux,",Dslash");
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_XPAY:
strcat(key.aux,",DslashCloverTwist");
break;
}
return key;
}
void apply(const hipStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
// Select and launch the kernel variant via the DSLASH dispatch macro.
switch(dslashType){
case QUDA_DEG_CLOVER_TWIST_INV_DSLASH:
DSLASH(twistedCloverInvDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cNorm, cloverInv, cNrm2,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_INV:
DSLASH(twistedCloverDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cNorm, cloverInv, cNrm2,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_XPAY:
DSLASH(twistedCloverDslashTwist, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cNorm, cloverInv, cNrm2,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)x->V(), (float*)x->Norm());
break;
default: errorQuda("Invalid twisted clover dslash type");
}
}
long long flops() const {
int clover_flops = 504 + 48;
long long flops = DslashCuda::flops();
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL:
break;
case INTERIOR_KERNEL:
// clover flops are done in the interior kernel
flops += clover_flops * in->VolumeCB();
break;
}
return flops;
}
long long bytes() const {
// NOTE(review): this compares the precision enum against sizeof(short)
// (= 2), i.e. it relies on QUDA_HALF_PRECISION having the value 2 --
// confirm against the enum definition.
bool isHalf = in->Precision() == sizeof(short) ? true : false;
int clover_bytes = 72 * in->Precision() + (isHalf ? 2*sizeof(float) : 0);
long long bytes = DslashCuda::bytes();
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL:
break;
case INTERIOR_KERNEL:
// clover term is only applied by the interior kernel
bytes += clover_bytes*in->VolumeCB();
break;
}
return bytes;
}
};
#endif // GPU_TWISTED_CLOVER_DIRAC
#include <dslash_policy.cuh>
// Host entry point for the (degenerate) twisted-clover dslash.
// Sets up ghost-zone/communication parameters, binds the gauge and
// twisted-clover textures, instantiates the TwistedCloverDslashCuda functor
// matching the spinor precision, and executes it through the selected
// dslash communication policy.  The bind/launch/unbind order below is
// significant and must be preserved.
void twistedCloverDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover *clover, const FullClover *cloverInv,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const QudaTwistCloverDslashType type, const double &kappa, const double &mu,
const double &epsilon, const double &k, const int *commOverride,
TimeProfile &profile, const QudaDslashPolicy &dslashPolicy)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#if (__COMPUTE_CAPABILITY__ >= 200) && defined(GPU_TWISTED_CLOVER_DIRAC)
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
int ghost_threads[4] = {0};
// Single-flavor (TWIST_PLUS/MINUS) fields process the full volume; other
// flavor layouts pack a doublet, so only half the sites are handled here.
int bulk_threads = ((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) ? in->Volume() : in->Volume() / 2;
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
ghost_threads[i] = ((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) ? in->GhostFace()[i] : in->GhostFace()[i] / 2;
}
#ifdef MULTI_GPU
twist_a = 0.;
twist_b = 0.;
#endif
// Bind gauge and twisted-clover fields to textures; unbound again below.
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
void *cloverP, *cloverNormP, *cloverInvP, *cloverInvNormP;
QudaPrecision clover_prec = bindTwistedCloverTex(*clover, *cloverInv, parity, &cloverP, &cloverNormP, &cloverInvP, &cloverInvNormP);
// Mixed-precision combinations are not supported by these kernels.
if (in->Precision() != clover_prec)
errorQuda("Mixing clover and spinor precision not supported");
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
if (clover->stride != cloverInv->stride)
errorQuda("clover and cloverInv must have matching strides (%d != %d)", clover->stride, cloverInv->stride);
// Instantiate the functor for the spinor precision in use.
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new TwistedCloverDslashCuda<double2,double2,double2>(out, (double2*)gauge0,(double2*)gauge1, gauge.Reconstruct(), (double2*)cloverP, (float*)cloverNormP,
(double2*)cloverInvP, (float*)cloverInvNormP, clover->stride, in, x, type, kappa, mu, epsilon, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new TwistedCloverDslashCuda<float4,float4,float4>(out, (float4*)gauge0,(float4*)gauge1, gauge.Reconstruct(), (float4*)cloverP, (float*)cloverNormP,
(float4*)cloverInvP, (float*)cloverInvNormP, clover->stride, in, x, type, kappa, mu, epsilon, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new TwistedCloverDslashCuda<short4,short4,short4>(out, (short4*)gauge0,(short4*)gauge1, gauge.Reconstruct(), (short4*)cloverP, (float*)cloverNormP,
(short4*)cloverInvP, (float*)cloverInvNormP, clover->stride, in, x, type, kappa, mu, epsilon, k, dagger);
}
// Run the dslash through the chosen communication/overlap policy.
#ifndef GPU_COMMS
DslashPolicyImp* dslashImp = DslashFactory::create(dslashPolicy);
#else
DslashPolicyImp* dslashImp = DslashFactory::create(QUDA_GPU_COMMS_DSLASH);
#endif
(*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, bulk_threads, ghost_threads, profile);
delete dslashImp;
delete dslash;
// Release texture bindings acquired above.
unbindGaugeTex(gauge);
unbindTwistedCloverTex(*clover);
checkCudaError();
#else
#if (__COMPUTE_CAPABILITY__ < 200)
errorQuda("Twisted-clover fermions not supported on pre-Fermi architecture");
#else
errorQuda("Twisted clover dslash has not been built");
#endif
#endif
}
}
| 7d3ed16a0609cedf902f0ae8b7e1106f95de7bc6.cu | #include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>
namespace quda {
namespace twistedclover {
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
#if (__COMPUTE_CAPABILITY__ >= 200) && defined(GPU_TWISTED_CLOVER_DIRAC)
#include <tmc_dslash_def.h> // Twisted Clover kernels
#endif
#ifndef DSLASH_SHARED_FLOATS_PER_THREAD
#define DSLASH_SHARED_FLOATS_PER_THREAD 0
#endif
#include <dslash_quda.cuh>
} // end namespace twisted_clover
// declare the dslash events
#include <dslash_events.cuh>
using namespace twistedclover;
#if (__COMPUTE_CAPABILITY__ >= 200) && defined(GPU_TWISTED_CLOVER_DIRAC)
template <typename sFloat, typename gFloat, typename cFloat>
class TwistedCloverDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaTwistCloverDslashType dslashType;
double a, b, c, d;
const cFloat *clover;
const float *cNorm;
const cFloat *cloverInv;
const float *cNrm2;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200)
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
#else
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
#endif
}
public:
TwistedCloverDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cFloat *clover, const float *cNorm,
const cFloat *cloverInv, const float *cNrm2, int cl_stride, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const QudaTwistCloverDslashType dslashType, const double kappa,
const double mu, const double epsilon, const double k, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct,dagger),gauge0(gauge0), gauge1(gauge1), clover(clover),
cNorm(cNorm), cloverInv(cloverInv), cNrm2(cNrm2), dslashType(dslashType)
{
bindSpinorTex<sFloat>(in, out, x);
dslashParam.cl_stride = cl_stride;
dslashParam.fl_stride = in->VolumeCB();
a = kappa;
b = mu;
c = epsilon;
d = k;
}
virtual ~TwistedCloverDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
switch(dslashType){
case QUDA_DEG_CLOVER_TWIST_INV_DSLASH:
strcat(key.aux,",CloverTwistInvDslash");
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_INV:
strcat(key.aux,",Dslash");
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_XPAY:
strcat(key.aux,",DslashCloverTwist");
break;
}
return key;
}
void apply(const cudaStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
switch(dslashType){
case QUDA_DEG_CLOVER_TWIST_INV_DSLASH:
DSLASH(twistedCloverInvDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cNorm, cloverInv, cNrm2,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_INV:
DSLASH(twistedCloverDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cNorm, cloverInv, cNrm2,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
case QUDA_DEG_DSLASH_CLOVER_TWIST_XPAY:
DSLASH(twistedCloverDslashTwist, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cNorm, cloverInv, cNrm2,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)x->V(), (float*)x->Norm());
break;
default: errorQuda("Invalid twisted clover dslash type");
}
}
long long flops() const {
int clover_flops = 504 + 48;
long long flops = DslashCuda::flops();
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL:
break;
case INTERIOR_KERNEL:
// clover flops are done in the interior kernel
flops += clover_flops * in->VolumeCB();
break;
}
return flops;
}
long long bytes() const {
bool isHalf = in->Precision() == sizeof(short) ? true : false;
int clover_bytes = 72 * in->Precision() + (isHalf ? 2*sizeof(float) : 0);
long long bytes = DslashCuda::bytes();
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL:
break;
case INTERIOR_KERNEL:
bytes += clover_bytes*in->VolumeCB();
break;
}
return bytes;
}
};
#endif // GPU_TWISTED_CLOVER_DIRAC
#include <dslash_policy.cuh>
void twistedCloverDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover *clover, const FullClover *cloverInv,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const QudaTwistCloverDslashType type, const double &kappa, const double &mu,
const double &epsilon, const double &k, const int *commOverride,
TimeProfile &profile, const QudaDslashPolicy &dslashPolicy)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#if (__COMPUTE_CAPABILITY__ >= 200) && defined(GPU_TWISTED_CLOVER_DIRAC)
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
int ghost_threads[4] = {0};
int bulk_threads = ((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) ? in->Volume() : in->Volume() / 2;
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
ghost_threads[i] = ((in->TwistFlavor() == QUDA_TWIST_PLUS) || (in->TwistFlavor() == QUDA_TWIST_MINUS)) ? in->GhostFace()[i] : in->GhostFace()[i] / 2;
}
#ifdef MULTI_GPU
twist_a = 0.;
twist_b = 0.;
#endif
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
void *cloverP, *cloverNormP, *cloverInvP, *cloverInvNormP;
QudaPrecision clover_prec = bindTwistedCloverTex(*clover, *cloverInv, parity, &cloverP, &cloverNormP, &cloverInvP, &cloverInvNormP);
if (in->Precision() != clover_prec)
errorQuda("Mixing clover and spinor precision not supported");
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
if (clover->stride != cloverInv->stride)
errorQuda("clover and cloverInv must have matching strides (%d != %d)", clover->stride, cloverInv->stride);
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new TwistedCloverDslashCuda<double2,double2,double2>(out, (double2*)gauge0,(double2*)gauge1, gauge.Reconstruct(), (double2*)cloverP, (float*)cloverNormP,
(double2*)cloverInvP, (float*)cloverInvNormP, clover->stride, in, x, type, kappa, mu, epsilon, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new TwistedCloverDslashCuda<float4,float4,float4>(out, (float4*)gauge0,(float4*)gauge1, gauge.Reconstruct(), (float4*)cloverP, (float*)cloverNormP,
(float4*)cloverInvP, (float*)cloverInvNormP, clover->stride, in, x, type, kappa, mu, epsilon, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new TwistedCloverDslashCuda<short4,short4,short4>(out, (short4*)gauge0,(short4*)gauge1, gauge.Reconstruct(), (short4*)cloverP, (float*)cloverNormP,
(short4*)cloverInvP, (float*)cloverInvNormP, clover->stride, in, x, type, kappa, mu, epsilon, k, dagger);
}
#ifndef GPU_COMMS
DslashPolicyImp* dslashImp = DslashFactory::create(dslashPolicy);
#else
DslashPolicyImp* dslashImp = DslashFactory::create(QUDA_GPU_COMMS_DSLASH);
#endif
(*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, bulk_threads, ghost_threads, profile);
delete dslashImp;
delete dslash;
unbindGaugeTex(gauge);
unbindTwistedCloverTex(*clover);
checkCudaError();
#else
#if (__COMPUTE_CAPABILITY__ < 200)
errorQuda("Twisted-clover fermions not supported on pre-Fermi architecture");
#else
errorQuda("Twisted clover dslash has not been built");
#endif
#endif
}
}
|
fb13b63ec89cf64d16a50522521701a4837ae971.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// cudafeature/feature-spectral-cuda.cu
//
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
// Justin Luitjens
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#if HAVE_CUDA == 1
#include <roctracer/roctx.h>
#include <hipcub/hipcub.hpp>
#endif
#include "cudafeat/feature-spectral-cuda.h"
#include "cudamatrix/cu-rand.h"
// Each thread block processes a unique frame
// threads in the same threadblock collaborate to
// compute the frame together.
// Post-DCT cleanup: scales each cepstral coefficient by the lifter
// coefficients and, when use_energy is set, overwrites column 0 with the
// (optionally floored) log energy for the frame.
// Launch: one thread block per frame (blockIdx.x); CU1DBLOCK threads stride
// over the num_cols feature columns.  'ldf' is the row stride of 'features'.
__global__ void apply_lifter_and_floor_energy(
int num_frames, int num_cols, float cepstral_lifter, bool use_energy,
float energy_floor, float *log_energy, float *lifter_coeffs,
float *features, int32_t ldf) {
int thread_id = threadIdx.x;
int frame = blockIdx.x;
float *feats = features + frame * ldf;
// apply lifter coefficients
if (cepstral_lifter != 0.0f) {
for (int c = thread_id; c < num_cols; c += CU1DBLOCK) {
float lift = lifter_coeffs[c];
float f = feats[c];
feats[c] = f * lift;
}
}
// Thread 0 for each frame will apply energy
if (use_energy && thread_id == 0) {
float energy = log_energy[frame];
// NOTE(review): log(0) yields -inf when energy_floor == 0, but the guard
// below never uses the value in that case.
float log_energy_floor = log(energy_floor);
if (energy_floor > 0.0f && energy < log_energy_floor) {
energy = log_energy_floor;
}
feats[0] = energy;
}
}
// Each threadblock computes a different row of the matrix.
// Threads in the same block compute the row collaboratively.
// This kernel must be called out of place (A_in!=A_out).
// Converts a batch of R2C FFT outputs into power (or magnitude) spectra.
// Each thread block handles one row (frame); A_in holds row_length/2 + 1
// complex bins as interleaved floats, A_out receives row_length/2 + 1 reals.
// Must be called out of place (A_in != A_out).
__global__ void power_spectrum_kernel(int row_length, float *A_in, int32_t ldi,
float *A_out, int32_t ldo,
bool use_power) {
int thread_id = threadIdx.x;
int block_id = blockIdx.x;
float *Ar = A_in + block_id * ldi;
float *Aw = A_out + block_id * ldo;
int half_length = row_length / 2;
// Bins 1 .. half_length-1: ordinary complex magnitude.
for (int idx = thread_id; idx < half_length; idx += CU1DBLOCK) {
// ignore special case
if (idx == 0) continue;
float2 val = reinterpret_cast<float2 *>(Ar)[idx];
float ret = val.x * val.x + val.y * val.y;
if (use_power) {
Aw[idx] = ret;
} else {
Aw[idx] = sqrtf(ret);
}
}
// handle special case: DC (bin 0) and Nyquist (bin half_length) are purely
// real; cuFFT stores the Nyquist real part at float index row_length.
if (threadIdx.x == 0) {
float real = Ar[0];
// cufft puts this at the end, this is different than kaldi does with its
// own
// internal implementation
float im = Ar[row_length];
if (use_power) {
Aw[0] = real * real;
Aw[half_length] = im * im;
} else {
Aw[0] = fabs(real);
Aw[half_length] = fabs(im);
}
}
}
// Expects to be called with 32x8 sized thread block.
// LDB: Adding use_log flag
// Computes mel filter-bank energies: for each (frame, bin) pair, the dot
// product of the bin's weight vector with the corresponding slice of the
// power spectrum, optionally floored and log-compressed.
// Expects to be called with 32x8 sized thread blocks: each warp (32 lanes in
// x) computes one dot product cooperatively; blockIdx.x selects the bin and
// blockIdx.y * blockDim.y + threadIdx.y selects the frame.
// LDB: Adding use_log flag
__global__ void mel_banks_compute_kernel(int32_t num_frames, float energy_floor,
int32 *offsets, int32 *sizes,
float **vecs, const float *feats,
int32_t ldf, float *mels, int32_t ldm,
bool use_log) {
// Specialize WarpReduce for type float
typedef hipcub::WarpReduce<float> WarpReduce;
// Allocate WarpReduce shared memory for 8 warps
__shared__ typename WarpReduce::TempStorage temp_storage[8];
// warp will work together to compute sum
int tid = threadIdx.x;
int wid = threadIdx.y;
// blocks in the x dimension take different bins
int bin = blockIdx.x;
// frame is a combination of blocks in the y dimension and threads in the y
// dimension
int frame = blockIdx.y * blockDim.y + threadIdx.y;
if (frame >= num_frames) return;
// Each bin is a sparse weight vector: 'offset' is its first spectrum index,
// 'size' its length, 'v' the weights.
int offset = offsets[bin];
int size = sizes[bin];
const float *v = vecs[bin];
const float *w = feats + frame * ldf + offset;
// perfom local sum
float sum = 0;
for (int idx = tid; idx < size; idx += 32) {
sum += v[idx] * w[idx];
}
// Sum in cub
sum = WarpReduce(temp_storage[wid]).Sum(sum);
// Lane 0 of the warp holds the reduced value and writes the output.
if (tid == 0) {
if (use_log) {
// avoid log of zero
if (sum < energy_floor) sum = energy_floor;
float val = logf(sum);
mels[frame * ldm + bin] = val;
} else {
mels[frame * ldm + bin] = sum;
}
}
}
// Per-frame window processing, one thread block (CU1DBLOCK threads) per row:
//   1) add pre-generated dither noise (tmp_windows, scaled by 'dither'),
//   2) optionally remove the DC offset (block-wide mean via BlockReduce),
//   3) optionally record the raw log energy before windowing,
//   4) optionally apply pre-emphasis (via tmp_window as scratch),
//   5) multiply by the windowing function on the final write.
// 'ldt'/'ldw' are the row strides of tmp_windows/windows.
__global__ void process_window_kernel(
int frame_length, float dither, float energy_floor, bool remove_dc_offset,
float preemph_coeff, bool need_raw_log_energy, float *log_energy_pre_window,
const float *windowing, float *tmp_windows, int32_t ldt, float *windows,
int32_t ldw) {
// Specialize BlockReduce for type float
typedef hipcub::BlockReduce<float, CU1DBLOCK> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
int thread_id = threadIdx.x;
int row = blockIdx.x;
float *tmp_window = tmp_windows + row * ldt;
float *window = windows + row * ldw;
// Shared slot used to broadcast the block-wide sum to all threads.
__shared__ float ssum;
float sum = 0;
float wdot = 0;
for (int idx = thread_id; idx < frame_length; idx += CU1DBLOCK) {
// tmp_window contains optional dither. Apply that on read.
float wval = window[idx];
if (dither != 0.0f) {
wval += tmp_window[idx] * dither;
}
// compute local sum for removing dc offset
sum += wval;
// compute dot product for log energy
wdot += wval * wval;
float windowing_mul = 1;
if (remove_dc_offset == false && preemph_coeff == 0.0f) {
// we are done here so set windowing multiplication on write.
windowing_mul = windowing[idx];
}
// write dithered output
window[idx] = wval * windowing_mul;
}
__syncthreads();
if (remove_dc_offset) {
// we will recompute this below
wdot = 0.0f;
// use cub to reduce
sum = BlockReduce(temp_storage).Sum(sum);
// broadcast sum to entire block
if (thread_id == 0) ssum = sum;
__syncthreads();
// Negated mean: adding it below subtracts the DC offset.
sum = -ssum / frame_length;
for (int idx = thread_id; idx < frame_length; idx += CU1DBLOCK) {
float windowing_mul = 1;
float *out = window;
if (preemph_coeff == 0.0f) {
// we are done here so apply windowing
windowing_mul = windowing[idx];
} else {
// write to temp window as we will copy back into window
// when doing pre-emphasis
out = tmp_window;
}
// updated window value
float wval = window[idx] + sum;
// compute new dot product with dc offset removed
wdot += wval * wval;
// write output
out[idx] = wval * windowing_mul;
}
}
__syncthreads();
// if pointer is not NULL we will set energy to either
// the computed energy or 0 depending on need_raw_log_energy
// NOTE(review): when need_raw_log_energy is false this writes log(0.0f)
// (-inf) into log_energy_pre_window[row] -- confirm downstream consumers
// never read it in that configuration.
if (log_energy_pre_window != NULL) {
float energy = 0.0f;
if (need_raw_log_energy) {
// must sync to use retemp_storage
if (remove_dc_offset) __syncthreads();
// use cub to reduce
wdot = BlockReduce(temp_storage).Sum(wdot);
energy = max(wdot, energy_floor);
}
if (thread_id == 0) {
log_energy_pre_window[row] = log(energy);
}
}
// TODO this could be more efficient using shared memory instead of
// tmp_window.
if (preemph_coeff != 0.0f) {
// wait for tmp_window to be computed
__threadfence();
__syncthreads();
// starting thread idx at 0 to keep writes aligned.
// unaligned reads are less painful then unaligned writes
for (int idx = thread_id; idx < frame_length; idx += CU1DBLOCK) {
float wval = tmp_window[idx];
float prev_window = wval;
if (idx > 0) {
prev_window = tmp_window[idx - 1];
}
// use __fmul_rn to match CPU
// window[idx] = (wval - preemph_coeff*prev_window) * windowing[idx];
window[idx] =
(wval - __fmul_rn(preemph_coeff, prev_window)) * windowing[idx];
}
}
}
// Index of the first waveform sample belonging to 'frame'.  With snip_edges
// frames tile the signal from sample 0; otherwise each frame is centered on
// the midpoint of its shift interval (and may start before sample 0).
__device__ inline int32 FirstSampleOfFrame(int32 frame, int32 frame_shift,
                                           int32 window_size, bool snip_edges) {
  if (snip_edges) return frame * frame_shift;
  int32 midpoint_of_frame = frame_shift * frame + frame_shift / 2;
  return midpoint_of_frame - window_size / 2;
}
// Copies one windowed frame of waveform per thread block into 'windows'
// (row stride 'wlda').  Samples that fall outside [0, wave_dim) are filled
// by reflecting around the signal boundaries; the tail of each row is
// zero-padded up to frame_length_padded.
__global__ void extract_window_kernel(
int32 frame_shift, int32 frame_length, int32 frame_length_padded,
int32 window_size, bool snip_edges, int32_t sample_offset,
const BaseFloat __restrict__ *wave, int32 wave_dim,
BaseFloat *__restrict__ windows, int32_t wlda) {
int frame = blockIdx.x;
int tidx = threadIdx.x;
int32 start_sample =
FirstSampleOfFrame(frame, frame_shift, window_size, snip_edges);
// wave_start and wave_end are start and end indexes into 'wave', for the
// piece of wave that we're trying to extract.
int32 wave_start = int32(start_sample - sample_offset),
wave_end = wave_start + frame_length;
BaseFloat *window = windows + frame * wlda;
if (wave_start >= 0 && wave_end <= wave_dim) {
// the normal case-- no edge effects to consider.
for (int i = tidx; i < frame_length; i += blockDim.x) {
window[i] = wave[wave_start + i];
}
} else {
// Deal with any end effects by reflection, if needed. This code will only
// be reached for about two frames per utterance, so we don't concern
// ourselves excessively with efficiency.
for (int s = tidx; s < frame_length; s += blockDim.x) {
int32 s_in_wave = s + wave_start;
while (s_in_wave < 0 || s_in_wave >= wave_dim) {
// reflect around the beginning or end of the wave.
// e.g. -1 -> 0, -2 -> 1.
// dim -> dim - 1, dim + 1 -> dim - 2.
// the code supports repeated reflections, although this
// would only be needed in pathological cases.
if (s_in_wave < 0)
s_in_wave = -s_in_wave - 1;
else
s_in_wave = 2 * wave_dim - 1 - s_in_wave;
}
window[s] = wave[s_in_wave];
}
}
// Zero the padding region beyond the real frame samples.
if (frame_length_padded > frame_length) {
for (int i = frame_length + tidx; i < frame_length_padded;
i += blockDim.x) {
window[i] = 0.0f;
}
}
}
// For each frame
// compute logf(dot(signal_frame, signal_frame))
// For each frame
// compute logf(dot(signal_frame, signal_frame))
// One thread block per frame; the partial sums of squares are combined with
// a block-wide reduction and thread 0 writes the log energy.
__global__ void dot_log_kernel(int32_t num_frames, int32_t frame_length,
float *signal_frame, int32_t lds,
float *signal_log_energy) {
// Specialize BlockReduce for type float
typedef hipcub::BlockReduce<float, CU1DBLOCK> BlockReduce;
// Shared scratch for the block-wide reduction
__shared__ typename BlockReduce::TempStorage temp_storage;
int32_t frame = blockIdx.x;
int32_t tid = threadIdx.x;
float *in = signal_frame + frame * lds;
float sum = 0;
// preform local dot product
for (int32_t i = tid; i < frame_length; i += blockDim.x) {
float val = in[i];
sum += val * val;
}
// reduce using cub
sum = BlockReduce(temp_storage).Sum(sum);
// NOTE(review): an all-zero frame gives logf(0) == -inf; no floor is
// applied here -- confirm callers tolerate that.
if (threadIdx.x == 0) {
signal_log_energy[frame] = logf(sum);
}
}
namespace kaldi {
// Builds the GPU feature extractor: uploads the mel filter-bank weight
// vectors (plus their offsets/sizes and a device array of pointers to them),
// records the frame geometry, and creates the batched real-to-complex FFT
// plan used by ComputeFinalFeatures.
CudaSpectralFeatures::CudaSpectralFeatures(const CudaSpectralFeatureOptions &opts)
: MfccComputer(opts.mfcc_opts),
cu_lifter_coeffs_(lifter_coeffs_),
cu_dct_matrix_(dct_matrix_),
window_function_(opts.mfcc_opts.frame_opts) {
const MelBanks *mel_banks = GetMelBanks(1.0);
const std::vector<std::pair<int32, Vector<BaseFloat>>> &bins =
mel_banks->GetBins();
int size = bins.size();
bin_size_ = size;
std::vector<int32> offsets(size), sizes(size);
std::vector<float *> vecs(size);
// One device vector per mel bin; freed in the destructor.
cu_vecs_ = new CuVector<float>[size];
for (int i = 0; i < bins.size(); i++) {
cu_vecs_[i].Resize(bins[i].second.Dim(), kUndefined);
cu_vecs_[i].CopyFromVec(bins[i].second);
vecs[i] = cu_vecs_[i].Data();
sizes[i] = cu_vecs_[i].Dim();
offsets[i] = bins[i].first;
}
// Device-side metadata arrays consumed by mel_banks_compute_kernel.
offsets_ = static_cast<int32 *>(
CuDevice::Instantiate().Malloc(size * sizeof(int32)));
sizes_ = static_cast<int32 *>(
CuDevice::Instantiate().Malloc(size * sizeof(int32)));
vecs_ = static_cast<float **>(
CuDevice::Instantiate().Malloc(size * sizeof(float *)));
CU_SAFE_CALL(hipMemcpyAsync(vecs_, &vecs[0], size * sizeof(float *),
hipMemcpyHostToDevice, cudaStreamPerThread));
CU_SAFE_CALL(hipMemcpyAsync(offsets_, &offsets[0], size * sizeof(int32),
hipMemcpyHostToDevice, cudaStreamPerThread));
CU_SAFE_CALL(hipMemcpyAsync(sizes_, &sizes[0], size * sizeof(int32),
hipMemcpyHostToDevice, cudaStreamPerThread));
// The host-side staging vectors go out of scope after this sync.
CU_SAFE_CALL(hipStreamSynchronize(cudaStreamPerThread));
frame_length_ = opts.mfcc_opts.frame_opts.WindowSize();
padded_length_ = opts.mfcc_opts.frame_opts.PaddedWindowSize();
fft_length_ = padded_length_ / 2; // + 1;
// FFT batch size: frames are transformed in chunks of this many rows.
fft_size_ = 800;
// place holders to get strides for cufft. these will be resized correctly
// later. The +2 for cufft/fftw requirements of an extra element at the end.
// turning off stride because cufft seems buggy with a stride
cu_windows_.Resize(fft_size_, padded_length_, kUndefined,
kStrideEqualNumCols);
tmp_window_.Resize(fft_size_, padded_length_ + 2, kUndefined,
kStrideEqualNumCols);
stride_ = cu_windows_.Stride();
tmp_stride_ = tmp_window_.Stride();
// NOTE(review): the return codes of the two hipfft calls below are not
// checked; a failed plan would only surface later in hipfftExecR2C.
hipfftPlanMany(&plan_, 1, &padded_length_, NULL, 1, stride_, NULL, 1,
tmp_stride_ / 2, HIPFFT_R2C, fft_size_);
hipfftSetStream(plan_, cudaStreamPerThread);
cumfcc_opts_ = opts;
}
// ExtractWindow extracts a windowed frame of waveform with a power-of-two,
// padded size. It does mean subtraction, pre-emphasis and dithering as
// requested.
void CudaSpectralFeatures::ExtractWindows(int32_t num_frames, int64 sample_offset,
                                          const CuVectorBase<BaseFloat> &wave,
                                          const FrameExtractionOptions &opts) {
  // Copies 'num_frames' windowed frames out of 'wave' into cu_windows_,
  // reflecting at signal boundaries and zero-padding to the padded size.
  // 'sample_offset' is the absolute index of wave[0] in the full utterance.
  KALDI_ASSERT(sample_offset >= 0 && wave.Dim() != 0);
  int32 frame_length = opts.WindowSize(),
        frame_length_padded = opts.PaddedWindowSize();
  // (removed: an unused 'num_samples' local previously computed here)
  // One thread block per frame; threads cooperate to copy/pad one window.
  hipLaunchKernelGGL(( extract_window_kernel), dim3(num_frames), dim3(CU1DBLOCK), 0, 0,
      opts.WindowShift(), frame_length, frame_length_padded, opts.WindowSize(),
      opts.snip_edges, sample_offset, wave.Data(), wave.Dim(),
      cu_windows_.Data(), cu_windows_.Stride());
  CU_SAFE_CALL(hipGetLastError());
}
// Applies dither, DC-offset removal, pre-emphasis and the window function to
// the frames already staged in cu_windows_, optionally recording the raw
// (pre-window) log energy per frame.  One thread block per frame.
void CudaSpectralFeatures::ProcessWindows(int num_frames,
const FrameExtractionOptions &opts,
CuVectorBase<BaseFloat> *log_energy_pre_window) {
if (num_frames == 0) return;
// cu_windows_ is padded to a whole number of FFT batches; sanity-check.
int fft_num_frames = cu_windows_.NumRows();
KALDI_ASSERT(fft_num_frames % fft_size_ == 0);
// epsilon serves as the energy floor; tmp_window_ holds the pre-generated
// dither noise (see ComputeFeatures) and doubles as pre-emphasis scratch.
hipLaunchKernelGGL(( process_window_kernel), dim3(num_frames), dim3(CU1DBLOCK), 0, 0,
frame_length_, opts.dither, std::numeric_limits<float>::epsilon(),
opts.remove_dc_offset, opts.preemph_coeff, NeedRawLogEnergy(),
log_energy_pre_window->Data(), window_function_.cu_window.Data(),
tmp_window_.Data(), tmp_window_.Stride(), cu_windows_.Data(),
cu_windows_.Stride());
CU_SAFE_CALL(hipGetLastError());
}
// Turns the processed windows into final features: optional in-window log
// energy, batched R2C FFT, power/magnitude spectrum, mel filter banks and
// (optionally) DCT + liftering/energy replacement.  Writes 'num_frames'
// rows of *cu_features.
void CudaSpectralFeatures::ComputeFinalFeatures(int num_frames, BaseFloat vtln_wrap,
CuVector<BaseFloat> *cu_signal_log_energy,
CuMatrix<BaseFloat> *cu_features) {
MfccOptions mfcc_opts = cumfcc_opts_.mfcc_opts;
// HTK-compatible output ordering is not implemented on the GPU path.
assert(mfcc_opts.htk_compat == false);
if (num_frames == 0) return;
// Energy measured after windowing (raw_energy == false): recompute it here
// from the processed windows.
if (mfcc_opts.use_energy && !mfcc_opts.raw_energy) {
hipLaunchKernelGGL(( dot_log_kernel), dim3(num_frames), dim3(CU1DBLOCK), 0, 0,
num_frames, cu_windows_.NumCols(), cu_windows_.Data(),
cu_windows_.Stride(), cu_signal_log_energy->Data());
CU_SAFE_CALL(hipGetLastError());
}
// make sure a reallocation hasn't changed these
KALDI_ASSERT(cu_windows_.Stride() == stride_);
KALDI_ASSERT(tmp_window_.Stride() == tmp_stride_);
// Perform FFTs in batches of fft_size. This reduces memory requirements
for (int idx = 0; idx < num_frames; idx += fft_size_) {
CUFFT_SAFE_CALL(hipfftExecR2C(
plan_, cu_windows_.Data() + cu_windows_.Stride() * idx,
(hipfftComplex *)(tmp_window_.Data() + tmp_window_.Stride() * idx)));
}
// Compute Power spectrum
CuMatrix<BaseFloat> power_spectrum(tmp_window_.NumRows(),
padded_length_ / 2 + 1, kUndefined);
hipLaunchKernelGGL(( power_spectrum_kernel), dim3(num_frames), dim3(CU1DBLOCK), 0, 0,
padded_length_, tmp_window_.Data(), tmp_window_.Stride(),
power_spectrum.Data(), power_spectrum.Stride(), cumfcc_opts_.use_power);
CU_SAFE_CALL(hipGetLastError());
// mel banks: one warp per (bin, frame) dot product (see kernel docs)
int num_bins = bin_size_;
cu_mel_energies_.Resize(num_frames, num_bins, kUndefined);
dim3 mel_threads(32, 8);
dim3 mel_blocks(num_bins, (num_frames + mel_threads.y - 1) / mel_threads.y);
hipLaunchKernelGGL(( mel_banks_compute_kernel), dim3(mel_blocks), dim3(mel_threads), 0, 0,
num_frames, std::numeric_limits<float>::epsilon(), offsets_, sizes_,
vecs_, power_spectrum.Data(), power_spectrum.Stride(),
cu_mel_energies_.Data(), cu_mel_energies_.Stride(),
cumfcc_opts_.use_log_fbank);
CU_SAFE_CALL(hipGetLastError());
// dct transform (MFCC path); otherwise output the fbank energies directly
if (cumfcc_opts_.use_dct) {
cu_features->AddMatMat(1.0, cu_mel_energies_, kNoTrans, cu_dct_matrix_,
kTrans, 0.0);
hipLaunchKernelGGL(( apply_lifter_and_floor_energy), dim3(num_frames), dim3(CU1DBLOCK), 0, 0,
cu_features->NumRows(), cu_features->NumCols(),
mfcc_opts.cepstral_lifter, mfcc_opts.use_energy,
mfcc_opts.energy_floor, cu_signal_log_energy->Data(),
cu_lifter_coeffs_.Data(), cu_features->Data(), cu_features->Stride());
} else {
// NOTE(review): this flat copy assumes cu_mel_energies_ and *cu_features
// share the same stride -- confirm both are allocated with matching
// geometry on this path.
hipMemcpyAsync(cu_features->Data(), cu_mel_energies_.Data(),
sizeof(BaseFloat) * num_frames * cu_features->Stride(),
hipMemcpyDeviceToDevice, cudaStreamPerThread);
}
CU_SAFE_CALL(hipGetLastError());
}
// Top-level pipeline: resizes the working buffers, pre-generates dither
// noise, then runs ExtractWindows -> ProcessWindows -> ComputeFinalFeatures
// to fill *cu_features from the device-resident waveform.
void CudaSpectralFeatures::ComputeFeatures(const CuVectorBase<BaseFloat> &cu_wave,
BaseFloat sample_freq, BaseFloat vtln_warp,
CuMatrix<BaseFloat> *cu_features) {
roctxRangePushA("CudaSpectralFeatures::ComputeFeatures");
const FrameExtractionOptions &frame_opts = GetFrameOptions();
int num_frames = NumFrames(cu_wave.Dim(), frame_opts, true);
// compute fft frames by rounding up to a multiple of fft_size_
// NOTE(review): when num_frames is already an exact multiple of fft_size_
// this adds one extra (wasted) batch of rows -- harmless but worth fixing.
int fft_num_frames = num_frames + (fft_size_ - num_frames % fft_size_);
int feature_dim = Dim();
bool use_raw_log_energy = NeedRawLogEnergy();
CuVector<BaseFloat> raw_log_energies;
raw_log_energies.Resize(num_frames, kUndefined);
cu_windows_.Resize(fft_num_frames, padded_length_, kUndefined,
kStrideEqualNumCols);
cu_features->Resize(num_frames, feature_dim, kUndefined);
//+1 matches cufft/fftw requirements
tmp_window_.Resize(fft_num_frames, padded_length_ + 2, kUndefined,
kStrideEqualNumCols);
if (frame_opts.dither != 0.0f) {
// Calling cu-rand directly
// CuRand class works on CuMatrixBase which must
// assume that the matrix is part of a larger matrix
// Doing this directly avoids unecessary memory copies
CURAND_SAFE_CALL(
hiprandGenerateNormal(GetCurandHandle(), tmp_window_.Data(),
tmp_window_.NumRows() * tmp_window_.Stride(),
0.0 /*mean*/, 1.0 /*stddev*/));
}
// Extract Windows
ExtractWindows(num_frames, 0, cu_wave, frame_opts);
// Process Windows
ProcessWindows(num_frames, frame_opts, &raw_log_energies);
// Compute Features
ComputeFinalFeatures(num_frames, 1.0, &raw_log_energies, cu_features);
roctxRangePop();
}
// Releases the per-bin device vectors, the device metadata arrays allocated
// in the constructor, and the batched FFT plan.
CudaSpectralFeatures::~CudaSpectralFeatures() {
delete[] cu_vecs_;
CuDevice::Instantiate().Free(vecs_);
CuDevice::Instantiate().Free(offsets_);
CuDevice::Instantiate().Free(sizes_);
hipfftDestroy(plan_);
}
} // namespace kaldi
| fb13b63ec89cf64d16a50522521701a4837ae971.cu | // cudafeature/feature-spectral-cuda.cu
//
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
// Justin Luitjens
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#if HAVE_CUDA == 1
#include <nvToolsExt.h>
#include <cub/cub.cuh>
#endif
#include "cudafeat/feature-spectral-cuda.h"
#include "cudamatrix/cu-rand.h"
// Each thread block processes a unique frame
// threads in the same threadblock collaborate to
// compute the frame together.
// Post-DCT cleanup: scales each cepstral coefficient by the lifter
// coefficients and, when use_energy is set, overwrites column 0 with the
// (optionally floored) log energy for the frame.
// Launch: one thread block per frame (blockIdx.x); CU1DBLOCK threads stride
// over the num_cols feature columns.  'ldf' is the row stride of 'features'.
__global__ void apply_lifter_and_floor_energy(
    int num_frames, int num_cols, float cepstral_lifter, bool use_energy,
    float energy_floor, float *log_energy, float *lifter_coeffs,
    float *features, int32_t ldf) {
  int thread_id = threadIdx.x;
  int frame = blockIdx.x;
  float *feats = features + frame * ldf;
  // apply lifter coefficients
  if (cepstral_lifter != 0.0f) {
    for (int c = thread_id; c < num_cols; c += CU1DBLOCK) {
      float lift = lifter_coeffs[c];
      float f = feats[c];
      feats[c] = f * lift;
    }
  }
  // Thread 0 for each frame will apply energy
  if (use_energy && thread_id == 0) {
    float energy = log_energy[frame];
    // NOTE(review): log(0) yields -inf when energy_floor == 0, but the
    // guard below never uses the value in that case.
    float log_energy_floor = log(energy_floor);
    if (energy_floor > 0.0f && energy < log_energy_floor) {
      energy = log_energy_floor;
    }
    feats[0] = energy;
  }
}
// Each threadblock computes a different row of the matrix.
// Threads in the same block compute the row collaboratively.
// This kernel must be called out of place (A_in!=A_out).
// Converts one cufft R2C output row (interleaved complex floats) into a row
// of half_length+1 real power (or magnitude) values.
__global__ void power_spectrum_kernel(int row_length, float *A_in, int32_t ldi,
                                      float *A_out, int32_t ldo,
                                      bool use_power) {
  int thread_id = threadIdx.x;
  int block_id = blockIdx.x;
  float *Ar = A_in + block_id * ldi;   // complex input row (float2 pairs)
  float *Aw = A_out + block_id * ldo;  // real output row

  int half_length = row_length / 2;
  for (int idx = thread_id; idx < half_length; idx += CU1DBLOCK) {
    // ignore special case (bin 0 is handled below by thread 0)
    if (idx == 0) continue;
    // |re|^2 + |im|^2, or its square root when magnitude was requested.
    float2 val = reinterpret_cast<float2 *>(Ar)[idx];
    float ret = val.x * val.x + val.y * val.y;
    if (use_power) {
      Aw[idx] = ret;
    } else {
      Aw[idx] = sqrtf(ret);
    }
  }

  // handle special case: DC (bin 0) and Nyquist (bin half_length) are
  // purely real.
  if (threadIdx.x == 0) {
    float real = Ar[0];
    // cufft puts this at the end, this is different than kaldi does with its
    // own
    // internal implementation
    float im = Ar[row_length];
    if (use_power) {
      Aw[0] = real * real;
      Aw[half_length] = im * im;
    } else {
      Aw[0] = fabs(real);
      Aw[half_length] = fabs(im);
    }
  }
}
// Expects to be called with 32x8 sized thread block.
// LDB: Adding use_log flag
// One warp (the 32 x-threads) computes the dot product of one mel filter
// with one frame's power spectrum. blockIdx.x selects the mel bin;
// blockIdx.y * blockDim.y + threadIdx.y selects the frame.
__global__ void mel_banks_compute_kernel(int32_t num_frames, float energy_floor,
                                         int32 *offsets, int32 *sizes,
                                         float **vecs, const float *feats,
                                         int32_t ldf, float *mels, int32_t ldm,
                                         bool use_log) {
  // Specialize WarpReduce for type float
  typedef cub::WarpReduce<float> WarpReduce;
  // Allocate WarpReduce shared memory for 8 warps (one per y-thread row)
  __shared__ typename WarpReduce::TempStorage temp_storage[8];

  // warp will work together to compute sum
  int tid = threadIdx.x;
  int wid = threadIdx.y;
  // blocks in the x dimension take different bins
  int bin = blockIdx.x;
  // frame is a combination of blocks in the y dimension and threads in the y
  // dimension
  int frame = blockIdx.y * blockDim.y + threadIdx.y;

  if (frame >= num_frames) return;

  // Each mel filter covers only the contiguous spectrum slice
  // [offset, offset + size); vecs[bin] holds its weights.
  int offset = offsets[bin];
  int size = sizes[bin];
  const float *v = vecs[bin];
  const float *w = feats + frame * ldf + offset;

  // perform local (per-lane) partial dot product
  float sum = 0;
  for (int idx = tid; idx < size; idx += 32) {
    sum += v[idx] * w[idx];
  }

  // Sum across the warp in cub; the result is valid in lane 0 only.
  sum = WarpReduce(temp_storage[wid]).Sum(sum);
  if (tid == 0) {
    if (use_log) {
      // avoid log of zero by flooring at energy_floor
      if (sum < energy_floor) sum = energy_floor;
      float val = logf(sum);
      mels[frame * ldm + bin] = val;
    } else {
      mels[frame * ldm + bin] = sum;
    }
  }
}
// One block per frame. In order: optionally adds dither (tmp_windows holds
// pre-generated unit-normal noise), removes the DC offset, computes the raw
// log energy, then applies pre-emphasis and the window function — all in
// place on 'windows'. tmp_windows doubles as scratch for the pre-emphasis
// pass, which needs the un-multiplied neighbor sample.
__global__ void process_window_kernel(
    int frame_length, float dither, float energy_floor, bool remove_dc_offset,
    float preemph_coeff, bool need_raw_log_energy, float *log_energy_pre_window,
    const float *windowing, float *tmp_windows, int32_t ldt, float *windows,
    int32_t ldw) {
  // Specialize BlockReduce for type float
  typedef cub::BlockReduce<float, CU1DBLOCK> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;

  int thread_id = threadIdx.x;
  int row = blockIdx.x;

  float *tmp_window = tmp_windows + row * ldt;
  float *window = windows + row * ldw;

  // broadcast slot for the block-wide sum computed below
  __shared__ float ssum;

  float sum = 0;
  float wdot = 0;

  for (int idx = thread_id; idx < frame_length; idx += CU1DBLOCK) {
    // tmp_window contains optional dither. Apply that on read.
    float wval = window[idx];
    if (dither != 0.0f) {
      wval += tmp_window[idx] * dither;
    }
    // compute local sum for removing dc offset
    sum += wval;
    // compute dot product for log energy
    wdot += wval * wval;

    float windowing_mul = 1;
    if (remove_dc_offset == false && preemph_coeff == 0.0f) {
      // we are done here so set windowing multiplication on write.
      windowing_mul = windowing[idx];
    }

    // write dithered output
    window[idx] = wval * windowing_mul;
  }
  __syncthreads();
  if (remove_dc_offset) {
    // we will recompute this below with the offset removed
    wdot = 0.0f;
    // use cub to reduce the per-thread partial sums
    sum = BlockReduce(temp_storage).Sum(sum);

    // broadcast sum to entire block
    if (thread_id == 0) ssum = sum;
    __syncthreads();

    // negated mean: adding it subtracts the DC offset
    sum = -ssum / frame_length;

    for (int idx = thread_id; idx < frame_length; idx += CU1DBLOCK) {
      float windowing_mul = 1;
      float *out = window;
      if (preemph_coeff == 0.0f) {
        // we are done here so apply windowing
        windowing_mul = windowing[idx];
      } else {
        // write to temp window as we will copy back into window
        // when doing pre-emphasis
        out = tmp_window;
      }
      // updated window value
      float wval = window[idx] + sum;

      // compute new dot product with dc offset removed
      wdot += wval * wval;

      // write output
      out[idx] = wval * windowing_mul;
    }
  }
  __syncthreads();

  // if pointer is not NULL we will set energy to either
  // the computed energy or 0 depending on need_raw_log_energy
  if (log_energy_pre_window != NULL) {
    float energy = 0.0f;

    if (need_raw_log_energy) {
      // must sync before reusing temp_storage
      if (remove_dc_offset) __syncthreads();
      // use cub to reduce
      wdot = BlockReduce(temp_storage).Sum(wdot);

      energy = max(wdot, energy_floor);
    }

    if (thread_id == 0) {
      // NOTE(review): when need_raw_log_energy is false this writes
      // log(0.0f) == -inf — confirm callers never read the value in that
      // case.
      log_energy_pre_window[row] = log(energy);
    }
  }

  // TODO this could be more efficient using shared memory instead of
  // tmp_window.
  if (preemph_coeff != 0.0f) {
    // wait for tmp_window to be computed
    __threadfence();
    __syncthreads();

    // starting thread idx at 0 to keep writes aligned.
    // unaligned reads are less painful than unaligned writes
    for (int idx = thread_id; idx < frame_length; idx += CU1DBLOCK) {
      float wval = tmp_window[idx];
      float prev_window = wval;
      if (idx > 0) {
        prev_window = tmp_window[idx - 1];
      }
      // use __fmul_rn to match CPU
      // window[idx] = (wval - preemph_coeff*prev_window) * windowing[idx];
      window[idx] =
          (wval - __fmul_rn(preemph_coeff, prev_window)) * windowing[idx];
    }
  }
}
// Returns the index (into the full waveform) of the first sample of 'frame'.
// With snip_edges a frame simply starts at frame * frame_shift; otherwise
// frames are centered, so the start is the frame midpoint minus half a
// window.
__device__ inline int32 FirstSampleOfFrame(int32 frame, int32 frame_shift,
                                           int32 window_size, bool snip_edges) {
  if (snip_edges) return frame * frame_shift;
  int32 midpoint = frame * frame_shift + frame_shift / 2;
  return midpoint - window_size / 2;
}
// One block per frame: copies frame_length samples of 'wave' into row
// 'frame' of 'windows', reflecting at the waveform edges where the frame
// extends past the data, and zero-pads the row up to frame_length_padded.
__global__ void extract_window_kernel(
    int32 frame_shift, int32 frame_length, int32 frame_length_padded,
    int32 window_size, bool snip_edges, int32_t sample_offset,
    const BaseFloat __restrict__ *wave, int32 wave_dim,
    BaseFloat *__restrict__ windows, int32_t wlda) {
  int frame = blockIdx.x;
  int tidx = threadIdx.x;

  int32 start_sample =
      FirstSampleOfFrame(frame, frame_shift, window_size, snip_edges);

  // wave_start and wave_end are start and end indexes into 'wave', for the
  // piece of wave that we're trying to extract.
  int32 wave_start = int32(start_sample - sample_offset),
        wave_end = wave_start + frame_length;

  BaseFloat *window = windows + frame * wlda;

  if (wave_start >= 0 && wave_end <= wave_dim) {
    // the normal case-- no edge effects to consider.
    for (int i = tidx; i < frame_length; i += blockDim.x) {
      window[i] = wave[wave_start + i];
    }
  } else {
    // Deal with any end effects by reflection, if needed. This code will only
    // be reached for about two frames per utterance, so we don't concern
    // ourselves excessively with efficiency.
    for (int s = tidx; s < frame_length; s += blockDim.x) {
      int32 s_in_wave = s + wave_start;
      while (s_in_wave < 0 || s_in_wave >= wave_dim) {
        // reflect around the beginning or end of the wave.
        // e.g. -1 -> 0, -2 -> 1.
        // dim -> dim - 1, dim + 1 -> dim - 2.
        // the code supports repeated reflections, although this
        // would only be needed in pathological cases.
        if (s_in_wave < 0)
          s_in_wave = -s_in_wave - 1;
        else
          s_in_wave = 2 * wave_dim - 1 - s_in_wave;
      }
      window[s] = wave[s_in_wave];
    }
  }

  // zero-fill the FFT padding region at the end of the row
  if (frame_length_padded > frame_length) {
    for (int i = frame_length + tidx; i < frame_length_padded;
         i += blockDim.x) {
      window[i] = 0.0f;
    }
  }
}
// For each frame
// compute logf(dot(signal_frame, signal_frame))
// One block per frame; a cub block reduction combines the per-thread
// partial dot products.
__global__ void dot_log_kernel(int32_t num_frames, int32_t frame_length,
                               float *signal_frame, int32_t lds,
                               float *signal_log_energy) {
  // Specialize BlockReduce for type float
  typedef cub::BlockReduce<float, CU1DBLOCK> BlockReduce;
  // Shared scratch required by cub's block reduction
  __shared__ typename BlockReduce::TempStorage temp_storage;

  int32_t frame = blockIdx.x;
  int32_t tid = threadIdx.x;

  float *in = signal_frame + frame * lds;
  float sum = 0;

  // perform local dot product
  for (int32_t i = tid; i < frame_length; i += blockDim.x) {
    float val = in[i];
    sum += val * val;
  }

  // reduce using cub; result valid in thread 0 only
  sum = BlockReduce(temp_storage).Sum(sum);

  if (threadIdx.x == 0) {
    // NOTE(review): no floor is applied, so an all-zero frame yields
    // logf(0) == -inf — confirm this matches the CPU path.
    signal_log_energy[frame] = logf(sum);
  }
}
namespace kaldi {
// Constructor: copies the mel filter banks, lifter coefficients and DCT
// matrix to the device, builds flat device arrays (offsets_/sizes_/vecs_)
// describing each mel filter for the compute kernel, and creates a batched
// cufft R2C plan of fft_size_ frames.
CudaSpectralFeatures::CudaSpectralFeatures(const CudaSpectralFeatureOptions &opts)
    : MfccComputer(opts.mfcc_opts),
      cu_lifter_coeffs_(lifter_coeffs_),
      cu_dct_matrix_(dct_matrix_),
      window_function_(opts.mfcc_opts.frame_opts) {
  const MelBanks *mel_banks = GetMelBanks(1.0);
  const std::vector<std::pair<int32, Vector<BaseFloat>>> &bins =
      mel_banks->GetBins();
  int size = bins.size();
  bin_size_ = size;
  std::vector<int32> offsets(size), sizes(size);
  std::vector<float *> vecs(size);
  // one device vector of filter weights per mel bin
  cu_vecs_ = new CuVector<float>[size];
  for (int i = 0; i < bins.size(); i++) {
    cu_vecs_[i].Resize(bins[i].second.Dim(), kUndefined);
    cu_vecs_[i].CopyFromVec(bins[i].second);
    vecs[i] = cu_vecs_[i].Data();
    sizes[i] = cu_vecs_[i].Dim();
    offsets[i] = bins[i].first;
  }
  offsets_ = static_cast<int32 *>(
      CuDevice::Instantiate().Malloc(size * sizeof(int32)));
  sizes_ = static_cast<int32 *>(
      CuDevice::Instantiate().Malloc(size * sizeof(int32)));
  vecs_ = static_cast<float **>(
      CuDevice::Instantiate().Malloc(size * sizeof(float *)));
  // async copies on the per-thread stream, then one sync before the host
  // vectors go out of scope
  CU_SAFE_CALL(cudaMemcpyAsync(vecs_, &vecs[0], size * sizeof(float *),
                               cudaMemcpyHostToDevice, cudaStreamPerThread));
  CU_SAFE_CALL(cudaMemcpyAsync(offsets_, &offsets[0], size * sizeof(int32),
                               cudaMemcpyHostToDevice, cudaStreamPerThread));
  CU_SAFE_CALL(cudaMemcpyAsync(sizes_, &sizes[0], size * sizeof(int32),
                               cudaMemcpyHostToDevice, cudaStreamPerThread));
  CU_SAFE_CALL(cudaStreamSynchronize(cudaStreamPerThread));
  frame_length_ = opts.mfcc_opts.frame_opts.WindowSize();
  padded_length_ = opts.mfcc_opts.frame_opts.PaddedWindowSize();
  fft_length_ = padded_length_ / 2;  // + 1;
  // FFT batch size; ComputeFinalFeatures processes frames in chunks of this
  fft_size_ = 800;

  // place holders to get strides for cufft. these will be resized correctly
  // later. The +2 for cufft/fftw requirements of an extra element at the end.
  // turning off stride because cufft seems buggy with a stride
  cu_windows_.Resize(fft_size_, padded_length_, kUndefined,
                     kStrideEqualNumCols);
  tmp_window_.Resize(fft_size_, padded_length_ + 2, kUndefined,
                     kStrideEqualNumCols);
  stride_ = cu_windows_.Stride();
  tmp_stride_ = tmp_window_.Stride();
  // NOTE(review): cufftPlanMany's return status is not checked here.
  cufftPlanMany(&plan_, 1, &padded_length_, NULL, 1, stride_, NULL, 1,
                tmp_stride_ / 2, CUFFT_R2C, fft_size_);
  cufftSetStream(plan_, cudaStreamPerThread);
  cumfcc_opts_ = opts;
}
// ExtractWindow extracts a windowed frame of waveform with a power-of-two,
// padded size. It does mean subtraction, pre-emphasis and dithering as
// requested.
// Launches one block per frame; each block copies its frame (with edge
// reflection) into cu_windows_ and zero-pads up to the padded length.
// (Removed an unused local 'num_samples' that was computed but never read.)
void CudaSpectralFeatures::ExtractWindows(int32_t num_frames, int64 sample_offset,
                                      const CuVectorBase<BaseFloat> &wave,
                                      const FrameExtractionOptions &opts) {
  KALDI_ASSERT(sample_offset >= 0 && wave.Dim() != 0);
  int32 frame_length = opts.WindowSize(),
        frame_length_padded = opts.PaddedWindowSize();
  extract_window_kernel<<<num_frames, CU1DBLOCK>>>(
      opts.WindowShift(), frame_length, frame_length_padded, opts.WindowSize(),
      opts.snip_edges, sample_offset, wave.Data(), wave.Dim(),
      cu_windows_.Data(), cu_windows_.Stride());
  CU_SAFE_CALL(cudaGetLastError());
}
// Applies dither, DC-offset removal, pre-emphasis and the window function to
// every extracted frame in place, and (when requested) writes each frame's
// raw log energy into *log_energy_pre_window. Uses the machine epsilon as
// the energy floor passed to the kernel.
void CudaSpectralFeatures::ProcessWindows(int num_frames,
                                      const FrameExtractionOptions &opts,
                                      CuVectorBase<BaseFloat> *log_energy_pre_window) {
  if (num_frames == 0) return;
  // cu_windows_ was sized up to a multiple of the FFT batch size
  int fft_num_frames = cu_windows_.NumRows();
  KALDI_ASSERT(fft_num_frames % fft_size_ == 0);
  process_window_kernel<<<num_frames, CU1DBLOCK>>>(
      frame_length_, opts.dither, std::numeric_limits<float>::epsilon(),
      opts.remove_dc_offset, opts.preemph_coeff, NeedRawLogEnergy(),
      log_energy_pre_window->Data(), window_function_.cu_window.Data(),
      tmp_window_.Data(), tmp_window_.Stride(), cu_windows_.Data(),
      cu_windows_.Stride());
  CU_SAFE_CALL(cudaGetLastError());
}
// Turns the processed windows into final features: optional post-window
// energy, batched R2C FFTs, power spectrum, mel filter banks, and (when
// enabled) the DCT with liftering and energy substitution. Without the DCT
// the mel energies are copied to *cu_features directly.
// (Removed an unused local 'Vector<float> tmp' that was never read.)
void CudaSpectralFeatures::ComputeFinalFeatures(int num_frames, BaseFloat vtln_wrap,
                                            CuVector<BaseFloat> *cu_signal_log_energy,
                                            CuMatrix<BaseFloat> *cu_features) {
  MfccOptions mfcc_opts = cumfcc_opts_.mfcc_opts;
  assert(mfcc_opts.htk_compat == false);

  if (num_frames == 0) return;

  // energy computed after windowing (as opposed to the raw pre-window energy)
  if (mfcc_opts.use_energy && !mfcc_opts.raw_energy) {
    dot_log_kernel<<<num_frames, CU1DBLOCK>>>(
        num_frames, cu_windows_.NumCols(), cu_windows_.Data(),
        cu_windows_.Stride(), cu_signal_log_energy->Data());
    CU_SAFE_CALL(cudaGetLastError());
  }

  // make sure a reallocation hasn't changed these
  KALDI_ASSERT(cu_windows_.Stride() == stride_);
  KALDI_ASSERT(tmp_window_.Stride() == tmp_stride_);

  // Perform FFTs in batches of fft_size. This reduces memory requirements
  for (int idx = 0; idx < num_frames; idx += fft_size_) {
    CUFFT_SAFE_CALL(cufftExecR2C(
        plan_, cu_windows_.Data() + cu_windows_.Stride() * idx,
        (cufftComplex *)(tmp_window_.Data() + tmp_window_.Stride() * idx)));
  }

  // Compute Power spectrum
  CuMatrix<BaseFloat> power_spectrum(tmp_window_.NumRows(),
                                     padded_length_ / 2 + 1, kUndefined);
  power_spectrum_kernel<<<num_frames, CU1DBLOCK>>>(
      padded_length_, tmp_window_.Data(), tmp_window_.Stride(),
      power_spectrum.Data(), power_spectrum.Stride(), cumfcc_opts_.use_power);
  CU_SAFE_CALL(cudaGetLastError());

  // mel banks: one warp per (bin, frame) pair
  int num_bins = bin_size_;
  cu_mel_energies_.Resize(num_frames, num_bins, kUndefined);
  dim3 mel_threads(32, 8);
  dim3 mel_blocks(num_bins, (num_frames + mel_threads.y - 1) / mel_threads.y);
  mel_banks_compute_kernel<<<mel_blocks, mel_threads>>>(
      num_frames, std::numeric_limits<float>::epsilon(), offsets_, sizes_,
      vecs_, power_spectrum.Data(), power_spectrum.Stride(),
      cu_mel_energies_.Data(), cu_mel_energies_.Stride(),
      cumfcc_opts_.use_log_fbank);
  CU_SAFE_CALL(cudaGetLastError());

  // dct transform (MFCC); otherwise the mel energies are the features
  if (cumfcc_opts_.use_dct) {
    cu_features->AddMatMat(1.0, cu_mel_energies_, kNoTrans, cu_dct_matrix_,
                           kTrans, 0.0);
    apply_lifter_and_floor_energy<<<num_frames, CU1DBLOCK>>>(
        cu_features->NumRows(), cu_features->NumCols(),
        mfcc_opts.cepstral_lifter, mfcc_opts.use_energy,
        mfcc_opts.energy_floor, cu_signal_log_energy->Data(),
        cu_lifter_coeffs_.Data(), cu_features->Data(), cu_features->Stride());
  } else {
    cudaMemcpyAsync(cu_features->Data(), cu_mel_energies_.Data(),
                    sizeof(BaseFloat) * num_frames * cu_features->Stride(),
                    cudaMemcpyDeviceToDevice, cudaStreamPerThread);
  }
  CU_SAFE_CALL(cudaGetLastError());
}
// Top-level entry point: sizes the working buffers, optionally fills
// tmp_window_ with unit-normal noise for dithering, then runs window
// extraction, window processing and final feature computation.
// NOTE(review): sample_freq and vtln_warp are accepted but not used below
// (ComputeFinalFeatures is called with a fixed warp of 1.0) — confirm this
// is intentional.
void CudaSpectralFeatures::ComputeFeatures(const CuVectorBase<BaseFloat> &cu_wave,
                                       BaseFloat sample_freq, BaseFloat vtln_warp,
                                       CuMatrix<BaseFloat> *cu_features) {
  nvtxRangePushA("CudaSpectralFeatures::ComputeFeatures");
  const FrameExtractionOptions &frame_opts = GetFrameOptions();
  int num_frames = NumFrames(cu_wave.Dim(), frame_opts, true);
  // compute fft frames by rounding up to a multiple of fft_size_
  // NOTE(review): when num_frames is already a multiple of fft_size_ this
  // adds a full extra batch of fft_size_ rows — harmless but wasteful.
  int fft_num_frames = num_frames + (fft_size_ - num_frames % fft_size_);
  int feature_dim = Dim();
  // NOTE(review): use_raw_log_energy is computed but never read here.
  bool use_raw_log_energy = NeedRawLogEnergy();
  CuVector<BaseFloat> raw_log_energies;
  raw_log_energies.Resize(num_frames, kUndefined);

  cu_windows_.Resize(fft_num_frames, padded_length_, kUndefined,
                     kStrideEqualNumCols);
  cu_features->Resize(num_frames, feature_dim, kUndefined);
  //+1 matches cufft/fftw requirements
  tmp_window_.Resize(fft_num_frames, padded_length_ + 2, kUndefined,
                     kStrideEqualNumCols);

  if (frame_opts.dither != 0.0f) {
    // Calling cu-rand directly
    // CuRand class works on CuMatrixBase which must
    // assume that the matrix is part of a larger matrix
    // Doing this directly avoids unnecessary memory copies
    CURAND_SAFE_CALL(
        curandGenerateNormal(GetCurandHandle(), tmp_window_.Data(),
                             tmp_window_.NumRows() * tmp_window_.Stride(),
                             0.0 /*mean*/, 1.0 /*stddev*/));
  }

  // Extract Windows
  ExtractWindows(num_frames, 0, cu_wave, frame_opts);

  // Process Windows
  ProcessWindows(num_frames, frame_opts, &raw_log_energies);

  // Compute Features
  ComputeFinalFeatures(num_frames, 1.0, &raw_log_energies, cu_features);

  nvtxRangePop();
}
CudaSpectralFeatures::~CudaSpectralFeatures() {
  // Tear down in reverse order of construction: destroy the FFT plan first,
  // then free the device-side mel-bank descriptor arrays, and finally the
  // host-side CuVector array that backs them.
  cufftDestroy(plan_);
  CuDevice::Instantiate().Free(sizes_);
  CuDevice::Instantiate().Free(offsets_);
  CuDevice::Instantiate().Free(vecs_);
  delete[] cu_vecs_;
}
} // namespace kaldi
|
8da33fd8dd283d0d220df6f6985a529c2991ecc1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/maxouting.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace math {
// Maxout forward: every output element is the maximum over the 'groups'
// input channels that feed it. One thread per output element, covered by a
// grid-stride loop so any launch configuration handles all 'nthreads'.
template <typename T>
__global__ void KernelMaxOut(const int nthreads, const T* input_data,
                             const int channels, const int input_height,
                             const int input_width, int groups,
                             T* output_data) {
  const int feat_len = input_height * input_width;
  const int size = feat_len * channels / groups;  // elements per output sample
  const int stride = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nthreads;
       i += stride) {
    const int batch_idx = i / size;
    const int within_sample = i % size;
    const int channel_idx = within_sample / feat_len;
    const int feat_idx = within_sample % feat_len;
    const int base =
        (batch_idx * size + channel_idx * feat_len) * groups + feat_idx;
    // Scan the group and keep the largest value.
    T best = static_cast<T>(-FLT_MAX);
    for (int g = 0; g < groups; ++g) {
      T candidate = input_data[base + g * feat_len];
      best = best > candidate ? best : candidate;
    }
    output_data[i] = best;
  }
}
// Maxout backward: routes each output gradient to the first input element in
// its group that equals the forward max; other inputs receive no gradient.
// One thread per output element via a grid-stride loop.
//
// Fix: the gradient read must use the loop variable 'i' (the element this
// iteration handles), not 'index' (the thread's first element). Using
// 'index' reads the wrong gradient whenever a thread processes more than
// one element of the grid-stride loop.
template <typename T>
__global__ void KernelMaxoutGrad(const int nthreads, const T* input_data,
                                 const T* output_data, const T* output_grad,
                                 T* input_grad, const int channels,
                                 const int input_height, const int input_width,
                                 int groups) {
  const int size = input_height * input_width * channels / groups;
  const int feat_len = input_height * input_width;
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int offset = blockDim.x * gridDim.x;
  for (int i = index; i < nthreads; i += offset) {
    int batch_idx = i / size;
    int batch_offset = i % size;
    int channel_idx = batch_offset / feat_len;
    int feat_idx = batch_offset % feat_len;
    int data_idx =
        (batch_idx * size + channel_idx * feat_len) * groups + feat_idx;
    // Find the first input in the group that produced the forward max.
    int max_index = -1;
    for (int g = 0; g < groups; ++g) {
      if (input_data[data_idx + g * feat_len] == output_data[i]) {
        max_index = data_idx + g * feat_len;
        break;
      }
    }
    if (max_index != -1) {
      // Accumulate the gradient of output element i (was: output_grad[index]).
      input_grad[max_index] += output_grad[i];
    }
  }
}
/*
 * All tensors are in NCHW format.
 */
template <typename T>
class MaxOutFunctor<platform::CUDADeviceContext, T> {
 public:
  // Maxout forward pass: launches KernelMaxOut with one thread per output
  // element (1024-thread blocks) on the context's stream.
  // NOTE(review): batch_size and the output_* locals below are computed but
  // unused.
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input, framework::Tensor* output,
                  int groups) {
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_height = input.dims()[2];
    const int input_width = input.dims()[3];
    const int output_channels = output->dims()[1];
    const int output_height = output->dims()[2];
    const int output_width = output->dims()[3];

    const T* input_data = input.data<T>();
    T* output_data = output->mutable_data<T>(context.GetPlace());
    // one thread per output element, 1024 threads per block
    int nthreads = output->numel();
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);

    hipLaunchKernelGGL(( KernelMaxOut<T>), dim3(grid), dim3(threads), 0, context.stream(),
        nthreads, input_data, input_channels, input_height, input_width, groups,
        output_data);
  }
};
/*
 * All tensors are in NCHW format.
 */
template <typename T>
class MaxOutGradFunctor<platform::CUDADeviceContext, T> {
 public:
  // Maxout backward pass: launches KernelMaxoutGrad with one thread per
  // output element on the context's stream.
  // NOTE(review): input_grad is accumulated into (+=) by the kernel, so the
  // caller is expected to have zero-initialized it — confirm at call sites.
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input, framework::Tensor* input_grad,
                  const framework::Tensor& output,
                  const framework::Tensor& output_grad, int groups) {
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_height = input.dims()[2];
    const int input_width = input.dims()[3];
    const int output_channels = output.dims()[1];
    const int output_height = output.dims()[2];
    const int output_width = output.dims()[3];

    const T* input_data = input.data<T>();
    const T* output_data = output.data<T>();
    const T* output_grad_data = output_grad.data<T>();
    T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
    // one thread per output element, 1024 threads per block
    int nthreads = output.numel();
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);

    hipLaunchKernelGGL(( KernelMaxoutGrad<T>), dim3(grid), dim3(threads), 0, context.stream(),
        nthreads, input_data, output_data, output_grad_data, input_grad_data,
        input_channels, input_height, input_width, groups);
  }
};
template class MaxOutGradFunctor<platform::CUDADeviceContext, float>;
template class MaxOutGradFunctor<platform::CUDADeviceContext, double>;
template class MaxOutFunctor<platform::CUDADeviceContext, float>;
template class MaxOutFunctor<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
| 8da33fd8dd283d0d220df6f6985a529c2991ecc1.cu | /* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/maxouting.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace math {
// Maxout forward: every output element is the maximum over the 'groups'
// input channels that feed it. One thread per output element, covered by a
// grid-stride loop so any launch configuration handles all 'nthreads'.
template <typename T>
__global__ void KernelMaxOut(const int nthreads, const T* input_data,
                             const int channels, const int input_height,
                             const int input_width, int groups,
                             T* output_data) {
  const int feat_len = input_height * input_width;
  const int size = feat_len * channels / groups;  // elements per output sample
  const int stride = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nthreads;
       i += stride) {
    const int batch_idx = i / size;
    const int within_sample = i % size;
    const int channel_idx = within_sample / feat_len;
    const int feat_idx = within_sample % feat_len;
    const int base =
        (batch_idx * size + channel_idx * feat_len) * groups + feat_idx;
    // Scan the group and keep the largest value.
    T best = static_cast<T>(-FLT_MAX);
    for (int g = 0; g < groups; ++g) {
      T candidate = input_data[base + g * feat_len];
      best = best > candidate ? best : candidate;
    }
    output_data[i] = best;
  }
}
// Maxout backward: routes each output gradient to the first input element in
// its group that equals the forward max; other inputs receive no gradient.
// One thread per output element via a grid-stride loop.
//
// Fix: the gradient read must use the loop variable 'i' (the element this
// iteration handles), not 'index' (the thread's first element). Using
// 'index' reads the wrong gradient whenever a thread processes more than
// one element of the grid-stride loop.
template <typename T>
__global__ void KernelMaxoutGrad(const int nthreads, const T* input_data,
                                 const T* output_data, const T* output_grad,
                                 T* input_grad, const int channels,
                                 const int input_height, const int input_width,
                                 int groups) {
  const int size = input_height * input_width * channels / groups;
  const int feat_len = input_height * input_width;
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int offset = blockDim.x * gridDim.x;
  for (int i = index; i < nthreads; i += offset) {
    int batch_idx = i / size;
    int batch_offset = i % size;
    int channel_idx = batch_offset / feat_len;
    int feat_idx = batch_offset % feat_len;
    int data_idx =
        (batch_idx * size + channel_idx * feat_len) * groups + feat_idx;
    // Find the first input in the group that produced the forward max.
    int max_index = -1;
    for (int g = 0; g < groups; ++g) {
      if (input_data[data_idx + g * feat_len] == output_data[i]) {
        max_index = data_idx + g * feat_len;
        break;
      }
    }
    if (max_index != -1) {
      // Accumulate the gradient of output element i (was: output_grad[index]).
      input_grad[max_index] += output_grad[i];
    }
  }
}
/*
 * All tensors are in NCHW format.
 */
template <typename T>
class MaxOutFunctor<platform::CUDADeviceContext, T> {
 public:
  // Maxout forward pass: launches KernelMaxOut with one thread per output
  // element (1024-thread blocks) on the context's stream.
  // NOTE(review): batch_size and the output_* locals below are computed but
  // unused.
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input, framework::Tensor* output,
                  int groups) {
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_height = input.dims()[2];
    const int input_width = input.dims()[3];
    const int output_channels = output->dims()[1];
    const int output_height = output->dims()[2];
    const int output_width = output->dims()[3];

    const T* input_data = input.data<T>();
    T* output_data = output->mutable_data<T>(context.GetPlace());
    // one thread per output element, 1024 threads per block
    int nthreads = output->numel();
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);

    KernelMaxOut<T><<<grid, threads, 0, context.stream()>>>(
        nthreads, input_data, input_channels, input_height, input_width, groups,
        output_data);
  }
};
/*
 * All tensors are in NCHW format.
 */
template <typename T>
class MaxOutGradFunctor<platform::CUDADeviceContext, T> {
 public:
  // Maxout backward pass: launches KernelMaxoutGrad with one thread per
  // output element on the context's stream.
  // NOTE(review): input_grad is accumulated into (+=) by the kernel, so the
  // caller is expected to have zero-initialized it — confirm at call sites.
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input, framework::Tensor* input_grad,
                  const framework::Tensor& output,
                  const framework::Tensor& output_grad, int groups) {
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_height = input.dims()[2];
    const int input_width = input.dims()[3];
    const int output_channels = output.dims()[1];
    const int output_height = output.dims()[2];
    const int output_width = output.dims()[3];

    const T* input_data = input.data<T>();
    const T* output_data = output.data<T>();
    const T* output_grad_data = output_grad.data<T>();
    T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
    // one thread per output element, 1024 threads per block
    int nthreads = output.numel();
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);

    KernelMaxoutGrad<T><<<grid, threads, 0, context.stream()>>>(
        nthreads, input_data, output_data, output_grad_data, input_grad_data,
        input_channels, input_height, input_width, groups);
  }
};
template class MaxOutGradFunctor<platform::CUDADeviceContext, float>;
template class MaxOutGradFunctor<platform::CUDADeviceContext, double>;
template class MaxOutFunctor<platform::CUDADeviceContext, float>;
template class MaxOutFunctor<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
79a3146b6e005fd189a2351539fb133be17fdb64.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string>
#include <omp.h>
// Utilities and correctness-checking
#include <gunrock/util/multithread_utils.cuh>
#include <gunrock/util/sort_omp.cuh>
#include <gunrock/csr.cuh>
#include <gunrock/graphio/grmat.cuh>
#include <gunrock/coo.cuh>
#include <moderngpu.cuh>
// boost includes
#include <boost/config.hpp>
#include <boost/utility.hpp>
#include <gunrock/util/shared_utils.cuh>
using namespace gunrock;
// using namespace gunrock::app;
using namespace gunrock::util;
using namespace gunrock::graphio;
using namespace gunrock::graphio::grmat;
// Prints command-line usage (graph types and optional flags) for this
// graph-generation test driver.
void Usage() {
  printf(
      "test <graph-type> [graph-type-arguments]\n"
      "Graph type and graph type arguments:\n"
      "    market <matrix-market-file-name>\n"
      "        Reads a Matrix-Market coordinate-formatted graph of\n"
      "        directed/undirected edges from STDIN (or from the\n"
      "        optionally-specified file).\n"
      "    rmat (default: rmat_scale = 10, a = 0.57, b = c = 0.19)\n"
      "        Generate R-MAT graph as input\n"
      "        --rmat_scale=<vertex-scale>\n"
      "        --rmat_nodes=<number-nodes>\n"
      "        --rmat_edgefactor=<edge-factor>\n"
      "        --rmat_edges=<number-edges>\n"
      "        --rmat_a=<factor> --rmat_b=<factor> --rmat_c=<factor>\n"
      "        --rmat_seed=<seed>\n"
      "        --rmat_self_loops If this option is supplied, then self loops "
      "will be retained\n"
      "        --rmat_undirected If this option is not mentioned, then the "
      "graps will be undirected\n\n"
      "Optional arguments:\n"
      "[--file_name=<file name>]  If the graph needs to be saved to a file, "
      "else it will not be saved physically.\n"
      "[--device=<device_index>] Set GPU(s) for testing (Default: 0).\n"
      "[--quiet]                 No output (unless --json is specified).\n"
      "[--normalized]\n");
}
// Removes self loops (col == row) and adjacent duplicate edges from a COO
// edge list, in parallel with OpenMP. Each thread filters its own slice into
// a private buffer; the buffers are then concatenated serially into new_coo.
// The input buffer is freed and the (possibly shorter) new buffer returned;
// final_edges receives the surviving edge count.
// NOTE(review): duplicate removal only compares edge[i] with edge[i-1], so
// the input must already be sorted by (row, col) — confirm callers sort
// first. Also, per-slice boundary edges are not compared across threads, so
// a duplicate straddling two slices could survive.
template <typename VertexId, typename Tuple, typename SizeT>
Tuple *RemoveSelfLoops(Tuple *coo, SizeT &final_edges, SizeT coo_edges) {
  Tuple *new_coo = (Tuple *)malloc(sizeof(Tuple) * coo_edges);
  int num_threads = 1;
  SizeT *edge_counts = NULL;
  Tuple **new_coo_arr = NULL;
  final_edges = 0;

#pragma omp parallel
  {
    num_threads = omp_get_num_threads();
    int thread_num = omp_get_thread_num();
    // thread 0 allocates the shared bookkeeping arrays; the barrier below
    // makes them visible to all threads before use
    if (thread_num == 0) {
      edge_counts = new SizeT[num_threads + 1];
      new_coo_arr = new Tuple *[num_threads + 1];
    }
#pragma omp barrier

    // each thread filters its contiguous slice [edge_start, edge_end)
    SizeT edge_start = (long long)(coo_edges)*thread_num / num_threads;
    SizeT edge_end = (long long)(coo_edges) * (thread_num + 1) / num_threads;
    new_coo_arr[thread_num] =
        (Tuple *)malloc(sizeof(Tuple) * (edge_end - edge_start));
    SizeT edge = edge_start;
    SizeT new_edge = 0;
    for (edge = edge_start; edge < edge_end; edge++) {
      VertexId col = coo[edge].col;
      VertexId row = coo[edge].row;
      // keep the edge unless it is a self loop or repeats its predecessor
      if ((col != row) &&
          (edge == 0 || col != coo[edge - 1].col || row != coo[edge - 1].row)) {
        new_coo_arr[thread_num][new_edge].col = (VertexId)col;
        new_coo_arr[thread_num][new_edge].row = (VertexId)row;
        new_coo_arr[thread_num][new_edge].val = coo[edge].val;
        new_edge++;
      }
    }
    edge_counts[thread_num] = new_edge;
  }
  // serial concatenation of the per-thread survivor buffers
  SizeT edge = 0;
  for (int i = 0; i < num_threads; i++) {
    for (SizeT tmp_edge = 0; tmp_edge < edge_counts[i]; tmp_edge++) {
      new_coo[edge].row = new_coo_arr[i][tmp_edge].row;
      new_coo[edge].col = new_coo_arr[i][tmp_edge].col;
      new_coo[edge].val = new_coo_arr[i][tmp_edge].val;
      edge++;
    }
    free(new_coo_arr[i]);
  }
  delete[] new_coo_arr;
  new_coo_arr = NULL;
  final_edges = edge;
  delete[] edge_counts;
  edge_counts = NULL;
  free(coo);
  coo = new_coo;
  return coo;
}
/**
 * @brief Modify COO graph as per the expectation of user
 *
 * @param[in] f             Output file to dump the graph topology info
 *                          (Matrix-Market format); may be NULL to skip output.
 * @param[in] coo           Pointer to COO-format graph
 * @param[in] coo_nodes     Number of nodes in COO-format graph
 * @param[in] coo_edges     Number of edges in COO-format graph
 * @param[in] ordered_rows  Are the rows sorted? If not, sort them.
 * @param[in] undirected    Is the graph directed or not?
 * @param[in] quiet         Don't print out anything.
 * @param[in] self_loops    is true if self loops are accepted in the graph.
 *
 * @return The (possibly reallocated) COO array after optional sorting and
 *         self-loop removal.
 *
 * Default: Assume rows are not sorted.
 */
template <typename VertexId, typename Tuple, typename SizeT>
Tuple *FromCoo_MM(FILE *f, Tuple *coo, SizeT coo_nodes, SizeT coo_edges,
                  bool ordered_rows = false, bool undirected = false,
                  bool quiet = false, bool self_loops = false) {
  SizeT rows = 0;
  SizeT cols = 0;
  util::CpuTimer cpu_timer;
  if ((!quiet) && (!self_loops)) {
    printf(
        "Converting %lld vertices, %lld %s edges ( %s tuples) "
        "to remove self loops...\n",
        (long long)coo_nodes, (long long)coo_edges,
        undirected ? "undirected" : "directed",
        ordered_rows ? "ordered" : "unordered");
  }
  cpu_timer.Start();
  int num_threads = 1;
  SizeT *cols_tmp = NULL;
  // Find the max col
  // Parallel max: each thread finds the max column id in its slice, thread 0
  // then reduces the per-thread maxima serially.
#pragma omp parallel
  {
    num_threads = omp_get_num_threads();
    int thread_num = omp_get_thread_num();
    if (thread_num == 0) {
      cols_tmp = new SizeT[num_threads + 1];
    }
#pragma omp barrier
    SizeT edge_start = (long long)(coo_edges)*thread_num / num_threads;
    SizeT edge_end = (long long)(coo_edges) * (thread_num + 1) / num_threads;
    SizeT edge = edge_start;
    SizeT max = coo[edge_start].col;
    for (edge = edge_start + 1; edge < edge_end; edge++) {
      if (max < coo[edge].col) {
        max = coo[edge].col;
      }
    }
    cols_tmp[thread_num] = max;
  }
  cols = cols_tmp[0];
  for (int i = 1; i < num_threads; i++) {
    if (cols < cols_tmp[i]) {
      cols = cols_tmp[i];
    }
  }
  if (cols_tmp != NULL) {
    delete[] cols_tmp;
    cols_tmp = NULL;
  }

  // If not ordered, order it as per row
  // NOTE(review): 'rows' is only assigned in this branch — when
  // ordered_rows is true the header below reports 0 rows; confirm intended.
  if (!ordered_rows) {
    util::omp_sort(coo, coo_edges, RowFirstTupleCompare<Tuple>);
    rows = coo[coo_edges - 1].row;
  }

  SizeT final_edges = coo_edges;
  if (self_loops == false) {
    coo = RemoveSelfLoops<VertexId>(coo, final_edges, coo_edges);
  }
  cpu_timer.Stop();
  if (!quiet) {
    printf("Time Elapsed for sorting %s is %f ms\n\n",
           (self_loops == true) ? "" : "and removing self loops",
           cpu_timer.ElapsedMillis());
    printf(
        "Number of edges %lld, Number of rows %lld and Number of columns "
        "%lld\n\n",
        (long long)final_edges, (long long)rows, (long long)cols);
  }
  // Dump the graph in Matrix-Market coordinate (pattern) format if requested.
  if (f != NULL) {
    fprintf(f, "%%MatrixMarket matrix coordinate pattern %s\n",
            (undirected == true) ? "symmetric" : "");
    fprintf(f, "%lld %lld %lld\n", (long long)rows, (long long)cols,
            (long long)final_edges);
    for (SizeT i = 0; i < final_edges; i++) {
      fprintf(f, "%lld %lld\n", (long long)(coo[i].row),
              (long long)(coo[i].col));
    }
  }
  return coo;
}
template <typename VertexId, typename SizeT, typename Value>
int main_(CommandLineArgs *args) {
// hipError_t retval = hipSuccess;
CpuTimer cpu_timer, cpu_timer2;
SizeT rmat_nodes = 1 << 10;
SizeT rmat_edges = 1 << 10;
SizeT rmat_scale = 10;
SizeT rmat_edgefactor = 48;
double rmat_a = 0.57;
double rmat_b = 0.19;
double rmat_c = 0.19;
double rmat_d = 1 - (rmat_a + rmat_b + rmat_c);
double rmat_vmin = 1;
double rmat_vmultipiler = 64;
int rmat_seed = -1;
bool undirected = false;
bool self_loops = false;
SizeT rmat_all_edges = rmat_edges;
std::string file_name;
bool quiet = false;
typedef Coo<VertexId, Value> EdgeTupleType;
cpu_timer.Start();
if (args->CheckCmdLineFlag("rmat_scale") &&
args->CheckCmdLineFlag("rmat_nodes")) {
printf("Please mention scale or nodes, not both \n");
return hipErrorInvalidConfiguration;
} else if (args->CheckCmdLineFlag("rmat_edgefactor") &&
args->CheckCmdLineFlag("rmat_edges")) {
printf("Please mention edgefactor or edge, not both \n");
return hipErrorInvalidConfiguration;
}
self_loops = args->CheckCmdLineFlag("rmat_self_loops");
// graph construction or generation related parameters
if (args->CheckCmdLineFlag("normalized"))
undirected = args->CheckCmdLineFlag("rmat_undirected");
else
undirected = true; // require undirected input graph when unnormalized
quiet = args->CheckCmdLineFlag("quiet");
args->GetCmdLineArgument("rmat_scale", rmat_scale);
rmat_nodes = 1 << rmat_scale;
args->GetCmdLineArgument("rmat_nodes", rmat_nodes);
args->GetCmdLineArgument("rmat_edgefactor", rmat_edgefactor);
rmat_edges = rmat_nodes * rmat_edgefactor;
args->GetCmdLineArgument("rmat_edges", rmat_edges);
args->GetCmdLineArgument("rmat_a", rmat_a);
args->GetCmdLineArgument("rmat_b", rmat_b);
args->GetCmdLineArgument("rmat_c", rmat_c);
rmat_d = 1 - (rmat_a + rmat_b + rmat_c);
args->GetCmdLineArgument("rmat_d", rmat_d);
args->GetCmdLineArgument("rmat_seed", rmat_seed);
args->GetCmdLineArgument("rmat_vmin", rmat_vmin);
args->GetCmdLineArgument("rmat_vmultipiler", rmat_vmultipiler);
args->GetCmdLineArgument("file_name", file_name);
Coo<VertexId, Value> *coo = NULL;
if (undirected == true) {
rmat_all_edges = 2 * rmat_edges;
} else {
rmat_all_edges = rmat_edges;
}
std::vector<int> temp_devices;
if (args->CheckCmdLineFlag("device")) // parse device list
{
args->GetCmdLineArguments<int>("device", temp_devices);
} else // use single device with index 0
{
int gpu_idx;
util::GRError(hipGetDevice(&gpu_idx), "hipGetDevice failed", __FILE__,
__LINE__);
temp_devices.push_back(gpu_idx);
}
int *gpu_idx = new int[temp_devices.size()];
for (int i = 0; i < temp_devices.size(); i++) gpu_idx[i] = temp_devices[i];
if (!quiet) {
printf(
"---------Graph properties-------\n"
" Undirected : %s\n"
" Nodes : %lld\n"
" Edges : %lld\n"
" a = %f, b = %f, c = %f, d = %f\n\n\n",
((undirected == true) ? "True" : "False"), (long long)rmat_nodes,
(long long)(rmat_edges * ((undirected == true) ? 2 : 1)), rmat_a,
rmat_b, rmat_c, rmat_d);
}
cpu_timer2.Start();
coo =
(Coo<VertexId, Value> *)BuildRmatGraph_coo<true, VertexId, SizeT, Value>(
rmat_nodes, rmat_edges, undirected, rmat_a, rmat_b, rmat_c, rmat_d,
rmat_vmultipiler, rmat_vmin, rmat_seed, quiet, temp_devices.size(),
gpu_idx);
cpu_timer2.Stop();
if (coo != NULL) {
if (!quiet) printf("Graph has been generated \n");
} else {
return hipErrorMemoryAllocation;
}
FILE *f = NULL;
if (!(file_name.empty())) {
f = fopen(file_name.c_str(), "w");
if (f == NULL) {
if (!quiet) printf("Error: File path doesn't exist \n");
}
// Convert the COO format to Matrix Market format
if (!quiet) printf("Converting the COO format to Matrix Market format \n");
}
coo = FromCoo_MM<VertexId>(f, coo, rmat_nodes, rmat_all_edges, false,
undirected, quiet, self_loops);
if (f != NULL) {
fclose(f);
if (!quiet) printf("Converted the COO format to Matrix Market format\n");
}
cpu_timer.Stop();
if (coo == NULL) {
if (!quiet) printf("Error: Failed to create the Graph \n");
return hipErrorMemoryAllocation;
} else {
if (!quiet)
printf(
"Time to generate the graph %f ms\n"
"Total time %f ms\n",
cpu_timer2.ElapsedMillis(), cpu_timer.ElapsedMillis());
free(coo);
}
return hipSuccess;
}
template <typename VertexId, // the vertex identifier type, usually int or long
// long
typename SizeT>
int main_Value(CommandLineArgs *args) {
// can be disabled to reduce compile time
// if (args -> CheckCmdLineFlag("64bit-Value"))
// return main_<VertexId, SizeT, double>(args);
// else
return main_<VertexId, SizeT, float>(args);
}
template <typename VertexId>
int main_SizeT(CommandLineArgs *args) {
// can be disabled to reduce compile time
if (args->CheckCmdLineFlag("64bit-SizeT") || sizeof(VertexId) > 4)
return main_Value<VertexId, long long>(args);
else
return main_Value<VertexId, int>(args);
}
int main_VertexId(CommandLineArgs *args) {
// can be disabled to reduce compile time
if (args->CheckCmdLineFlag("64bit-VertexId"))
return main_SizeT<long long>(args);
else
return main_SizeT<int>(args);
}
int main(int argc, char **argv) {
CommandLineArgs args(argc, argv);
int graph_args = argc - args.ParsedArgc() - 1;
if (argc < 2 || args.CheckCmdLineFlag("help")) {
Usage();
return 1;
}
return main_VertexId(&args);
}
| 79a3146b6e005fd189a2351539fb133be17fdb64.cu | #include <stdio.h>
#include <string>
#include <omp.h>
// Utilities and correctness-checking
#include <gunrock/util/multithread_utils.cuh>
#include <gunrock/util/sort_omp.cuh>
#include <gunrock/csr.cuh>
#include <gunrock/graphio/grmat.cuh>
#include <gunrock/coo.cuh>
#include <moderngpu.cuh>
// boost includes
#include <boost/config.hpp>
#include <boost/utility.hpp>
#include <gunrock/util/shared_utils.cuh>
using namespace gunrock;
// using namespace gunrock::app;
using namespace gunrock::util;
using namespace gunrock::graphio;
using namespace gunrock::graphio::grmat;
void Usage() {
printf(
"test <graph-type> [graph-type-arguments]\n"
"Graph type and graph type arguments:\n"
" market <matrix-market-file-name>\n"
" Reads a Matrix-Market coordinate-formatted graph of\n"
" directed/undirected edges from STDIN (or from the\n"
" optionally-specified file).\n"
" rmat (default: rmat_scale = 10, a = 0.57, b = c = 0.19)\n"
" Generate R-MAT graph as input\n"
" --rmat_scale=<vertex-scale>\n"
" --rmat_nodes=<number-nodes>\n"
" --rmat_edgefactor=<edge-factor>\n"
" --rmat_edges=<number-edges>\n"
" --rmat_a=<factor> --rmat_b=<factor> --rmat_c=<factor>\n"
" --rmat_seed=<seed>\n"
" --rmat_self_loops If this option is supplied, then self loops "
"will be retained\n"
" --rmat_undirected If this option is not mentioned, then the "
"graps will be undirected\n\n"
"Optional arguments:\n"
"[--file_name=<file name>] If the graph needs to be saved to a file, "
"else it will not be saved physically.\n"
"[--device=<device_index>] Set GPU(s) for testing (Default: 0).\n"
"[--quiet] No output (unless --json is specified).\n"
"[--normalized]\n");
}
template <typename VertexId, typename Tuple, typename SizeT>
Tuple *RemoveSelfLoops(Tuple *coo, SizeT &final_edges, SizeT coo_edges) {
Tuple *new_coo = (Tuple *)malloc(sizeof(Tuple) * coo_edges);
int num_threads = 1;
SizeT *edge_counts = NULL;
Tuple **new_coo_arr = NULL;
final_edges = 0;
#pragma omp parallel
{
num_threads = omp_get_num_threads();
int thread_num = omp_get_thread_num();
if (thread_num == 0) {
edge_counts = new SizeT[num_threads + 1];
new_coo_arr = new Tuple *[num_threads + 1];
}
#pragma omp barrier
SizeT edge_start = (long long)(coo_edges)*thread_num / num_threads;
SizeT edge_end = (long long)(coo_edges) * (thread_num + 1) / num_threads;
new_coo_arr[thread_num] =
(Tuple *)malloc(sizeof(Tuple) * (edge_end - edge_start));
SizeT edge = edge_start;
SizeT new_edge = 0;
for (edge = edge_start; edge < edge_end; edge++) {
VertexId col = coo[edge].col;
VertexId row = coo[edge].row;
if ((col != row) &&
(edge == 0 || col != coo[edge - 1].col || row != coo[edge - 1].row)) {
new_coo_arr[thread_num][new_edge].col = (VertexId)col;
new_coo_arr[thread_num][new_edge].row = (VertexId)row;
new_coo_arr[thread_num][new_edge].val = coo[edge].val;
new_edge++;
}
}
edge_counts[thread_num] = new_edge;
}
SizeT edge = 0;
for (int i = 0; i < num_threads; i++) {
for (SizeT tmp_edge = 0; tmp_edge < edge_counts[i]; tmp_edge++) {
new_coo[edge].row = new_coo_arr[i][tmp_edge].row;
new_coo[edge].col = new_coo_arr[i][tmp_edge].col;
new_coo[edge].val = new_coo_arr[i][tmp_edge].val;
edge++;
}
free(new_coo_arr[i]);
}
delete[] new_coo_arr;
new_coo_arr = NULL;
final_edges = edge;
delete[] edge_counts;
edge_counts = NULL;
free(coo);
coo = new_coo;
return coo;
}
/**
* @brief Modify COO graph as per the expectation of user
*
* @param[in] output_file Output file to dump the graph topology info
* @param[in] coo Pointer to COO-format graph
* @param[in] coo_nodes Number of nodes in COO-format graph
* @param[in] coo_edges Number of edges in COO-format graph
* @param[in] ordered_rows Are the rows sorted? If not, sort them.
* @param[in] undirected Is the graph directed or not?
* @param[in] reversed Is the graph reversed or not?
* @param[in] quiet Don't print out anything.
* @param[in] self_loops is true if self loops are accepted in the graph.
*
* Default: Assume rows are not sorted.
*/
template <typename VertexId, typename Tuple, typename SizeT>
Tuple *FromCoo_MM(FILE *f, Tuple *coo, SizeT coo_nodes, SizeT coo_edges,
bool ordered_rows = false, bool undirected = false,
bool quiet = false, bool self_loops = false) {
SizeT rows = 0;
SizeT cols = 0;
util::CpuTimer cpu_timer;
if ((!quiet) && (!self_loops)) {
printf(
"Converting %lld vertices, %lld %s edges ( %s tuples) "
"to remove self loops...\n",
(long long)coo_nodes, (long long)coo_edges,
undirected ? "undirected" : "directed",
ordered_rows ? "ordered" : "unordered");
}
cpu_timer.Start();
int num_threads = 1;
SizeT *cols_tmp = NULL;
// Find the max col
#pragma omp parallel
{
num_threads = omp_get_num_threads();
int thread_num = omp_get_thread_num();
if (thread_num == 0) {
cols_tmp = new SizeT[num_threads + 1];
}
#pragma omp barrier
SizeT edge_start = (long long)(coo_edges)*thread_num / num_threads;
SizeT edge_end = (long long)(coo_edges) * (thread_num + 1) / num_threads;
SizeT edge = edge_start;
SizeT max = coo[edge_start].col;
for (edge = edge_start + 1; edge < edge_end; edge++) {
if (max < coo[edge].col) {
max = coo[edge].col;
}
}
cols_tmp[thread_num] = max;
}
cols = cols_tmp[0];
for (int i = 1; i < num_threads; i++) {
if (cols < cols_tmp[i]) {
cols = cols_tmp[i];
}
}
if (cols_tmp != NULL) {
delete[] cols_tmp;
cols_tmp = NULL;
}
// If not ordered, order it as per row
if (!ordered_rows) {
util::omp_sort(coo, coo_edges, RowFirstTupleCompare<Tuple>);
rows = coo[coo_edges - 1].row;
}
SizeT final_edges = coo_edges;
if (self_loops == false) {
coo = RemoveSelfLoops<VertexId>(coo, final_edges, coo_edges);
}
cpu_timer.Stop();
if (!quiet) {
printf("Time Elapsed for sorting %s is %f ms\n\n",
(self_loops == true) ? "" : "and removing self loops",
cpu_timer.ElapsedMillis());
printf(
"Number of edges %lld, Number of rows %lld and Number of columns "
"%lld\n\n",
(long long)final_edges, (long long)rows, (long long)cols);
}
if (f != NULL) {
fprintf(f, "%%MatrixMarket matrix coordinate pattern %s\n",
(undirected == true) ? "symmetric" : "");
fprintf(f, "%lld %lld %lld\n", (long long)rows, (long long)cols,
(long long)final_edges);
for (SizeT i = 0; i < final_edges; i++) {
fprintf(f, "%lld %lld\n", (long long)(coo[i].row),
(long long)(coo[i].col));
}
}
return coo;
}
template <typename VertexId, typename SizeT, typename Value>
int main_(CommandLineArgs *args) {
// cudaError_t retval = cudaSuccess;
CpuTimer cpu_timer, cpu_timer2;
SizeT rmat_nodes = 1 << 10;
SizeT rmat_edges = 1 << 10;
SizeT rmat_scale = 10;
SizeT rmat_edgefactor = 48;
double rmat_a = 0.57;
double rmat_b = 0.19;
double rmat_c = 0.19;
double rmat_d = 1 - (rmat_a + rmat_b + rmat_c);
double rmat_vmin = 1;
double rmat_vmultipiler = 64;
int rmat_seed = -1;
bool undirected = false;
bool self_loops = false;
SizeT rmat_all_edges = rmat_edges;
std::string file_name;
bool quiet = false;
typedef Coo<VertexId, Value> EdgeTupleType;
cpu_timer.Start();
if (args->CheckCmdLineFlag("rmat_scale") &&
args->CheckCmdLineFlag("rmat_nodes")) {
printf("Please mention scale or nodes, not both \n");
return cudaErrorInvalidConfiguration;
} else if (args->CheckCmdLineFlag("rmat_edgefactor") &&
args->CheckCmdLineFlag("rmat_edges")) {
printf("Please mention edgefactor or edge, not both \n");
return cudaErrorInvalidConfiguration;
}
self_loops = args->CheckCmdLineFlag("rmat_self_loops");
// graph construction or generation related parameters
if (args->CheckCmdLineFlag("normalized"))
undirected = args->CheckCmdLineFlag("rmat_undirected");
else
undirected = true; // require undirected input graph when unnormalized
quiet = args->CheckCmdLineFlag("quiet");
args->GetCmdLineArgument("rmat_scale", rmat_scale);
rmat_nodes = 1 << rmat_scale;
args->GetCmdLineArgument("rmat_nodes", rmat_nodes);
args->GetCmdLineArgument("rmat_edgefactor", rmat_edgefactor);
rmat_edges = rmat_nodes * rmat_edgefactor;
args->GetCmdLineArgument("rmat_edges", rmat_edges);
args->GetCmdLineArgument("rmat_a", rmat_a);
args->GetCmdLineArgument("rmat_b", rmat_b);
args->GetCmdLineArgument("rmat_c", rmat_c);
rmat_d = 1 - (rmat_a + rmat_b + rmat_c);
args->GetCmdLineArgument("rmat_d", rmat_d);
args->GetCmdLineArgument("rmat_seed", rmat_seed);
args->GetCmdLineArgument("rmat_vmin", rmat_vmin);
args->GetCmdLineArgument("rmat_vmultipiler", rmat_vmultipiler);
args->GetCmdLineArgument("file_name", file_name);
Coo<VertexId, Value> *coo = NULL;
if (undirected == true) {
rmat_all_edges = 2 * rmat_edges;
} else {
rmat_all_edges = rmat_edges;
}
std::vector<int> temp_devices;
if (args->CheckCmdLineFlag("device")) // parse device list
{
args->GetCmdLineArguments<int>("device", temp_devices);
} else // use single device with index 0
{
int gpu_idx;
util::GRError(cudaGetDevice(&gpu_idx), "cudaGetDevice failed", __FILE__,
__LINE__);
temp_devices.push_back(gpu_idx);
}
int *gpu_idx = new int[temp_devices.size()];
for (int i = 0; i < temp_devices.size(); i++) gpu_idx[i] = temp_devices[i];
if (!quiet) {
printf(
"---------Graph properties-------\n"
" Undirected : %s\n"
" Nodes : %lld\n"
" Edges : %lld\n"
" a = %f, b = %f, c = %f, d = %f\n\n\n",
((undirected == true) ? "True" : "False"), (long long)rmat_nodes,
(long long)(rmat_edges * ((undirected == true) ? 2 : 1)), rmat_a,
rmat_b, rmat_c, rmat_d);
}
cpu_timer2.Start();
coo =
(Coo<VertexId, Value> *)BuildRmatGraph_coo<true, VertexId, SizeT, Value>(
rmat_nodes, rmat_edges, undirected, rmat_a, rmat_b, rmat_c, rmat_d,
rmat_vmultipiler, rmat_vmin, rmat_seed, quiet, temp_devices.size(),
gpu_idx);
cpu_timer2.Stop();
if (coo != NULL) {
if (!quiet) printf("Graph has been generated \n");
} else {
return cudaErrorMemoryAllocation;
}
FILE *f = NULL;
if (!(file_name.empty())) {
f = fopen(file_name.c_str(), "w");
if (f == NULL) {
if (!quiet) printf("Error: File path doesn't exist \n");
}
// Convert the COO format to Matrix Market format
if (!quiet) printf("Converting the COO format to Matrix Market format \n");
}
coo = FromCoo_MM<VertexId>(f, coo, rmat_nodes, rmat_all_edges, false,
undirected, quiet, self_loops);
if (f != NULL) {
fclose(f);
if (!quiet) printf("Converted the COO format to Matrix Market format\n");
}
cpu_timer.Stop();
if (coo == NULL) {
if (!quiet) printf("Error: Failed to create the Graph \n");
return cudaErrorMemoryAllocation;
} else {
if (!quiet)
printf(
"Time to generate the graph %f ms\n"
"Total time %f ms\n",
cpu_timer2.ElapsedMillis(), cpu_timer.ElapsedMillis());
free(coo);
}
return cudaSuccess;
}
template <typename VertexId, // the vertex identifier type, usually int or long
// long
typename SizeT>
int main_Value(CommandLineArgs *args) {
// can be disabled to reduce compile time
// if (args -> CheckCmdLineFlag("64bit-Value"))
// return main_<VertexId, SizeT, double>(args);
// else
return main_<VertexId, SizeT, float>(args);
}
template <typename VertexId>
int main_SizeT(CommandLineArgs *args) {
// can be disabled to reduce compile time
if (args->CheckCmdLineFlag("64bit-SizeT") || sizeof(VertexId) > 4)
return main_Value<VertexId, long long>(args);
else
return main_Value<VertexId, int>(args);
}
int main_VertexId(CommandLineArgs *args) {
// can be disabled to reduce compile time
if (args->CheckCmdLineFlag("64bit-VertexId"))
return main_SizeT<long long>(args);
else
return main_SizeT<int>(args);
}
int main(int argc, char **argv) {
CommandLineArgs args(argc, argv);
int graph_args = argc - args.ParsedArgc() - 1;
if (argc < 2 || args.CheckCmdLineFlag("help")) {
Usage();
return 1;
}
return main_VertexId(&args);
}
|
3c8467aaa5aa3327644559feaba454468844f7b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/funcs/impl/cuda/saber_affine_channel.h"
#include "hip/hip_fp16.h"
namespace anakin {
namespace saber {
template <typename Dtype>
__global__ void ker_affine_channel_fwd(Dtype * out_data, \
const Dtype* in_data,
const Dtype* scale_data,
const Dtype* bias_data,
const int outer_num,
const int channel,
const int inner_num,
const int count)
{
CUDA_KERNEL_LOOP(tid, count){
const int channel_id = (tid / inner_num) % channel;
out_data[tid] = in_data[tid] * scale_data[channel_id] + bias_data[channel_id];
}
}
template <DataType OpDtype>
SaberStatus SaberAffineChannel<NV, OpDtype>::dispatch(\
const std::vector<Tensor<NV> *>& inputs, \
std::vector<Tensor<NV> *>& outputs, \
AffineChannelParam<NV>& param) {
const OpDataType* in_data = (const OpDataType*)inputs[0]->data();
const OpDataType* scale_data = (const OpDataType*)param.weight()->data();
const OpDataType* bias_data = (const OpDataType*)param.bias()->data();
OpDataType* out_data = (OpDataType*)outputs[0]->mutable_data();
hipStream_t cuda_stream = this->_ctx->get_compute_stream();
int count = outputs[0]->valid_size();
int channel_idx = inputs[0]->channel_index();
int outer_num = inputs[0]->count_valid(0, channel_idx);
int channel = inputs[0]->channel();
int inner_num = inputs[0]->count_valid(channel_idx+1, inputs[0]->dims());
CHECK_EQ(param.weight()->valid_size(), channel) << "affine channel input scale dims are not valid";
CHECK_EQ(param.bias()->valid_size(), channel) << "affine channel input bias dims are not valid";
if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()) {
hipLaunchKernelGGL(( ker_affine_channel_fwd<OpDataType>)\
, dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, \
out_data, in_data, scale_data, bias_data, outer_num, channel, inner_num,
count);
}
return SaberSuccess;
}
DEFINE_OP_TEMPLATE(SaberAffineChannel, AffineChannelParam, NV, AK_HALF);
DEFINE_OP_TEMPLATE(SaberAffineChannel, AffineChannelParam, NV, AK_INT8);
}
}
| 3c8467aaa5aa3327644559feaba454468844f7b2.cu | #include "saber/funcs/impl/cuda/saber_affine_channel.h"
#include "cuda_fp16.h"
namespace anakin {
namespace saber {
template <typename Dtype>
__global__ void ker_affine_channel_fwd(Dtype * out_data, \
const Dtype* in_data,
const Dtype* scale_data,
const Dtype* bias_data,
const int outer_num,
const int channel,
const int inner_num,
const int count)
{
CUDA_KERNEL_LOOP(tid, count){
const int channel_id = (tid / inner_num) % channel;
out_data[tid] = in_data[tid] * scale_data[channel_id] + bias_data[channel_id];
}
}
template <DataType OpDtype>
SaberStatus SaberAffineChannel<NV, OpDtype>::dispatch(\
const std::vector<Tensor<NV> *>& inputs, \
std::vector<Tensor<NV> *>& outputs, \
AffineChannelParam<NV>& param) {
const OpDataType* in_data = (const OpDataType*)inputs[0]->data();
const OpDataType* scale_data = (const OpDataType*)param.weight()->data();
const OpDataType* bias_data = (const OpDataType*)param.bias()->data();
OpDataType* out_data = (OpDataType*)outputs[0]->mutable_data();
cudaStream_t cuda_stream = this->_ctx->get_compute_stream();
int count = outputs[0]->valid_size();
int channel_idx = inputs[0]->channel_index();
int outer_num = inputs[0]->count_valid(0, channel_idx);
int channel = inputs[0]->channel();
int inner_num = inputs[0]->count_valid(channel_idx+1, inputs[0]->dims());
CHECK_EQ(param.weight()->valid_size(), channel) << "affine channel input scale dims are not valid";
CHECK_EQ(param.bias()->valid_size(), channel) << "affine channel input bias dims are not valid";
if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()) {
ker_affine_channel_fwd<OpDataType>\
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(\
out_data, in_data, scale_data, bias_data, outer_num, channel, inner_num,
count);
}
return SaberSuccess;
}
DEFINE_OP_TEMPLATE(SaberAffineChannel, AffineChannelParam, NV, AK_HALF);
DEFINE_OP_TEMPLATE(SaberAffineChannel, AffineChannelParam, NV, AK_INT8);
}
}
|
48ffacf7289ca85367d530f64bfb313aa4b1ef59.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2017-2020 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include "ColorSpace.h"
__constant__ float matYuv2Rgb[3][3];
__constant__ float matRgb2Yuv[3][3];
void inline GetConstants(int iMatrix, float &wr, float &wb, int &black, int &white, int &max) {
black = 16; white = 235;
max = 255;
switch (iMatrix)
{
case ColorSpaceStandard_BT709:
default:
wr = 0.2126f; wb = 0.0722f;
break;
case ColorSpaceStandard_FCC:
wr = 0.30f; wb = 0.11f;
break;
case ColorSpaceStandard_BT470:
case ColorSpaceStandard_BT601:
wr = 0.2990f; wb = 0.1140f;
break;
case ColorSpaceStandard_SMPTE240M:
wr = 0.212f; wb = 0.087f;
break;
case ColorSpaceStandard_BT2020:
case ColorSpaceStandard_BT2020C:
wr = 0.2627f; wb = 0.0593f;
// 10-bit only
black = 64 << 6; white = 940 << 6;
max = (1 << 16) - 1;
break;
}
}
void SetMatYuv2Rgb(int iMatrix) {
float wr, wb;
int black, white, max;
GetConstants(iMatrix, wr, wb, black, white, max);
float mat[3][3] = {
1.0f, 0.0f, (1.0f - wr) / 0.5f,
1.0f, -wb * (1.0f - wb) / 0.5f / (1 - wb - wr), -wr * (1 - wr) / 0.5f / (1 - wb - wr),
1.0f, (1.0f - wb) / 0.5f, 0.0f,
};
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
mat[i][j] = (float)(1.0 * max / (white - black) * mat[i][j]);
}
}
hipMemcpyToSymbol(matYuv2Rgb, mat, sizeof(mat));
}
void SetMatRgb2Yuv(int iMatrix) {
float wr, wb;
int black, white, max;
GetConstants(iMatrix, wr, wb, black, white, max);
float mat[3][3] = {
wr, 1.0f - wb - wr, wb,
-0.5f * wr / (1.0f - wb), -0.5f * (1 - wb - wr) / (1.0f - wb), 0.5f,
0.5f, -0.5f * (1.0f - wb - wr) / (1.0f - wr), -0.5f * wb / (1.0f - wr),
};
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
mat[i][j] = (float)(1.0 * (white - black) / max * mat[i][j]);
}
}
hipMemcpyToSymbol(matRgb2Yuv, mat, sizeof(mat));
}
template<class T>
__device__ static T Clamp(T x, T lower, T upper) {
return x < lower ? lower : (x > upper ? upper : x);
}
template<class Rgb, class YuvUnit>
__device__ inline Rgb YuvToRgbForPixel(YuvUnit y, YuvUnit u, YuvUnit v) {
const int
low = 1 << (sizeof(YuvUnit) * 8 - 4),
mid = 1 << (sizeof(YuvUnit) * 8 - 1);
float fy = (int)y - low, fu = (int)u - mid, fv = (int)v - mid;
const float maxf = (1 << sizeof(YuvUnit) * 8) - 1.0f;
YuvUnit
r = (YuvUnit)Clamp(matYuv2Rgb[0][0] * fy + matYuv2Rgb[0][1] * fu + matYuv2Rgb[0][2] * fv, 0.0f, maxf),
g = (YuvUnit)Clamp(matYuv2Rgb[1][0] * fy + matYuv2Rgb[1][1] * fu + matYuv2Rgb[1][2] * fv, 0.0f, maxf),
b = (YuvUnit)Clamp(matYuv2Rgb[2][0] * fy + matYuv2Rgb[2][1] * fu + matYuv2Rgb[2][2] * fv, 0.0f, maxf);
Rgb rgb{};
const int nShift = abs((int)sizeof(YuvUnit) - (int)sizeof(rgb.c.r)) * 8;
if (sizeof(YuvUnit) >= sizeof(rgb.c.r)) {
rgb.c.r = r >> nShift;
rgb.c.g = g >> nShift;
rgb.c.b = b >> nShift;
} else {
rgb.c.r = r << nShift;
rgb.c.g = g << nShift;
rgb.c.b = b << nShift;
}
return rgb;
}
template<class YuvUnitx2, class Rgb, class RgbIntx2>
__global__ static void YuvToRgbKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgb, int nRgbPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2;
if (x + 1 >= nWidth || y + 1 >= nHeight) {
return;
}
uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
uint8_t *pDst = pRgb + x * sizeof(Rgb) + y * nRgbPitch;
YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc;
YuvUnitx2 l1 = *(YuvUnitx2 *)(pSrc + nYuvPitch);
YuvUnitx2 ch = *(YuvUnitx2 *)(pSrc + (nHeight - y / 2) * nYuvPitch);
*(RgbIntx2 *)pDst = RgbIntx2 {
YuvToRgbForPixel<Rgb>(l0.x, ch.x, ch.y).d,
YuvToRgbForPixel<Rgb>(l0.y, ch.x, ch.y).d,
};
*(RgbIntx2 *)(pDst + nRgbPitch) = RgbIntx2 {
YuvToRgbForPixel<Rgb>(l1.x, ch.x, ch.y).d,
YuvToRgbForPixel<Rgb>(l1.y, ch.x, ch.y).d,
};
}
template<class YuvUnitx2, class Rgb, class RgbIntx2>
__global__ static void Yuv444ToRgbKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgb, int nRgbPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y);
if (x + 1 >= nWidth || y >= nHeight) {
return;
}
uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
uint8_t *pDst = pRgb + x * sizeof(Rgb) + y * nRgbPitch;
YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc;
YuvUnitx2 ch1 = *(YuvUnitx2 *)(pSrc + (nHeight * nYuvPitch));
YuvUnitx2 ch2 = *(YuvUnitx2 *)(pSrc + (2 * nHeight * nYuvPitch));
*(RgbIntx2 *)pDst = RgbIntx2{
YuvToRgbForPixel<Rgb>(l0.x, ch1.x, ch2.x).d,
YuvToRgbForPixel<Rgb>(l0.y, ch1.y, ch2.y).d,
};
}
template<class YuvUnitx2, class Rgb, class RgbUnitx2>
__global__ static void YuvToRgbPlanarKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgbp, int nRgbpPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2;
if (x + 1 >= nWidth || y + 1 >= nHeight) {
return;
}
uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc;
YuvUnitx2 l1 = *(YuvUnitx2 *)(pSrc + nYuvPitch);
YuvUnitx2 ch = *(YuvUnitx2 *)(pSrc + (nHeight - y / 2) * nYuvPitch);
Rgb rgb0 = YuvToRgbForPixel<Rgb>(l0.x, ch.x, ch.y),
rgb1 = YuvToRgbForPixel<Rgb>(l0.y, ch.x, ch.y),
rgb2 = YuvToRgbForPixel<Rgb>(l1.x, ch.x, ch.y),
rgb3 = YuvToRgbForPixel<Rgb>(l1.y, ch.x, ch.y);
uint8_t *pDst = pRgbp + x * sizeof(RgbUnitx2) / 2 + y * nRgbpPitch;
*(RgbUnitx2 *)pDst = RgbUnitx2 {rgb0.v.x, rgb1.v.x};
*(RgbUnitx2 *)(pDst + nRgbpPitch) = RgbUnitx2 {rgb2.v.x, rgb3.v.x};
pDst += nRgbpPitch * nHeight;
*(RgbUnitx2 *)pDst = RgbUnitx2 {rgb0.v.y, rgb1.v.y};
*(RgbUnitx2 *)(pDst + nRgbpPitch) = RgbUnitx2 {rgb2.v.y, rgb3.v.y};
pDst += nRgbpPitch * nHeight;
*(RgbUnitx2 *)pDst = RgbUnitx2 {rgb0.v.z, rgb1.v.z};
*(RgbUnitx2 *)(pDst + nRgbpPitch) = RgbUnitx2 {rgb2.v.z, rgb3.v.z};
}
template<class YuvUnitx2, class Rgb, class RgbUnitx2>
__global__ static void Yuv444ToRgbPlanarKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgbp, int nRgbpPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y);
if (x + 1 >= nWidth || y >= nHeight) {
return;
}
uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc;
YuvUnitx2 ch1 = *(YuvUnitx2 *)(pSrc + (nHeight * nYuvPitch));
YuvUnitx2 ch2 = *(YuvUnitx2 *)(pSrc + (2 * nHeight * nYuvPitch));
Rgb rgb0 = YuvToRgbForPixel<Rgb>(l0.x, ch1.x, ch2.x),
rgb1 = YuvToRgbForPixel<Rgb>(l0.y, ch1.y, ch2.y);
uint8_t *pDst = pRgbp + x * sizeof(RgbUnitx2) / 2 + y * nRgbpPitch;
*(RgbUnitx2 *)pDst = RgbUnitx2{ rgb0.v.x, rgb1.v.x };
pDst += nRgbpPitch * nHeight;
*(RgbUnitx2 *)pDst = RgbUnitx2{ rgb0.v.y, rgb1.v.y };
pDst += nRgbpPitch * nHeight;
*(RgbUnitx2 *)pDst = RgbUnitx2{ rgb0.v.z, rgb1.v.z };
}
template <class COLOR32>
void Nv12ToColor32(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( YuvToRgbKernel<uchar2, COLOR32, uint2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0,
dpNv12, nNv12Pitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR64>
void Nv12ToColor64(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( YuvToRgbKernel<uchar2, COLOR64, ulonglong2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0,
dpNv12, nNv12Pitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR32>
void YUV444ToColor32(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( Yuv444ToRgbKernel<uchar2, COLOR32, uint2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2)), dim3(dim3(32, 2)) , 0, 0,
dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR64>
void YUV444ToColor64(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( Yuv444ToRgbKernel<uchar2, COLOR64, ulonglong2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2)), dim3(dim3(32, 2)) , 0, 0,
dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR32>
void P016ToColor32(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( YuvToRgbKernel<ushort2, COLOR32, uint2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0,
dpP016, nP016Pitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR64>
void P016ToColor64(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( YuvToRgbKernel<ushort2, COLOR64, ulonglong2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0,
dpP016, nP016Pitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR32>
void YUV444P16ToColor32(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( Yuv444ToRgbKernel<ushort2, COLOR32, uint2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2)), dim3(dim3(32, 2)) , 0, 0,
dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR64>
void YUV444P16ToColor64(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( Yuv444ToRgbKernel<ushort2, COLOR64, ulonglong2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2)), dim3(dim3(32, 2)) , 0, 0,
dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR32>
void Nv12ToColorPlanar(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( YuvToRgbPlanarKernel<uchar2, COLOR32, uchar2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0,
dpNv12, nNv12Pitch, dpBgrp, nBgrpPitch, nWidth, nHeight);
}
template <class COLOR32>
void P016ToColorPlanar(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( YuvToRgbPlanarKernel<ushort2, COLOR32, uchar2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0,
dpP016, nP016Pitch, dpBgrp, nBgrpPitch, nWidth, nHeight);
}
template <class COLOR32>
void YUV444ToColorPlanar(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( Yuv444ToRgbPlanarKernel<uchar2, COLOR32, uchar2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2)), dim3(dim3(32, 2)) , 0, 0,
dpYUV444, nPitch, dpBgrp, nBgrpPitch, nWidth, nHeight);
}
template <class COLOR32>
void YUV444P16ToColorPlanar(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
Yuv444ToRgbPlanarKernel<ushort2, COLOR32, uchar2>
<< <dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2), dim3(32, 2) >> >
(dpYUV444, nPitch, dpBgrp, nBgrpPitch, nWidth, nHeight);
}
// Explicit Instantiation
template void Nv12ToColor32<BGRA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void Nv12ToColor32<RGBA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void Nv12ToColor64<BGRA64>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void Nv12ToColor64<RGBA64>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColor32<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColor32<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColor64<BGRA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColor64<RGBA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColor32<BGRA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColor32<RGBA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColor64<BGRA64>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColor64<RGBA64>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColor32<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColor32<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColor64<BGRA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColor64<RGBA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void Nv12ToColorPlanar<BGRA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void Nv12ToColorPlanar<RGBA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColorPlanar<BGRA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColorPlanar<RGBA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColorPlanar<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColorPlanar<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColorPlanar<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColorPlanar<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template<class YuvUnit, class RgbUnit>
__device__ inline YuvUnit RgbToY(RgbUnit r, RgbUnit g, RgbUnit b) {
const YuvUnit low = 1 << (sizeof(YuvUnit) * 8 - 4);
return matRgb2Yuv[0][0] * r + matRgb2Yuv[0][1] * g + matRgb2Yuv[0][2] * b + low;
}
template<class YuvUnit, class RgbUnit>
__device__ inline YuvUnit RgbToU(RgbUnit r, RgbUnit g, RgbUnit b) {
const YuvUnit mid = 1 << (sizeof(YuvUnit) * 8 - 1);
return matRgb2Yuv[1][0] * r + matRgb2Yuv[1][1] * g + matRgb2Yuv[1][2] * b + mid;
}
template<class YuvUnit, class RgbUnit>
__device__ inline YuvUnit RgbToV(RgbUnit r, RgbUnit g, RgbUnit b) {
const YuvUnit mid = 1 << (sizeof(YuvUnit) * 8 - 1);
return matRgb2Yuv[2][0] * r + matRgb2Yuv[2][1] * g + matRgb2Yuv[2][2] * b + mid;
}
template<class YuvUnitx2, class Rgb, class RgbIntx2>
__global__ static void RgbToYuvKernel(uint8_t *pRgb, int nRgbPitch, uint8_t *pYuv, int nYuvPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2;
if (x + 1 >= nWidth || y + 1 >= nHeight) {
return;
}
uint8_t *pSrc = pRgb + x * sizeof(Rgb) + y * nRgbPitch;
RgbIntx2 int2a = *(RgbIntx2 *)pSrc;
RgbIntx2 int2b = *(RgbIntx2 *)(pSrc + nRgbPitch);
Rgb rgb[4] = {int2a.x, int2a.y, int2b.x, int2b.y};
decltype(Rgb::c.r)
r = (rgb[0].c.r + rgb[1].c.r + rgb[2].c.r + rgb[3].c.r) / 4,
g = (rgb[0].c.g + rgb[1].c.g + rgb[2].c.g + rgb[3].c.g) / 4,
b = (rgb[0].c.b + rgb[1].c.b + rgb[2].c.b + rgb[3].c.b) / 4;
uint8_t *pDst = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
*(YuvUnitx2 *)pDst = YuvUnitx2 {
RgbToY<decltype(YuvUnitx2::x)>(rgb[0].c.r, rgb[0].c.g, rgb[0].c.b),
RgbToY<decltype(YuvUnitx2::x)>(rgb[1].c.r, rgb[1].c.g, rgb[1].c.b),
};
*(YuvUnitx2 *)(pDst + nYuvPitch) = YuvUnitx2 {
RgbToY<decltype(YuvUnitx2::x)>(rgb[2].c.r, rgb[2].c.g, rgb[2].c.b),
RgbToY<decltype(YuvUnitx2::x)>(rgb[3].c.r, rgb[3].c.g, rgb[3].c.b),
};
*(YuvUnitx2 *)(pDst + (nHeight - y / 2) * nYuvPitch) = YuvUnitx2 {
RgbToU<decltype(YuvUnitx2::x)>(r, g, b),
RgbToV<decltype(YuvUnitx2::x)>(r, g, b),
};
}
void Bgra64ToP016(uint8_t *dpBgra, int nBgraPitch, uint8_t *dpP016, int nP016Pitch, int nWidth, int nHeight, int iMatrix) {
SetMatRgb2Yuv(iMatrix);
hipLaunchKernelGGL(( RgbToYuvKernel<ushort2, BGRA64, ulonglong2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0,
dpBgra, nBgraPitch, dpP016, nP016Pitch, nWidth, nHeight);
}
template<class YuvUnitx2, class Rgb, class RgbUnitx2>
__global__ static void YuvToRgbKernel111(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgbp, int nRgbpPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2;
if (x + 1 >= nWidth || y + 1 >= nHeight) {
return;
}
uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
unsigned int uiIndex = x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc;
YuvUnitx2 l1 = *(YuvUnitx2 *)(pSrc + nYuvPitch);
YuvUnitx2 ch = *(YuvUnitx2 *)(pSrc + (nHeight - y / 2) * nYuvPitch);
Rgb rgb0 = YuvToRgbForPixel<Rgb>(l0.x, ch.x, ch.y),
rgb1 = YuvToRgbForPixel<Rgb>(l0.y, ch.x, ch.y),
rgb2 = YuvToRgbForPixel<Rgb>(l1.x, ch.x, ch.y),
rgb3 = YuvToRgbForPixel<Rgb>(l1.y, ch.x, ch.y);
uint8_t *pDst = pRgbp + uiIndex*3;
int stride = nRgbpPitch*3;
pDst[0] = rgb0.v.x; pDst[1] = rgb0.v.y; pDst[2] = rgb0.v.z;
pDst[3] = rgb1.v.x; pDst[4] = rgb1.v.y; pDst[5] = rgb1.v.z;
pDst[stride+0] = rgb2.v.x; pDst[stride+1] = rgb2.v.y; pDst[stride+2] = rgb2.v.z;
pDst[stride+3] = rgb3.v.x; pDst[stride+4] = rgb3.v.y; pDst[stride+5] = rgb3.v.z;
}
void Nv12ToBgr(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( YuvToRgbKernel111<uchar2, BGRA32, uchar2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0,
dpNv12, nNv12Pitch, dpBgrp, nBgrpPitch, nWidth, nHeight);
}
| 48ffacf7289ca85367d530f64bfb313aa4b1ef59.cu | /*
* Copyright 2017-2020 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include "ColorSpace.h"
__constant__ float matYuv2Rgb[3][3];
__constant__ float matRgb2Yuv[3][3];
void inline GetConstants(int iMatrix, float &wr, float &wb, int &black, int &white, int &max) {
black = 16; white = 235;
max = 255;
switch (iMatrix)
{
case ColorSpaceStandard_BT709:
default:
wr = 0.2126f; wb = 0.0722f;
break;
case ColorSpaceStandard_FCC:
wr = 0.30f; wb = 0.11f;
break;
case ColorSpaceStandard_BT470:
case ColorSpaceStandard_BT601:
wr = 0.2990f; wb = 0.1140f;
break;
case ColorSpaceStandard_SMPTE240M:
wr = 0.212f; wb = 0.087f;
break;
case ColorSpaceStandard_BT2020:
case ColorSpaceStandard_BT2020C:
wr = 0.2627f; wb = 0.0593f;
// 10-bit only
black = 64 << 6; white = 940 << 6;
max = (1 << 16) - 1;
break;
}
}
void SetMatYuv2Rgb(int iMatrix) {
float wr, wb;
int black, white, max;
GetConstants(iMatrix, wr, wb, black, white, max);
float mat[3][3] = {
1.0f, 0.0f, (1.0f - wr) / 0.5f,
1.0f, -wb * (1.0f - wb) / 0.5f / (1 - wb - wr), -wr * (1 - wr) / 0.5f / (1 - wb - wr),
1.0f, (1.0f - wb) / 0.5f, 0.0f,
};
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
mat[i][j] = (float)(1.0 * max / (white - black) * mat[i][j]);
}
}
cudaMemcpyToSymbol(matYuv2Rgb, mat, sizeof(mat));
}
void SetMatRgb2Yuv(int iMatrix) {
float wr, wb;
int black, white, max;
GetConstants(iMatrix, wr, wb, black, white, max);
float mat[3][3] = {
wr, 1.0f - wb - wr, wb,
-0.5f * wr / (1.0f - wb), -0.5f * (1 - wb - wr) / (1.0f - wb), 0.5f,
0.5f, -0.5f * (1.0f - wb - wr) / (1.0f - wr), -0.5f * wb / (1.0f - wr),
};
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
mat[i][j] = (float)(1.0 * (white - black) / max * mat[i][j]);
}
}
cudaMemcpyToSymbol(matRgb2Yuv, mat, sizeof(mat));
}
template<class T>
__device__ static T Clamp(T x, T lower, T upper) {
return x < lower ? lower : (x > upper ? upper : x);
}
template<class Rgb, class YuvUnit>
__device__ inline Rgb YuvToRgbForPixel(YuvUnit y, YuvUnit u, YuvUnit v) {
const int
low = 1 << (sizeof(YuvUnit) * 8 - 4),
mid = 1 << (sizeof(YuvUnit) * 8 - 1);
float fy = (int)y - low, fu = (int)u - mid, fv = (int)v - mid;
const float maxf = (1 << sizeof(YuvUnit) * 8) - 1.0f;
YuvUnit
r = (YuvUnit)Clamp(matYuv2Rgb[0][0] * fy + matYuv2Rgb[0][1] * fu + matYuv2Rgb[0][2] * fv, 0.0f, maxf),
g = (YuvUnit)Clamp(matYuv2Rgb[1][0] * fy + matYuv2Rgb[1][1] * fu + matYuv2Rgb[1][2] * fv, 0.0f, maxf),
b = (YuvUnit)Clamp(matYuv2Rgb[2][0] * fy + matYuv2Rgb[2][1] * fu + matYuv2Rgb[2][2] * fv, 0.0f, maxf);
Rgb rgb{};
const int nShift = abs((int)sizeof(YuvUnit) - (int)sizeof(rgb.c.r)) * 8;
if (sizeof(YuvUnit) >= sizeof(rgb.c.r)) {
rgb.c.r = r >> nShift;
rgb.c.g = g >> nShift;
rgb.c.b = b >> nShift;
} else {
rgb.c.r = r << nShift;
rgb.c.g = g << nShift;
rgb.c.b = b << nShift;
}
return rgb;
}
template<class YuvUnitx2, class Rgb, class RgbIntx2>
__global__ static void YuvToRgbKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgb, int nRgbPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2;
if (x + 1 >= nWidth || y + 1 >= nHeight) {
return;
}
uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
uint8_t *pDst = pRgb + x * sizeof(Rgb) + y * nRgbPitch;
YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc;
YuvUnitx2 l1 = *(YuvUnitx2 *)(pSrc + nYuvPitch);
YuvUnitx2 ch = *(YuvUnitx2 *)(pSrc + (nHeight - y / 2) * nYuvPitch);
*(RgbIntx2 *)pDst = RgbIntx2 {
YuvToRgbForPixel<Rgb>(l0.x, ch.x, ch.y).d,
YuvToRgbForPixel<Rgb>(l0.y, ch.x, ch.y).d,
};
*(RgbIntx2 *)(pDst + nRgbPitch) = RgbIntx2 {
YuvToRgbForPixel<Rgb>(l1.x, ch.x, ch.y).d,
YuvToRgbForPixel<Rgb>(l1.y, ch.x, ch.y).d,
};
}
template<class YuvUnitx2, class Rgb, class RgbIntx2>
__global__ static void Yuv444ToRgbKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgb, int nRgbPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y);
if (x + 1 >= nWidth || y >= nHeight) {
return;
}
uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
uint8_t *pDst = pRgb + x * sizeof(Rgb) + y * nRgbPitch;
YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc;
YuvUnitx2 ch1 = *(YuvUnitx2 *)(pSrc + (nHeight * nYuvPitch));
YuvUnitx2 ch2 = *(YuvUnitx2 *)(pSrc + (2 * nHeight * nYuvPitch));
*(RgbIntx2 *)pDst = RgbIntx2{
YuvToRgbForPixel<Rgb>(l0.x, ch1.x, ch2.x).d,
YuvToRgbForPixel<Rgb>(l0.y, ch1.y, ch2.y).d,
};
}
template<class YuvUnitx2, class Rgb, class RgbUnitx2>
__global__ static void YuvToRgbPlanarKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgbp, int nRgbpPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2;
if (x + 1 >= nWidth || y + 1 >= nHeight) {
return;
}
uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc;
YuvUnitx2 l1 = *(YuvUnitx2 *)(pSrc + nYuvPitch);
YuvUnitx2 ch = *(YuvUnitx2 *)(pSrc + (nHeight - y / 2) * nYuvPitch);
Rgb rgb0 = YuvToRgbForPixel<Rgb>(l0.x, ch.x, ch.y),
rgb1 = YuvToRgbForPixel<Rgb>(l0.y, ch.x, ch.y),
rgb2 = YuvToRgbForPixel<Rgb>(l1.x, ch.x, ch.y),
rgb3 = YuvToRgbForPixel<Rgb>(l1.y, ch.x, ch.y);
uint8_t *pDst = pRgbp + x * sizeof(RgbUnitx2) / 2 + y * nRgbpPitch;
*(RgbUnitx2 *)pDst = RgbUnitx2 {rgb0.v.x, rgb1.v.x};
*(RgbUnitx2 *)(pDst + nRgbpPitch) = RgbUnitx2 {rgb2.v.x, rgb3.v.x};
pDst += nRgbpPitch * nHeight;
*(RgbUnitx2 *)pDst = RgbUnitx2 {rgb0.v.y, rgb1.v.y};
*(RgbUnitx2 *)(pDst + nRgbpPitch) = RgbUnitx2 {rgb2.v.y, rgb3.v.y};
pDst += nRgbpPitch * nHeight;
*(RgbUnitx2 *)pDst = RgbUnitx2 {rgb0.v.z, rgb1.v.z};
*(RgbUnitx2 *)(pDst + nRgbpPitch) = RgbUnitx2 {rgb2.v.z, rgb3.v.z};
}
template<class YuvUnitx2, class Rgb, class RgbUnitx2>
__global__ static void Yuv444ToRgbPlanarKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgbp, int nRgbpPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y);
if (x + 1 >= nWidth || y >= nHeight) {
return;
}
uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc;
YuvUnitx2 ch1 = *(YuvUnitx2 *)(pSrc + (nHeight * nYuvPitch));
YuvUnitx2 ch2 = *(YuvUnitx2 *)(pSrc + (2 * nHeight * nYuvPitch));
Rgb rgb0 = YuvToRgbForPixel<Rgb>(l0.x, ch1.x, ch2.x),
rgb1 = YuvToRgbForPixel<Rgb>(l0.y, ch1.y, ch2.y);
uint8_t *pDst = pRgbp + x * sizeof(RgbUnitx2) / 2 + y * nRgbpPitch;
*(RgbUnitx2 *)pDst = RgbUnitx2{ rgb0.v.x, rgb1.v.x };
pDst += nRgbpPitch * nHeight;
*(RgbUnitx2 *)pDst = RgbUnitx2{ rgb0.v.y, rgb1.v.y };
pDst += nRgbpPitch * nHeight;
*(RgbUnitx2 *)pDst = RgbUnitx2{ rgb0.v.z, rgb1.v.z };
}
template <class COLOR32>
void Nv12ToColor32(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
YuvToRgbKernel<uchar2, COLOR32, uint2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>>
(dpNv12, nNv12Pitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR64>
void Nv12ToColor64(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
YuvToRgbKernel<uchar2, COLOR64, ulonglong2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>>
(dpNv12, nNv12Pitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR32>
void YUV444ToColor32(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
Yuv444ToRgbKernel<uchar2, COLOR32, uint2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2), dim3(32, 2) >>>
(dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR64>
void YUV444ToColor64(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
Yuv444ToRgbKernel<uchar2, COLOR64, ulonglong2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2), dim3(32, 2) >>>
(dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR32>
void P016ToColor32(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
YuvToRgbKernel<ushort2, COLOR32, uint2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>>
(dpP016, nP016Pitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR64>
void P016ToColor64(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
YuvToRgbKernel<ushort2, COLOR64, ulonglong2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>>
(dpP016, nP016Pitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR32>
void YUV444P16ToColor32(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
Yuv444ToRgbKernel<ushort2, COLOR32, uint2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2), dim3(32, 2) >>>
(dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR64>
void YUV444P16ToColor64(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
Yuv444ToRgbKernel<ushort2, COLOR64, ulonglong2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2), dim3(32, 2) >>>
(dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR32>
void Nv12ToColorPlanar(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
YuvToRgbPlanarKernel<uchar2, COLOR32, uchar2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>>
(dpNv12, nNv12Pitch, dpBgrp, nBgrpPitch, nWidth, nHeight);
}
template <class COLOR32>
void P016ToColorPlanar(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
YuvToRgbPlanarKernel<ushort2, COLOR32, uchar2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>>
(dpP016, nP016Pitch, dpBgrp, nBgrpPitch, nWidth, nHeight);
}
template <class COLOR32>
void YUV444ToColorPlanar(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
Yuv444ToRgbPlanarKernel<uchar2, COLOR32, uchar2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2), dim3(32, 2) >>>
(dpYUV444, nPitch, dpBgrp, nBgrpPitch, nWidth, nHeight);
}
template <class COLOR32>
void YUV444P16ToColorPlanar(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
Yuv444ToRgbPlanarKernel<ushort2, COLOR32, uchar2>
<< <dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2), dim3(32, 2) >> >
(dpYUV444, nPitch, dpBgrp, nBgrpPitch, nWidth, nHeight);
}
// Explicit Instantiation
template void Nv12ToColor32<BGRA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void Nv12ToColor32<RGBA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void Nv12ToColor64<BGRA64>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void Nv12ToColor64<RGBA64>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColor32<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColor32<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColor64<BGRA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColor64<RGBA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColor32<BGRA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColor32<RGBA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColor64<BGRA64>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColor64<RGBA64>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColor32<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColor32<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColor64<BGRA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColor64<RGBA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void Nv12ToColorPlanar<BGRA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void Nv12ToColorPlanar<RGBA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColorPlanar<BGRA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColorPlanar<RGBA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColorPlanar<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColorPlanar<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColorPlanar<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColorPlanar<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template<class YuvUnit, class RgbUnit>
__device__ inline YuvUnit RgbToY(RgbUnit r, RgbUnit g, RgbUnit b) {
const YuvUnit low = 1 << (sizeof(YuvUnit) * 8 - 4);
return matRgb2Yuv[0][0] * r + matRgb2Yuv[0][1] * g + matRgb2Yuv[0][2] * b + low;
}
template<class YuvUnit, class RgbUnit>
__device__ inline YuvUnit RgbToU(RgbUnit r, RgbUnit g, RgbUnit b) {
const YuvUnit mid = 1 << (sizeof(YuvUnit) * 8 - 1);
return matRgb2Yuv[1][0] * r + matRgb2Yuv[1][1] * g + matRgb2Yuv[1][2] * b + mid;
}
template<class YuvUnit, class RgbUnit>
__device__ inline YuvUnit RgbToV(RgbUnit r, RgbUnit g, RgbUnit b) {
const YuvUnit mid = 1 << (sizeof(YuvUnit) * 8 - 1);
return matRgb2Yuv[2][0] * r + matRgb2Yuv[2][1] * g + matRgb2Yuv[2][2] * b + mid;
}
template<class YuvUnitx2, class Rgb, class RgbIntx2>
__global__ static void RgbToYuvKernel(uint8_t *pRgb, int nRgbPitch, uint8_t *pYuv, int nYuvPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2;
if (x + 1 >= nWidth || y + 1 >= nHeight) {
return;
}
uint8_t *pSrc = pRgb + x * sizeof(Rgb) + y * nRgbPitch;
RgbIntx2 int2a = *(RgbIntx2 *)pSrc;
RgbIntx2 int2b = *(RgbIntx2 *)(pSrc + nRgbPitch);
Rgb rgb[4] = {int2a.x, int2a.y, int2b.x, int2b.y};
decltype(Rgb::c.r)
r = (rgb[0].c.r + rgb[1].c.r + rgb[2].c.r + rgb[3].c.r) / 4,
g = (rgb[0].c.g + rgb[1].c.g + rgb[2].c.g + rgb[3].c.g) / 4,
b = (rgb[0].c.b + rgb[1].c.b + rgb[2].c.b + rgb[3].c.b) / 4;
uint8_t *pDst = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
*(YuvUnitx2 *)pDst = YuvUnitx2 {
RgbToY<decltype(YuvUnitx2::x)>(rgb[0].c.r, rgb[0].c.g, rgb[0].c.b),
RgbToY<decltype(YuvUnitx2::x)>(rgb[1].c.r, rgb[1].c.g, rgb[1].c.b),
};
*(YuvUnitx2 *)(pDst + nYuvPitch) = YuvUnitx2 {
RgbToY<decltype(YuvUnitx2::x)>(rgb[2].c.r, rgb[2].c.g, rgb[2].c.b),
RgbToY<decltype(YuvUnitx2::x)>(rgb[3].c.r, rgb[3].c.g, rgb[3].c.b),
};
*(YuvUnitx2 *)(pDst + (nHeight - y / 2) * nYuvPitch) = YuvUnitx2 {
RgbToU<decltype(YuvUnitx2::x)>(r, g, b),
RgbToV<decltype(YuvUnitx2::x)>(r, g, b),
};
}
void Bgra64ToP016(uint8_t *dpBgra, int nBgraPitch, uint8_t *dpP016, int nP016Pitch, int nWidth, int nHeight, int iMatrix) {
SetMatRgb2Yuv(iMatrix);
RgbToYuvKernel<ushort2, BGRA64, ulonglong2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>>
(dpBgra, nBgraPitch, dpP016, nP016Pitch, nWidth, nHeight);
}
template<class YuvUnitx2, class Rgb, class RgbUnitx2>
__global__ static void YuvToRgbKernel111(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgbp, int nRgbpPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2;
if (x + 1 >= nWidth || y + 1 >= nHeight) {
return;
}
uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
unsigned int uiIndex = x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc;
YuvUnitx2 l1 = *(YuvUnitx2 *)(pSrc + nYuvPitch);
YuvUnitx2 ch = *(YuvUnitx2 *)(pSrc + (nHeight - y / 2) * nYuvPitch);
Rgb rgb0 = YuvToRgbForPixel<Rgb>(l0.x, ch.x, ch.y),
rgb1 = YuvToRgbForPixel<Rgb>(l0.y, ch.x, ch.y),
rgb2 = YuvToRgbForPixel<Rgb>(l1.x, ch.x, ch.y),
rgb3 = YuvToRgbForPixel<Rgb>(l1.y, ch.x, ch.y);
uint8_t *pDst = pRgbp + uiIndex*3;
int stride = nRgbpPitch*3;
pDst[0] = rgb0.v.x; pDst[1] = rgb0.v.y; pDst[2] = rgb0.v.z;
pDst[3] = rgb1.v.x; pDst[4] = rgb1.v.y; pDst[5] = rgb1.v.z;
pDst[stride+0] = rgb2.v.x; pDst[stride+1] = rgb2.v.y; pDst[stride+2] = rgb2.v.z;
pDst[stride+3] = rgb3.v.x; pDst[stride+4] = rgb3.v.y; pDst[stride+5] = rgb3.v.z;
}
void Nv12ToBgr(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
YuvToRgbKernel111<uchar2, BGRA32, uchar2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>>
(dpNv12, nNv12Pitch, dpBgrp, nBgrpPitch, nWidth, nHeight);
}
|
a899fb1b9e31bd8668ab40ff18a4bed7ec4ae0d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/core/cuda_devptrs.hpp"
#include "opencv2/core/cuda/common.hpp"
namespace cv { namespace softcascade { namespace cudev
{
typedef unsigned int uint;
typedef unsigned short ushort;
// Utility function to extract unsigned chars from an unsigned integer
__device__ uchar4 int_to_uchar4(unsigned int in)
{
uchar4 bytes;
bytes.x = (in & 0x000000ff) >> 0;
bytes.y = (in & 0x0000ff00) >> 8;
bytes.z = (in & 0x00ff0000) >> 16;
bytes.w = (in & 0xff000000) >> 24;
return bytes;
}
__global__ void shfl_integral_horizontal(const cv::gpu::PtrStep<uint4> img, cv::gpu::PtrStep<uint4> integral)
{
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 300)
__shared__ int sums[128];
const int id = threadIdx.x;
const int lane_id = id % warpSize;
const int warp_id = id / warpSize;
const uint4 data = img(blockIdx.x, id);
const uchar4 a = int_to_uchar4(data.x);
const uchar4 b = int_to_uchar4(data.y);
const uchar4 c = int_to_uchar4(data.z);
const uchar4 d = int_to_uchar4(data.w);
int result[16];
result[0] = a.x;
result[1] = result[0] + a.y;
result[2] = result[1] + a.z;
result[3] = result[2] + a.w;
result[4] = result[3] + b.x;
result[5] = result[4] + b.y;
result[6] = result[5] + b.z;
result[7] = result[6] + b.w;
result[8] = result[7] + c.x;
result[9] = result[8] + c.y;
result[10] = result[9] + c.z;
result[11] = result[10] + c.w;
result[12] = result[11] + d.x;
result[13] = result[12] + d.y;
result[14] = result[13] + d.z;
result[15] = result[14] + d.w;
int sum = result[15];
// the prefix sum for each thread's 16 value is computed,
// now the final sums (result[15]) need to be shared
// with the other threads and add. To do this,
// the __shfl_up() instruction is used and a shuffle scan
// operation is performed to distribute the sums to the correct
// threads
#pragma unroll
for (int i = 1; i < 32; i *= 2)
{
const int n = __shfl_up(sum, i, 32);
if (lane_id >= i)
{
#pragma unroll
for (int i = 0; i < 16; ++i)
result[i] += n;
sum += n;
}
}
// Now the final sum for the warp must be shared
// between warps. This is done by each warp
// having a thread store to shared memory, then
// having some other warp load the values and
// compute a prefix sum, again by using __shfl_up.
// The results are uniformly added back to the warps.
// last thread in the warp holding sum of the warp
// places that in shared
if (threadIdx.x % warpSize == warpSize - 1)
sums[warp_id] = result[15];
__syncthreads();
if (warp_id == 0)
{
int warp_sum = sums[lane_id];
#pragma unroll
for (int i = 1; i <= 32; i *= 2)
{
const int n = __shfl_up(warp_sum, i, 32);
if (lane_id >= i)
warp_sum += n;
}
sums[lane_id] = warp_sum;
}
__syncthreads();
int blockSum = 0;
// fold in unused warp
if (warp_id > 0)
{
blockSum = sums[warp_id - 1];
#pragma unroll
for (int i = 0; i < 16; ++i)
result[i] += blockSum;
}
// assemble result
// Each thread has 16 values to write, which are
// now integer data (to avoid overflow). Instead of
// each thread writing consecutive uint4s, the
// approach shown here experiments using
// the shuffle command to reformat the data
// inside the registers so that each thread holds
// consecutive data to be written so larger contiguous
// segments can be assembled for writing.
/*
For example data that needs to be written as
GMEM[16] <- x0 x1 x2 x3 y0 y1 y2 y3 z0 z1 z2 z3 w0 w1 w2 w3
but is stored in registers (r0..r3), in four threads (0..3) as:
threadId 0 1 2 3
r0 x0 y0 z0 w0
r1 x1 y1 z1 w1
r2 x2 y2 z2 w2
r3 x3 y3 z3 w3
after apply __shfl_xor operations to move data between registers r1..r3:
threadId 00 01 10 11
x0 y0 z0 w0
xor(01)->y1 x1 w1 z1
xor(10)->z2 w2 x2 y2
xor(11)->w3 z3 y3 x3
and now x0..x3, and z0..z3 can be written out in order by all threads.
In the current code, each register above is actually representing
four integers to be written as uint4's to GMEM.
*/
result[4] = __shfl_xor(result[4] , 1, 32);
result[5] = __shfl_xor(result[5] , 1, 32);
result[6] = __shfl_xor(result[6] , 1, 32);
result[7] = __shfl_xor(result[7] , 1, 32);
result[8] = __shfl_xor(result[8] , 2, 32);
result[9] = __shfl_xor(result[9] , 2, 32);
result[10] = __shfl_xor(result[10], 2, 32);
result[11] = __shfl_xor(result[11], 2, 32);
result[12] = __shfl_xor(result[12], 3, 32);
result[13] = __shfl_xor(result[13], 3, 32);
result[14] = __shfl_xor(result[14], 3, 32);
result[15] = __shfl_xor(result[15], 3, 32);
uint4* integral_row = integral.ptr(blockIdx.x);
uint4 output;
///////
if (threadIdx.x % 4 == 0)
output = make_uint4(result[0], result[1], result[2], result[3]);
if (threadIdx.x % 4 == 1)
output = make_uint4(result[4], result[5], result[6], result[7]);
if (threadIdx.x % 4 == 2)
output = make_uint4(result[8], result[9], result[10], result[11]);
if (threadIdx.x % 4 == 3)
output = make_uint4(result[12], result[13], result[14], result[15]);
integral_row[threadIdx.x % 4 + (threadIdx.x / 4) * 16] = output;
///////
if (threadIdx.x % 4 == 2)
output = make_uint4(result[0], result[1], result[2], result[3]);
if (threadIdx.x % 4 == 3)
output = make_uint4(result[4], result[5], result[6], result[7]);
if (threadIdx.x % 4 == 0)
output = make_uint4(result[8], result[9], result[10], result[11]);
if (threadIdx.x % 4 == 1)
output = make_uint4(result[12], result[13], result[14], result[15]);
integral_row[(threadIdx.x + 2) % 4 + (threadIdx.x / 4) * 16 + 8] = output;
// continuning from the above example,
// this use of __shfl_xor() places the y0..y3 and w0..w3 data
// in order.
#pragma unroll
for (int i = 0; i < 16; ++i)
result[i] = __shfl_xor(result[i], 1, 32);
if (threadIdx.x % 4 == 0)
output = make_uint4(result[0], result[1], result[2], result[3]);
if (threadIdx.x % 4 == 1)
output = make_uint4(result[4], result[5], result[6], result[7]);
if (threadIdx.x % 4 == 2)
output = make_uint4(result[8], result[9], result[10], result[11]);
if (threadIdx.x % 4 == 3)
output = make_uint4(result[12], result[13], result[14], result[15]);
integral_row[threadIdx.x % 4 + (threadIdx.x / 4) * 16 + 4] = output;
///////
if (threadIdx.x % 4 == 2)
output = make_uint4(result[0], result[1], result[2], result[3]);
if (threadIdx.x % 4 == 3)
output = make_uint4(result[4], result[5], result[6], result[7]);
if (threadIdx.x % 4 == 0)
output = make_uint4(result[8], result[9], result[10], result[11]);
if (threadIdx.x % 4 == 1)
output = make_uint4(result[12], result[13], result[14], result[15]);
integral_row[(threadIdx.x + 2) % 4 + (threadIdx.x / 4) * 16 + 12] = output;
#endif
}
// This kernel computes columnwise prefix sums. When the data input is
// the row sums from above, this completes the integral image.
// The approach here is to have each block compute a local set of sums.
// First , the data covered by the block is loaded into shared memory,
// then instead of performing a sum in shared memory using __syncthreads
// between stages, the data is reformatted so that the necessary sums
// occur inside warps and the shuffle scan operation is used.
// The final set of sums from the block is then propgated, with the block
// computing "down" the image and adding the running sum to the local
// block sums.
__global__ void shfl_integral_vertical(cv::gpu::PtrStepSz<unsigned int> integral)
{
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 300)
__shared__ unsigned int sums[32][9];
const int tidx = blockIdx.x * blockDim.x + threadIdx.x;
const int lane_id = tidx % 8;
if (tidx >= integral.cols)
return;
sums[threadIdx.x][threadIdx.y] = 0;
__syncthreads();
unsigned int stepSum = 0;
for (int y = threadIdx.y; y < integral.rows; y += blockDim.y)
{
unsigned int* p = integral.ptr(y) + tidx;
unsigned int sum = *p;
sums[threadIdx.x][threadIdx.y] = sum;
__syncthreads();
// place into SMEM
// shfl scan reduce the SMEM, reformating so the column
// sums are computed in a warp
// then read out properly
const int j = threadIdx.x % 8;
const int k = threadIdx.x / 8 + threadIdx.y * 4;
int partial_sum = sums[k][j];
for (int i = 1; i <= 8; i *= 2)
{
int n = __shfl_up(partial_sum, i, 32);
if (lane_id >= i)
partial_sum += n;
}
sums[k][j] = partial_sum;
__syncthreads();
if (threadIdx.y > 0)
sum += sums[threadIdx.x][threadIdx.y - 1];
sum += stepSum;
stepSum += sums[threadIdx.x][blockDim.y - 1];
__syncthreads();
*p = sum;
}
#endif
}
void shfl_integral(const cv::gpu::PtrStepSzb& img, cv::gpu::PtrStepSz<unsigned int> integral, hipStream_t stream)
{
{
// each thread handles 16 values, use 1 block/row
// save, becouse step is actually can't be less 512 bytes
int block = integral.cols / 16;
// launch 1 block / row
const int grid = img.rows;
cudaSafeCall( hipFuncSetCacheConfig(shfl_integral_horizontal, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( shfl_integral_horizontal), dim3(grid), dim3(block), 0, stream, (const cv::gpu::PtrStepSz<uint4>) img, (cv::gpu::PtrStepSz<uint4>) integral);
cudaSafeCall( hipGetLastError() );
}
{
const dim3 block(32, 8);
const dim3 grid(cv::gpu::cudev::divUp(integral.cols, block.x), 1);
hipLaunchKernelGGL(( shfl_integral_vertical), dim3(grid), dim3(block), 0, stream, integral);
cudaSafeCall( hipGetLastError() );
}
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
__global__ void shfl_integral_vertical(cv::gpu::PtrStepSz<unsigned int> buffer, cv::gpu::PtrStepSz<unsigned int> integral)
{
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 300)
__shared__ unsigned int sums[32][9];
const int tidx = blockIdx.x * blockDim.x + threadIdx.x;
const int lane_id = tidx % 8;
if (tidx >= integral.cols)
return;
sums[threadIdx.x][threadIdx.y] = 0;
__syncthreads();
unsigned int stepSum = 0;
for (int y = threadIdx.y; y < integral.rows; y += blockDim.y)
{
unsigned int* p = buffer.ptr(y) + tidx;
unsigned int* dst = integral.ptr(y + 1) + tidx + 1;
unsigned int sum = *p;
sums[threadIdx.x][threadIdx.y] = sum;
__syncthreads();
// place into SMEM
// shfl scan reduce the SMEM, reformating so the column
// sums are computed in a warp
// then read out properly
const int j = threadIdx.x % 8;
const int k = threadIdx.x / 8 + threadIdx.y * 4;
int partial_sum = sums[k][j];
for (int i = 1; i <= 8; i *= 2)
{
int n = __shfl_up(partial_sum, i, 32);
if (lane_id >= i)
partial_sum += n;
}
sums[k][j] = partial_sum;
__syncthreads();
if (threadIdx.y > 0)
sum += sums[threadIdx.x][threadIdx.y - 1];
sum += stepSum;
stepSum += sums[threadIdx.x][blockDim.y - 1];
__syncthreads();
*dst = sum;
}
#endif
}
// used for frame preprocessing before Soft Cascade evaluation: no synchronization needed
void shfl_integral_gpu_buffered(cv::gpu::PtrStepSzb img, cv::gpu::PtrStepSz<uint4> buffer, cv::gpu::PtrStepSz<unsigned int> integral,
int blockStep, hipStream_t stream)
{
{
const int block = blockStep;
const int grid = img.rows;
cudaSafeCall( hipFuncSetCacheConfig(shfl_integral_horizontal, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( shfl_integral_horizontal), dim3(grid), dim3(block), 0, stream, (cv::gpu::PtrStepSz<uint4>) img, buffer);
cudaSafeCall( hipGetLastError() );
}
{
const dim3 block(32, 8);
const dim3 grid(cv::gpu::cudev::divUp(integral.cols, block.x), 1);
hipLaunchKernelGGL(( shfl_integral_vertical), dim3(grid), dim3(block), 0, stream, (cv::gpu::PtrStepSz<unsigned int>)buffer, integral);
cudaSafeCall( hipGetLastError() );
}
}
// 0
#define CV_DESCALE(x, n) (((x) + (1 << ((n)-1))) >> (n))
enum
{
yuv_shift = 14,
xyz_shift = 12,
R2Y = 4899,
G2Y = 9617,
B2Y = 1868
};
template <int bidx> static __device__ __forceinline__ unsigned char RGB2GrayConvert(unsigned char b, unsigned char g, unsigned char r)
{
// uint b = 0xffu & (src >> (bidx * 8));
// uint g = 0xffu & (src >> 8);
// uint r = 0xffu & (src >> ((bidx ^ 2) * 8));
return CV_DESCALE((unsigned int)(b * B2Y + g * G2Y + r * R2Y), yuv_shift);
}
__global__ void device_transform(const cv::gpu::PtrStepSz<uchar3> bgr, cv::gpu::PtrStepSzb gray)
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const uchar3 colored = (uchar3)(bgr.ptr(y))[x];
gray.ptr(y)[x] = RGB2GrayConvert<0>(colored.x, colored.y, colored.z);
}
///////
void transform(const cv::gpu::PtrStepSz<uchar3>& bgr, cv::gpu::PtrStepSzb gray)
{
const dim3 block(32, 8);
const dim3 grid(cv::gpu::cudev::divUp(bgr.cols, block.x), cv::gpu::cudev::divUp(bgr.rows, block.y));
hipLaunchKernelGGL(( device_transform), dim3(grid), dim3(block), 0, 0, bgr, gray);
cudaSafeCall(hipDeviceSynchronize());
}
}}}
| a899fb1b9e31bd8668ab40ff18a4bed7ec4ae0d4.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/core/cuda_devptrs.hpp"
#include "opencv2/core/cuda/common.hpp"
namespace cv { namespace softcascade { namespace cudev
{
typedef unsigned int uint;
typedef unsigned short ushort;
// Utility function to extract unsigned chars from an unsigned integer
__device__ uchar4 int_to_uchar4(unsigned int in)
{
uchar4 bytes;
bytes.x = (in & 0x000000ff) >> 0;
bytes.y = (in & 0x0000ff00) >> 8;
bytes.z = (in & 0x00ff0000) >> 16;
bytes.w = (in & 0xff000000) >> 24;
return bytes;
}
__global__ void shfl_integral_horizontal(const cv::gpu::PtrStep<uint4> img, cv::gpu::PtrStep<uint4> integral)
{
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 300)
__shared__ int sums[128];
const int id = threadIdx.x;
const int lane_id = id % warpSize;
const int warp_id = id / warpSize;
const uint4 data = img(blockIdx.x, id);
const uchar4 a = int_to_uchar4(data.x);
const uchar4 b = int_to_uchar4(data.y);
const uchar4 c = int_to_uchar4(data.z);
const uchar4 d = int_to_uchar4(data.w);
int result[16];
result[0] = a.x;
result[1] = result[0] + a.y;
result[2] = result[1] + a.z;
result[3] = result[2] + a.w;
result[4] = result[3] + b.x;
result[5] = result[4] + b.y;
result[6] = result[5] + b.z;
result[7] = result[6] + b.w;
result[8] = result[7] + c.x;
result[9] = result[8] + c.y;
result[10] = result[9] + c.z;
result[11] = result[10] + c.w;
result[12] = result[11] + d.x;
result[13] = result[12] + d.y;
result[14] = result[13] + d.z;
result[15] = result[14] + d.w;
int sum = result[15];
// the prefix sum for each thread's 16 value is computed,
// now the final sums (result[15]) need to be shared
// with the other threads and add. To do this,
// the __shfl_up() instruction is used and a shuffle scan
// operation is performed to distribute the sums to the correct
// threads
#pragma unroll
for (int i = 1; i < 32; i *= 2)
{
const int n = __shfl_up(sum, i, 32);
if (lane_id >= i)
{
#pragma unroll
for (int i = 0; i < 16; ++i)
result[i] += n;
sum += n;
}
}
// Now the final sum for the warp must be shared
// between warps. This is done by each warp
// having a thread store to shared memory, then
// having some other warp load the values and
// compute a prefix sum, again by using __shfl_up.
// The results are uniformly added back to the warps.
// last thread in the warp holding sum of the warp
// places that in shared
if (threadIdx.x % warpSize == warpSize - 1)
sums[warp_id] = result[15];
__syncthreads();
if (warp_id == 0)
{
int warp_sum = sums[lane_id];
#pragma unroll
for (int i = 1; i <= 32; i *= 2)
{
const int n = __shfl_up(warp_sum, i, 32);
if (lane_id >= i)
warp_sum += n;
}
sums[lane_id] = warp_sum;
}
__syncthreads();
int blockSum = 0;
// fold in unused warp
if (warp_id > 0)
{
blockSum = sums[warp_id - 1];
#pragma unroll
for (int i = 0; i < 16; ++i)
result[i] += blockSum;
}
// assemble result
// Each thread has 16 values to write, which are
// now integer data (to avoid overflow). Instead of
// each thread writing consecutive uint4s, the
// approach shown here experiments using
// the shuffle command to reformat the data
// inside the registers so that each thread holds
// consecutive data to be written so larger contiguous
// segments can be assembled for writing.
/*
For example data that needs to be written as
GMEM[16] <- x0 x1 x2 x3 y0 y1 y2 y3 z0 z1 z2 z3 w0 w1 w2 w3
but is stored in registers (r0..r3), in four threads (0..3) as:
threadId 0 1 2 3
r0 x0 y0 z0 w0
r1 x1 y1 z1 w1
r2 x2 y2 z2 w2
r3 x3 y3 z3 w3
after apply __shfl_xor operations to move data between registers r1..r3:
threadId 00 01 10 11
x0 y0 z0 w0
xor(01)->y1 x1 w1 z1
xor(10)->z2 w2 x2 y2
xor(11)->w3 z3 y3 x3
and now x0..x3, and z0..z3 can be written out in order by all threads.
In the current code, each register above is actually representing
four integers to be written as uint4's to GMEM.
*/
result[4] = __shfl_xor(result[4] , 1, 32);
result[5] = __shfl_xor(result[5] , 1, 32);
result[6] = __shfl_xor(result[6] , 1, 32);
result[7] = __shfl_xor(result[7] , 1, 32);
result[8] = __shfl_xor(result[8] , 2, 32);
result[9] = __shfl_xor(result[9] , 2, 32);
result[10] = __shfl_xor(result[10], 2, 32);
result[11] = __shfl_xor(result[11], 2, 32);
result[12] = __shfl_xor(result[12], 3, 32);
result[13] = __shfl_xor(result[13], 3, 32);
result[14] = __shfl_xor(result[14], 3, 32);
result[15] = __shfl_xor(result[15], 3, 32);
uint4* integral_row = integral.ptr(blockIdx.x);
uint4 output;
///////
if (threadIdx.x % 4 == 0)
output = make_uint4(result[0], result[1], result[2], result[3]);
if (threadIdx.x % 4 == 1)
output = make_uint4(result[4], result[5], result[6], result[7]);
if (threadIdx.x % 4 == 2)
output = make_uint4(result[8], result[9], result[10], result[11]);
if (threadIdx.x % 4 == 3)
output = make_uint4(result[12], result[13], result[14], result[15]);
integral_row[threadIdx.x % 4 + (threadIdx.x / 4) * 16] = output;
///////
if (threadIdx.x % 4 == 2)
output = make_uint4(result[0], result[1], result[2], result[3]);
if (threadIdx.x % 4 == 3)
output = make_uint4(result[4], result[5], result[6], result[7]);
if (threadIdx.x % 4 == 0)
output = make_uint4(result[8], result[9], result[10], result[11]);
if (threadIdx.x % 4 == 1)
output = make_uint4(result[12], result[13], result[14], result[15]);
integral_row[(threadIdx.x + 2) % 4 + (threadIdx.x / 4) * 16 + 8] = output;
// continuning from the above example,
// this use of __shfl_xor() places the y0..y3 and w0..w3 data
// in order.
#pragma unroll
for (int i = 0; i < 16; ++i)
result[i] = __shfl_xor(result[i], 1, 32);
if (threadIdx.x % 4 == 0)
output = make_uint4(result[0], result[1], result[2], result[3]);
if (threadIdx.x % 4 == 1)
output = make_uint4(result[4], result[5], result[6], result[7]);
if (threadIdx.x % 4 == 2)
output = make_uint4(result[8], result[9], result[10], result[11]);
if (threadIdx.x % 4 == 3)
output = make_uint4(result[12], result[13], result[14], result[15]);
integral_row[threadIdx.x % 4 + (threadIdx.x / 4) * 16 + 4] = output;
///////
if (threadIdx.x % 4 == 2)
output = make_uint4(result[0], result[1], result[2], result[3]);
if (threadIdx.x % 4 == 3)
output = make_uint4(result[4], result[5], result[6], result[7]);
if (threadIdx.x % 4 == 0)
output = make_uint4(result[8], result[9], result[10], result[11]);
if (threadIdx.x % 4 == 1)
output = make_uint4(result[12], result[13], result[14], result[15]);
integral_row[(threadIdx.x + 2) % 4 + (threadIdx.x / 4) * 16 + 12] = output;
#endif
}
// This kernel computes columnwise prefix sums. When the data input is
// the row sums from above, this completes the integral image.
// The approach here is to have each block compute a local set of sums.
// First , the data covered by the block is loaded into shared memory,
// then instead of performing a sum in shared memory using __syncthreads
// between stages, the data is reformatted so that the necessary sums
// occur inside warps and the shuffle scan operation is used.
// The final set of sums from the block is then propgated, with the block
// computing "down" the image and adding the running sum to the local
// block sums.
__global__ void shfl_integral_vertical(cv::gpu::PtrStepSz<unsigned int> integral)
{
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 300)
__shared__ unsigned int sums[32][9];
const int tidx = blockIdx.x * blockDim.x + threadIdx.x;
const int lane_id = tidx % 8;
if (tidx >= integral.cols)
return;
sums[threadIdx.x][threadIdx.y] = 0;
__syncthreads();
unsigned int stepSum = 0;
for (int y = threadIdx.y; y < integral.rows; y += blockDim.y)
{
unsigned int* p = integral.ptr(y) + tidx;
unsigned int sum = *p;
sums[threadIdx.x][threadIdx.y] = sum;
__syncthreads();
// place into SMEM
// shfl scan reduce the SMEM, reformating so the column
// sums are computed in a warp
// then read out properly
const int j = threadIdx.x % 8;
const int k = threadIdx.x / 8 + threadIdx.y * 4;
int partial_sum = sums[k][j];
for (int i = 1; i <= 8; i *= 2)
{
int n = __shfl_up(partial_sum, i, 32);
if (lane_id >= i)
partial_sum += n;
}
sums[k][j] = partial_sum;
__syncthreads();
if (threadIdx.y > 0)
sum += sums[threadIdx.x][threadIdx.y - 1];
sum += stepSum;
stepSum += sums[threadIdx.x][blockDim.y - 1];
__syncthreads();
*p = sum;
}
#endif
}
void shfl_integral(const cv::gpu::PtrStepSzb& img, cv::gpu::PtrStepSz<unsigned int> integral, cudaStream_t stream)
{
{
// each thread handles 16 values, use 1 block/row
// save, becouse step is actually can't be less 512 bytes
int block = integral.cols / 16;
// launch 1 block / row
const int grid = img.rows;
cudaSafeCall( cudaFuncSetCacheConfig(shfl_integral_horizontal, cudaFuncCachePreferL1) );
shfl_integral_horizontal<<<grid, block, 0, stream>>>((const cv::gpu::PtrStepSz<uint4>) img, (cv::gpu::PtrStepSz<uint4>) integral);
cudaSafeCall( cudaGetLastError() );
}
{
const dim3 block(32, 8);
const dim3 grid(cv::gpu::cudev::divUp(integral.cols, block.x), 1);
shfl_integral_vertical<<<grid, block, 0, stream>>>(integral);
cudaSafeCall( cudaGetLastError() );
}
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
__global__ void shfl_integral_vertical(cv::gpu::PtrStepSz<unsigned int> buffer, cv::gpu::PtrStepSz<unsigned int> integral)
{
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 300)
__shared__ unsigned int sums[32][9];
const int tidx = blockIdx.x * blockDim.x + threadIdx.x;
const int lane_id = tidx % 8;
if (tidx >= integral.cols)
return;
sums[threadIdx.x][threadIdx.y] = 0;
__syncthreads();
unsigned int stepSum = 0;
for (int y = threadIdx.y; y < integral.rows; y += blockDim.y)
{
unsigned int* p = buffer.ptr(y) + tidx;
unsigned int* dst = integral.ptr(y + 1) + tidx + 1;
unsigned int sum = *p;
sums[threadIdx.x][threadIdx.y] = sum;
__syncthreads();
// place into SMEM
// shfl scan reduce the SMEM, reformating so the column
// sums are computed in a warp
// then read out properly
const int j = threadIdx.x % 8;
const int k = threadIdx.x / 8 + threadIdx.y * 4;
int partial_sum = sums[k][j];
for (int i = 1; i <= 8; i *= 2)
{
int n = __shfl_up(partial_sum, i, 32);
if (lane_id >= i)
partial_sum += n;
}
sums[k][j] = partial_sum;
__syncthreads();
if (threadIdx.y > 0)
sum += sums[threadIdx.x][threadIdx.y - 1];
sum += stepSum;
stepSum += sums[threadIdx.x][blockDim.y - 1];
__syncthreads();
*dst = sum;
}
#endif
}
// used for frame preprocessing before Soft Cascade evaluation: no synchronization needed
void shfl_integral_gpu_buffered(cv::gpu::PtrStepSzb img, cv::gpu::PtrStepSz<uint4> buffer, cv::gpu::PtrStepSz<unsigned int> integral,
int blockStep, cudaStream_t stream)
{
{
const int block = blockStep;
const int grid = img.rows;
cudaSafeCall( cudaFuncSetCacheConfig(shfl_integral_horizontal, cudaFuncCachePreferL1) );
shfl_integral_horizontal<<<grid, block, 0, stream>>>((cv::gpu::PtrStepSz<uint4>) img, buffer);
cudaSafeCall( cudaGetLastError() );
}
{
const dim3 block(32, 8);
const dim3 grid(cv::gpu::cudev::divUp(integral.cols, block.x), 1);
shfl_integral_vertical<<<grid, block, 0, stream>>>((cv::gpu::PtrStepSz<unsigned int>)buffer, integral);
cudaSafeCall( cudaGetLastError() );
}
}
// 0
#define CV_DESCALE(x, n) (((x) + (1 << ((n)-1))) >> (n))
enum
{
yuv_shift = 14,
xyz_shift = 12,
R2Y = 4899,
G2Y = 9617,
B2Y = 1868
};
template <int bidx> static __device__ __forceinline__ unsigned char RGB2GrayConvert(unsigned char b, unsigned char g, unsigned char r)
{
// uint b = 0xffu & (src >> (bidx * 8));
// uint g = 0xffu & (src >> 8);
// uint r = 0xffu & (src >> ((bidx ^ 2) * 8));
return CV_DESCALE((unsigned int)(b * B2Y + g * G2Y + r * R2Y), yuv_shift);
}
__global__ void device_transform(const cv::gpu::PtrStepSz<uchar3> bgr, cv::gpu::PtrStepSzb gray)
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const uchar3 colored = (uchar3)(bgr.ptr(y))[x];
gray.ptr(y)[x] = RGB2GrayConvert<0>(colored.x, colored.y, colored.z);
}
///////
void transform(const cv::gpu::PtrStepSz<uchar3>& bgr, cv::gpu::PtrStepSzb gray)
{
const dim3 block(32, 8);
const dim3 grid(cv::gpu::cudev::divUp(bgr.cols, block.x), cv::gpu::cudev::divUp(bgr.rows, block.y));
device_transform<<<grid, block>>>(bgr, gray);
cudaSafeCall(cudaDeviceSynchronize());
}
}}}
|
ef1f7b03de2ce33e3bc8408b7715230e4c1202ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_generate_chunk_kernel;
int xdim0_generate_chunk_kernel_h = -1;
__constant__ int ydim0_generate_chunk_kernel;
int ydim0_generate_chunk_kernel_h = -1;
__constant__ int xdim1_generate_chunk_kernel;
int xdim1_generate_chunk_kernel_h = -1;
__constant__ int ydim1_generate_chunk_kernel;
int ydim1_generate_chunk_kernel_h = -1;
__constant__ int xdim2_generate_chunk_kernel;
int xdim2_generate_chunk_kernel_h = -1;
__constant__ int ydim2_generate_chunk_kernel;
int ydim2_generate_chunk_kernel_h = -1;
__constant__ int xdim3_generate_chunk_kernel;
int xdim3_generate_chunk_kernel_h = -1;
__constant__ int ydim3_generate_chunk_kernel;
int ydim3_generate_chunk_kernel_h = -1;
__constant__ int xdim4_generate_chunk_kernel;
int xdim4_generate_chunk_kernel_h = -1;
__constant__ int ydim4_generate_chunk_kernel;
int ydim4_generate_chunk_kernel_h = -1;
__constant__ int xdim5_generate_chunk_kernel;
int xdim5_generate_chunk_kernel_h = -1;
__constant__ int ydim5_generate_chunk_kernel;
int ydim5_generate_chunk_kernel_h = -1;
__constant__ int xdim6_generate_chunk_kernel;
int xdim6_generate_chunk_kernel_h = -1;
__constant__ int ydim6_generate_chunk_kernel;
int ydim6_generate_chunk_kernel_h = -1;
__constant__ int xdim7_generate_chunk_kernel;
int xdim7_generate_chunk_kernel_h = -1;
__constant__ int ydim7_generate_chunk_kernel;
int ydim7_generate_chunk_kernel_h = -1;
__constant__ int xdim8_generate_chunk_kernel;
int xdim8_generate_chunk_kernel_h = -1;
__constant__ int ydim8_generate_chunk_kernel;
int ydim8_generate_chunk_kernel_h = -1;
__constant__ int xdim9_generate_chunk_kernel;
int xdim9_generate_chunk_kernel_h = -1;
__constant__ int ydim9_generate_chunk_kernel;
int ydim9_generate_chunk_kernel_h = -1;
__constant__ int xdim10_generate_chunk_kernel;
int xdim10_generate_chunk_kernel_h = -1;
__constant__ int ydim10_generate_chunk_kernel;
int ydim10_generate_chunk_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#undef OPS_ACC8
#undef OPS_ACC9
#undef OPS_ACC10
#define OPS_ACC0(x,y,z) (x+xdim0_generate_chunk_kernel*(y)+xdim0_generate_chunk_kernel*ydim0_generate_chunk_kernel*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_generate_chunk_kernel*(y)+xdim1_generate_chunk_kernel*ydim1_generate_chunk_kernel*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_generate_chunk_kernel*(y)+xdim2_generate_chunk_kernel*ydim2_generate_chunk_kernel*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_generate_chunk_kernel*(y)+xdim3_generate_chunk_kernel*ydim3_generate_chunk_kernel*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_generate_chunk_kernel*(y)+xdim4_generate_chunk_kernel*ydim4_generate_chunk_kernel*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_generate_chunk_kernel*(y)+xdim5_generate_chunk_kernel*ydim5_generate_chunk_kernel*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_generate_chunk_kernel*(y)+xdim6_generate_chunk_kernel*ydim6_generate_chunk_kernel*(z))
#define OPS_ACC7(x,y,z) (x+xdim7_generate_chunk_kernel*(y)+xdim7_generate_chunk_kernel*ydim7_generate_chunk_kernel*(z))
#define OPS_ACC8(x,y,z) (x+xdim8_generate_chunk_kernel*(y)+xdim8_generate_chunk_kernel*ydim8_generate_chunk_kernel*(z))
#define OPS_ACC9(x,y,z) (x+xdim9_generate_chunk_kernel*(y)+xdim9_generate_chunk_kernel*ydim9_generate_chunk_kernel*(z))
#define OPS_ACC10(x,y,z) (x+xdim10_generate_chunk_kernel*(y)+xdim10_generate_chunk_kernel*ydim10_generate_chunk_kernel*(z))
//user function
__device__
void generate_chunk_kernel_gpu( const double *vertexx,
const double *vertexy, const double *vertexz,
double *energy0, double *density0,
double *xvel0, double *yvel0, double *zvel0,
const double *cellx, const double *celly, const double *cellz) {
double radius, x_cent, y_cent, z_cent;
int is_in = 0;
energy0[OPS_ACC3(0,0,0)]= states[0].energy;
density0[OPS_ACC4(0,0,0)]= states[0].density;
xvel0[OPS_ACC5(0,0,0)]=states[0].xvel;
yvel0[OPS_ACC6(0,0,0)]=states[0].yvel;
zvel0[OPS_ACC7(0,0,0)]=states[0].zvel;
for(int i = 1; i<number_of_states; i++) {
x_cent=states[i].xmin;
y_cent=states[i].ymin;
z_cent=states[i].zmin;
if (states[i].geometry == g_cube) {
for (int i1 = -1; i1 <= 0; i1++) {
for (int j1 = -1; j1 <= 0; j1++) {
for (int k1 = -1; k1 <= 0; k1++) {
if(vertexx[OPS_ACC0(1+i1,0,0)] >= states[i].xmin && vertexx[OPS_ACC0(0+i1,0,0)] < states[i].xmax) {
if(vertexy[OPS_ACC1(0,1+j1,0)] >= states[i].ymin && vertexy[OPS_ACC1(0,0+j1,0)] < states[i].ymax) {
if(vertexz[OPS_ACC2(0,0,1+k1)] >= states[i].zmin && vertexz[OPS_ACC2(0,0,0+k1)] < states[i].zmax) {
is_in=1;
}
}
}
}
}
}
if(vertexx[OPS_ACC0(1,0,0)] >= states[i].xmin && vertexx[OPS_ACC0(0,0,0)] < states[i].xmax) {
if(vertexy[OPS_ACC1(0,1,0)] >= states[i].ymin && vertexy[OPS_ACC1(0,0,0)] < states[i].ymax) {
if(vertexz[OPS_ACC2(0,0,1)] >= states[i].zmin && vertexz[OPS_ACC2(0,0,0)] < states[i].zmax) {
energy0[OPS_ACC3(0,0,0)] = states[i].energy;
density0[OPS_ACC4(0,0,0)] = states[i].density;
}
}
}
if (is_in) {
xvel0[OPS_ACC5(0,0,0)] = states[i].xvel;
yvel0[OPS_ACC6(0,0,0)] = states[i].yvel;
zvel0[OPS_ACC7(0,0,0)] = states[i].zvel;
}
}
else if(states[i].geometry == g_sphe) {
for (int i1 = -1; i1 <= 0; i1++) {
for (int j1 = -1; j1 <= 0; j1++) {
for (int k1 = -1; k1 <= 0; k1++) {
radius = sqrt ((cellx[OPS_ACC8(0,0,0)] - x_cent) * (cellx[OPS_ACC8(0,0,0)] - x_cent) +
(celly[OPS_ACC9(0,0,0)] - y_cent) * (celly[OPS_ACC9(0,0,0)] - y_cent) +
(cellz[OPS_ACC10(0,0,0)] - z_cent) * (cellz[OPS_ACC10(0,0,0)] - z_cent));
if(radius <= states[i].radius) is_in = 1;
}
}
}
if(radius <= states[i].radius) {
energy0[OPS_ACC3(0,0,0)] = states[i].energy;
density0[OPS_ACC4(0,0,0)] = states[i].density;
}
if (is_in) {
xvel0[OPS_ACC5(0,0,0)] = states[i].xvel;
yvel0[OPS_ACC6(0,0,0)] = states[i].yvel;
zvel0[OPS_ACC7(0,0,0)] = states[i].zvel;
}
}
else if(states[i].geometry == g_point) {
for (int i1 = -1; i1 <= 0; i1++) {
for (int j1 = -1; j1 <= 0; j1++) {
for (int k1 = -1; k1 <= 0; k1++) {
if(vertexx[OPS_ACC0(0+i1,0,0)] == x_cent && vertexy[OPS_ACC1(0,0+j1,0)] == y_cent && vertexz[OPS_ACC2(0,0,0+k1)] == z_cent)
is_in = 1;
}
}
}
if(vertexx[OPS_ACC0(0,0,0)] == x_cent && vertexy[OPS_ACC1(0,0,0)] == y_cent && vertexz[OPS_ACC2(0,0,0)] == z_cent) {
energy0[OPS_ACC3(0,0,0)] = states[i].energy;
density0[OPS_ACC4(0,0,0)] = states[i].density;
}
if (is_in) {
xvel0[OPS_ACC5(0,0,0)] = states[i].xvel;
yvel0[OPS_ACC6(0,0,0)] = states[i].yvel;
zvel0[OPS_ACC7(0,0,0)] = states[i].zvel;
}
}
}
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#undef OPS_ACC8
#undef OPS_ACC9
#undef OPS_ACC10
__global__ void ops_generate_chunk_kernel(
const double* __restrict arg0,
const double* __restrict arg1,
const double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
double* __restrict arg7,
const double* __restrict arg8,
const double* __restrict arg9,
const double* __restrict arg10,
int size0,
int size1,
int size2 ){
  // Device wrapper: one thread per (x,y,z) grid point of the loop range.
  // Each dat pointer is advanced to this thread's element before the user
  // kernel runs.  Dats that are constant along a dimension carry a zero
  // stride there: arg0/arg8 vary in x only, arg1/arg9 in y only,
  // arg2/arg10 in z only, arg3..arg7 in all three dimensions.
  const int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  const int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  const int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  // Guard first: grid dims are rounded up, so edge threads fall outside
  // the requested range and must do nothing.
  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    arg0 += idx_x;
    arg1 += idx_y * xdim1_generate_chunk_kernel;
    arg2 += idx_z * xdim2_generate_chunk_kernel * ydim2_generate_chunk_kernel;
    arg3 += idx_x + idx_y * xdim3_generate_chunk_kernel
                  + idx_z * xdim3_generate_chunk_kernel * ydim3_generate_chunk_kernel;
    arg4 += idx_x + idx_y * xdim4_generate_chunk_kernel
                  + idx_z * xdim4_generate_chunk_kernel * ydim4_generate_chunk_kernel;
    arg5 += idx_x + idx_y * xdim5_generate_chunk_kernel
                  + idx_z * xdim5_generate_chunk_kernel * ydim5_generate_chunk_kernel;
    arg6 += idx_x + idx_y * xdim6_generate_chunk_kernel
                  + idx_z * xdim6_generate_chunk_kernel * ydim6_generate_chunk_kernel;
    arg7 += idx_x + idx_y * xdim7_generate_chunk_kernel
                  + idx_z * xdim7_generate_chunk_kernel * ydim7_generate_chunk_kernel;
    arg8 += idx_x;
    arg9 += idx_y * xdim9_generate_chunk_kernel;
    arg10 += idx_z * xdim10_generate_chunk_kernel * ydim10_generate_chunk_kernel;
    generate_chunk_kernel_gpu(arg0, arg1, arg2, arg3,
                              arg4, arg5, arg6, arg7,
                              arg8, arg9, arg10);
  }
}
// host stub function
//
// Host-side driver for the generate_chunk kernel (OPS loop index 10).
// It clips the requested iteration range to the locally owned sub-block
// (under MPI), mirrors the current dat dimensions into the device
// __constant__ symbols when they have changed, computes a byte base
// offset for each of the 11 dats, performs halo/device exchanges, and
// launches ops_generate_chunk_kernel.
// When OPS_LAZY is defined this same body becomes the deferred executor
// invoked from a queued kernel descriptor instead of being called
// directly with explicit ops_arg parameters.
#ifndef OPS_LAZY
void ops_par_loop_generate_chunk_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8,
ops_arg arg9, ops_arg arg10) {
#else
void ops_par_loop_generate_chunk_kernel_execute(ops_kernel_descriptor *desc) {
// Lazy path: unpack the loop parameters from the descriptor.
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
ops_arg arg8 = desc->args[8];
ops_arg arg9 = desc->args[9];
ops_arg arg10 = desc->args[10];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[11] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10};
#if CHECKPOINTING && !OPS_LAZY
// Give the checkpointing system a chance to skip/restore this loop.
if (!ops_checkpointing_before(args,11,range,10)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(10,"generate_chunk_kernel");
OPS_kernels[10].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
// Clip the global range against this rank's decomposition; ranks that do
// not own any of the block return early.
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
// Edge ranks absorb any range that extends past the decomposition.
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
// Local iteration extents (clamped so empty ranges launch nothing).
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
// Current x/y sizes of each dat, compared below against the cached host
// copies (_h) of the device __constant__ symbols.
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
int xdim7 = args[7].dat->size[0];
int ydim7 = args[7].dat->size[1];
int xdim8 = args[8].dat->size[0];
int ydim8 = args[8].dat->size[1];
int xdim9 = args[9].dat->size[0];
int ydim9 = args[9].dat->size[1];
int xdim10 = args[10].dat->size[0];
int ydim10 = args[10].dat->size[1];
// Refresh the device-side __constant__ dims only when any differ from the
// cached values, avoiding redundant hipMemcpyToSymbol traffic per launch.
if (xdim0 != xdim0_generate_chunk_kernel_h || ydim0 != ydim0_generate_chunk_kernel_h || xdim1 != xdim1_generate_chunk_kernel_h || ydim1 != ydim1_generate_chunk_kernel_h || xdim2 != xdim2_generate_chunk_kernel_h || ydim2 != ydim2_generate_chunk_kernel_h || xdim3 != xdim3_generate_chunk_kernel_h || ydim3 != ydim3_generate_chunk_kernel_h || xdim4 != xdim4_generate_chunk_kernel_h || ydim4 != ydim4_generate_chunk_kernel_h || xdim5 != xdim5_generate_chunk_kernel_h || ydim5 != ydim5_generate_chunk_kernel_h || xdim6 != xdim6_generate_chunk_kernel_h || ydim6 != ydim6_generate_chunk_kernel_h || xdim7 != xdim7_generate_chunk_kernel_h || ydim7 != ydim7_generate_chunk_kernel_h || xdim8 != xdim8_generate_chunk_kernel_h || ydim8 != ydim8_generate_chunk_kernel_h || xdim9 != xdim9_generate_chunk_kernel_h || ydim9 != ydim9_generate_chunk_kernel_h || xdim10 != xdim10_generate_chunk_kernel_h || ydim10 != ydim10_generate_chunk_kernel_h) {
hipMemcpyToSymbol( xdim0_generate_chunk_kernel, &xdim0, sizeof(int) );
xdim0_generate_chunk_kernel_h = xdim0;
hipMemcpyToSymbol( ydim0_generate_chunk_kernel, &ydim0, sizeof(int) );
ydim0_generate_chunk_kernel_h = ydim0;
hipMemcpyToSymbol( xdim1_generate_chunk_kernel, &xdim1, sizeof(int) );
xdim1_generate_chunk_kernel_h = xdim1;
hipMemcpyToSymbol( ydim1_generate_chunk_kernel, &ydim1, sizeof(int) );
ydim1_generate_chunk_kernel_h = ydim1;
hipMemcpyToSymbol( xdim2_generate_chunk_kernel, &xdim2, sizeof(int) );
xdim2_generate_chunk_kernel_h = xdim2;
hipMemcpyToSymbol( ydim2_generate_chunk_kernel, &ydim2, sizeof(int) );
ydim2_generate_chunk_kernel_h = ydim2;
hipMemcpyToSymbol( xdim3_generate_chunk_kernel, &xdim3, sizeof(int) );
xdim3_generate_chunk_kernel_h = xdim3;
hipMemcpyToSymbol( ydim3_generate_chunk_kernel, &ydim3, sizeof(int) );
ydim3_generate_chunk_kernel_h = ydim3;
hipMemcpyToSymbol( xdim4_generate_chunk_kernel, &xdim4, sizeof(int) );
xdim4_generate_chunk_kernel_h = xdim4;
hipMemcpyToSymbol( ydim4_generate_chunk_kernel, &ydim4, sizeof(int) );
ydim4_generate_chunk_kernel_h = ydim4;
hipMemcpyToSymbol( xdim5_generate_chunk_kernel, &xdim5, sizeof(int) );
xdim5_generate_chunk_kernel_h = xdim5;
hipMemcpyToSymbol( ydim5_generate_chunk_kernel, &ydim5, sizeof(int) );
ydim5_generate_chunk_kernel_h = ydim5;
hipMemcpyToSymbol( xdim6_generate_chunk_kernel, &xdim6, sizeof(int) );
xdim6_generate_chunk_kernel_h = xdim6;
hipMemcpyToSymbol( ydim6_generate_chunk_kernel, &ydim6, sizeof(int) );
ydim6_generate_chunk_kernel_h = ydim6;
hipMemcpyToSymbol( xdim7_generate_chunk_kernel, &xdim7, sizeof(int) );
xdim7_generate_chunk_kernel_h = xdim7;
hipMemcpyToSymbol( ydim7_generate_chunk_kernel, &ydim7, sizeof(int) );
ydim7_generate_chunk_kernel_h = ydim7;
hipMemcpyToSymbol( xdim8_generate_chunk_kernel, &xdim8, sizeof(int) );
xdim8_generate_chunk_kernel_h = xdim8;
hipMemcpyToSymbol( ydim8_generate_chunk_kernel, &ydim8, sizeof(int) );
ydim8_generate_chunk_kernel_h = ydim8;
hipMemcpyToSymbol( xdim9_generate_chunk_kernel, &xdim9, sizeof(int) );
xdim9_generate_chunk_kernel_h = xdim9;
hipMemcpyToSymbol( ydim9_generate_chunk_kernel, &ydim9, sizeof(int) );
ydim9_generate_chunk_kernel_h = ydim9;
hipMemcpyToSymbol( xdim10_generate_chunk_kernel, &xdim10, sizeof(int) );
xdim10_generate_chunk_kernel_h = xdim10;
hipMemcpyToSymbol( ydim10_generate_chunk_kernel, &ydim10, sizeof(int) );
ydim10_generate_chunk_kernel_h = ydim10;
}
// Launch configuration: one thread per grid point, grid rounded up so the
// whole local range is covered (the kernel bounds-checks the tail).
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
// Bytes per element for each dat (per component when OPS_soa is set).
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
int dat7 = (OPS_soa ? args[7].dat->type_size : args[7].dat->elem_size);
int dat8 = (OPS_soa ? args[8].dat->type_size : args[8].dat->elem_size);
int dat9 = (OPS_soa ? args[9].dat->type_size : args[9].dat->elem_size);
int dat10 = (OPS_soa ? args[10].dat->type_size : args[10].dat->elem_size);
char *p_a[11];
//set up initial pointers
// baseN accumulates the byte offset of (start[0],start[1],start[2])
// within dat N: x term, then + y row term, then + z plane term.
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
int base7 = args[7].dat->base_offset +
dat7 * 1 * (start[0] * args[7].stencil->stride[0]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
(start[1] * args[7].stencil->stride[1]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
args[7].dat->size[1] *
(start[2] * args[7].stencil->stride[2]);
p_a[7] = (char *)args[7].data_d + base7;
int base8 = args[8].dat->base_offset +
dat8 * 1 * (start[0] * args[8].stencil->stride[0]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
(start[1] * args[8].stencil->stride[1]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
args[8].dat->size[1] *
(start[2] * args[8].stencil->stride[2]);
p_a[8] = (char *)args[8].data_d + base8;
int base9 = args[9].dat->base_offset +
dat9 * 1 * (start[0] * args[9].stencil->stride[0]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
(start[1] * args[9].stencil->stride[1]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
args[9].dat->size[1] *
(start[2] * args[9].stencil->stride[2]);
p_a[9] = (char *)args[9].data_d + base9;
int base10 = args[10].dat->base_offset +
dat10 * 1 * (start[0] * args[10].stencil->stride[0]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
(start[1] * args[10].stencil->stride[1]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
args[10].dat->size[1] *
(start[2] * args[10].stencil->stride[2]);
p_a[10] = (char *)args[10].data_d + base10;
#ifndef OPS_LAZY
// Ensure device copies and halo regions are up to date before the launch
// (the lazy path performs these steps at enqueue time).
ops_H_D_exchanges_device(args, 11);
ops_halo_exchanges(args,11,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[10].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_generate_chunk_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (double *)p_a[7],
(double *)p_a[8], (double *)p_a[9],
(double *)p_a[10],x_size, y_size, z_size);
// Catch launch-configuration errors immediately; execution errors are
// only surfaced by the synchronize below when diagnostics are on.
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[10].time += t1-t2;
}
#ifndef OPS_LAZY
// Mark the output dats (args 3..7) dirty on the device and their halos stale.
ops_set_dirtybit_device(args, 11);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
ops_set_halo_dirtybit3(&args[7],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[10].mpi_time += t2-t1;
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg7);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg8);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg9);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg10);
}
}
#ifdef OPS_LAZY
// Deferred-execution entry point: packages the loop arguments into a
// kernel descriptor and enqueues it; the actual work happens later in
// ops_par_loop_generate_chunk_kernel_execute.
void ops_par_loop_generate_chunk_kernel(char const *name, ops_block block, int dim, int* range,
  ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10) {
  ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 10;
  // djb2-style hash over the kernel index, the range and each dat index,
  // identifying this particular loop invocation.
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 10;
  for ( int i=0; i<6; i++ ){
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 11;
  desc->args = (ops_arg*)malloc(11*sizeof(ops_arg));
  // Copy the arguments in order, folding each dat index into the hash in
  // the same sequence as the unrolled generated code would.
  ops_arg loop_args[11] = { arg0, arg1, arg2, arg3, arg4, arg5,
                            arg6, arg7, arg8, arg9, arg10 };
  for ( int i=0; i<11; i++ ){
    desc->args[i] = loop_args[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + loop_args[i].dat->index;
  }
  desc->function = ops_par_loop_generate_chunk_kernel_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(10,"generate_chunk_kernel");
  }
  ops_enqueue_kernel(desc);
}
#endif
| ef1f7b03de2ce33e3bc8408b7715230e4c1202ac.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_generate_chunk_kernel;
int xdim0_generate_chunk_kernel_h = -1;
__constant__ int ydim0_generate_chunk_kernel;
int ydim0_generate_chunk_kernel_h = -1;
__constant__ int xdim1_generate_chunk_kernel;
int xdim1_generate_chunk_kernel_h = -1;
__constant__ int ydim1_generate_chunk_kernel;
int ydim1_generate_chunk_kernel_h = -1;
__constant__ int xdim2_generate_chunk_kernel;
int xdim2_generate_chunk_kernel_h = -1;
__constant__ int ydim2_generate_chunk_kernel;
int ydim2_generate_chunk_kernel_h = -1;
__constant__ int xdim3_generate_chunk_kernel;
int xdim3_generate_chunk_kernel_h = -1;
__constant__ int ydim3_generate_chunk_kernel;
int ydim3_generate_chunk_kernel_h = -1;
__constant__ int xdim4_generate_chunk_kernel;
int xdim4_generate_chunk_kernel_h = -1;
__constant__ int ydim4_generate_chunk_kernel;
int ydim4_generate_chunk_kernel_h = -1;
__constant__ int xdim5_generate_chunk_kernel;
int xdim5_generate_chunk_kernel_h = -1;
__constant__ int ydim5_generate_chunk_kernel;
int ydim5_generate_chunk_kernel_h = -1;
__constant__ int xdim6_generate_chunk_kernel;
int xdim6_generate_chunk_kernel_h = -1;
__constant__ int ydim6_generate_chunk_kernel;
int ydim6_generate_chunk_kernel_h = -1;
__constant__ int xdim7_generate_chunk_kernel;
int xdim7_generate_chunk_kernel_h = -1;
__constant__ int ydim7_generate_chunk_kernel;
int ydim7_generate_chunk_kernel_h = -1;
__constant__ int xdim8_generate_chunk_kernel;
int xdim8_generate_chunk_kernel_h = -1;
__constant__ int ydim8_generate_chunk_kernel;
int ydim8_generate_chunk_kernel_h = -1;
__constant__ int xdim9_generate_chunk_kernel;
int xdim9_generate_chunk_kernel_h = -1;
__constant__ int ydim9_generate_chunk_kernel;
int ydim9_generate_chunk_kernel_h = -1;
__constant__ int xdim10_generate_chunk_kernel;
int xdim10_generate_chunk_kernel_h = -1;
__constant__ int ydim10_generate_chunk_kernel;
int ydim10_generate_chunk_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#undef OPS_ACC8
#undef OPS_ACC9
#undef OPS_ACC10
#define OPS_ACC0(x,y,z) (x+xdim0_generate_chunk_kernel*(y)+xdim0_generate_chunk_kernel*ydim0_generate_chunk_kernel*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_generate_chunk_kernel*(y)+xdim1_generate_chunk_kernel*ydim1_generate_chunk_kernel*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_generate_chunk_kernel*(y)+xdim2_generate_chunk_kernel*ydim2_generate_chunk_kernel*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_generate_chunk_kernel*(y)+xdim3_generate_chunk_kernel*ydim3_generate_chunk_kernel*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_generate_chunk_kernel*(y)+xdim4_generate_chunk_kernel*ydim4_generate_chunk_kernel*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_generate_chunk_kernel*(y)+xdim5_generate_chunk_kernel*ydim5_generate_chunk_kernel*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_generate_chunk_kernel*(y)+xdim6_generate_chunk_kernel*ydim6_generate_chunk_kernel*(z))
#define OPS_ACC7(x,y,z) (x+xdim7_generate_chunk_kernel*(y)+xdim7_generate_chunk_kernel*ydim7_generate_chunk_kernel*(z))
#define OPS_ACC8(x,y,z) (x+xdim8_generate_chunk_kernel*(y)+xdim8_generate_chunk_kernel*ydim8_generate_chunk_kernel*(z))
#define OPS_ACC9(x,y,z) (x+xdim9_generate_chunk_kernel*(y)+xdim9_generate_chunk_kernel*ydim9_generate_chunk_kernel*(z))
#define OPS_ACC10(x,y,z) (x+xdim10_generate_chunk_kernel*(y)+xdim10_generate_chunk_kernel*ydim10_generate_chunk_kernel*(z))
//user function
__device__
void generate_chunk_kernel_gpu( const double *vertexx,
const double *vertexy, const double *vertexz,
double *energy0, double *density0,
double *xvel0, double *yvel0, double *zvel0,
const double *cellx, const double *celly, const double *cellz) {
double radius, x_cent, y_cent, z_cent;
int is_in = 0;
energy0[OPS_ACC3(0,0,0)]= states[0].energy;
density0[OPS_ACC4(0,0,0)]= states[0].density;
xvel0[OPS_ACC5(0,0,0)]=states[0].xvel;
yvel0[OPS_ACC6(0,0,0)]=states[0].yvel;
zvel0[OPS_ACC7(0,0,0)]=states[0].zvel;
for(int i = 1; i<number_of_states; i++) {
x_cent=states[i].xmin;
y_cent=states[i].ymin;
z_cent=states[i].zmin;
if (states[i].geometry == g_cube) {
for (int i1 = -1; i1 <= 0; i1++) {
for (int j1 = -1; j1 <= 0; j1++) {
for (int k1 = -1; k1 <= 0; k1++) {
if(vertexx[OPS_ACC0(1+i1,0,0)] >= states[i].xmin && vertexx[OPS_ACC0(0+i1,0,0)] < states[i].xmax) {
if(vertexy[OPS_ACC1(0,1+j1,0)] >= states[i].ymin && vertexy[OPS_ACC1(0,0+j1,0)] < states[i].ymax) {
if(vertexz[OPS_ACC2(0,0,1+k1)] >= states[i].zmin && vertexz[OPS_ACC2(0,0,0+k1)] < states[i].zmax) {
is_in=1;
}
}
}
}
}
}
if(vertexx[OPS_ACC0(1,0,0)] >= states[i].xmin && vertexx[OPS_ACC0(0,0,0)] < states[i].xmax) {
if(vertexy[OPS_ACC1(0,1,0)] >= states[i].ymin && vertexy[OPS_ACC1(0,0,0)] < states[i].ymax) {
if(vertexz[OPS_ACC2(0,0,1)] >= states[i].zmin && vertexz[OPS_ACC2(0,0,0)] < states[i].zmax) {
energy0[OPS_ACC3(0,0,0)] = states[i].energy;
density0[OPS_ACC4(0,0,0)] = states[i].density;
}
}
}
if (is_in) {
xvel0[OPS_ACC5(0,0,0)] = states[i].xvel;
yvel0[OPS_ACC6(0,0,0)] = states[i].yvel;
zvel0[OPS_ACC7(0,0,0)] = states[i].zvel;
}
}
else if(states[i].geometry == g_sphe) {
for (int i1 = -1; i1 <= 0; i1++) {
for (int j1 = -1; j1 <= 0; j1++) {
for (int k1 = -1; k1 <= 0; k1++) {
radius = sqrt ((cellx[OPS_ACC8(0,0,0)] - x_cent) * (cellx[OPS_ACC8(0,0,0)] - x_cent) +
(celly[OPS_ACC9(0,0,0)] - y_cent) * (celly[OPS_ACC9(0,0,0)] - y_cent) +
(cellz[OPS_ACC10(0,0,0)] - z_cent) * (cellz[OPS_ACC10(0,0,0)] - z_cent));
if(radius <= states[i].radius) is_in = 1;
}
}
}
if(radius <= states[i].radius) {
energy0[OPS_ACC3(0,0,0)] = states[i].energy;
density0[OPS_ACC4(0,0,0)] = states[i].density;
}
if (is_in) {
xvel0[OPS_ACC5(0,0,0)] = states[i].xvel;
yvel0[OPS_ACC6(0,0,0)] = states[i].yvel;
zvel0[OPS_ACC7(0,0,0)] = states[i].zvel;
}
}
else if(states[i].geometry == g_point) {
for (int i1 = -1; i1 <= 0; i1++) {
for (int j1 = -1; j1 <= 0; j1++) {
for (int k1 = -1; k1 <= 0; k1++) {
if(vertexx[OPS_ACC0(0+i1,0,0)] == x_cent && vertexy[OPS_ACC1(0,0+j1,0)] == y_cent && vertexz[OPS_ACC2(0,0,0+k1)] == z_cent)
is_in = 1;
}
}
}
if(vertexx[OPS_ACC0(0,0,0)] == x_cent && vertexy[OPS_ACC1(0,0,0)] == y_cent && vertexz[OPS_ACC2(0,0,0)] == z_cent) {
energy0[OPS_ACC3(0,0,0)] = states[i].energy;
density0[OPS_ACC4(0,0,0)] = states[i].density;
}
if (is_in) {
xvel0[OPS_ACC5(0,0,0)] = states[i].xvel;
yvel0[OPS_ACC6(0,0,0)] = states[i].yvel;
zvel0[OPS_ACC7(0,0,0)] = states[i].zvel;
}
}
}
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#undef OPS_ACC8
#undef OPS_ACC9
#undef OPS_ACC10
__global__ void ops_generate_chunk_kernel(
const double* __restrict arg0,
const double* __restrict arg1,
const double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
double* __restrict arg7,
const double* __restrict arg8,
const double* __restrict arg9,
const double* __restrict arg10,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 0*1 * xdim0_generate_chunk_kernel + idx_z * 0*1 * xdim0_generate_chunk_kernel * ydim0_generate_chunk_kernel;
arg1 += idx_x * 0*1 + idx_y * 1*1 * xdim1_generate_chunk_kernel + idx_z * 0*1 * xdim1_generate_chunk_kernel * ydim1_generate_chunk_kernel;
arg2 += idx_x * 0*1 + idx_y * 0*1 * xdim2_generate_chunk_kernel + idx_z * 1*1 * xdim2_generate_chunk_kernel * ydim2_generate_chunk_kernel;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_generate_chunk_kernel + idx_z * 1*1 * xdim3_generate_chunk_kernel * ydim3_generate_chunk_kernel;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_generate_chunk_kernel + idx_z * 1*1 * xdim4_generate_chunk_kernel * ydim4_generate_chunk_kernel;
arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_generate_chunk_kernel + idx_z * 1*1 * xdim5_generate_chunk_kernel * ydim5_generate_chunk_kernel;
arg6 += idx_x * 1*1 + idx_y * 1*1 * xdim6_generate_chunk_kernel + idx_z * 1*1 * xdim6_generate_chunk_kernel * ydim6_generate_chunk_kernel;
arg7 += idx_x * 1*1 + idx_y * 1*1 * xdim7_generate_chunk_kernel + idx_z * 1*1 * xdim7_generate_chunk_kernel * ydim7_generate_chunk_kernel;
arg8 += idx_x * 1*1 + idx_y * 0*1 * xdim8_generate_chunk_kernel + idx_z * 0*1 * xdim8_generate_chunk_kernel * ydim8_generate_chunk_kernel;
arg9 += idx_x * 0*1 + idx_y * 1*1 * xdim9_generate_chunk_kernel + idx_z * 0*1 * xdim9_generate_chunk_kernel * ydim9_generate_chunk_kernel;
arg10 += idx_x * 0*1 + idx_y * 0*1 * xdim10_generate_chunk_kernel + idx_z * 1*1 * xdim10_generate_chunk_kernel * ydim10_generate_chunk_kernel;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
generate_chunk_kernel_gpu(arg0, arg1, arg2, arg3,
arg4, arg5, arg6, arg7, arg8,
arg9, arg10);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_generate_chunk_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8,
ops_arg arg9, ops_arg arg10) {
#else
void ops_par_loop_generate_chunk_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
ops_arg arg8 = desc->args[8];
ops_arg arg9 = desc->args[9];
ops_arg arg10 = desc->args[10];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[11] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,11,range,10)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(10,"generate_chunk_kernel");
OPS_kernels[10].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
int xdim7 = args[7].dat->size[0];
int ydim7 = args[7].dat->size[1];
int xdim8 = args[8].dat->size[0];
int ydim8 = args[8].dat->size[1];
int xdim9 = args[9].dat->size[0];
int ydim9 = args[9].dat->size[1];
int xdim10 = args[10].dat->size[0];
int ydim10 = args[10].dat->size[1];
if (xdim0 != xdim0_generate_chunk_kernel_h || ydim0 != ydim0_generate_chunk_kernel_h || xdim1 != xdim1_generate_chunk_kernel_h || ydim1 != ydim1_generate_chunk_kernel_h || xdim2 != xdim2_generate_chunk_kernel_h || ydim2 != ydim2_generate_chunk_kernel_h || xdim3 != xdim3_generate_chunk_kernel_h || ydim3 != ydim3_generate_chunk_kernel_h || xdim4 != xdim4_generate_chunk_kernel_h || ydim4 != ydim4_generate_chunk_kernel_h || xdim5 != xdim5_generate_chunk_kernel_h || ydim5 != ydim5_generate_chunk_kernel_h || xdim6 != xdim6_generate_chunk_kernel_h || ydim6 != ydim6_generate_chunk_kernel_h || xdim7 != xdim7_generate_chunk_kernel_h || ydim7 != ydim7_generate_chunk_kernel_h || xdim8 != xdim8_generate_chunk_kernel_h || ydim8 != ydim8_generate_chunk_kernel_h || xdim9 != xdim9_generate_chunk_kernel_h || ydim9 != ydim9_generate_chunk_kernel_h || xdim10 != xdim10_generate_chunk_kernel_h || ydim10 != ydim10_generate_chunk_kernel_h) {
cudaMemcpyToSymbol( xdim0_generate_chunk_kernel, &xdim0, sizeof(int) );
xdim0_generate_chunk_kernel_h = xdim0;
cudaMemcpyToSymbol( ydim0_generate_chunk_kernel, &ydim0, sizeof(int) );
ydim0_generate_chunk_kernel_h = ydim0;
cudaMemcpyToSymbol( xdim1_generate_chunk_kernel, &xdim1, sizeof(int) );
xdim1_generate_chunk_kernel_h = xdim1;
cudaMemcpyToSymbol( ydim1_generate_chunk_kernel, &ydim1, sizeof(int) );
ydim1_generate_chunk_kernel_h = ydim1;
cudaMemcpyToSymbol( xdim2_generate_chunk_kernel, &xdim2, sizeof(int) );
xdim2_generate_chunk_kernel_h = xdim2;
cudaMemcpyToSymbol( ydim2_generate_chunk_kernel, &ydim2, sizeof(int) );
ydim2_generate_chunk_kernel_h = ydim2;
cudaMemcpyToSymbol( xdim3_generate_chunk_kernel, &xdim3, sizeof(int) );
xdim3_generate_chunk_kernel_h = xdim3;
cudaMemcpyToSymbol( ydim3_generate_chunk_kernel, &ydim3, sizeof(int) );
ydim3_generate_chunk_kernel_h = ydim3;
cudaMemcpyToSymbol( xdim4_generate_chunk_kernel, &xdim4, sizeof(int) );
xdim4_generate_chunk_kernel_h = xdim4;
cudaMemcpyToSymbol( ydim4_generate_chunk_kernel, &ydim4, sizeof(int) );
ydim4_generate_chunk_kernel_h = ydim4;
cudaMemcpyToSymbol( xdim5_generate_chunk_kernel, &xdim5, sizeof(int) );
xdim5_generate_chunk_kernel_h = xdim5;
cudaMemcpyToSymbol( ydim5_generate_chunk_kernel, &ydim5, sizeof(int) );
ydim5_generate_chunk_kernel_h = ydim5;
cudaMemcpyToSymbol( xdim6_generate_chunk_kernel, &xdim6, sizeof(int) );
xdim6_generate_chunk_kernel_h = xdim6;
cudaMemcpyToSymbol( ydim6_generate_chunk_kernel, &ydim6, sizeof(int) );
ydim6_generate_chunk_kernel_h = ydim6;
cudaMemcpyToSymbol( xdim7_generate_chunk_kernel, &xdim7, sizeof(int) );
xdim7_generate_chunk_kernel_h = xdim7;
cudaMemcpyToSymbol( ydim7_generate_chunk_kernel, &ydim7, sizeof(int) );
ydim7_generate_chunk_kernel_h = ydim7;
cudaMemcpyToSymbol( xdim8_generate_chunk_kernel, &xdim8, sizeof(int) );
xdim8_generate_chunk_kernel_h = xdim8;
cudaMemcpyToSymbol( ydim8_generate_chunk_kernel, &ydim8, sizeof(int) );
ydim8_generate_chunk_kernel_h = ydim8;
cudaMemcpyToSymbol( xdim9_generate_chunk_kernel, &xdim9, sizeof(int) );
xdim9_generate_chunk_kernel_h = xdim9;
cudaMemcpyToSymbol( ydim9_generate_chunk_kernel, &ydim9, sizeof(int) );
ydim9_generate_chunk_kernel_h = ydim9;
cudaMemcpyToSymbol( xdim10_generate_chunk_kernel, &xdim10, sizeof(int) );
xdim10_generate_chunk_kernel_h = xdim10;
cudaMemcpyToSymbol( ydim10_generate_chunk_kernel, &ydim10, sizeof(int) );
ydim10_generate_chunk_kernel_h = ydim10;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
int dat7 = (OPS_soa ? args[7].dat->type_size : args[7].dat->elem_size);
int dat8 = (OPS_soa ? args[8].dat->type_size : args[8].dat->elem_size);
int dat9 = (OPS_soa ? args[9].dat->type_size : args[9].dat->elem_size);
int dat10 = (OPS_soa ? args[10].dat->type_size : args[10].dat->elem_size);
char *p_a[11];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
int base7 = args[7].dat->base_offset +
dat7 * 1 * (start[0] * args[7].stencil->stride[0]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
(start[1] * args[7].stencil->stride[1]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
args[7].dat->size[1] *
(start[2] * args[7].stencil->stride[2]);
p_a[7] = (char *)args[7].data_d + base7;
int base8 = args[8].dat->base_offset +
dat8 * 1 * (start[0] * args[8].stencil->stride[0]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
(start[1] * args[8].stencil->stride[1]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
args[8].dat->size[1] *
(start[2] * args[8].stencil->stride[2]);
p_a[8] = (char *)args[8].data_d + base8;
int base9 = args[9].dat->base_offset +
dat9 * 1 * (start[0] * args[9].stencil->stride[0]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
(start[1] * args[9].stencil->stride[1]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
args[9].dat->size[1] *
(start[2] * args[9].stencil->stride[2]);
p_a[9] = (char *)args[9].data_d + base9;
int base10 = args[10].dat->base_offset +
dat10 * 1 * (start[0] * args[10].stencil->stride[0]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
(start[1] * args[10].stencil->stride[1]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
args[10].dat->size[1] *
(start[2] * args[10].stencil->stride[2]);
p_a[10] = (char *)args[10].data_d + base10;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 11);
ops_halo_exchanges(args,11,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[10].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_generate_chunk_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (double *)p_a[7],
(double *)p_a[8], (double *)p_a[9],
(double *)p_a[10],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[10].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 11);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
ops_set_halo_dirtybit3(&args[7],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[10].mpi_time += t2-t1;
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg7);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg8);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg9);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg10);
}
}
#ifdef OPS_LAZY
// Lazy-execution entry point (OPS code generator output): instead of running the
// kernel immediately, build an ops_kernel_descriptor capturing the loop's name,
// block, range and all 11 arguments, then enqueue it for deferred execution.
// A djb2-style hash (seed 5381, h = h*33 + x) over the kernel index, range and
// argument dat indices identifies identical loop invocations.
void ops_par_loop_generate_chunk_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
// NOTE(review): desc->name aliases the caller's string (no copy) — presumably the
// caller passes a string literal that outlives the queue; confirm against OPS API.
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 10;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 10;
// 3D loop -> 6 range entries (start/end per dimension), folded into the hash.
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 11;
desc->args = (ops_arg*)malloc(11*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
desc->hash = ((desc->hash << 5) + desc->hash) + arg7.dat->index;
desc->args[8] = arg8;
desc->hash = ((desc->hash << 5) + desc->hash) + arg8.dat->index;
desc->args[9] = arg9;
desc->hash = ((desc->hash << 5) + desc->hash) + arg9.dat->index;
desc->args[10] = arg10;
desc->hash = ((desc->hash << 5) + desc->hash) + arg10.dat->index;
// The actual work is performed later by the _execute function defined above.
desc->function = ops_par_loop_generate_chunk_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(10,"generate_chunk_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
98e03130a9b9357490ceb284ffc870cfe98bd24b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "spirit_calibration.h"
#include "vector_td_operators.h"
#include "vector_td_utilities.h"
#include "cuNDArray_elemwise.h"
#include "cuNDArray_operators.h"
#include "cuNDArray_reductions.h"
#include "cuNDArray_utils.h"
#include "cuNDArray_blas.h"
#include "cuNDFFT.h"
#include "cudaDeviceManager.h"
#include "setup_grid.h"
#include "complext.h"
#include "CUBLASContextProvider.h"
#include "GPUTimer.h"
#include "hoNDArray_fileio.h"
#include "htgrappa.h"
#include <rocblas.h>
//#include <cula_lapack_device.h>
namespace Gadgetron {
// Build the SPIRiT calibration system matrix A (column-major, m x n with
// m = elements_per_coil rows and n = num_coils*(kernel_size^2-1) columns).
// Each thread handles one k-space sample and scatters it into every matrix
// entry where that sample participates as a kernel neighbor.
static __global__ void
compute_system_matrix_kernel( intd2 dims,
int num_coils,
int kernel_size,
float_complext *kspace,
float_complext *A )
{
// The grid contains one thread per coil element.
// Each thread reads its corresponding data element and is responsible
// for filling into the corresponding kernel_size*kernel_size entries in the matrix.
//
// The storage format is column major due to BLAS/LAPACK conventions.
// This increases the overhead of writes in this kernel (they are non-coalesced and MANY).
// TODO: optimize for performance.
//
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
const int elements_per_coil = prod(dims);
if( idx < elements_per_coil*num_coils ){
// Get the k-space value for this thread
//
float_complext val = kspace[idx];
const int num_kernel_elements = kernel_size*kernel_size-1; // central tap excluded
const int coil = idx/elements_per_coil;
const int idx_in_coil = idx-coil*elements_per_coil;
// Loop over the number of outputs produced per thread
//
const int half_kernel_size = kernel_size>>1;
for( int j = -half_kernel_size; j<half_kernel_size+1; j++ ){ // row iterator
for( int i = -half_kernel_size; i<half_kernel_size+1; i++ ){ // column iterator
if( j==0 && i==0 ) continue; // The weight of the central point is forced to 0
int kernel_idx = co_to_idx( intd2(i+half_kernel_size,j+half_kernel_size), intd2(kernel_size,kernel_size) );
// Skip over the (absent) central element in the flattened kernel index.
if( (j==0 && i>0) || j>0 ) kernel_idx--;
// NOTE(review): the +elements_per_coil then %elements_per_coil wraps the
// neighbor offset linearly over the coil image (periodic boundary in the
// flattened index, not per-dimension) — confirm this is the intended wrap.
const int m =
(idx_in_coil+j*dims[0]+i+elements_per_coil)%elements_per_coil; // row idx
const int n =
coil*num_kernel_elements + kernel_idx;
const int A_idx =
n*elements_per_coil + m; // Column major storage
A[A_idx] = val;
}
}
}
}
// Scatter the solved SPIRiT kernel weights into the center of num_coils^2
// zero-padded k-space images (one per source/target coil pair, "batch"),
// ready for a batched inverse FFT into image-space convolution kernels.
static __global__ void
write_convolution_masks_kernel( intd2 dims,
int num_coils,
int kernel_size,
float_complext *kernels,
float_complext *kspace )
{
// Write out convolution masks in the center of kspace
// - thus prepare for FFT into image space
//
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
const int elements_per_coil = prod(dims);
if( idx < elements_per_coil*num_coils*num_coils ){
const int half_kernel_size = kernel_size>>1;
const int num_kernel_elements = kernel_size*kernel_size-1; // central tap excluded
const int batch = idx/(elements_per_coil*num_coils);
const int idx_in_batch = idx-batch*elements_per_coil*num_coils;
const int coil = idx_in_batch/elements_per_coil;
const int idx_in_coil = idx_in_batch-coil*elements_per_coil;
// Coordinate relative to the image center.
const intd2 co = idx_to_co( idx_in_coil, dims ) - (dims>>1);
if( co[1] >= -half_kernel_size && co[1] <= half_kernel_size &&
co[0] >= -half_kernel_size && co[0] <= half_kernel_size ){
// Compute kernel index
// - keeping in mind the central elements are missing (forced to 0)
//
int kernel_idx = co_to_idx( co+intd2(half_kernel_size, half_kernel_size), intd2(kernel_size, kernel_size) );
if( co[1] == 0 && co[0] == 0 ) {
// Central tap is zero by construction (self-consistency constraint).
kspace[idx] = float_complext(0.0f);
}
else {
// Skip over the (absent) central element in the flattened kernel index.
if( (co[1]==0 && co[0]>0) || co[1]>0 ) kernel_idx--;
kspace[idx] = kernels[batch*num_kernel_elements*num_coils + coil*num_kernel_elements + kernel_idx];
}
}
else{
// Outside the kernel footprint: zero-pad.
kspace[idx] = float_complext(0.0f);
}
}
}
// Estimate SPIRiT calibration kernels from fully-sampled 2D multi-coil k-space.
// Pipeline: build system matrix A -> form normal equations A^H A and rhs A^H b
// via (hip)BLAS -> solve the SPD system on the CPU -> write solved weights into
// zero-padded k-space -> batched iFFT -> return num_coils^2 image-space kernels.
// Returns all-ones (scaled) kernels if the solve produced NaNs.
boost::shared_ptr< cuNDArray<float_complext> >
estimate_spirit_kernels( cuNDArray<float_complext> *_kspace, unsigned int kernel_size )
{
// Calibration is performed in k-space.
// The result is Fourier transformed and returned as image space kernels.
// The convolution is expressed explicitly as a matrix equation and solved using BLAS/LAPACK.
//
if( _kspace == 0x0 ){
throw std::runtime_error("estimate_spirit_kernels: 0x0 input array");
}
if( _kspace->get_number_of_dimensions() != 3 ) {
throw std::runtime_error("estimate_spirit_kernels: Only 2D spirit is supported currently");
}
if( (kernel_size%2) == 0 ) {
throw std::runtime_error("estimate_spirit_kernels: The kernel size should be odd");
}
// Normalize input array to an average intensity of one per element
//
std::vector<size_t> old_dims = *_kspace->get_dimensions();
std::vector<size_t> dims= old_dims;
/*dims[0] /= 2;
dims[1] /= 2;*/
//dims[0]=36;
//dims[1]=36;
//cuNDArray<float_complext> kspace(_kspace);
// dims == old_dims here, so the crop below is currently a full-size copy.
vector_td<size_t,2> offset((old_dims[0]-dims[0])/2,(old_dims[1]-dims[1])/2);
cuNDArray<float_complext> kspace = crop<float_complext,2>(offset,from_std_vector<size_t,2>(dims),*_kspace);
// NOTE(review): nrm2 is the L2 norm, and dividing by N/sum scales the data *up*
// by sum/N — this looks inverted relative to the stated "average intensity of
// one" intent; confirm against the original Gadgetron implementation.
float sum = nrm2(&kspace);
// NOTE(review): in_max is only consumed by the commented-out Tikhonov weight below.
float_complext in_max = kspace[amax(&kspace)];
kspace /= (float(kspace.get_number_of_elements())/sum);
// Last dimension is the coil dimension.
unsigned int num_coils = kspace.get_size(kspace.get_number_of_dimensions()-1);
unsigned int elements_per_coil = kspace.get_number_of_elements()/num_coils;
// Output: one kernel image per (source, target) coil pair.
std::vector<size_t> out_dims;
out_dims.push_back(_kspace->get_size(0)); out_dims.push_back(_kspace->get_size(1));
out_dims.push_back(num_coils*num_coils);
boost::shared_ptr< cuNDArray<float_complext> > kernel_images
( new cuNDArray<float_complext>(&out_dims) );
// Clear to ones in case we terminate early
//
fill(kernel_images.get(), float_complext(1.0f/num_coils));
// Form m x n system matrix A
//
unsigned int m = elements_per_coil;
unsigned int n = num_coils*(kernel_size*kernel_size-1);
std::vector<size_t> A_dims; A_dims.push_back(m); A_dims.push_back(n);
cuNDArray<float_complext> A(&A_dims); clear(&A);
// Fill system matrix
//
dim3 blockDim; dim3 gridDim;
setup_grid( kspace.get_number_of_elements(), &blockDim, &gridDim );
hipLaunchKernelGGL(( compute_system_matrix_kernel), dim3(gridDim), dim3(blockDim) , 0, 0,
intd2(kspace.get_size(0), kspace.get_size(1)), num_coils, kernel_size,
kspace.get_data_ptr(), A.get_data_ptr() );
CHECK_FOR_CUDA_ERROR();
/*
static int counter = 0;
char filename[256];
sprintf((char*)filename, "_A_%d.cplx", counter);
write_nd_array<float_complext>( A.to_host().get(), filename );
counter++;
*/
// Compute A^H A
//
hipblasStatus_t stat;
hipblasHandle_t handle = *CUBLASContextProvider::instance()->getCublasHandle();
std::vector<size_t> AHA_dims(2,n);
cuNDArray<float_complext> AHA(&AHA_dims);
// Initialize AHA to identity (Tikhonov regularization)
//
float_complext one(1.0f);
clear(&AHA);
// One small H2D copy per diagonal element; n is small so this is acceptable.
for( unsigned int i=0; i<n; i++ ){
hipMemcpy( AHA.get_data_ptr()+i*n+i, &one, sizeof(float_complext), hipMemcpyHostToDevice );
}
CHECK_FOR_CUDA_ERROR();
float_complext alpha(1.0f);
//float_complext beta(0.1f*in_max); // Tikhonov regularization weight
// beta = 0: the pre-seeded identity above is overwritten, i.e. no regularization.
float_complext beta(0.0f); // Tikhonov regularization weight
// AHA = A^H * A (column-major; op C = conjugate transpose).
stat = hipblasCgemm( handle, HIPBLAS_OP_C, HIPBLAS_OP_N,
n,n,m,
(cuFloatComplex*) &alpha,
(cuFloatComplex*) A.get_data_ptr(), m,
(cuFloatComplex*) A.get_data_ptr(), m,
(cuFloatComplex*) &beta,
(cuFloatComplex*) AHA.get_data_ptr(), n );
if (stat != HIPBLAS_STATUS_SUCCESS) {
std::cerr << "CUBLAS error code " << stat << std::endl;
throw std::runtime_error("estimate_spirit_kernels: CUBLAS error computing A^HA");
}
/*
static int counter = 0;
char filename[256];
sprintf((char*)filename, "_AHA_%d.cplx", counter);
write_nd_array<float_complext>( AHA.to_host().get(), filename );
counter++;
*/
// Multiply A^H with each coil image (to form the rhs)
//
std::vector<size_t> rhs_dims; rhs_dims.push_back(n); rhs_dims.push_back(num_coils);
cuNDArray<float_complext> rhs(&rhs_dims); clear(&rhs);
beta = float_complext(0.0f);
// rhs = A^H * kspace (one rhs column per target coil).
stat = hipblasCgemm( handle, HIPBLAS_OP_C, HIPBLAS_OP_N,
n, num_coils, m,
(cuFloatComplex*) &alpha,
(cuFloatComplex*) A.get_data_ptr(), m,
(cuFloatComplex*) kspace.get_data_ptr(), m,
(cuFloatComplex*) &beta,
(cuFloatComplex*) rhs.get_data_ptr(), n );
if (stat != HIPBLAS_STATUS_SUCCESS) {
std::cerr << "CUBLAS error code " << stat << std::endl;
throw std::runtime_error("estimate_spirit_kernels: CUBLAS error computing rhs");
}
/*
static int counter = 0;
char filename[256];
sprintf((char*)filename, "_rhs_%d.cplx", counter);
write_nd_array<float_complext>( rhs.to_host().get(), filename );
counter++;
*/
//CGELS is used rather than a more conventional solver as it is part of CULA free.
/*
culaStatus s = culaDeviceCgels( 'N', n, n, num_coils,
(culaDeviceFloatComplex*)AHA.get_data_ptr(), n,
(culaDeviceFloatComplex*)rhs.get_data_ptr(), n);
*/
{
//It actually turns out to be faster to do this inversion on the CPU. Problem is probably too small for GPU to make sense
//GPUTimer cpu_invert_time("CPU Inversion time");
// Solves AHA * x = rhs in place; rhs now holds the kernel weights.
ht_grappa_solve_spd_system(AHA, rhs);
}
/*
if( s != culaNoError ) {
if( s == 8 ){
std::cerr << "CULA error code " << s << ": " << culaGetStatusString(s) << std::endl;
std::cerr << "Assuming that the buffer is not yet filled and return ones" << std::endl;
return kernel_images;
}
std::cerr << "CULA error code " << s << ": " << culaGetStatusString(s) << std::endl;
culaInfo i = culaGetErrorInfo();
char buf[2048];
culaGetErrorInfoString(s, i, buf, sizeof(buf));
printf("Error %d: %s\n", (int)i, buf);
throw std::runtime_error("estimate_spirit_kernels: CULA error computing 'getrs'");
}
*/
//CULA will sometimes return NaN without an explicit error. This code tests for NaNs and returns if found.
// (x != x) is the standard NaN self-inequality test.
float nan_test = nrm2(&rhs);
if (nan_test != nan_test) return kernel_images;
// Fill k-spaces with the computed kernels at the center
//
setup_grid( kernel_images->get_number_of_elements(), &blockDim, &gridDim );
hipLaunchKernelGGL(( write_convolution_masks_kernel), dim3(gridDim), dim3(blockDim) , 0, 0,
intd2(kernel_images->get_size(0), kernel_images->get_size(1)), num_coils, kernel_size,
rhs.get_data_ptr(), kernel_images->get_data_ptr() );
CHECK_FOR_CUDA_ERROR();
// Batch FFT into image space
//
// Release intermediates before the FFT to reduce peak device memory.
A.clear();
AHA.clear();
rhs.clear();
std::vector<size_t> dims_to_xform;
dims_to_xform.push_back(0); dims_to_xform.push_back(1);
cuNDFFT<float>::instance()->ifft( kernel_images.get(), &dims_to_xform, false );
/*
static int counter = 0;
char filename[256];
sprintf((char*)filename, "_kernels_%d.cplx", counter);
write_nd_array<float_complext>( kernel_images->to_host().get(), filename );
counter++;
*/
return kernel_images;
}
}
| 98e03130a9b9357490ceb284ffc870cfe98bd24b.cu | #include "spirit_calibration.h"
#include "vector_td_operators.h"
#include "vector_td_utilities.h"
#include "cuNDArray_elemwise.h"
#include "cuNDArray_operators.h"
#include "cuNDArray_reductions.h"
#include "cuNDArray_utils.h"
#include "cuNDArray_blas.h"
#include "cuNDFFT.h"
#include "cudaDeviceManager.h"
#include "setup_grid.h"
#include "complext.h"
#include "CUBLASContextProvider.h"
#include "GPUTimer.h"
#include "hoNDArray_fileio.h"
#include "htgrappa.h"
#include <cublas_v2.h>
//#include <cula_lapack_device.h>
namespace Gadgetron {
// Build the SPIRiT calibration system matrix A (column-major, m x n with
// m = elements_per_coil rows and n = num_coils*(kernel_size^2-1) columns).
// Each thread handles one k-space sample and scatters it into every matrix
// entry where that sample participates as a kernel neighbor.
static __global__ void
compute_system_matrix_kernel( intd2 dims,
int num_coils,
int kernel_size,
float_complext *kspace,
float_complext *A )
{
// The grid contains one thread per coil element.
// Each thread reads its corresponding data element and is responsible
// for filling into the corresponding kernel_size*kernel_size entries in the matrix.
//
// The storage format is column major due to BLAS/LAPACK conventions.
// This increases the overhead of writes in this kernel (they are non-coalesced and MANY).
// TODO: optimize for performance.
//
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
const int elements_per_coil = prod(dims);
if( idx < elements_per_coil*num_coils ){
// Get the k-space value for this thread
//
float_complext val = kspace[idx];
const int num_kernel_elements = kernel_size*kernel_size-1; // central tap excluded
const int coil = idx/elements_per_coil;
const int idx_in_coil = idx-coil*elements_per_coil;
// Loop over the number of outputs produced per thread
//
const int half_kernel_size = kernel_size>>1;
for( int j = -half_kernel_size; j<half_kernel_size+1; j++ ){ // row iterator
for( int i = -half_kernel_size; i<half_kernel_size+1; i++ ){ // column iterator
if( j==0 && i==0 ) continue; // The weight of the central point is forced to 0
int kernel_idx = co_to_idx( intd2(i+half_kernel_size,j+half_kernel_size), intd2(kernel_size,kernel_size) );
// Skip over the (absent) central element in the flattened kernel index.
if( (j==0 && i>0) || j>0 ) kernel_idx--;
// NOTE(review): the +elements_per_coil then %elements_per_coil wraps the
// neighbor offset linearly over the coil image (periodic boundary in the
// flattened index, not per-dimension) — confirm this is the intended wrap.
const int m =
(idx_in_coil+j*dims[0]+i+elements_per_coil)%elements_per_coil; // row idx
const int n =
coil*num_kernel_elements + kernel_idx;
const int A_idx =
n*elements_per_coil + m; // Column major storage
A[A_idx] = val;
}
}
}
}
// Scatter the solved SPIRiT kernel weights into the center of num_coils^2
// zero-padded k-space images (one per source/target coil pair, "batch"),
// ready for a batched inverse FFT into image-space convolution kernels.
static __global__ void
write_convolution_masks_kernel( intd2 dims,
int num_coils,
int kernel_size,
float_complext *kernels,
float_complext *kspace )
{
// Write out convolution masks in the center of kspace
// - thus prepare for FFT into image space
//
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
const int elements_per_coil = prod(dims);
if( idx < elements_per_coil*num_coils*num_coils ){
const int half_kernel_size = kernel_size>>1;
const int num_kernel_elements = kernel_size*kernel_size-1; // central tap excluded
const int batch = idx/(elements_per_coil*num_coils);
const int idx_in_batch = idx-batch*elements_per_coil*num_coils;
const int coil = idx_in_batch/elements_per_coil;
const int idx_in_coil = idx_in_batch-coil*elements_per_coil;
// Coordinate relative to the image center.
const intd2 co = idx_to_co( idx_in_coil, dims ) - (dims>>1);
if( co[1] >= -half_kernel_size && co[1] <= half_kernel_size &&
co[0] >= -half_kernel_size && co[0] <= half_kernel_size ){
// Compute kernel index
// - keeping in mind the central elements are missing (forced to 0)
//
int kernel_idx = co_to_idx( co+intd2(half_kernel_size, half_kernel_size), intd2(kernel_size, kernel_size) );
if( co[1] == 0 && co[0] == 0 ) {
// Central tap is zero by construction (self-consistency constraint).
kspace[idx] = float_complext(0.0f);
}
else {
// Skip over the (absent) central element in the flattened kernel index.
if( (co[1]==0 && co[0]>0) || co[1]>0 ) kernel_idx--;
kspace[idx] = kernels[batch*num_kernel_elements*num_coils + coil*num_kernel_elements + kernel_idx];
}
}
else{
// Outside the kernel footprint: zero-pad.
kspace[idx] = float_complext(0.0f);
}
}
}
// Estimate SPIRiT calibration kernels from fully-sampled 2D multi-coil k-space.
// Pipeline: build system matrix A -> form normal equations A^H A and rhs A^H b
// via cuBLAS -> solve the SPD system on the CPU -> write solved weights into
// zero-padded k-space -> batched iFFT -> return num_coils^2 image-space kernels.
// Returns all-ones (scaled) kernels if the solve produced NaNs.
boost::shared_ptr< cuNDArray<float_complext> >
estimate_spirit_kernels( cuNDArray<float_complext> *_kspace, unsigned int kernel_size )
{
// Calibration is performed in k-space.
// The result is Fourier transformed and returned as image space kernels.
// The convolution is expressed explicitly as a matrix equation and solved using BLAS/LAPACK.
//
if( _kspace == 0x0 ){
throw std::runtime_error("estimate_spirit_kernels: 0x0 input array");
}
if( _kspace->get_number_of_dimensions() != 3 ) {
throw std::runtime_error("estimate_spirit_kernels: Only 2D spirit is supported currently");
}
if( (kernel_size%2) == 0 ) {
throw std::runtime_error("estimate_spirit_kernels: The kernel size should be odd");
}
// Normalize input array to an average intensity of one per element
//
std::vector<size_t> old_dims = *_kspace->get_dimensions();
std::vector<size_t> dims= old_dims;
/*dims[0] /= 2;
dims[1] /= 2;*/
//dims[0]=36;
//dims[1]=36;
//cuNDArray<float_complext> kspace(_kspace);
// dims == old_dims here, so the crop below is currently a full-size copy.
vector_td<size_t,2> offset((old_dims[0]-dims[0])/2,(old_dims[1]-dims[1])/2);
cuNDArray<float_complext> kspace = crop<float_complext,2>(offset,from_std_vector<size_t,2>(dims),*_kspace);
// NOTE(review): nrm2 is the L2 norm, and dividing by N/sum scales the data *up*
// by sum/N — this looks inverted relative to the stated "average intensity of
// one" intent; confirm against the original Gadgetron implementation.
float sum = nrm2(&kspace);
// NOTE(review): in_max is only consumed by the commented-out Tikhonov weight below.
float_complext in_max = kspace[amax(&kspace)];
kspace /= (float(kspace.get_number_of_elements())/sum);
// Last dimension is the coil dimension.
unsigned int num_coils = kspace.get_size(kspace.get_number_of_dimensions()-1);
unsigned int elements_per_coil = kspace.get_number_of_elements()/num_coils;
// Output: one kernel image per (source, target) coil pair.
std::vector<size_t> out_dims;
out_dims.push_back(_kspace->get_size(0)); out_dims.push_back(_kspace->get_size(1));
out_dims.push_back(num_coils*num_coils);
boost::shared_ptr< cuNDArray<float_complext> > kernel_images
( new cuNDArray<float_complext>(&out_dims) );
// Clear to ones in case we terminate early
//
fill(kernel_images.get(), float_complext(1.0f/num_coils));
// Form m x n system matrix A
//
unsigned int m = elements_per_coil;
unsigned int n = num_coils*(kernel_size*kernel_size-1);
std::vector<size_t> A_dims; A_dims.push_back(m); A_dims.push_back(n);
cuNDArray<float_complext> A(&A_dims); clear(&A);
// Fill system matrix
//
dim3 blockDim; dim3 gridDim;
setup_grid( kspace.get_number_of_elements(), &blockDim, &gridDim );
compute_system_matrix_kernel<<< gridDim, blockDim >>>
( intd2(kspace.get_size(0), kspace.get_size(1)), num_coils, kernel_size,
kspace.get_data_ptr(), A.get_data_ptr() );
CHECK_FOR_CUDA_ERROR();
/*
static int counter = 0;
char filename[256];
sprintf((char*)filename, "_A_%d.cplx", counter);
write_nd_array<float_complext>( A.to_host().get(), filename );
counter++;
*/
// Compute A^H A
//
cublasStatus_t stat;
cublasHandle_t handle = *CUBLASContextProvider::instance()->getCublasHandle();
std::vector<size_t> AHA_dims(2,n);
cuNDArray<float_complext> AHA(&AHA_dims);
// Initialize AHA to identity (Tikhonov regularization)
//
float_complext one(1.0f);
clear(&AHA);
// One small H2D copy per diagonal element; n is small so this is acceptable.
for( unsigned int i=0; i<n; i++ ){
cudaMemcpy( AHA.get_data_ptr()+i*n+i, &one, sizeof(float_complext), cudaMemcpyHostToDevice );
}
CHECK_FOR_CUDA_ERROR();
float_complext alpha(1.0f);
//float_complext beta(0.1f*in_max); // Tikhonov regularization weight
// beta = 0: the pre-seeded identity above is overwritten, i.e. no regularization.
float_complext beta(0.0f); // Tikhonov regularization weight
// AHA = A^H * A (column-major; op C = conjugate transpose).
stat = cublasCgemm( handle, CUBLAS_OP_C, CUBLAS_OP_N,
n,n,m,
(cuFloatComplex*) &alpha,
(cuFloatComplex*) A.get_data_ptr(), m,
(cuFloatComplex*) A.get_data_ptr(), m,
(cuFloatComplex*) &beta,
(cuFloatComplex*) AHA.get_data_ptr(), n );
if (stat != CUBLAS_STATUS_SUCCESS) {
std::cerr << "CUBLAS error code " << stat << std::endl;
throw std::runtime_error("estimate_spirit_kernels: CUBLAS error computing A^HA");
}
/*
static int counter = 0;
char filename[256];
sprintf((char*)filename, "_AHA_%d.cplx", counter);
write_nd_array<float_complext>( AHA.to_host().get(), filename );
counter++;
*/
// Multiply A^H with each coil image (to form the rhs)
//
std::vector<size_t> rhs_dims; rhs_dims.push_back(n); rhs_dims.push_back(num_coils);
cuNDArray<float_complext> rhs(&rhs_dims); clear(&rhs);
beta = float_complext(0.0f);
// rhs = A^H * kspace (one rhs column per target coil).
stat = cublasCgemm( handle, CUBLAS_OP_C, CUBLAS_OP_N,
n, num_coils, m,
(cuFloatComplex*) &alpha,
(cuFloatComplex*) A.get_data_ptr(), m,
(cuFloatComplex*) kspace.get_data_ptr(), m,
(cuFloatComplex*) &beta,
(cuFloatComplex*) rhs.get_data_ptr(), n );
if (stat != CUBLAS_STATUS_SUCCESS) {
std::cerr << "CUBLAS error code " << stat << std::endl;
throw std::runtime_error("estimate_spirit_kernels: CUBLAS error computing rhs");
}
/*
static int counter = 0;
char filename[256];
sprintf((char*)filename, "_rhs_%d.cplx", counter);
write_nd_array<float_complext>( rhs.to_host().get(), filename );
counter++;
*/
//CGELS is used rather than a more conventional solver as it is part of CULA free.
/*
culaStatus s = culaDeviceCgels( 'N', n, n, num_coils,
(culaDeviceFloatComplex*)AHA.get_data_ptr(), n,
(culaDeviceFloatComplex*)rhs.get_data_ptr(), n);
*/
{
//It actually turns out to be faster to do this inversion on the CPU. Problem is probably too small for GPU to make sense
//GPUTimer cpu_invert_time("CPU Inversion time");
// Solves AHA * x = rhs in place; rhs now holds the kernel weights.
ht_grappa_solve_spd_system(AHA, rhs);
}
/*
if( s != culaNoError ) {
if( s == 8 ){
std::cerr << "CULA error code " << s << ": " << culaGetStatusString(s) << std::endl;
std::cerr << "Assuming that the buffer is not yet filled and return ones" << std::endl;
return kernel_images;
}
std::cerr << "CULA error code " << s << ": " << culaGetStatusString(s) << std::endl;
culaInfo i = culaGetErrorInfo();
char buf[2048];
culaGetErrorInfoString(s, i, buf, sizeof(buf));
printf("Error %d: %s\n", (int)i, buf);
throw std::runtime_error("estimate_spirit_kernels: CULA error computing 'getrs'");
}
*/
//CULA will sometimes return NaN without an explicit error. This code tests for NaNs and returns if found.
// (x != x) is the standard NaN self-inequality test.
float nan_test = nrm2(&rhs);
if (nan_test != nan_test) return kernel_images;
// Fill k-spaces with the computed kernels at the center
//
setup_grid( kernel_images->get_number_of_elements(), &blockDim, &gridDim );
write_convolution_masks_kernel<<< gridDim, blockDim >>>
( intd2(kernel_images->get_size(0), kernel_images->get_size(1)), num_coils, kernel_size,
rhs.get_data_ptr(), kernel_images->get_data_ptr() );
CHECK_FOR_CUDA_ERROR();
// Batch FFT into image space
//
// Release intermediates before the FFT to reduce peak device memory.
A.clear();
AHA.clear();
rhs.clear();
std::vector<size_t> dims_to_xform;
dims_to_xform.push_back(0); dims_to_xform.push_back(1);
cuNDFFT<float>::instance()->ifft( kernel_images.get(), &dims_to_xform, false );
/*
static int counter = 0;
char filename[256];
sprintf((char*)filename, "_kernels_%d.cplx", counter);
write_nd_array<float_complext>( kernel_images->to_host().get(), filename );
counter++;
*/
return kernel_images;
}
}
|
c922cd202b57be32c2ebae849e2865f32a1598fa.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/copy_if.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
namespace {
// Returns true if the mask is true for index i in at least keep_threshold
// columns
// Device predicate used by copy_if: keeps row `i` when at least
// `keep_threshold` of the key columns are valid (non-null) at that row.
struct valid_table_filter {
__device__ inline bool operator()(cudf::size_type i)
{
// Count valid key columns at row i. thrust::seq: each GPU thread scans
// its row's columns sequentially (one row per thread under copy_if).
auto valid = [i](auto column_device_view) { return column_device_view.is_valid(i); };
auto count =
thrust::count_if(thrust::seq, keys_device_view.begin(), keys_device_view.end(), valid);
return (count >= keep_threshold);
}
valid_table_filter() = delete;
~valid_table_filter() = default;
// keys_device_view: device view over the key columns to test.
// keep_threshold: minimum number of valid key columns for a row to survive.
valid_table_filter(cudf::table_device_view const& keys_device_view,
cudf::size_type keep_threshold)
: keep_threshold(keep_threshold), keys_device_view(keys_device_view)
{
}
protected:
cudf::size_type keep_threshold;
// Removed unused, never-initialized member `num_columns` (it was copied
// uninitialized into every device functor instance and never read).
cudf::table_device_view keys_device_view;
};
} // namespace
namespace cudf {
namespace detail {
/*
* Filters a table to remove null elements.
*/
// Detail implementation: remove rows where fewer than `keep_threshold` of the
// `keys` columns are valid. Returns a copy of `input` restricted to surviving
// rows; work is enqueued on `stream`, allocations come from `mr`.
std::unique_ptr<table> drop_nulls(table_view const& input,
std::vector<size_type> const& keys,
cudf::size_type keep_threshold,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
auto keys_view = input.select(keys);
// Fast path: no keys, no rows, or no nulls -> every row survives; just copy.
if (keys_view.num_columns() == 0 || keys_view.num_rows() == 0 || not cudf::has_nulls(keys_view)) {
return std::make_unique<table>(input, stream, mr);
}
auto keys_device_view = cudf::table_device_view::create(keys_view, stream);
return cudf::detail::copy_if(
input, valid_table_filter{*keys_device_view, keep_threshold}, mr, stream);
}
} // namespace detail
/*
* Filters a table to remove null elements.
*/
// Public API: drop rows with fewer than `keep_threshold` valid key columns.
// Thin wrapper adding an NVTX range; forwards to the detail implementation
// (which uses its default stream).
std::unique_ptr<table> drop_nulls(table_view const& input,
std::vector<size_type> const& keys,
cudf::size_type keep_threshold,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return cudf::detail::drop_nulls(input, keys, keep_threshold, mr);
}
/*
* Filters a table to remove null elements.
*/
// Public API convenience overload: a row is kept only if ALL key columns are
// valid (threshold = number of keys).
std::unique_ptr<table> drop_nulls(table_view const& input,
std::vector<size_type> const& keys,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return cudf::detail::drop_nulls(input, keys, keys.size(), mr);
}
} // namespace cudf
| c922cd202b57be32c2ebae849e2865f32a1598fa.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/copy_if.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
namespace {
// Returns true if the mask is true for index i in at least keep_threshold
// columns
struct valid_table_filter {
__device__ inline bool operator()(cudf::size_type i)
{
auto valid = [i](auto column_device_view) { return column_device_view.is_valid(i); };
auto count =
thrust::count_if(thrust::seq, keys_device_view.begin(), keys_device_view.end(), valid);
return (count >= keep_threshold);
}
valid_table_filter() = delete;
~valid_table_filter() = default;
valid_table_filter(cudf::table_device_view const& keys_device_view,
cudf::size_type keep_threshold)
: keep_threshold(keep_threshold), keys_device_view(keys_device_view)
{
}
protected:
cudf::size_type keep_threshold;
cudf::size_type num_columns;
cudf::table_device_view keys_device_view;
};
} // namespace
namespace cudf {
namespace detail {
/*
* Filters a table to remove null elements.
*/
std::unique_ptr<table> drop_nulls(table_view const& input,
std::vector<size_type> const& keys,
cudf::size_type keep_threshold,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
auto keys_view = input.select(keys);
if (keys_view.num_columns() == 0 || keys_view.num_rows() == 0 || not cudf::has_nulls(keys_view)) {
return std::make_unique<table>(input, stream, mr);
}
auto keys_device_view = cudf::table_device_view::create(keys_view, stream);
return cudf::detail::copy_if(
input, valid_table_filter{*keys_device_view, keep_threshold}, mr, stream);
}
} // namespace detail
/*
* Filters a table to remove null elements.
*/
std::unique_ptr<table> drop_nulls(table_view const& input,
std::vector<size_type> const& keys,
cudf::size_type keep_threshold,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return cudf::detail::drop_nulls(input, keys, keep_threshold, mr);
}
/*
* Filters a table to remove null elements.
*/
std::unique_ptr<table> drop_nulls(table_view const& input,
std::vector<size_type> const& keys,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return cudf::detail::drop_nulls(input, keys, keys.size(), mr);
}
} // namespace cudf
|
3fc3eaed8058478d86bafaac899f08e640e844d2.hip | // !!! This is a file automatically generated by hipify!!!
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//
// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
// Copyright 2014 UT-Battelle, LLC.
// Copyright 2014 Los Alamos National Security.
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National
// Laboratory (LANL), the U.S. Government retains certain rights in
// this software.
//============================================================================
#include <vtkm/cont/cuda/internal/DeviceAdapterAlgorithmCuda.h>
#include <atomic>
#include <mutex>
namespace vtkm
{
namespace cont
{
namespace cuda
{
namespace internal
{
VTKM_CONT_EXPORT vtkm::UInt32 getNumSMs(int dId)
{
std::size_t index = 0;
if (dId > 0)
{
index = static_cast<size_t>(dId);
}
//check
static std::once_flag lookupBuiltFlag;
static std::vector<vtkm::UInt32> numSMs;
std::call_once(lookupBuiltFlag, []() {
//iterate over all devices
int numberOfSMs = 0;
int count = 0;
VTKM_CUDA_CALL(hipGetDeviceCount(&count));
numSMs.reserve(static_cast<std::size_t>(count));
for (int deviceId = 0; deviceId < count; ++deviceId)
{ //get the number of sm's per deviceId
VTKM_CUDA_CALL(
hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId));
numSMs.push_back(static_cast<vtkm::UInt32>(numberOfSMs));
}
});
return numSMs[index];
}
}
} // end namespace cuda::internal
// we use cuda pinned memory to reduce the amount of synchronization
// and mem copies between the host and device.
auto DeviceAdapterAlgorithm<vtkm::cont::DeviceAdapterTagCuda>::GetPinnedErrorArray()
-> const PinnedErrorArray&
{
constexpr vtkm::Id ERROR_ARRAY_SIZE = 1024;
static thread_local PinnedErrorArray local;
if (!local.HostPtr)
{
VTKM_CUDA_CALL(hipHostMalloc((void**)&local.HostPtr, ERROR_ARRAY_SIZE, hipHostMallocMapped));
VTKM_CUDA_CALL(hipHostGetDevicePointer(&local.DevicePtr, local.HostPtr, 0));
local.HostPtr[0] = '\0'; // clear
local.Size = ERROR_ARRAY_SIZE;
}
return local;
}
void DeviceAdapterAlgorithm<vtkm::cont::DeviceAdapterTagCuda>::SetupErrorBuffer(
vtkm::exec::cuda::internal::TaskStrided& functor)
{
auto pinnedArray = GetPinnedErrorArray();
vtkm::exec::internal::ErrorMessageBuffer errorMessage(pinnedArray.DevicePtr, pinnedArray.Size);
functor.SetErrorMessageBuffer(errorMessage);
}
void DeviceAdapterAlgorithm<vtkm::cont::DeviceAdapterTagCuda>::CheckForErrors()
{
auto pinnedArray = GetPinnedErrorArray();
if (pinnedArray.HostPtr[0] != '\0')
{
VTKM_CUDA_CALL(hipStreamSynchronize(cudaStreamPerThread));
auto excep = vtkm::cont::ErrorExecution(pinnedArray.HostPtr);
pinnedArray.HostPtr[0] = '\0'; // clear
throw excep;
}
}
void DeviceAdapterAlgorithm<vtkm::cont::DeviceAdapterTagCuda>::GetGridsAndBlocks(
vtkm::UInt32& grids,
vtkm::UInt32& blocks,
vtkm::Id size)
{
(void)size;
int deviceId;
VTKM_CUDA_CALL(hipGetDevice(&deviceId)); //get deviceid from cuda
grids = 32 * cuda::internal::getNumSMs(deviceId);
blocks = 128;
}
void DeviceAdapterAlgorithm<vtkm::cont::DeviceAdapterTagCuda>::GetGridsAndBlocks(
vtkm::UInt32& grids,
dim3& blocks,
const dim3& size)
{
int deviceId;
VTKM_CUDA_CALL(hipGetDevice(&deviceId)); //get deviceid from cuda
grids = 32 * cuda::internal::getNumSMs(deviceId);
if (size.x == 0)
{ //grids that have no x dimension
blocks.x = 1;
blocks.y = 16;
blocks.z = 8;
}
else if (size.x > 128)
{
blocks.x = 64;
blocks.y = 2;
blocks.z = 1;
}
else
{ //for really small grids
blocks.x = 8;
blocks.y = 4;
blocks.z = 4;
}
}
}
} // end namespace vtkm::cont
| 3fc3eaed8058478d86bafaac899f08e640e844d2.cu | //============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//
// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
// Copyright 2014 UT-Battelle, LLC.
// Copyright 2014 Los Alamos National Security.
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National
// Laboratory (LANL), the U.S. Government retains certain rights in
// this software.
//============================================================================
#include <vtkm/cont/cuda/internal/DeviceAdapterAlgorithmCuda.h>
#include <atomic>
#include <mutex>
namespace vtkm
{
namespace cont
{
namespace cuda
{
namespace internal
{
VTKM_CONT_EXPORT vtkm::UInt32 getNumSMs(int dId)
{
std::size_t index = 0;
if (dId > 0)
{
index = static_cast<size_t>(dId);
}
//check
static std::once_flag lookupBuiltFlag;
static std::vector<vtkm::UInt32> numSMs;
std::call_once(lookupBuiltFlag, []() {
//iterate over all devices
int numberOfSMs = 0;
int count = 0;
VTKM_CUDA_CALL(cudaGetDeviceCount(&count));
numSMs.reserve(static_cast<std::size_t>(count));
for (int deviceId = 0; deviceId < count; ++deviceId)
{ //get the number of sm's per deviceId
VTKM_CUDA_CALL(
cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId));
numSMs.push_back(static_cast<vtkm::UInt32>(numberOfSMs));
}
});
return numSMs[index];
}
}
} // end namespace cuda::internal
// we use cuda pinned memory to reduce the amount of synchronization
// and mem copies between the host and device.
auto DeviceAdapterAlgorithm<vtkm::cont::DeviceAdapterTagCuda>::GetPinnedErrorArray()
-> const PinnedErrorArray&
{
constexpr vtkm::Id ERROR_ARRAY_SIZE = 1024;
static thread_local PinnedErrorArray local;
if (!local.HostPtr)
{
VTKM_CUDA_CALL(cudaMallocHost((void**)&local.HostPtr, ERROR_ARRAY_SIZE, cudaHostAllocMapped));
VTKM_CUDA_CALL(cudaHostGetDevicePointer(&local.DevicePtr, local.HostPtr, 0));
local.HostPtr[0] = '\0'; // clear
local.Size = ERROR_ARRAY_SIZE;
}
return local;
}
void DeviceAdapterAlgorithm<vtkm::cont::DeviceAdapterTagCuda>::SetupErrorBuffer(
vtkm::exec::cuda::internal::TaskStrided& functor)
{
auto pinnedArray = GetPinnedErrorArray();
vtkm::exec::internal::ErrorMessageBuffer errorMessage(pinnedArray.DevicePtr, pinnedArray.Size);
functor.SetErrorMessageBuffer(errorMessage);
}
void DeviceAdapterAlgorithm<vtkm::cont::DeviceAdapterTagCuda>::CheckForErrors()
{
auto pinnedArray = GetPinnedErrorArray();
if (pinnedArray.HostPtr[0] != '\0')
{
VTKM_CUDA_CALL(cudaStreamSynchronize(cudaStreamPerThread));
auto excep = vtkm::cont::ErrorExecution(pinnedArray.HostPtr);
pinnedArray.HostPtr[0] = '\0'; // clear
throw excep;
}
}
void DeviceAdapterAlgorithm<vtkm::cont::DeviceAdapterTagCuda>::GetGridsAndBlocks(
vtkm::UInt32& grids,
vtkm::UInt32& blocks,
vtkm::Id size)
{
(void)size;
int deviceId;
VTKM_CUDA_CALL(cudaGetDevice(&deviceId)); //get deviceid from cuda
grids = 32 * cuda::internal::getNumSMs(deviceId);
blocks = 128;
}
void DeviceAdapterAlgorithm<vtkm::cont::DeviceAdapterTagCuda>::GetGridsAndBlocks(
vtkm::UInt32& grids,
dim3& blocks,
const dim3& size)
{
int deviceId;
VTKM_CUDA_CALL(cudaGetDevice(&deviceId)); //get deviceid from cuda
grids = 32 * cuda::internal::getNumSMs(deviceId);
if (size.x == 0)
{ //grids that have no x dimension
blocks.x = 1;
blocks.y = 16;
blocks.z = 8;
}
else if (size.x > 128)
{
blocks.x = 64;
blocks.y = 2;
blocks.z = 1;
}
else
{ //for really small grids
blocks.x = 8;
blocks.y = 4;
blocks.z = 4;
}
}
}
} // end namespace vtkm::cont
|
13fe28b3cd8b3602e4df18dd36f5f7b4eab3025b.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
* Copyright 2010 Duane Merrill
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*
*
* AUTHORS' REQUEST:
*
* If you use|reference|benchmark this code, please cite our Technical
* Report (http://www.cs.virginia.edu/~dgm4d/papers/RadixSortTR.pdf):
*
* @TechReport{ Merrill:Sorting:2010,
* author = "Duane Merrill and Andrew Grimshaw",
* title = "Revisiting Sorting for GPGPU Stream Architectures",
* year = "2010",
* institution = "University of Virginia, Department of Computer Science",
* address = "Charlottesville, VA, USA",
* number = "CS2010-03"
* }
*
* For more information, see our Google Code project site:
* http://code.google.com/p/back40computing/
*
* Thanks!
******************************************************************************/
/******************************************************************************
* Radix Sorting API
******************************************************************************/
#pragma once
#include <voxelpipe/b40c/KernelCommon/b40c_kernel_utils.cu>
#include <voxelpipe/b40c/LsbRadixSort/kernel/radixsort_kernel_common.cu>
namespace b40c {
// Debugging options
static bool RADIXSORT_DEBUG = false;
/**
* Class encapsulating device properties
*/
class CudaProperties
{
public:
// Information about our target device
hipDeviceProp_t device_props;
int device_sm_version;
// Information about our kernel assembly
int kernel_ptx_version;
public:
CudaProperties()
{
// Get current device properties
int current_device;
hipGetDevice(¤t_device);
hipGetDeviceProperties(&device_props, current_device);
device_sm_version = device_props.major * 100 + device_props.minor * 10;
// Get SM version of compiled kernel assemblies
hipFuncAttributes flush_kernel_attrs;
hipFuncGetAttributes(&flush_kernel_attrs, FlushKernel<void>);
kernel_ptx_version = flush_kernel_attrs.ptxVersion * 10;
}
};
/**
* Base class for SRTS radix sorting enactors.
*/
template <typename K, typename V, typename Storage>
class BaseRadixSortingEnactor
{
protected:
/**
* Whether or not this instance can be used to sort satellite values
*/
static bool KeysOnly()
{
return IsKeysOnly<V>();
}
protected:
//Device properties
const CudaProperties cuda_props;
protected:
/**
* Constructor.
*/
BaseRadixSortingEnactor(const CudaProperties &props = CudaProperties()) :
cuda_props(props) {}
public:
/**
* Destructor
*/
virtual ~BaseRadixSortingEnactor() {}
/**
* Enacts a radix sorting operation on the specified device data.
*
* @return hipSuccess on success, error enumeration otherwise
*/
virtual hipError_t EnactSort(Storage &problem_storage) = 0;
};
}// namespace b40c
| 13fe28b3cd8b3602e4df18dd36f5f7b4eab3025b.cu | /******************************************************************************
* Copyright 2010 Duane Merrill
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*
*
* AUTHORS' REQUEST:
*
* If you use|reference|benchmark this code, please cite our Technical
* Report (http://www.cs.virginia.edu/~dgm4d/papers/RadixSortTR.pdf):
*
* @TechReport{ Merrill:Sorting:2010,
* author = "Duane Merrill and Andrew Grimshaw",
* title = "Revisiting Sorting for GPGPU Stream Architectures",
* year = "2010",
* institution = "University of Virginia, Department of Computer Science",
* address = "Charlottesville, VA, USA",
* number = "CS2010-03"
* }
*
* For more information, see our Google Code project site:
* http://code.google.com/p/back40computing/
*
* Thanks!
******************************************************************************/
/******************************************************************************
* Radix Sorting API
******************************************************************************/
#pragma once
#include <voxelpipe/b40c/KernelCommon/b40c_kernel_utils.cu>
#include <voxelpipe/b40c/LsbRadixSort/kernel/radixsort_kernel_common.cu>
namespace b40c {
// Debugging options
static bool RADIXSORT_DEBUG = false;
/**
* Class encapsulating device properties
*/
class CudaProperties
{
public:
// Information about our target device
cudaDeviceProp device_props;
int device_sm_version;
// Information about our kernel assembly
int kernel_ptx_version;
public:
CudaProperties()
{
// Get current device properties
int current_device;
cudaGetDevice(¤t_device);
cudaGetDeviceProperties(&device_props, current_device);
device_sm_version = device_props.major * 100 + device_props.minor * 10;
// Get SM version of compiled kernel assemblies
cudaFuncAttributes flush_kernel_attrs;
cudaFuncGetAttributes(&flush_kernel_attrs, FlushKernel<void>);
kernel_ptx_version = flush_kernel_attrs.ptxVersion * 10;
}
};
/**
* Base class for SRTS radix sorting enactors.
*/
template <typename K, typename V, typename Storage>
class BaseRadixSortingEnactor
{
protected:
/**
* Whether or not this instance can be used to sort satellite values
*/
static bool KeysOnly()
{
return IsKeysOnly<V>();
}
protected:
//Device properties
const CudaProperties cuda_props;
protected:
/**
* Constructor.
*/
BaseRadixSortingEnactor(const CudaProperties &props = CudaProperties()) :
cuda_props(props) {}
public:
/**
* Destructor
*/
virtual ~BaseRadixSortingEnactor() {}
/**
* Enacts a radix sorting operation on the specified device data.
*
* @return cudaSuccess on success, error enumeration otherwise
*/
virtual cudaError_t EnactSort(Storage &problem_storage) = 0;
};
}// namespace b40c
|
84de764e6b359827f529de280369e599421e8071.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "common.h"
#include <pthread.h>
#include <cstdio>
#include <getopt.h>
#include <libgen.h>
#include "hip/hip_runtime.h"
#if NCCL_MAJOR >= 2
ncclDataType_t test_types[ncclNumTypes] = {ncclInt8, ncclUint8, ncclInt32, ncclUint32, ncclInt64, ncclUint64, ncclHalf, ncclFloat, ncclDouble};
const char *test_typenames[ncclNumTypes] = {"int8", "uint8", "int32", "uint32", "int64", "uint64", "half", "float", "double"};
#else
ncclDataType_t test_types[ncclNumTypes] = {ncclChar, ncclInt, ncclHalf, ncclFloat, ncclDouble, ncclInt64, ncclUint64};
const char *test_typenames[ncclNumTypes] = {"char", "int", "half", "float", "double", "int64", "uint64"};
#endif
ncclRedOp_t test_ops[ncclNumOps] = {ncclSum, ncclProd, ncclMax, ncclMin};
const char *test_opnames[ncclNumOps] = {"sum", "prod", "max", "min"};
thread_local int is_main_thread = 0;
// Command line parameter defaults
static int nThreads = 1;
static int nGpus = 1;
static size_t minBytes = 32*1024*1024;
static size_t maxBytes = 32*1024*1024;
static size_t stepBytes = 1*1024*1024;
static size_t stepFactor = 1;
static int datacheck = 1;
static int warmup_iters = 5;
static int iters = 20;
static int agg_iters = 1;
static int ncclop = ncclSum;
static int nccltype = ncclFloat;
static int ncclroot = 0;
static int parallel_init = 0;
static int blocking_coll = 0;
double parsesize(char *value) {
long long int units;
double size;
if (strchr(value, 'G') != NULL) {
units=1024*1024*1024;
} else if (strchr(value, 'M') != NULL) {
units=1024*1024;
} else if (strchr(value, 'K') != NULL) {
units=1024;
} else {
units=1;
}
size = atof(value)*units;
return size;
}
double DeltaMaxValue(ncclDataType_t type) {
switch(type) {
case ncclHalf: return 1e-2;
case ncclFloat: return 1e-5;
case ncclDouble: return 1e-12;
case ncclInt:
#if NCCL_MAJOR >= 2
case ncclUint8:
//case ncclInt32:
case ncclUint32:
#endif
case ncclInt64:
case ncclUint64: return 1e-200;
}
return 1e-200;
}
template<typename T> __device__
double absDiff(T a, T b) {
return fabs((double)(b - a));
}
template<> __device__
double absDiff<half>(half a, half b) {
float x = __half2float(a);
float y = __half2float(b);
return fabs((double)(y-x));
}
template<typename T> __device__
float toFloat(T a) {
return (float)a;
}
template<> __device__
float toFloat(half a) {
return __half2float(a);
}
template<typename T, int BSIZE> __global__
void deltaKern(void* A_, void* B_, size_t count, double* max) {
const T* A = (const T*)A_;
const T* B = (const T*)B_;
__shared__ double temp[BSIZE];
int tid = threadIdx.x;
double locmax = 0.0;
for(int i=tid; i<count; i+=blockDim.x) {
double delta = absDiff(A[i], B[i]);
if( delta > locmax ) {
locmax = delta;
#ifdef DEBUG_PRINT
if (delta > .1) printf("Error at %d/%ld : %f != %f\n", i, count, toFloat(A[i]), toFloat(B[i]));
#endif
}
}
temp[tid] = locmax;
for(int stride = BSIZE/2; stride > 1; stride>>=1) {
__syncthreads();
if( tid < stride )
temp[tid] = temp[tid] > temp[tid+stride] ? temp[tid] : temp[tid+stride];
}
__syncthreads();
if( threadIdx.x == 0)
*max = temp[0] > temp[1] ? temp[0] : temp[1];
}
testResult_t CheckDelta(void* expected, void* results, size_t count, ncclDataType_t type, double* devmax) {
switch (type) {
case ncclHalf:
hipLaunchKernelGGL(( deltaKern<half, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break;
case ncclFloat:
hipLaunchKernelGGL(( deltaKern<float, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break;
case ncclDouble:
hipLaunchKernelGGL(( deltaKern<double, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break;
case ncclChar:
#if NCCL_MAJOR >= 2
case ncclUint8:
#endif
hipLaunchKernelGGL(( deltaKern<uint8_t, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break;
case ncclInt:
#if NCCL_MAJOR >= 2
case ncclUint32:
#endif
hipLaunchKernelGGL(( deltaKern<uint32_t, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break;
case ncclInt64:
case ncclUint64:
hipLaunchKernelGGL(( deltaKern<uint64_t, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break;
}
CUDACHECK(hipDeviceSynchronize());
return testSuccess;
}
// For integer values, we use values between 0 and 255
template<typename T>
__device__ T testValue(const size_t offset, const int rep, const int rank) {
uint8_t v = (rep+rank+offset) % 256;
return (T)v;
}
// For floating point datatype, we use values between 0 and 1 otherwise the
// Product operation will produce NaNs.
template<>
__device__ double testValue<double>(const size_t offset, const int rep, const int rank) {
return 1.0/(1.0+(double)testValue<int>(offset, rep, rank));
}
template<>
__device__ float testValue<float>(const size_t offset, const int rep, const int rank) {
return 1.0/(1.0+(float)testValue<int>(offset, rep, rank));
}
template<>
__device__ half testValue<half>(const size_t offset, const int rep, const int rank) {
return __float2half(testValue<float>(offset, rep, rank));
}
// Operations
template<typename T>
__device__ T ncclOpSum(T a, T b) { return a+b; }
template<typename T>
__device__ T ncclOpProd(T a, T b) { return a*b; }
template<typename T>
__device__ T ncclOpMax(T a, T b) { return a>b ? a : b; }
template<typename T>
__device__ T ncclOpMin(T a, T b) { return a<b ? a : b; }
// Definitions for half
template<>
__device__ half ncclOpSum(half a, half b) { return __float2half(__half2float(a)+__half2float(b)); }
template<>
__device__ half ncclOpProd(half a, half b) { return __float2half(__half2float(a)*__half2float(b)); }
template<>
__device__ half ncclOpMax(half a, half b) { return __half2float(a)>__half2float(b) ? a : b; }
template<>
__device__ half ncclOpMin(half a, half b) { return __half2float(a)<__half2float(b) ? a : b; }
template<typename T, T (*Op)(T, T)>
__global__ void InitDataReduceKernel(T* data, const size_t N, const size_t offset, const int rep, const int nranks) {
for (size_t o=blockIdx.x*blockDim.x+threadIdx.x; o<N; o+=gridDim.x*blockDim.x) {
T val = testValue<T>(o+offset, rep, 0);
for (int i=1; i<nranks; i++) {
val = Op(val, testValue<T>(o+offset, rep, i));
}
data[o] = val;
}
}
#define KERN(type, op) (void*)InitDataReduceKernel<type, op<type>>
#define OPS(type) KERN(type, ncclOpSum), KERN(type, ncclOpProd), KERN(type, ncclOpMax), KERN(type, ncclOpMin)
static void* const redInitDataKerns[ncclNumOps*ncclNumTypes] = {
OPS(int8_t), OPS(uint8_t), OPS(int32_t), OPS(uint32_t), OPS(int64_t), OPS(uint64_t), OPS(half), OPS(float), OPS(double)
};
testResult_t InitDataReduce(void* data, const size_t count, const size_t offset, ncclDataType_t type, ncclRedOp_t op, const int rep, const int nranks) {
dim3 grid = { 32, 1, 1 };
dim3 block = { 256, 1, 1 };
void* args[5] = { (void*)&data, (void*)&count, (void*)&offset, (void*)&rep, (void*)&nranks };
CUDACHECK(cudaLaunchKernel(redInitDataKerns[type*ncclNumOps+op], grid, block, args, 0, hipStreamDefault));
return testSuccess;
}
template<typename T>
__global__ void InitDataKernel(T* data, const size_t N, const int rep, const int rank) {
for (size_t o=blockIdx.x*blockDim.x+threadIdx.x; o<N; o+=gridDim.x*blockDim.x)
data[o] = testValue<T>(o, rep, rank);
}
static void* const initDataKerns[ncclNumTypes] = {
(void*)InitDataKernel< int8_t>,
(void*)InitDataKernel< uint8_t>,
(void*)InitDataKernel< int32_t>,
(void*)InitDataKernel<uint32_t>,
(void*)InitDataKernel< int64_t>,
(void*)InitDataKernel<uint64_t>,
(void*)InitDataKernel< half>,
(void*)InitDataKernel< float>,
(void*)InitDataKernel< double>
};
template<typename T>
testResult_t InitDataType(void* dest, const size_t N, const int rep, const int rank) {
T* ptr = (T*)dest;
hipLaunchKernelGGL(( InitDataKernel), dim3(16), dim3(512), 0, 0, ptr, N, rep, rank);
return testSuccess;
}
testResult_t InitData(void* data, const size_t count, ncclDataType_t type, const int rep, const int rank) {
dim3 grid = { 32, 1, 1 };
dim3 block = { 256, 1, 1 };
void* args[4] = { (void*)&data, (void*)&count, (void*)&rep, (void*)&rank };
CUDACHECK(cudaLaunchKernel(initDataKerns[type], grid, block, args, 0, hipStreamDefault));
return testSuccess;
}
void Barrier(struct threadArgs* args)
{
while (args->barrier[args->barrier_idx] != args->thread) pthread_yield();
args->barrier[args->barrier_idx] = args->thread + 1;
if (args->thread+1 == args->nThreads) {
#ifdef MPI_SUPPORT
MPI_Barrier(MPI_COMM_WORLD);
#endif
args->barrier[args->barrier_idx] = 0;
} else {
while (args->barrier[args->barrier_idx]) pthread_yield();
}
args->barrier_idx=!args->barrier_idx;
}
testResult_t CheckData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place, double *delta) {
size_t count = args->expectedBytes/wordSize(type);
double maxDelta = 0.0;
for (int i=0; i<args->nGpus; i++) {
int device;
int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
NCCLCHECK(ncclCommCuDevice(args->comms[i], &device));
CUDACHECK(hipSetDevice(device));
void *data = in_place ? ((void *)((uintptr_t)args->recvbuffs[i] + args->recvInplaceOffset*rank)) : args->recvbuffs[i];
TESTCHECK(CheckDelta(data , args->expected[i], count, type, args->delta));
maxDelta = ::max(*(args->deltaHost), maxDelta);
#ifdef DEBUG_PRINT
if (rank == 0) {
int *expectedHost = (int *)malloc(args->expectedBytes);
int *dataHost = (int *)malloc(args->expectedBytes);
hipMemcpy(expectedHost, args->expected[0], args->expectedBytes, hipMemcpyDeviceToHost);
printf("\n Expected: ");
for(int j=0; j<args->expectedBytes/sizeof(int); j++) {
printf("%d:%d ", j, expectedHost[j]);
}
printf("\n");
hipMemcpy(dataHost, data, args->expectedBytes, hipMemcpyDeviceToHost);
printf("\n Actual: ");
for (int j=0; j<args->expectedBytes/sizeof(int); j++) {
printf("%d:%d ", j, dataHost[j]);
}
printf("\n");
free(expectedHost);
free(dataHost);
}
#endif
}
double nranks = args->nProcs*args->nThreads*args->nGpus;
if (args->reportErrors && maxDelta > DeltaMaxValue(type)*(nranks - 1)) args->errors[0]++;
*delta = maxDelta;
return testSuccess;
}
testResult_t testStreamSynchronize(int ngpus, hipStream_t* streams, ncclComm_t* comms) {
hipError_t cudaErr;
int remaining = ngpus;
int* done = (int*)malloc(sizeof(int)*ngpus);
memset(done, 0, sizeof(int)*ngpus);
while (remaining) {
int idle = 1;
for (int i=0; i<ngpus; i++) {
if (done[i]) continue;
cudaErr = hipStreamQuery(streams[i]);
if (cudaErr == hipSuccess) {
done[i] = 1;
remaining--;
idle = 0;
continue;
}
if (cudaErr != hipErrorNotReady) CUDACHECK(cudaErr);
#if NCCL_VERSION_CODE >= NCCL_VERSION(2,4,0)
if (comms) {
ncclResult_t ncclAsyncErr;
NCCLCHECK(ncclCommGetAsyncError(comms[i], &ncclAsyncErr));
if (ncclAsyncErr != ncclSuccess) {
// An asynchronous error happened. Stop the operation and destroy
// the communicator
for (int i=0; i<ngpus; i++)
NCCLCHECK(ncclCommAbort(comms[i]));
// Abort the perf test
NCCLCHECK(ncclAsyncErr);
}
}
#endif
}
// We might want to let other threads (including NCCL threads) use the CPU.
if (idle) pthread_yield();
}
free(done);
return testSuccess;
}
testResult_t startColl(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place, int iter) {
size_t count = args->nbytes / wordSize(type);
// Try to change offset for each iteration so that we avoid cache effects and catch race conditions in ptrExchange
size_t totalnbytes = max(args->sendBytes, args->expectedBytes);
size_t shift = (totalnbytes * iter) % args->maxbytes;
if (shift + totalnbytes > args->maxbytes) shift = 0;
if (args->nGpus > 1) NCCLCHECK(ncclGroupStart());
for (int i = 0; i < args->nGpus; i++) {
#ifndef NCCL_MAJOR
int cudaDev;
NCCLCHECK(ncclCommCuDevice(args->comms[i], &cudaDev));
CUDACHECK(hipSetDevice(cudaDev));
#endif
int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
char* recvBuff = ((char*)args->recvbuffs[i]) + shift;
char* sendBuff = ((char*)args->sendbuffs[i]) + shift;
TESTCHECK(args->collTest->runColl(
(void*)(in_place ? recvBuff + args->sendInplaceOffset*rank : sendBuff),
(void*)(in_place ? recvBuff + args->recvInplaceOffset*rank : recvBuff),
count, type, op, root, args->comms[i], args->streams[i]));
}
if (args->nGpus > 1) NCCLCHECK(ncclGroupEnd());
if (blocking_coll) {
// Complete op before returning
TESTCHECK(testStreamSynchronize(args->nGpus, args->streams, args->comms));
}
if (blocking_coll) Barrier(args);
return testSuccess;
}
testResult_t completeColl(struct threadArgs* args) {
if (blocking_coll) return testSuccess;
TESTCHECK(testStreamSynchronize(args->nGpus, args->streams, args->comms));
return testSuccess;
}
testResult_t BenchTime(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place) {
size_t count = args->nbytes / wordSize(type);
// Sync
TESTCHECK(startColl(args, type, op, root, in_place, 0));
TESTCHECK(completeColl(args));
Barrier(args);
// Performance Benchmark
auto start = std::chrono::high_resolution_clock::now();
for (int iter = 0; iter < iters; iter++) {
if (agg_iters>1) NCCLCHECK(ncclGroupStart());
for (int aiter = 0; aiter < agg_iters; aiter++) {
TESTCHECK(startColl(args, type, op, root, in_place, iter*agg_iters+aiter));
}
if (agg_iters>1) NCCLCHECK(ncclGroupEnd());
}
TESTCHECK(completeColl(args));
auto delta = std::chrono::high_resolution_clock::now() - start;
double deltaSec = std::chrono::duration_cast<std::chrono::duration<double>>(delta).count();
deltaSec = deltaSec/(iters*agg_iters);
double algBw, busBw;
args->collTest->getBw(count, wordSize(type), deltaSec, &algBw, &busBw, args->nProcs*args->nThreads*args->nGpus);
Barrier(args);
double maxDelta = 0;
static __thread int rep = 0;
rep++;
if (datacheck) {
// Initialize sendbuffs, recvbuffs and expected
TESTCHECK(args->collTest->initData(args, type, op, root, rep, in_place));
//test validation in single itertion, should ideally be included into the multi-iteration run
TESTCHECK(startColl(args, type, op, root, in_place, 0));
TESTCHECK(completeColl(args));
TESTCHECK(CheckData(args, type, op, root, in_place, &maxDelta));
//aggregate delta from all threads and procs
Barrier(args);
if (args->thread == 0) {
for (int i=1; i<args->nThreads; i++) {
maxDelta += args->deltaThreads[i];
}
#ifdef MPI_SUPPORT
MPI_Allreduce(MPI_IN_PLACE, &maxDelta, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
#endif
}
Barrier(args);
}
double timeUsec = deltaSec*1.0E6;
char timeStr[10];
if (timeUsec > 10000.0) {
sprintf(timeStr, "%7.0f", timeUsec);
} else if (timeUsec > 100.0) {
sprintf(timeStr, "%7.1f", timeUsec);
} else {
sprintf(timeStr, "%7.2f", timeUsec);
}
if (datacheck) {
PRINT(" %7s %6.2f %6.2f %5.0le", timeStr, algBw, busBw, maxDelta);
} else {
PRINT(" %7s %6.2f %6.2f %5s", timeStr, algBw, busBw, "N/A");
}
args->bw[0] += busBw;
args->bw_count[0]++;
return testSuccess;
}
void setupArgs(size_t size, ncclDataType_t type, struct threadArgs* args) {
int nranks = args->nProcs*args->nGpus*args->nThreads;
size_t count, sendCount, recvCount, paramCount, sendInplaceOffset, recvInplaceOffset;
count = size / wordSize(type);
args->collTest->getCollByteCount(&sendCount, &recvCount, ¶mCount, &sendInplaceOffset, &recvInplaceOffset, (size_t)count, (size_t)nranks);
args->nbytes = paramCount * wordSize(type);
args->sendBytes = sendCount * wordSize(type);
args->expectedBytes = recvCount * wordSize(type);
args->sendInplaceOffset = sendInplaceOffset * wordSize(type);
args->recvInplaceOffset = recvInplaceOffset * wordSize(type);
}
// Run the full benchmark sweep for one (type, op, root) combination:
// warm up at the largest and the smallest size, then time an out-of-place
// and an in-place run at every size step.
testResult_t TimeTest(struct threadArgs* args, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName, int root) {
  // Warm-up for large size
  setupArgs(args->maxbytes, type, args);
  for (int iter = 0; iter < warmup_iters; iter++) {
    TESTCHECK(startColl(args, type, op, root, 0, iter));
  }
  TESTCHECK(completeColl(args));

  // Warm-up for small size
  setupArgs(args->minbytes, type, args);
  for (int iter = 0; iter < warmup_iters; iter++) {
    TESTCHECK(startColl(args, type, op, root, 0, iter));
  }
  TESTCHECK(completeColl(args));

  // Benchmark: step sizes multiplicatively when stepfactor > 1, additively otherwise.
  for (size_t size = args->minbytes; size<=args->maxbytes; size = ((args->stepfactor > 1) ? size*args->stepfactor : size+args->stepbytes)) {
    setupArgs(size, type, args);
    print_line_header(max(args->sendBytes, args->expectedBytes), args->nbytes / wordSize(type), typeName, opName, root);
    TESTCHECK(BenchTime(args, type, op, root, 0));   // out-of-place
    TESTCHECK(BenchTime(args, type, op, root, 1));   // in-place
    PRINT("\n");
  }
  return testSuccess;
}
// Per-thread entry point: bind this thread to its first GPU, then hand off
// to the collective-specific test engine.
testResult_t threadRunTests(struct threadArgs* args) {
  // Set device to the first of our GPUs. If we don't do that, some operations
  // will be done on the current GPU (by default : 0) and if the GPUs are in
  // exclusive mode those operations will fail.
  int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus;
  CUDACHECK(hipSetDevice(gpuid));
  TESTCHECK(ncclTestEngine.runTest(args, ncclroot, (ncclDataType_t)nccltype, test_typenames[nccltype], (ncclRedOp_t)ncclop, test_opnames[ncclop]));
  return testSuccess;
}
// Parallel-init path: each worker thread creates the NCCL communicators for
// its own GPUs (inside one group call so ranks can rendezvous), runs the
// tests, then destroys its communicators.
testResult_t threadInit(struct threadArgs* args) {
  char hostname[1024];
  getHostName(hostname, 1024);
  int nranks = args->nProcs*args->nThreads*args->nGpus;

  //set main thread again
  is_main_thread = (args->proc == 0 && args->thread == 0) ? 1 : 0;

  NCCLCHECK(ncclGroupStart());
  for (int i=0; i<args->nGpus; i++) {
    // Global NCCL rank vs. node-local device index for this (thread, gpu).
    int rank = args->proc*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
    int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
    CUDACHECK(hipSetDevice(gpuid));
    NCCLCHECK(ncclCommInitRank(args->comms+i, nranks, args->ncclId, rank));
  }
  NCCLCHECK(ncclGroupEnd());

  TESTCHECK(threadRunTests(args));

  for (int i=0; i<args->nGpus; i++) {
    NCCLCHECK(ncclCommDestroy(args->comms[i]));
  }
  return testSuccess;
}
// pthread trampoline: unpack the testThread handle, run its worker function,
// and stash the result where the joining thread can read it.
void* threadLauncher(void* opaque) {
  struct testThread* t = (struct testThread*)opaque;
  t->ret = t->func(&t->args);
  return NULL;
}
// Spawn a worker pthread that runs threadLauncher over `thread`.
// NOTE(review): the pthread_create return value is ignored, so a failed
// spawn would make the later pthread_join undefined — consider checking it.
testResult_t threadLaunch(struct testThread* thread) {
  pthread_create(&thread->thread, NULL, threadLauncher, thread);
  return testSuccess;
}
// Allocate the device buffers for one rank. send/recv buffers are sized by
// `nbytes` (the caller passes maxBytes) so they can be reused across all
// benchmark sizes; only `expected` is sized by recvBytes.
// NOTE(review): sendBytes and nranks are unused here — presumably kept for
// interface symmetry with the test engine; confirm before removing.
testResult_t AllocateBuffs(void **sendbuff, size_t sendBytes, void **recvbuff, size_t recvBytes, void **expected, size_t nbytes, int nranks) {
  CUDACHECK(hipMalloc(sendbuff, nbytes));
  CUDACHECK(hipMalloc(recvbuff, nbytes));
  CUDACHECK(hipMalloc(expected, recvBytes));
  return testSuccess;
}
testResult_t run(); // Main function

// Print the command-line usage summary. Shared by the -h handler and the
// invalid-option handler, which previously duplicated this string verbatim.
static void printUsage(const char* prog) {
  printf("USAGE: %s \n\t"
      "[-t,--nthreads <num threads>] \n\t"
      "[-g,--ngpus <gpus per thread>] \n\t"
      "[-b,--minbytes <min size in bytes>] \n\t"
      "[-e,--maxbytes <max size in bytes>] \n\t"
      "[-i,--stepbytes <increment size>] \n\t"
      "[-f,--stepfactor <increment factor>] \n\t"
      "[-n,--iters <iteration count>] \n\t"
      "[-m,--agg_iters <aggregated iteration count>] \n\t"
      "[-w,--warmup_iters <warmup iteration count>] \n\t"
      "[-p,--parallel_init <0/1>] \n\t"
      "[-c,--check <0/1>] \n\t"
      "[-o,--op <sum/prod/min/max/all>] \n\t"
      "[-d,--datatype <nccltype/all>] \n\t"
      "[-r,--root <root>] \n\t"
      "[-z,--blocking <0/1>] \n\t"
      "[-h,--help]\n",
      prog);
}

// Parse command-line options into the file-scope configuration globals,
// initialize MPI when compiled with MPI_SUPPORT, then run the benchmark.
int main(int argc, char* argv[]) {
  // Make sure everyline is flushed so that we see the progress of the test
  setlinebuf(stdout);

  // Parse args
  int longindex;
  static struct option longopts[] = {
    {"nthreads", required_argument, 0, 't'},
    {"ngpus", required_argument, 0, 'g'},
    {"minbytes", required_argument, 0, 'b'},
    {"maxbytes", required_argument, 0, 'e'},
    {"stepbytes", required_argument, 0, 'i'},
    {"stepfactor", required_argument, 0, 'f'},
    {"iters", required_argument, 0, 'n'},
    {"agg_iters", required_argument, 0, 'm'},
    {"warmup_iters", required_argument, 0, 'w'},
    {"parallel_init", required_argument, 0, 'p'},
    {"check", required_argument, 0, 'c'},
    {"op", required_argument, 0, 'o'},
    {"datatype", required_argument, 0, 'd'},
    {"root", required_argument, 0, 'r'},
    {"blocking", required_argument, 0, 'z'},
    {"help", no_argument, 0, 'h'}
  };

  while(1) {
    int c;
    c = getopt_long(argc, argv, "t:g:b:e:i:f:n:m:w:p:c:o:d:r:z:h", longopts, &longindex);

    if (c == -1)
      break;

    switch(c) {
      case 't':
        nThreads = strtol(optarg, NULL, 0);
        break;
      case 'g':
        nGpus = strtol(optarg, NULL, 0);
        break;
      case 'b':
        minBytes = (size_t)parsesize(optarg);
        break;
      case 'e':
        maxBytes = (size_t)parsesize(optarg);
        break;
      case 'i':
        stepBytes = strtol(optarg, NULL, 0);
        break;
      case 'f':
        stepFactor = strtol(optarg, NULL, 0);
        break;
      case 'n':
        iters = (int)strtol(optarg, NULL, 0);
        break;
      case 'm':
#if NCCL_MAJOR >= 2 && NCCL_MINOR >= 2
        agg_iters = (int)strtol(optarg, NULL, 0);
#else
        printf("Option -m not supported before NCCL 2.2. Ignoring\n");
#endif
        break;
      case 'w':
        warmup_iters = (int)strtol(optarg, NULL, 0);
        break;
      case 'c':
        datacheck = (int)strtol(optarg, NULL, 0);
        break;
      case 'p':
        parallel_init = (int)strtol(optarg, NULL, 0);
        break;
      case 'o':
        ncclop = ncclstringtoop(optarg);
        break;
      case 'd':
        nccltype = ncclstringtotype(optarg);
        break;
      case 'r':
        ncclroot = strtol(optarg, NULL, 0);
        break;
      case 'z':
        blocking_coll = strtol(optarg, NULL, 0);
        break;
      case 'h':
        printUsage(basename(argv[0]));
        return 0;
      default:
        printf("invalid option \n");
        printUsage(basename(argv[0]));
        return 0;
    }
  }

#ifdef MPI_SUPPORT
  MPI_Init(&argc, &argv);
#endif
  return run();
}
// Top-level driver: discover ranks (via MPI when enabled), allocate per-GPU
// buffers/streams, initialize NCCL communicators, spin up one worker thread
// per nThreads, aggregate bandwidth/error statistics, and exit with a status
// reflecting validation and the optional NCCL_TESTS_MIN_BW bandwidth floor.
testResult_t run() {
  int nProcs = 1, proc = 0;
  int localRank = 0;   // index of this process among processes on the same host
  char hostname[1024];
  getHostName(hostname, 1024);

#ifdef MPI_SUPPORT
  MPI_Comm_size(MPI_COMM_WORLD, &nProcs);
  MPI_Comm_rank(MPI_COMM_WORLD, &proc);
  // Count how many lower-ranked processes share our host hash to derive localRank.
  uint64_t hostHashs[nProcs];
  hostHashs[proc] = getHostHash(hostname);
  MPI_Allgather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, hostHashs, sizeof(uint64_t), MPI_BYTE, MPI_COMM_WORLD);
  for (int p=0; p<nProcs; p++) {
    if (p == proc) break;
    if (hostHashs[p] == hostHashs[proc]) localRank++;
  }
#endif
  is_main_thread = (proc == 0) ? 1 : 0;

  PRINT("# nThread %d nGpus %d minBytes %ld maxBytes %ld step: %ld(%s) warmup iters: %d iters: %d validation: %d \n", nThreads, nGpus, minBytes, maxBytes,
      (stepFactor > 1)?stepFactor:stepBytes, (stepFactor > 1)?"factor":"bytes", warmup_iters, iters, datacheck);
  if (blocking_coll) PRINT("# Blocking Enabled: wait for completion and barrier after each collective \n");
  if (parallel_init) PRINT("# Parallel Init Enabled: threads call into NcclInitRank concurrently \n");
  PRINT("#\n");

  PRINT("# Using devices\n");
#define MAX_LINE 2048
  char line[MAX_LINE];
  int len = 0;
  for (int i=0; i<nThreads*nGpus; i++) {
    int cudaDev = localRank*nThreads*nGpus+i;
    int rank = proc*nThreads*nGpus+i;
    hipDeviceProp_t prop;
    CUDACHECK(hipGetDeviceProperties(&prop, cudaDev));
    len += snprintf(line+len, MAX_LINE-len, "# Rank %2d Pid %6d on %10s device %2d [0x%02x] %s\n",
        rank, getpid(), hostname, cudaDev, prop.pciBusID, prop.name);
  }

  // NOTE(review): this uses #if while the rest of the file uses #ifdef for
  // MPI_SUPPORT — works only if MPI_SUPPORT is defined to a value; confirm.
#if MPI_SUPPORT
  char *lines = (proc == 0) ? (char *)malloc(nProcs*MAX_LINE) : NULL;
  // Gather all output in rank order to root (0)
  MPI_Gather(line, MAX_LINE, MPI_BYTE, lines, MAX_LINE, MPI_BYTE, 0, MPI_COMM_WORLD);
  if (proc == 0) {
    for (int p = 0; p < nProcs; p++)
      PRINT("%s", lines+MAX_LINE*p);
    free(lines);
  }
#else
  PRINT("%s", line);
#endif

  // Rank 0 creates the NCCL id and (under MPI) broadcasts it to everyone.
  ncclUniqueId ncclId;
  if (proc == 0) {
    NCCLCHECK(ncclGetUniqueId(&ncclId));
  }
#ifdef MPI_SUPPORT
  MPI_Bcast(&ncclId, sizeof(ncclId), MPI_BYTE, 0, MPI_COMM_WORLD);
#endif

  // Per-GPU streams and device buffers for every (thread, gpu) pair of this process.
  hipStream_t streams[nGpus*nThreads];
  void* sendbuffs[nGpus*nThreads];
  void* recvbuffs[nGpus*nThreads];
  void* expected[nGpus*nThreads];
  size_t sendBytes, recvBytes;

  ncclTestEngine.getBuffSize(&sendBytes, &recvBytes, (size_t)maxBytes, (size_t)nProcs*nGpus*nThreads);

  for (int i=0; i<nGpus*nThreads; i++) {
    CUDACHECK(hipSetDevice(localRank*nThreads*nGpus+i));
    AllocateBuffs(sendbuffs+i, sendBytes, recvbuffs+i, recvBytes, expected+i, (size_t)maxBytes, nProcs*nThreads*nGpus);
    CUDACHECK(hipStreamCreateWithFlags(streams+i, hipStreamNonBlocking));
  }

  //if parallel init is not selected, use main thread to initialize NCCL
  ncclComm_t* comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*nThreads*nGpus);
  if (!parallel_init) {
    if (nProcs == 1) {
      // Single process: one-shot init of all local communicators.
      int gpuArray[nGpus*nThreads];
      for (int i=0; i<nGpus*nThreads; i++) gpuArray[i] = i;
      NCCLCHECK(ncclCommInitAll(comms, nGpus*nThreads, gpuArray));
    } else {
      // Multi-process: rank-by-rank init inside a group call.
      NCCLCHECK(ncclGroupStart());
      for (int i=0; i<nGpus*nThreads; i++) {
        CUDACHECK(hipSetDevice(localRank*nThreads*nGpus+i));
        NCCLCHECK(ncclCommInitRank(comms+i, nProcs*nThreads*nGpus, ncclId, proc*nThreads*nGpus+i));
      }
      NCCLCHECK(ncclGroupEnd());
    }
  }

  // Per-thread accumulators; `delta` is pinned+mapped so device validation
  // kernels can write directly into host-visible memory.
  int errors[nThreads];
  double bw[nThreads];
  double* delta;
  CUDACHECK(hipHostMalloc(&delta, sizeof(double)*nThreads, hipHostMallocPortable | hipHostMallocMapped));
  int bw_count[nThreads];
  for (int t=0; t<nThreads; t++) {
    bw[t] = 0.0;
    errors[t] = bw_count[t] = 0;
  }

  PRINT("#\n");
  print_header();

  // Shared double-buffered counters used by Barrier()/sync between threads.
  int* sync = (int*)calloc(2, sizeof(int));
  int* barrier = (int*)calloc(2, sizeof(int));

  struct testThread threads[nThreads];
  memset(threads, 0, sizeof(struct testThread)*nThreads);

  // Launch workers from the highest thread id down; thread 0 runs inline.
  for (int t=nThreads-1; t>=0; t--) {
    threads[t].args.minbytes=minBytes;
    threads[t].args.maxbytes=maxBytes;
    threads[t].args.stepbytes=stepBytes;
    threads[t].args.stepfactor=stepFactor;
    threads[t].args.localRank = localRank;

    threads[t].args.nProcs=nProcs;
    threads[t].args.proc=proc;
    threads[t].args.nThreads=nThreads;
    threads[t].args.thread=t;
    threads[t].args.nGpus=nGpus;
    threads[t].args.sendbuffs = sendbuffs+t*nGpus;
    threads[t].args.recvbuffs = recvbuffs+t*nGpus;
    threads[t].args.expected = expected+t*nGpus;
    threads[t].args.ncclId = ncclId;
    threads[t].args.comms=comms+t*nGpus;
    threads[t].args.streams=streams+t*nGpus;

    threads[t].args.barrier = (volatile int*)barrier;
    threads[t].args.barrier_idx = 0;
    threads[t].args.sync = (volatile int*)sync;
    threads[t].args.sync_idx = 0;
    threads[t].args.deltaThreads = delta;
    threads[t].args.deltaHost = (delta + t);
    threads[t].args.delta = delta;
    threads[t].args.errors=errors+t;
    threads[t].args.bw=bw+t;
    threads[t].args.bw_count=bw_count+t;

    threads[t].args.reportErrors = 1;

    threads[t].func = parallel_init ? threadInit : threadRunTests;
    if (t)
      TESTCHECK(threadLaunch(threads+t));
    else
      TESTCHECK(threads[t].func(&threads[t].args));
  }

  // Wait for other threads and accumulate stats and errors
  for (int t=nThreads-1; t>=0; t--) {
    if (t) pthread_join(threads[t].thread, NULL);
    TESTCHECK(threads[t].ret);
    if (t) {
      errors[0] += errors[t];
      bw[0] += bw[t];
      bw_count[0] += bw_count[t];
    }
  }

#ifdef MPI_SUPPORT
  MPI_Allreduce(MPI_IN_PLACE, &errors[0], 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
#endif

  if (!parallel_init) {
    for(int i=0; i<nGpus*nThreads; ++i)
      NCCLCHECK(ncclCommDestroy(comms[i]));
    free(comms);
  }

  // Free off CUDA allocated memory
  for (int i=0; i<nGpus*nThreads; i++) {
    CUDACHECK(hipFree(sendbuffs[i]));
    CUDACHECK(hipFree(recvbuffs[i]));
    CUDACHECK(hipFree(expected[i]));
  }
  CUDACHECK(hipHostFree(delta));

  char* str = getenv("NCCL_TESTS_MIN_BW");
  double check_avg_bw = str ? atof(str) : -1;
  bw[0] /= bw_count[0];

  PRINT("# Out of bounds values : %d %s\n", errors[0], errors[0] ? "FAILED" : "OK");
  PRINT("# Avg bus bandwidth    : %g %s\n", bw[0], check_avg_bw == -1 ? "" : (bw[0] < check_avg_bw*(0.9) ? "FAILED" : "OK"));
  PRINT("#\n");
#ifdef MPI_SUPPORT
  MPI_Finalize();
#endif

  // 'cuda-memcheck --leak-check full' requires this
  hipDeviceReset();

  // Fail if any validation error occurred or average bandwidth is below 90%
  // of the requested floor.
  if (errors[0] || bw[0] < check_avg_bw*(0.9))
    exit(EXIT_FAILURE);
  else
    exit(EXIT_SUCCESS);
}
| 84de764e6b359827f529de280369e599421e8071.cu | /*************************************************************************
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "common.h"
#include <pthread.h>
#include <cstdio>
#include <getopt.h>
#include <libgen.h>
#include "cuda.h"
// Parallel lookup tables mapping each ncclDataType_t / ncclRedOp_t value to
// a printable name; the entry order must match the corresponding NCCL enum.
#if NCCL_MAJOR >= 2
ncclDataType_t test_types[ncclNumTypes] = {ncclInt8, ncclUint8, ncclInt32, ncclUint32, ncclInt64, ncclUint64, ncclHalf, ncclFloat, ncclDouble};
const char *test_typenames[ncclNumTypes] = {"int8", "uint8", "int32", "uint32", "int64", "uint64", "half", "float", "double"};
#else
ncclDataType_t test_types[ncclNumTypes] = {ncclChar, ncclInt, ncclHalf, ncclFloat, ncclDouble, ncclInt64, ncclUint64};
const char *test_typenames[ncclNumTypes] = {"char", "int", "half", "float", "double", "int64", "uint64"};
#endif
ncclRedOp_t test_ops[ncclNumOps] = {ncclSum, ncclProd, ncclMax, ncclMin};
const char *test_opnames[ncclNumOps] = {"sum", "prod", "max", "min"};

// Set to 1 on proc 0 / thread 0 only; presumably consulted by PRINT to keep
// output single-sourced — confirm in common.h.
thread_local int is_main_thread = 0;

// Command line parameter defaults
static int nThreads = 1;
static int nGpus = 1;
static size_t minBytes = 32*1024*1024;
static size_t maxBytes = 32*1024*1024;
static size_t stepBytes = 1*1024*1024;
static size_t stepFactor = 1;
static int datacheck = 1;      // validate results against expected values
static int warmup_iters = 5;
static int iters = 20;
static int agg_iters = 1;      // collectives aggregated per timed iteration
static int ncclop = ncclSum;
static int nccltype = ncclFloat;
static int ncclroot = 0;
static int parallel_init = 0;  // 1: each thread inits its own communicators
static int blocking_coll = 0;  // 1: synchronize + barrier after every collective
// Parse a human-readable size string ("7", "32K", "1M", "2G") into bytes.
// Generalized to accept lower-case suffixes as well: previously "2k" silently
// parsed as 2 bytes because only upper-case K/M/K were recognized.
// The G multiplier uses a long long literal so it cannot overflow int.
double parsesize(char *value) {
    long long int units;
    double size;

    if (strchr(value, 'G') != NULL || strchr(value, 'g') != NULL) {
        units = 1024LL*1024*1024;
    } else if (strchr(value, 'M') != NULL || strchr(value, 'm') != NULL) {
        units = 1024*1024;
    } else if (strchr(value, 'K') != NULL || strchr(value, 'k') != NULL) {
        units = 1024;
    } else {
        units = 1;
    }

    size = atof(value)*units;
    return size;
}
// Maximum acceptable per-element delta when validating results, by datatype.
// Floating-point types get a tolerance scaled to their precision; integer
// reductions must match exactly, so they use an effectively-zero bound.
double DeltaMaxValue(ncclDataType_t type) {
  switch(type) {
    case ncclHalf: return 1e-2;
    case ncclFloat: return 1e-5;
    case ncclDouble: return 1e-12;

    case ncclInt:
#if NCCL_MAJOR >= 2
    case ncclUint8:
    //case ncclInt32:
    case ncclUint32:
#endif
    case ncclInt64:
    case ncclUint64: return 1e-200;
  }
  // Unreachable for valid types; same zero-tolerance fallback.
  return 1e-200;
}
// |b - a| computed in double precision; specialized for half, which must be
// widened through __half2float before arithmetic.
template<typename T> __device__
double absDiff(T a, T b) {
  return fabs((double)(b - a));
}

template<> __device__
double absDiff<half>(half a, half b) {
  float x = __half2float(a);
  float y = __half2float(b);
  return fabs((double)(y-x));
}

// Widen any element type to float (used for debug printing); half again needs
// the explicit conversion intrinsic.
template<typename T> __device__
float toFloat(T a) {
  return (float)a;
}
template<> __device__
float toFloat(half a) {
  return __half2float(a);
}
// Single-block kernel: writes max_i |A[i]-B[i]| to *max.
// Launched as <<<1, BSIZE>>>; each thread strides over the input collecting a
// local maximum, then a shared-memory tree reduction combines them. BSIZE
// must be a power of two: the reduction halves the stride down to 2 and
// thread 0 merges the final pair (temp[0], temp[1]).
template<typename T, int BSIZE> __global__
void deltaKern(void* A_, void* B_, size_t count, double* max) {
  const T* A = (const T*)A_;
  const T* B = (const T*)B_;
  __shared__ double temp[BSIZE];
  int tid = threadIdx.x;
  double locmax = 0.0;
  // Per-thread pass over the data, stride = blockDim.x.
  for(int i=tid; i<count; i+=blockDim.x) {

    double delta = absDiff(A[i], B[i]);
    if( delta > locmax ) {
      locmax = delta;
#ifdef DEBUG_PRINT
      if (delta > .1) printf("Error at %d/%ld : %f != %f\n", i, count, toFloat(A[i]), toFloat(B[i]));
#endif
    }
  }

  // Tree reduction in shared memory; barrier before each halving step.
  temp[tid] = locmax;
  for(int stride = BSIZE/2; stride > 1; stride>>=1) {
    __syncthreads();
    if( tid < stride )
      temp[tid] = temp[tid] > temp[tid+stride] ? temp[tid] : temp[tid+stride];
  }
  __syncthreads();
  if( threadIdx.x == 0)
    *max = temp[0] > temp[1] ? temp[0] : temp[1];
}
// Host wrapper around deltaKern: dispatch on the element type, launch one
// 512-thread block, and synchronize so *devmax holds the maximum element-wise
// delta between `results` and `expected` on return.
testResult_t CheckDelta(void* expected, void* results, size_t count, ncclDataType_t type, double* devmax) {
  switch (type) {
    case ncclHalf:
      deltaKern<half, 512><<<1, 512>>>(results, expected, count, devmax); break;
    case ncclFloat:
      deltaKern<float, 512><<<1, 512>>>(results, expected, count, devmax); break;
    case ncclDouble:
      deltaKern<double, 512><<<1, 512>>>(results, expected, count, devmax); break;

    case ncclChar:
#if NCCL_MAJOR >= 2
    case ncclUint8:
#endif
      deltaKern<uint8_t, 512><<<1, 512>>>(results, expected, count, devmax); break;
    case ncclInt:
#if NCCL_MAJOR >= 2
    case ncclUint32:
#endif
      deltaKern<uint32_t, 512><<<1, 512>>>(results, expected, count, devmax); break;
    case ncclInt64:
    case ncclUint64:
      deltaKern<uint64_t, 512><<<1, 512>>>(results, expected, count, devmax); break;
  }
  CUDACHECK(cudaDeviceSynchronize());
  return testSuccess;
}
// Deterministic per-element test pattern, a pure function of
// (offset, rep, rank) so expected results can be regenerated anywhere.
// For integer values, we use values between 0 and 255
template<typename T>
__device__ T testValue(const size_t offset, const int rep, const int rank) {
  uint8_t v = (rep+rank+offset) % 256;
  return (T)v;
}

// For floating point datatype, we use values between 0 and 1 otherwise the
// Product operation will produce NaNs.
template<>
__device__ double testValue<double>(const size_t offset, const int rep, const int rank) {
  return 1.0/(1.0+(double)testValue<int>(offset, rep, rank));
}
template<>
__device__ float testValue<float>(const size_t offset, const int rep, const int rank) {
  return 1.0/(1.0+(float)testValue<int>(offset, rep, rank));
}
template<>
__device__ half testValue<half>(const size_t offset, const int rep, const int rank) {
  return __float2half(testValue<float>(offset, rep, rank));
}
// Operations
// Device-side mirrors of the NCCL reduction ops, used to compute expected
// results. half has no native operators here, so its specializations route
// through float conversions.
template<typename T>
__device__ T ncclOpSum(T a, T b) { return a+b; }
template<typename T>
__device__ T ncclOpProd(T a, T b) { return a*b; }
template<typename T>
__device__ T ncclOpMax(T a, T b) { return a>b ? a : b; }
template<typename T>
__device__ T ncclOpMin(T a, T b) { return a<b ? a : b; }

// Definitions for half
template<>
__device__ half ncclOpSum(half a, half b) { return __float2half(__half2float(a)+__half2float(b)); }
template<>
__device__ half ncclOpProd(half a, half b) { return __float2half(__half2float(a)*__half2float(b)); }
template<>
__device__ half ncclOpMax(half a, half b) { return __half2float(a)>__half2float(b) ? a : b; }
template<>
__device__ half ncclOpMin(half a, half b) { return __half2float(a)<__half2float(b) ? a : b; }
// Fill `data` with the expected result of reducing ranks 0..nranks-1 with Op:
// each element regenerates every rank's deterministic testValue and folds
// them together. Grid-stride loop, so any launch configuration is valid.
template<typename T, T (*Op)(T, T)>
__global__ void InitDataReduceKernel(T* data, const size_t N, const size_t offset, const int rep, const int nranks) {
  for (size_t o=blockIdx.x*blockDim.x+threadIdx.x; o<N; o+=gridDim.x*blockDim.x) {
    T val = testValue<T>(o+offset, rep, 0);
    for (int i=1; i<nranks; i++) {
      val = Op(val, testValue<T>(o+offset, rep, i));
    }
    data[o] = val;
  }
}
// Kernel pointer table indexed as [type*ncclNumOps + op]: row order must
// match the ncclDataType_t enum and column order the ncclRedOp_t enum.
#define KERN(type, op) (void*)InitDataReduceKernel<type, op<type>>
#define OPS(type) KERN(type, ncclOpSum), KERN(type, ncclOpProd), KERN(type, ncclOpMax), KERN(type, ncclOpMin)

static void* const redInitDataKerns[ncclNumOps*ncclNumTypes] = {
  OPS(int8_t), OPS(uint8_t), OPS(int32_t), OPS(uint32_t), OPS(int64_t), OPS(uint64_t), OPS(half), OPS(float), OPS(double)
};
// Fill `data` with the expected reduction of `count` elements starting at
// `offset`, by launching the matching kernel from redInitDataKerns
// (32 blocks x 256 threads on the default stream; not synchronized here).
testResult_t InitDataReduce(void* data, const size_t count, const size_t offset, ncclDataType_t type, ncclRedOp_t op, const int rep, const int nranks) {
  dim3 grid = { 32, 1, 1 };
  dim3 block = { 256, 1, 1 };
  void* args[5] = { (void*)&data, (void*)&count, (void*)&offset, (void*)&rep, (void*)&nranks };
  CUDACHECK(cudaLaunchKernel(redInitDataKerns[type*ncclNumOps+op], grid, block, args, 0, cudaStreamDefault));
  return testSuccess;
}
// Fill data[0..N) with rank's deterministic test pattern (grid-stride loop).
template<typename T>
__global__ void InitDataKernel(T* data, const size_t N, const int rep, const int rank) {
  for (size_t o=blockIdx.x*blockDim.x+threadIdx.x; o<N; o+=gridDim.x*blockDim.x)
    data[o] = testValue<T>(o, rep, rank);
}
// One InitDataKernel instantiation per ncclDataType_t, in enum order
// (indexed directly by type in InitData).
static void* const initDataKerns[ncclNumTypes] = {
  (void*)InitDataKernel< int8_t>,
  (void*)InitDataKernel< uint8_t>,
  (void*)InitDataKernel< int32_t>,
  (void*)InitDataKernel<uint32_t>,
  (void*)InitDataKernel< int64_t>,
  (void*)InitDataKernel<uint64_t>,
  (void*)InitDataKernel< half>,
  (void*)InitDataKernel< float>,
  (void*)InitDataKernel< double>
};
// Typed helper that launches InitDataKernel directly (16 blocks x 512
// threads). NOTE(review): not referenced elsewhere in this file — InitData
// goes through cudaLaunchKernel instead; confirm usage before removing.
template<typename T>
testResult_t InitDataType(void* dest, const size_t N, const int rep, const int rank) {
  T* ptr = (T*)dest;
  InitDataKernel<<<16, 512>>>(ptr, N, rep, rank);
  return testSuccess;
}
// Fill `data` with `count` deterministic values for (rep, rank) using the
// dtype-matched kernel from initDataKerns; launched on the default stream
// and not synchronized here.
testResult_t InitData(void* data, const size_t count, ncclDataType_t type, const int rep, const int rank) {
  dim3 grid = { 32, 1, 1 };
  dim3 block = { 256, 1, 1 };
  void* args[4] = { (void*)&data, (void*)&count, (void*)&rep, (void*)&rank };
  CUDACHECK(cudaLaunchKernel(initDataKerns[type], grid, block, args, 0, cudaStreamDefault));
  return testSuccess;
}
// Barrier across this process's worker threads, plus an MPI barrier once the
// last local thread arrives. Uses a double-buffered counter (barrier_idx
// flips each call so consecutive barriers don't interfere) and spin-waits
// with pthread_yield.
void Barrier(struct threadArgs* args)
{
  // Threads enter in thread-id order: wait until the counter equals our id.
  while (args->barrier[args->barrier_idx] != args->thread) pthread_yield();
  args->barrier[args->barrier_idx] = args->thread + 1;

  if (args->thread+1 == args->nThreads) {
#ifdef MPI_SUPPORT
    MPI_Barrier(MPI_COMM_WORLD);
#endif
    // Last thread resets the counter, releasing everyone else.
    args->barrier[args->barrier_idx] = 0;
  } else {
    while (args->barrier[args->barrier_idx]) pthread_yield();
  }
  args->barrier_idx=!args->barrier_idx;
}
// Compare each of this thread's GPU result buffers against the precomputed
// expected buffer; report the maximum element-wise delta in *delta and bump
// args->errors[0] when it exceeds the per-type tolerance (scaled by nranks-1).
testResult_t CheckData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place, double *delta) {
  size_t count = args->expectedBytes/wordSize(type);
  double maxDelta = 0.0;
  for (int i=0; i<args->nGpus; i++) {
    int device;
    int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
    NCCLCHECK(ncclCommCuDevice(args->comms[i], &device));
    CUDACHECK(cudaSetDevice(device));
    // In-place results live at a rank-specific offset inside the recv buffer.
    void *data = in_place ? ((void *)((uintptr_t)args->recvbuffs[i] + args->recvInplaceOffset*rank)) : args->recvbuffs[i];
    TESTCHECK(CheckDelta(data , args->expected[i], count, type, args->delta));
    maxDelta = std::max(*(args->deltaHost), maxDelta);

#ifdef DEBUG_PRINT
    if (rank == 0) {
      int *expectedHost = (int *)malloc(args->expectedBytes);
      int *dataHost = (int *)malloc(args->expectedBytes);

      cudaMemcpy(expectedHost, args->expected[0], args->expectedBytes, cudaMemcpyDeviceToHost);
      printf("\n Expected: ");
      for(int j=0; j<args->expectedBytes/sizeof(int); j++) {
        printf("%d:%d ", j, expectedHost[j]);
      }
      printf("\n");

      cudaMemcpy(dataHost, data, args->expectedBytes, cudaMemcpyDeviceToHost);
      printf("\n Actual: ");
      for (int j=0; j<args->expectedBytes/sizeof(int); j++) {
        printf("%d:%d ", j, dataHost[j]);
      }
      printf("\n");
      free(expectedHost);
      free(dataHost);
    }
#endif
  }
  double nranks = args->nProcs*args->nThreads*args->nGpus;
  if (args->reportErrors && maxDelta > DeltaMaxValue(type)*(nranks - 1)) args->errors[0]++;
  *delta = maxDelta;
  return testSuccess;
}
// Poll all streams until every one completes, yielding the CPU when nothing
// progressed. When comms is non-NULL (NCCL >= 2.4), also watch for async NCCL
// errors and abort all communicators if one occurs.
// NOTE(review): the CUDACHECK/NCCLCHECK macros return early on error, leaking
// `done` on those paths — tolerable for a test-abort path, but worth noting.
testResult_t testStreamSynchronize(int ngpus, cudaStream_t* streams, ncclComm_t* comms) {
  cudaError_t cudaErr;
  int remaining = ngpus;
  int* done = (int*)malloc(sizeof(int)*ngpus);
  memset(done, 0, sizeof(int)*ngpus);

  while (remaining) {
    int idle = 1;
    for (int i=0; i<ngpus; i++) {
      if (done[i]) continue;

      cudaErr = cudaStreamQuery(streams[i]);
      if (cudaErr == cudaSuccess) {
        done[i] = 1;
        remaining--;
        idle = 0;
        continue;
      }

      // Anything other than "still running" is a real failure.
      if (cudaErr != cudaErrorNotReady) CUDACHECK(cudaErr);

#if NCCL_VERSION_CODE >= NCCL_VERSION(2,4,0)
      if (comms) {
        ncclResult_t ncclAsyncErr;
        NCCLCHECK(ncclCommGetAsyncError(comms[i], &ncclAsyncErr));
        if (ncclAsyncErr != ncclSuccess) {
          // An asynchronous error happened. Stop the operation and destroy
          // the communicator
          for (int i=0; i<ngpus; i++)
            NCCLCHECK(ncclCommAbort(comms[i]));
          // Abort the perf test
          NCCLCHECK(ncclAsyncErr);
        }
      }
#endif
    }

    // We might want to let other threads (including NCCL threads) use the CPU.
    if (idle) pthread_yield();
  }

  free(done);
  return testSuccess;
}
// Enqueue one collective on every GPU owned by this thread (grouped when
// there is more than one GPU). When blocking_coll is set, also wait for
// completion and barrier before returning.
testResult_t startColl(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place, int iter) {
  size_t count = args->nbytes / wordSize(type);

  // Try to change offset for each iteration so that we avoid cache effects and catch race conditions in ptrExchange
  size_t totalnbytes = max(args->sendBytes, args->expectedBytes);
  size_t shift = (totalnbytes * iter) % args->maxbytes;
  if (shift + totalnbytes > args->maxbytes) shift = 0;   // shifted window must stay in-bounds

  if (args->nGpus > 1) NCCLCHECK(ncclGroupStart());
  for (int i = 0; i < args->nGpus; i++) {
#ifndef NCCL_MAJOR
    // NCCL 1.x: the runtime does not track the device per-comm; set it explicitly.
    int cudaDev;
    NCCLCHECK(ncclCommCuDevice(args->comms[i], &cudaDev));
    CUDACHECK(cudaSetDevice(cudaDev));
#endif
    int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
    char* recvBuff = ((char*)args->recvbuffs[i]) + shift;
    char* sendBuff = ((char*)args->sendbuffs[i]) + shift;
    // In-place mode sources/sinks inside the recv buffer at rank-specific offsets.
    TESTCHECK(args->collTest->runColl(
          (void*)(in_place ? recvBuff + args->sendInplaceOffset*rank : sendBuff),
          (void*)(in_place ? recvBuff + args->recvInplaceOffset*rank : recvBuff),
          count, type, op, root, args->comms[i], args->streams[i]));
  }
  if (args->nGpus > 1) NCCLCHECK(ncclGroupEnd());

  if (blocking_coll) {
    // Complete op before returning
    TESTCHECK(testStreamSynchronize(args->nGpus, args->streams, args->comms));
  }
  if (blocking_coll) Barrier(args);
  return testSuccess;
}
// Wait for all in-flight collectives on this thread's streams. In blocking
// mode there is nothing to do: startColl already synchronized and barriered.
testResult_t completeColl(struct threadArgs* args) {
  if (!blocking_coll) {
    TESTCHECK(testStreamSynchronize(args->nGpus, args->streams, args->comms));
  }
  return testSuccess;
}
// Time `iters` (x agg_iters) collectives for the current size, compute
// algorithm/bus bandwidth, optionally validate results against the expected
// buffer, and print one result column group.
// Fix: timeStr was char[10]; "%7.0f" of a run longer than ~1e9 microseconds
// needs more than 9 characters and overflowed the buffer. The buffer is now
// generously sized and written with snprintf so output stays bounded.
testResult_t BenchTime(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place) {
  size_t count = args->nbytes / wordSize(type);

  // Sync
  TESTCHECK(startColl(args, type, op, root, in_place, 0));
  TESTCHECK(completeColl(args));

  Barrier(args);

  // Performance Benchmark
  auto start = std::chrono::high_resolution_clock::now();
  for (int iter = 0; iter < iters; iter++) {
    if (agg_iters>1) NCCLCHECK(ncclGroupStart());
    for (int aiter = 0; aiter < agg_iters; aiter++) {
      TESTCHECK(startColl(args, type, op, root, in_place, iter*agg_iters+aiter));
    }
    if (agg_iters>1) NCCLCHECK(ncclGroupEnd());
  }
  TESTCHECK(completeColl(args));

  auto delta = std::chrono::high_resolution_clock::now() - start;
  double deltaSec = std::chrono::duration_cast<std::chrono::duration<double>>(delta).count();
  deltaSec = deltaSec/(iters*agg_iters);   // average time per collective

  double algBw, busBw;
  args->collTest->getBw(count, wordSize(type), deltaSec, &algBw, &busBw, args->nProcs*args->nThreads*args->nGpus);

  Barrier(args);

  double maxDelta = 0;
  static __thread int rep = 0;   // per-thread repetition counter varies the test pattern
  rep++;
  if (datacheck) {
      // Initialize sendbuffs, recvbuffs and expected
      TESTCHECK(args->collTest->initData(args, type, op, root, rep, in_place));
      //test validation in single itertion, should ideally be included into the multi-iteration run
      TESTCHECK(startColl(args, type, op, root, in_place, 0));
      TESTCHECK(completeColl(args));
      TESTCHECK(CheckData(args, type, op, root, in_place, &maxDelta));

      //aggregate delta from all threads and procs
      Barrier(args);
      if (args->thread == 0) {
        for (int i=1; i<args->nThreads; i++) {
          maxDelta += args->deltaThreads[i];
        }
#ifdef MPI_SUPPORT
        MPI_Allreduce(MPI_IN_PLACE, &maxDelta, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
#endif
      }
      Barrier(args);
  }

  double timeUsec = deltaSec*1.0E6;
  char timeStr[32];
  if (timeUsec > 10000.0) {
    snprintf(timeStr, sizeof(timeStr), "%7.0f", timeUsec);
  } else if (timeUsec > 100.0) {
    snprintf(timeStr, sizeof(timeStr), "%7.1f", timeUsec);
  } else {
    snprintf(timeStr, sizeof(timeStr), "%7.2f", timeUsec);
  }
  if (datacheck) {
    PRINT(" %7s %6.2f %6.2f %5.0le", timeStr, algBw, busBw, maxDelta);
  } else {
    PRINT(" %7s %6.2f %6.2f %5s", timeStr, algBw, busBw, "N/A");
  }

  args->bw[0] += busBw;
  args->bw_count[0]++;
  return testSuccess;
}
// Derive the per-collective element counts and in-place offsets for message
// `size` and store them (converted to bytes) in args. The collective under
// test translates the raw element count via getCollByteCount.
// Fix: the third argument to getCollByteCount had been corrupted by
// HTML-entity mangling of "&paramCount" (rendered as a pilcrow), which does
// not compile.
void setupArgs(size_t size, ncclDataType_t type, struct threadArgs* args) {
  int nranks = args->nProcs*args->nGpus*args->nThreads;
  size_t count, sendCount, recvCount, paramCount, sendInplaceOffset, recvInplaceOffset;

  count = size / wordSize(type);
  args->collTest->getCollByteCount(&sendCount, &recvCount, &paramCount, &sendInplaceOffset, &recvInplaceOffset, (size_t)count, (size_t)nranks);

  args->nbytes = paramCount * wordSize(type);
  args->sendBytes = sendCount * wordSize(type);
  args->expectedBytes = recvCount * wordSize(type);
  args->sendInplaceOffset = sendInplaceOffset * wordSize(type);
  args->recvInplaceOffset = recvInplaceOffset * wordSize(type);
}
// Run the full benchmark sweep for one (type, op, root) combination:
// warm up at the largest and the smallest size, then time an out-of-place
// and an in-place run at every size step.
testResult_t TimeTest(struct threadArgs* args, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName, int root) {
  // Warm-up for large size
  setupArgs(args->maxbytes, type, args);
  for (int iter = 0; iter < warmup_iters; iter++) {
    TESTCHECK(startColl(args, type, op, root, 0, iter));
  }
  TESTCHECK(completeColl(args));

  // Warm-up for small size
  setupArgs(args->minbytes, type, args);
  for (int iter = 0; iter < warmup_iters; iter++) {
    TESTCHECK(startColl(args, type, op, root, 0, iter));
  }
  TESTCHECK(completeColl(args));

  // Benchmark: step sizes multiplicatively when stepfactor > 1, additively otherwise.
  for (size_t size = args->minbytes; size<=args->maxbytes; size = ((args->stepfactor > 1) ? size*args->stepfactor : size+args->stepbytes)) {
    setupArgs(size, type, args);
    print_line_header(max(args->sendBytes, args->expectedBytes), args->nbytes / wordSize(type), typeName, opName, root);
    TESTCHECK(BenchTime(args, type, op, root, 0));   // out-of-place
    TESTCHECK(BenchTime(args, type, op, root, 1));   // in-place
    PRINT("\n");
  }
  return testSuccess;
}
// Per-thread entry point: bind this thread to its first GPU, then hand off
// to the collective-specific test engine.
testResult_t threadRunTests(struct threadArgs* args) {
  // Set device to the first of our GPUs. If we don't do that, some operations
  // will be done on the current GPU (by default : 0) and if the GPUs are in
  // exclusive mode those operations will fail.
  int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus;
  CUDACHECK(cudaSetDevice(gpuid));
  TESTCHECK(ncclTestEngine.runTest(args, ncclroot, (ncclDataType_t)nccltype, test_typenames[nccltype], (ncclRedOp_t)ncclop, test_opnames[ncclop]));
  return testSuccess;
}
// Parallel-init path: each worker thread creates the NCCL communicators for
// its own GPUs (inside one group call so ranks can rendezvous), runs the
// tests, then destroys its communicators.
testResult_t threadInit(struct threadArgs* args) {
  char hostname[1024];
  getHostName(hostname, 1024);
  int nranks = args->nProcs*args->nThreads*args->nGpus;

  //set main thread again
  is_main_thread = (args->proc == 0 && args->thread == 0) ? 1 : 0;

  NCCLCHECK(ncclGroupStart());
  for (int i=0; i<args->nGpus; i++) {
    // Global NCCL rank vs. node-local device index for this (thread, gpu).
    int rank = args->proc*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
    int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
    CUDACHECK(cudaSetDevice(gpuid));
    NCCLCHECK(ncclCommInitRank(args->comms+i, nranks, args->ncclId, rank));
  }
  NCCLCHECK(ncclGroupEnd());

  TESTCHECK(threadRunTests(args));

  for (int i=0; i<args->nGpus; i++) {
    NCCLCHECK(ncclCommDestroy(args->comms[i]));
  }
  return testSuccess;
}
// pthread trampoline: unpack the testThread handle, run its worker function,
// and stash the result where the joining thread can read it.
void* threadLauncher(void* opaque) {
  struct testThread* t = (struct testThread*)opaque;
  t->ret = t->func(&t->args);
  return NULL;
}
// Spawn a worker pthread that runs threadLauncher over `thread`.
// NOTE(review): the pthread_create return value is ignored, so a failed
// spawn would make the later pthread_join undefined — consider checking it.
testResult_t threadLaunch(struct testThread* thread) {
  pthread_create(&thread->thread, NULL, threadLauncher, thread);
  return testSuccess;
}
// Allocate the device buffers for one rank. send/recv buffers are sized by
// `nbytes` (the caller passes maxBytes) so they can be reused across all
// benchmark sizes; only `expected` is sized by recvBytes.
// NOTE(review): sendBytes and nranks are unused here — presumably kept for
// interface symmetry with the test engine; confirm before removing.
testResult_t AllocateBuffs(void **sendbuff, size_t sendBytes, void **recvbuff, size_t recvBytes, void **expected, size_t nbytes, int nranks) {
  CUDACHECK(cudaMalloc(sendbuff, nbytes));
  CUDACHECK(cudaMalloc(recvbuff, nbytes));
  CUDACHECK(cudaMalloc(expected, recvBytes));
  return testSuccess;
}
testResult_t run(); // Main function

// Print the command-line usage summary. Shared by the -h handler and the
// invalid-option handler, which previously duplicated this string verbatim.
static void printUsage(const char* prog) {
  printf("USAGE: %s \n\t"
      "[-t,--nthreads <num threads>] \n\t"
      "[-g,--ngpus <gpus per thread>] \n\t"
      "[-b,--minbytes <min size in bytes>] \n\t"
      "[-e,--maxbytes <max size in bytes>] \n\t"
      "[-i,--stepbytes <increment size>] \n\t"
      "[-f,--stepfactor <increment factor>] \n\t"
      "[-n,--iters <iteration count>] \n\t"
      "[-m,--agg_iters <aggregated iteration count>] \n\t"
      "[-w,--warmup_iters <warmup iteration count>] \n\t"
      "[-p,--parallel_init <0/1>] \n\t"
      "[-c,--check <0/1>] \n\t"
      "[-o,--op <sum/prod/min/max/all>] \n\t"
      "[-d,--datatype <nccltype/all>] \n\t"
      "[-r,--root <root>] \n\t"
      "[-z,--blocking <0/1>] \n\t"
      "[-h,--help]\n",
      prog);
}

// Parse command-line options into the file-scope configuration globals,
// initialize MPI when compiled with MPI_SUPPORT, then run the benchmark.
int main(int argc, char* argv[]) {
  // Make sure everyline is flushed so that we see the progress of the test
  setlinebuf(stdout);

  // Parse args
  int longindex;
  static struct option longopts[] = {
    {"nthreads", required_argument, 0, 't'},
    {"ngpus", required_argument, 0, 'g'},
    {"minbytes", required_argument, 0, 'b'},
    {"maxbytes", required_argument, 0, 'e'},
    {"stepbytes", required_argument, 0, 'i'},
    {"stepfactor", required_argument, 0, 'f'},
    {"iters", required_argument, 0, 'n'},
    {"agg_iters", required_argument, 0, 'm'},
    {"warmup_iters", required_argument, 0, 'w'},
    {"parallel_init", required_argument, 0, 'p'},
    {"check", required_argument, 0, 'c'},
    {"op", required_argument, 0, 'o'},
    {"datatype", required_argument, 0, 'd'},
    {"root", required_argument, 0, 'r'},
    {"blocking", required_argument, 0, 'z'},
    {"help", no_argument, 0, 'h'}
  };

  while(1) {
    int c;
    c = getopt_long(argc, argv, "t:g:b:e:i:f:n:m:w:p:c:o:d:r:z:h", longopts, &longindex);

    if (c == -1)
      break;

    switch(c) {
      case 't':
        nThreads = strtol(optarg, NULL, 0);
        break;
      case 'g':
        nGpus = strtol(optarg, NULL, 0);
        break;
      case 'b':
        minBytes = (size_t)parsesize(optarg);
        break;
      case 'e':
        maxBytes = (size_t)parsesize(optarg);
        break;
      case 'i':
        stepBytes = strtol(optarg, NULL, 0);
        break;
      case 'f':
        stepFactor = strtol(optarg, NULL, 0);
        break;
      case 'n':
        iters = (int)strtol(optarg, NULL, 0);
        break;
      case 'm':
#if NCCL_MAJOR >= 2 && NCCL_MINOR >= 2
        agg_iters = (int)strtol(optarg, NULL, 0);
#else
        printf("Option -m not supported before NCCL 2.2. Ignoring\n");
#endif
        break;
      case 'w':
        warmup_iters = (int)strtol(optarg, NULL, 0);
        break;
      case 'c':
        datacheck = (int)strtol(optarg, NULL, 0);
        break;
      case 'p':
        parallel_init = (int)strtol(optarg, NULL, 0);
        break;
      case 'o':
        ncclop = ncclstringtoop(optarg);
        break;
      case 'd':
        nccltype = ncclstringtotype(optarg);
        break;
      case 'r':
        ncclroot = strtol(optarg, NULL, 0);
        break;
      case 'z':
        blocking_coll = strtol(optarg, NULL, 0);
        break;
      case 'h':
        printUsage(basename(argv[0]));
        return 0;
      default:
        printf("invalid option \n");
        printUsage(basename(argv[0]));
        return 0;
    }
  }

#ifdef MPI_SUPPORT
  MPI_Init(&argc, &argv);
#endif
  return run();
}
// Top-level test driver: discovers local rank (under MPI), allocates device
// buffers and streams, initializes NCCL communicators, launches one worker
// thread per -t, aggregates their bandwidth/error stats, tears everything
// down and exits the process with success/failure.
testResult_t run() {
int nProcs = 1, proc = 0;
int localRank = 0;
char hostname[1024];
getHostName(hostname, 1024);
#ifdef MPI_SUPPORT
// Rank discovery: localRank counts earlier ranks sharing this host's hash,
// so each process picks a disjoint slice of the host's GPUs below.
MPI_Comm_size(MPI_COMM_WORLD, &nProcs);
MPI_Comm_rank(MPI_COMM_WORLD, &proc);
uint64_t hostHashs[nProcs];
hostHashs[proc] = getHostHash(hostname);
MPI_Allgather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, hostHashs, sizeof(uint64_t), MPI_BYTE, MPI_COMM_WORLD);
for (int p=0; p<nProcs; p++) {
if (p == proc) break;
if (hostHashs[p] == hostHashs[proc]) localRank++;
}
#endif
// Only rank 0 prints (PRINT gates on is_main_thread).
is_main_thread = (proc == 0) ? 1 : 0;
PRINT("# nThread %d nGpus %d minBytes %ld maxBytes %ld step: %ld(%s) warmup iters: %d iters: %d validation: %d \n", nThreads, nGpus, minBytes, maxBytes,
        (stepFactor > 1)?stepFactor:stepBytes, (stepFactor > 1)?"factor":"bytes", warmup_iters, iters, datacheck);
if (blocking_coll) PRINT("# Blocking Enabled: wait for completion and barrier after each collective \n");
if (parallel_init) PRINT("# Parallel Init Enabled: threads call into NcclInitRank concurrently \n");
PRINT("#\n");
PRINT("# Using devices\n");
#define MAX_LINE 2048
char line[MAX_LINE];
int len = 0;
for (int i=0; i<nThreads*nGpus; i++) {
int cudaDev = localRank*nThreads*nGpus+i;
int rank = proc*nThreads*nGpus+i;
cudaDeviceProp prop;
CUDACHECK(cudaGetDeviceProperties(&prop, cudaDev));
len += snprintf(line+len, MAX_LINE-len, "# Rank %2d Pid %6d on %10s device %2d [0x%02x] %s\n",
                rank, getpid(), hostname, cudaDev, prop.pciBusID, prop.name);
}
// NOTE(review): this uses '#if MPI_SUPPORT' while every other guard in the
// file uses '#ifdef MPI_SUPPORT'; if MPI_SUPPORT is defined empty (-DMPI_SUPPORT)
// '#if' fails to compile / evaluates differently -- verify the build flags.
#if MPI_SUPPORT
char *lines = (proc == 0) ? (char *)malloc(nProcs*MAX_LINE) : NULL;
// Gather all output in rank order to root (0)
MPI_Gather(line, MAX_LINE, MPI_BYTE, lines, MAX_LINE, MPI_BYTE, 0, MPI_COMM_WORLD);
if (proc == 0) {
for (int p = 0; p < nProcs; p++)
PRINT("%s", lines+MAX_LINE*p);
free(lines);
}
#else
PRINT("%s", line);
#endif
// Rank 0 creates the NCCL unique id; all other ranks receive it via MPI.
ncclUniqueId ncclId;
if (proc == 0) {
NCCLCHECK(ncclGetUniqueId(&ncclId));
}
#ifdef MPI_SUPPORT
MPI_Bcast(&ncclId, sizeof(ncclId), MPI_BYTE, 0, MPI_COMM_WORLD);
#endif
// One stream + send/recv/expected buffer per local GPU.
cudaStream_t streams[nGpus*nThreads];
void* sendbuffs[nGpus*nThreads];
void* recvbuffs[nGpus*nThreads];
void* expected[nGpus*nThreads];
size_t sendBytes, recvBytes;
ncclTestEngine.getBuffSize(&sendBytes, &recvBytes, (size_t)maxBytes, (size_t)nProcs*nGpus*nThreads);
for (int i=0; i<nGpus*nThreads; i++) {
CUDACHECK(cudaSetDevice(localRank*nThreads*nGpus+i));
AllocateBuffs(sendbuffs+i, sendBytes, recvbuffs+i, recvBytes, expected+i, (size_t)maxBytes, nProcs*nThreads*nGpus);
CUDACHECK(cudaStreamCreateWithFlags(streams+i, cudaStreamNonBlocking));
}
//if parallel init is not selected, use main thread to initialize NCCL
ncclComm_t* comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*nThreads*nGpus);
if (!parallel_init) {
if (nProcs == 1) {
int gpuArray[nGpus*nThreads];
for (int i=0; i<nGpus*nThreads; i++) gpuArray[i] = i;
NCCLCHECK(ncclCommInitAll(comms, nGpus*nThreads, gpuArray));
} else {
NCCLCHECK(ncclGroupStart());
for (int i=0; i<nGpus*nThreads; i++) {
CUDACHECK(cudaSetDevice(localRank*nThreads*nGpus+i));
NCCLCHECK(ncclCommInitRank(comms+i, nProcs*nThreads*nGpus, ncclId, proc*nThreads*nGpus+i));
}
NCCLCHECK(ncclGroupEnd());
}
}
// Per-thread result slots; slot 0 accumulates the totals after the join loop.
int errors[nThreads];
double bw[nThreads];
double* delta;
// Pinned + mapped so device kernels can write validation deltas host-visibly.
CUDACHECK(cudaHostAlloc(&delta, sizeof(double)*nThreads, cudaHostAllocPortable | cudaHostAllocMapped));
int bw_count[nThreads];
for (int t=0; t<nThreads; t++) {
bw[t] = 0.0;
errors[t] = bw_count[t] = 0;
}
PRINT("#\n");
print_header();
// Double-buffered (two-slot) spin barriers shared by all worker threads.
int* sync = (int*)calloc(2, sizeof(int));
int* barrier = (int*)calloc(2, sizeof(int));
struct testThread threads[nThreads];
memset(threads, 0, sizeof(struct testThread)*nThreads);
// Launch threads nThreads-1..1; thread 0's work runs on this (main) thread.
for (int t=nThreads-1; t>=0; t--) {
threads[t].args.minbytes=minBytes;
threads[t].args.maxbytes=maxBytes;
threads[t].args.stepbytes=stepBytes;
threads[t].args.stepfactor=stepFactor;
threads[t].args.localRank = localRank;
threads[t].args.nProcs=nProcs;
threads[t].args.proc=proc;
threads[t].args.nThreads=nThreads;
threads[t].args.thread=t;
threads[t].args.nGpus=nGpus;
threads[t].args.sendbuffs = sendbuffs+t*nGpus;
threads[t].args.recvbuffs = recvbuffs+t*nGpus;
threads[t].args.expected = expected+t*nGpus;
threads[t].args.ncclId = ncclId;
threads[t].args.comms=comms+t*nGpus;
threads[t].args.streams=streams+t*nGpus;
threads[t].args.barrier = (volatile int*)barrier;
threads[t].args.barrier_idx = 0;
threads[t].args.sync = (volatile int*)sync;
threads[t].args.sync_idx = 0;
threads[t].args.deltaThreads = delta;
threads[t].args.deltaHost = (delta + t);
threads[t].args.delta = delta;
threads[t].args.errors=errors+t;
threads[t].args.bw=bw+t;
threads[t].args.bw_count=bw_count+t;
threads[t].args.reportErrors = 1;
threads[t].func = parallel_init ? threadInit : threadRunTests;
if (t)
TESTCHECK(threadLaunch(threads+t));
else
TESTCHECK(threads[t].func(&threads[t].args));
}
// Wait for other threads and accumulate stats and errors
for (int t=nThreads-1; t>=0; t--) {
if (t) pthread_join(threads[t].thread, NULL);
TESTCHECK(threads[t].ret);
if (t) {
errors[0] += errors[t];
bw[0] += bw[t];
bw_count[0] += bw_count[t];
}
}
#ifdef MPI_SUPPORT
MPI_Allreduce(MPI_IN_PLACE, &errors[0], 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
#endif
if (!parallel_init) {
for(int i=0; i<nGpus*nThreads; ++i)
NCCLCHECK(ncclCommDestroy(comms[i]));
free(comms);
}
// Free off CUDA allocated memory
for (int i=0; i<nGpus*nThreads; i++) {
CUDACHECK(cudaFree(sendbuffs[i]));
CUDACHECK(cudaFree(recvbuffs[i]));
CUDACHECK(cudaFree(expected[i]));
}
CUDACHECK(cudaFreeHost(delta));
// Optional minimum-bandwidth gate from the environment; -1 disables it.
char* str = getenv("NCCL_TESTS_MIN_BW");
double check_avg_bw = str ? atof(str) : -1;
// NOTE(review): if no measurements were taken bw_count[0] is 0 and this
// divides by zero (NaN on IEEE doubles); confirm bw_count is always > 0.
bw[0] /= bw_count[0];
PRINT("# Out of bounds values : %d %s\n", errors[0], errors[0] ? "FAILED" : "OK");
PRINT("# Avg bus bandwidth    : %g %s\n", bw[0], check_avg_bw == -1 ? "" : (bw[0] < check_avg_bw*(0.9) ? "FAILED" : "OK"));
PRINT("#\n");
#ifdef MPI_SUPPORT
MPI_Finalize();
#endif
// 'cuda-memcheck --leak-check full' requires this
cudaDeviceReset();
// Fail the process if any out-of-bounds value was seen or the bandwidth
// gate (10% tolerance) was missed.
if (errors[0] || bw[0] < check_avg_bw*(0.9))
exit(EXIT_FAILURE);
else
exit(EXIT_SUCCESS);
}
|
07d37817dff41742c2ce2a9d7a7b9960124ea4c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
ROIAlign implementation in CUDA from pytorch framework
(https://github.com/pytorch/vision/tree/master/torchvision/csrc/cuda on Nov 14 2019)
Adapted for additional 3D capability by G. Ramien, DKFZ Heidelberg
*/
#include <ATen/ATen.h>
#include <ATen/TensorUtils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <cstdio>
#include "cuda_helpers.h"
/*-------------- gpu kernels -----------------*/
template <typename T>
__device__ T linear_interpolate(const T xl, const T val_low, const T val_high){
// 1-D lerp: blend val_low (xl = 0) towards val_high (xl = 1).
// Same rounding as the original form: commutativity of IEEE add/mul only.
return val_low + (val_high - val_low) * xl;
}
// Trilinearly interpolate the value of a (height, width, depth) feature map
// (channel/batch already resolved by the caller's pointer offset) at the
// continuous coordinate (y, x, z).  Coordinates below -1 or above the map
// extent yield 0; coordinates in [-1, 0] are clamped to 0 and those at the
// high border are clamped to the last grid point.
template <typename T>
__device__ T trilinear_interpolate(const T* input, const int height, const int width, const int depth,
T y, T x, T z, const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width || z < -1.0 || z > depth) {
// empty
return 0;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
if (z <= 0)
z = 0;
// Low grid corner (truncation is safe: coords are non-negative here).
int y0 = (int)y;
int x0 = (int)x;
int z0 = (int)z;
int y1;
int x1;
int z1;
if (y0 >= height - 1) {
/*if nearest gridpoint to y on the lower end is on border or border-1, set low, high, mid(=actual point) to border-1*/
y1 = y0 = height - 1;
y = (T)y0;
} else {
/* y1 is one pixel from y0, y is the actual point somewhere in between */
y1 = y0 + 1;
}
if (x0 >= width - 1) {
x1 = x0 = width - 1;
x = (T)x0;
} else {
x1 = x0 + 1;
}
if (z0 >= depth - 1) {
z1 = z0 = depth - 1;
z = (T)z0;
} else {
z1 = z0 + 1;
}
// do linear interpolation of x values
// distance of actual point to lower boundary point, already normalized since x_high - x0 = 1
T dis = x - x0;
/* accessing element b,c,y,x,z in 1D-rolled-out array of a tensor with dimensions (B, C, Y, X, Z):
tensor[b,c,y,x,z] = arr[ (((b*C+c)*Y+y)*X + x)*Z + z ] = arr[ alpha + (y*X + x)*Z + z ]
with alpha = batch&channel locator = (b*C+c)*YXZ.
hence, as current input pointer is already offset by alpha: y,x,z is at input[( y*X + x)*Z + z], where
X = width, Z = depth.
*/
T x00 = linear_interpolate(dis, input[(y0*width+ x0)*depth+z0], input[(y0*width+ x1)*depth+z0]);
T x10 = linear_interpolate(dis, input[(y1*width+ x0)*depth+z0], input[(y1*width+ x1)*depth+z0]);
T x01 = linear_interpolate(dis, input[(y0*width+ x0)*depth+z1], input[(y0*width+ x1)*depth+z1]);
T x11 = linear_interpolate(dis, input[(y1*width+ x0)*depth+z1], input[(y1*width+ x1)*depth+z1]);
// linear interpol of y values = bilinear interpol of f(x,y)
dis = y - y0;
T xy0 = linear_interpolate(dis, x00, x10);
T xy1 = linear_interpolate(dis, x01, x11);
// linear interpol of z value = trilinear interpol of f(x,y,z)
dis = z - z0;
T xyz = linear_interpolate(dis, xy0, xy1);
return xyz;
}
// Gradient counterpart of trilinear_interpolate: for point (y, x, z) it
// emits the eight partial derivatives g000..g111 of the interpolated value
// w.r.t. the eight surrounding grid values (digit order matches the z,y,x
// naming used below), plus the clamped corner indices x0/x1, y0/y1, z0/z1.
// Out-of-bounds points produce zero gradients and corner indices of -1,
// which the caller uses to skip the atomicAdd scatter.
template <typename T>
__device__ void trilinear_interpolate_gradient(const int height, const int width, const int depth, T y, T x, T z,
T& g000, T& g001, T& g010, T& g100, T& g011, T& g101, T& g110, T& g111,
int& x0, int& x1, int& y0, int& y1, int& z0, int&z1, const int index /* index for debug only*/)
{
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width || z < -1.0 || z > depth) {
// empty
g000 = g001 = g010 = g100 = g011 = g101 = g110 = g111 = 0.;
x0 = x1 = y0 = y1 = z0 = z1 = -1;
return;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
if (z <= 0)
z = 0;
// Corner clamping mirrors the forward pass exactly (must stay in sync).
y0 = (int)y;
x0 = (int)x;
z0 = (int)z;
if (y0 >= height - 1) {
y1 = y0 = height - 1;
y = (T)y0;
} else {
y1 = y0 + 1;
}
if (x0 >= width - 1) {
x1 = x0 = width - 1;
x = (T)x0;
} else {
x1 = x0 + 1;
}
if (z0 >= depth - 1) {
z1 = z0 = depth - 1;
z = (T)z0;
} else {
z1 = z0 + 1;
}
// forward calculations are added as hints
T dis_x = x - x0;
//T x00 = linear_interpolate(dis, input[(y0*width+ x0)*depth+z0], input[(y0*width+ x1)*depth+z0]); // v000, v100
//T x10 = linear_interpolate(dis, input[(y1*width+ x0)*depth+z0], input[(y1*width+ x1)*depth+z0]); // v010, v110
//T x01 = linear_interpolate(dis, input[(y0*width+ x0)*depth+z1], input[(y0*width+ x1)*depth+z1]); // v001, v101
//T x11 = linear_interpolate(dis, input[(y1*width+ x0)*depth+z1], input[(y1*width+ x1)*depth+z1]); // v011, v111
// linear interpol of y values = bilinear interpol of f(x,y)
T dis_y = y - y0;
//T xy0 = linear_interpolate(dis, x00, x10);
//T xy1 = linear_interpolate(dis, x01, x11);
// linear interpol of z value = trilinear interpol of f(x,y,z)
T dis_z = z - z0;
//T xyz = linear_interpolate(dis, xy0, xy1);
/* need: grad_i := d(xyz)/d(v_i) with v_i = input_value_i for all i = 0,..,7 (eight input values --> eight-entry gradient)
d(lin_interp(dis,x,y))/dx = (-dis +1) and d(lin_interp(dis,x,y))/dy = dis --> derivatives are indep of x,y.
notation: gxyz = gradient for d(trilin_interp)/d(input_value_at_xyz)
below grads were calculated by hand
save time by reusing (1-dis_x) = 1-x+x0 = x1-x =: dis_x1 */
T dis_x1 = (1-dis_x), dis_y1 = (1-dis_y), dis_z1 = (1-dis_z);
g000 = dis_z1 * dis_y1 * dis_x1;
g001 = dis_z * dis_y1 * dis_x1;
g010 = dis_z1 * dis_y * dis_x1;
g100 = dis_z1 * dis_y1 * dis_x;
g011 = dis_z * dis_y * dis_x1;
g101 = dis_z * dis_y1 * dis_x;
g110 = dis_z1 * dis_y * dis_x;
g111 = dis_z * dis_y * dis_x;
return;
}
// 3D ROIAlign forward kernel: one thread per pooled output cell
// (n, c, ph, pw, pd).  Each cell averages roi_bin_grid_h*w*d trilinearly
// interpolated samples taken inside its bin of the ROI.
template <typename T>
__global__ void RoIAlignForward(const int nthreads, const T* input, const T spatial_scale, const int channels,
const int height, const int width, const int depth, const int pooled_height, const int pooled_width,
const int pooled_depth, const int sampling_ratio, const bool aligned, const T* rois, T* output)
{
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw, pd) is an element in the pooled output
int pd = index % pooled_depth;
int pw = (index / pooled_depth) % pooled_width;
int ph = (index / pooled_depth / pooled_width) % pooled_height;
int c = (index / pooled_depth / pooled_width / pooled_height) % channels;
int n = index / pooled_depth / pooled_width / pooled_height / channels;
// rois rows are (batch_idx, x1, y1, x2, y2, z1, z2) --> tensor of shape (n_rois, 7)
const T* offset_rois = rois + n * 7;
int roi_batch_ind = offset_rois[0];
// aligned==False means legacy version, True means pixel shift by -0.5.
T offset = aligned ? (T)0.5 : (T)0.0;
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_rois[1] * spatial_scale - offset;
T roi_start_h = offset_rois[2] * spatial_scale - offset;
T roi_end_w = offset_rois[3] * spatial_scale - offset;
T roi_end_h = offset_rois[4] * spatial_scale - offset;
T roi_start_d = offset_rois[5] * spatial_scale - offset;
T roi_end_d = offset_rois[6] * spatial_scale - offset;
// Force malformed ROIs to be 1x1
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_depth = max(roi_end_d - roi_start_d, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T bin_size_d = static_cast<T>(roi_depth) / static_cast<T>(pooled_depth);
// Advance to the (batch, channel) slab this thread samples from.
const T* offset_input =
input + (roi_batch_ind * channels + c) * height * width * depth;
// We use roi_bin_grid to sample the grid and mimic integral
// roi_bin_grid == nr of sampling points per bin >= 1
int roi_bin_grid_h =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
int roi_bin_grid_d =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_depth / pooled_depth);
// We do average (integral) pooling inside a bin
const T n_voxels = roi_bin_grid_h * roi_bin_grid_w * roi_bin_grid_d; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5, always in the middle of two grid pointsk
for (int ix = 0; ix < roi_bin_grid_w; ix++)
{
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
for (int iz = 0; iz < roi_bin_grid_d; iz++)
{
const T z = roi_start_d + pd * bin_size_d +
static_cast<T>(iz + .5f) * bin_size_d / static_cast<T>(roi_bin_grid_d);
T val = trilinear_interpolate(offset_input, height, width, depth, y, x, z, index);
output_val += val;
} // z iterator and calc+add value
} // x iterator
} // y iterator
output_val /= n_voxels;
output[index] = output_val;
}
}
// 3D ROIAlign backward kernel: one thread per pooled output cell.  For each
// sampling point it computes the eight trilinear partial derivatives and
// scatters grad_output_this_bin / n_voxels into grad_input via atomicAdd
// (multiple ROIs/bins may touch the same input voxel concurrently).
template <typename T>
__global__ void RoIAlignBackward(const int nthreads, const T* grad_output, const T spatial_scale, const int channels,
const int height, const int width, const int depth, const int pooled_height, const int pooled_width,
const int pooled_depth, const int sampling_ratio, const bool aligned, T* grad_input, const T* rois,
const int n_stride, const int c_stride, const int h_stride, const int w_stride, const int d_stride)
{
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw, pd) is an element in the pooled output
int pd = index % pooled_depth;
int pw = (index / pooled_depth) % pooled_width;
int ph = (index / pooled_depth / pooled_width) % pooled_height;
int c = (index / pooled_depth / pooled_width / pooled_height) % channels;
int n = index / pooled_depth / pooled_width / pooled_height / channels;
const T* offset_rois = rois + n * 7;
int roi_batch_ind = offset_rois[0];
// aligned==False means legacy version, True means pixel shift by -0.5.
T offset = aligned ? (T)0.5 : (T)0.0;
// rois rows are (batch_idx, x1, y1, x2, y2, z1, z2) --> tensor of shape (n_rois, 7)
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_rois[1] * spatial_scale - offset;
T roi_start_h = offset_rois[2] * spatial_scale - offset;
T roi_end_w = offset_rois[3] * spatial_scale - offset;
T roi_end_h = offset_rois[4] * spatial_scale - offset;
T roi_start_d = offset_rois[5] * spatial_scale - offset;
T roi_end_d = offset_rois[6] * spatial_scale - offset;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T roi_depth = max(roi_end_d - roi_start_d, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T bin_size_d = static_cast<T>(roi_depth) / static_cast<T>(pooled_depth);
// offset: index b,c,y,x,z of tensor of shape (B,C,Y,X,Z) is
// b*C*Y*X*Z + c * Y*X*Z + y * X*Z + x *Z + z = (b*C+c)Y*X*Z + ...
T* offset_grad_input =
grad_input + ((roi_batch_ind * channels + c) * height * width * depth);
// We need to index the gradient using the tensor strides to access the correct values.
int output_offset = n * n_stride + c * c_stride;
const T* offset_grad_output = grad_output + output_offset;
const T grad_output_this_bin = offset_grad_output[ph * h_stride + pw * w_stride + pd * d_stride];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
int roi_bin_grid_d = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_depth / pooled_depth);
// We do average (integral) pooling inside a bin
const T n_voxels = roi_bin_grid_h * roi_bin_grid_w * roi_bin_grid_d; // e.g. = 6
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++)
{
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
for (int iz = 0; iz < roi_bin_grid_d; iz++)
{
const T z = roi_start_d + pd * bin_size_d +
static_cast<T>(iz + .5f) * bin_size_d / static_cast<T>(roi_bin_grid_d);
T g000, g001, g010, g100, g011, g101, g110, g111; // will hold the current partial derivatives
int x0, x1, y0, y1, z0, z1;
/* notation: gxyz = gradient at xyz, where x,y,z need to lie on feature-map grid (i.e., =x0,x1 etc.) */
trilinear_interpolate_gradient(height, width, depth, y, x, z,
g000, g001, g010, g100, g011, g101, g110, g111,
x0, x1, y0, y1, z0, z1, index);
/* chain rule: derivatives (i.e., the gradient) of trilin_interpolate(v1,v2,v3,v4,...) (div by n_voxels
as we actually need gradient of whole roi_align) are multiplied with gradient so far*/
g000 *= grad_output_this_bin / n_voxels;
g001 *= grad_output_this_bin / n_voxels;
g010 *= grad_output_this_bin / n_voxels;
g100 *= grad_output_this_bin / n_voxels;
g011 *= grad_output_this_bin / n_voxels;
g101 *= grad_output_this_bin / n_voxels;
g110 *= grad_output_this_bin / n_voxels;
g111 *= grad_output_this_bin / n_voxels;
// All corner indices are -1 for out-of-bounds points; skip the scatter.
if (x0 >= 0 && x1 >= 0 && y0 >= 0 && y1 >= 0 && z0 >= 0 && z1 >= 0)
{ // atomicAdd(address, content) reads content under address, adds content to it, while: no other thread
// can interfere with the memory at address during this operation (thread lock, therefore "atomic").
atomicAdd(offset_grad_input + (y0 * width + x0) * depth + z0, static_cast<T>(g000));
atomicAdd(offset_grad_input + (y0 * width + x0) * depth + z1, static_cast<T>(g001));
atomicAdd(offset_grad_input + (y1 * width + x0) * depth + z0, static_cast<T>(g010));
atomicAdd(offset_grad_input + (y0 * width + x1) * depth + z0, static_cast<T>(g100));
atomicAdd(offset_grad_input + (y1 * width + x0) * depth + z1, static_cast<T>(g011));
atomicAdd(offset_grad_input + (y0 * width + x1) * depth + z1, static_cast<T>(g101));
atomicAdd(offset_grad_input + (y1 * width + x1) * depth + z0, static_cast<T>(g110));
atomicAdd(offset_grad_input + (y1 * width + x1) * depth + z1, static_cast<T>(g111));
} // if
} // iz
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
/*----------- wrapper functions ----------------*/
/*
3D ROIAlign forward wrapper.
input: feature-map tensor, shape (batch, n_channels, y, x, z)
rois:  (n_rois, 7) rows of (batch_index, x1, y1, x2, y2, z1, z2)
returns: pooled tensor (n_rois, channels, pooled_height, pooled_width, pooled_depth)
*/
at::Tensor ROIAlign_forward_cuda(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale,
                 const int pooled_height, const int pooled_width, const int pooled_depth,
                 const int sampling_ratio, const bool aligned) {
  AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
  at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
  at::CheckedFrom c = "ROIAlign_forward_cuda";
  at::checkAllSameGPU(c, {input_t, rois_t});
  at::checkAllSameType(c, {input_t, rois_t});
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device());
  auto num_rois = rois.size(0);
  auto channels = input.size(1);
  auto height = input.size(2);
  auto width = input.size(3);
  auto depth = input.size(4);
  at::Tensor output = at::zeros(
      {num_rois, channels, pooled_height, pooled_width, pooled_depth}, input.options());
  auto output_size = num_rois * channels * pooled_height * pooled_width * pooled_depth;
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  // One thread per output element, capped at 4096 blocks of 512 threads.
  dim3 grid(::min(
      at::cuda::ATenCeilDiv(static_cast<int64_t>(output_size), static_cast<int64_t>(512)), static_cast<int64_t>(4096)));
  dim3 block(512);
  if (output.numel() == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return output;
  }
  // Hold the contiguous copies in named locals so the device pointers passed
  // to the (asynchronous) launch are backed by tensors that outlive it.
  auto input_c = input.contiguous();
  auto rois_c = rois.contiguous();
  // Tensor::type() is deprecated; scalar_type() is the supported dispatch key.
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "ROIAlign forward in 3d", [&] {
    hipLaunchKernelGGL(( RoIAlignForward<scalar_t>), dim3(grid), dim3(block), 0, stream,
     output_size,
     input_c.data_ptr<scalar_t>(),
     spatial_scale,
     channels,
     height,
     width,
     depth,
     pooled_height,
     pooled_width,
     pooled_depth,
     sampling_ratio,
     aligned,
     rois_c.data_ptr<scalar_t>(),
     output.data_ptr<scalar_t>());
  });
  AT_CUDA_CHECK(hipGetLastError());
  return output;
}
/*
3D ROIAlign backward wrapper: scatters grad (pooled-output gradient) back
into a zero-initialized (batch_size, channels, height, width, depth) tensor.
grad may be non-contiguous; its strides are forwarded to the kernel.
*/
at::Tensor ROIAlign_backward_cuda(
  const at::Tensor& grad,
  const at::Tensor& rois,
  const float spatial_scale,
  const int pooled_height,
  const int pooled_width,
  const int pooled_depth,
  const int batch_size,
  const int channels,
  const int height,
  const int width,
  const int depth,
  const int sampling_ratio,
  const bool aligned)
{
  AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor");
  AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
  at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
  at::CheckedFrom c = "ROIAlign_backward_cuda";
  at::checkAllSameGPU(c, {grad_t, rois_t});
  at::checkAllSameType(c, {grad_t, rois_t});
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device());
  at::Tensor grad_input =
      at::zeros({batch_size, channels, height, width, depth}, grad.options());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  dim3 grid(::min(
      at::cuda::ATenCeilDiv(
          static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
      static_cast<int64_t>(4096)));
  dim3 block(512);
  // handle possibly empty gradients
  if (grad.numel() == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return grad_input;
  }
  int n_stride = grad.stride(0);
  int c_stride = grad.stride(1);
  int h_stride = grad.stride(2);
  int w_stride = grad.stride(3);
  int d_stride = grad.stride(4);
  // Named local keeps the contiguous rois copy alive across the async launch.
  auto rois_c = rois.contiguous();
  // Tensor::type() is deprecated; scalar_type() is the supported dispatch key.
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "ROIAlign backward 3D", [&] {
    hipLaunchKernelGGL(( RoIAlignBackward<scalar_t>), dim3(grid), dim3(block), 0, stream,
     grad.numel(),
     grad.data_ptr<scalar_t>(),
     spatial_scale,
     channels,
     height,
     width,
     depth,
     pooled_height,
     pooled_width,
     pooled_depth,
     sampling_ratio,
     aligned,
     grad_input.data_ptr<scalar_t>(),
     rois_c.data_ptr<scalar_t>(),
     n_stride,
     c_stride,
     h_stride,
     w_stride,
     d_stride);
  });
  AT_CUDA_CHECK(hipGetLastError());
  return grad_input;
} | 07d37817dff41742c2ce2a9d7a7b9960124ea4c2.cu | /*
ROIAlign implementation in CUDA from pytorch framework
(https://github.com/pytorch/vision/tree/master/torchvision/csrc/cuda on Nov 14 2019)
Adapted for additional 3D capability by G. Ramien, DKFZ Heidelberg
*/
#include <ATen/ATen.h>
#include <ATen/TensorUtils.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <cstdio>
#include "cuda_helpers.h"
/*-------------- gpu kernels -----------------*/
template <typename T>
__device__ T linear_interpolate(const T xl, const T val_low, const T val_high){
// 1-D lerp: blend val_low (xl = 0) towards val_high (xl = 1).
// Same rounding as the original form: commutativity of IEEE add/mul only.
return val_low + (val_high - val_low) * xl;
}
// Trilinearly interpolate the value of a (height, width, depth) feature map
// (channel/batch already resolved by the caller's pointer offset) at the
// continuous coordinate (y, x, z).  Coordinates below -1 or above the map
// extent yield 0; coordinates in [-1, 0] are clamped to 0 and those at the
// high border are clamped to the last grid point.
template <typename T>
__device__ T trilinear_interpolate(const T* input, const int height, const int width, const int depth,
T y, T x, T z, const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width || z < -1.0 || z > depth) {
// empty
return 0;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
if (z <= 0)
z = 0;
// Low grid corner (truncation is safe: coords are non-negative here).
int y0 = (int)y;
int x0 = (int)x;
int z0 = (int)z;
int y1;
int x1;
int z1;
if (y0 >= height - 1) {
/*if nearest gridpoint to y on the lower end is on border or border-1, set low, high, mid(=actual point) to border-1*/
y1 = y0 = height - 1;
y = (T)y0;
} else {
/* y1 is one pixel from y0, y is the actual point somewhere in between */
y1 = y0 + 1;
}
if (x0 >= width - 1) {
x1 = x0 = width - 1;
x = (T)x0;
} else {
x1 = x0 + 1;
}
if (z0 >= depth - 1) {
z1 = z0 = depth - 1;
z = (T)z0;
} else {
z1 = z0 + 1;
}
// do linear interpolation of x values
// distance of actual point to lower boundary point, already normalized since x_high - x0 = 1
T dis = x - x0;
/* accessing element b,c,y,x,z in 1D-rolled-out array of a tensor with dimensions (B, C, Y, X, Z):
tensor[b,c,y,x,z] = arr[ (((b*C+c)*Y+y)*X + x)*Z + z ] = arr[ alpha + (y*X + x)*Z + z ]
with alpha = batch&channel locator = (b*C+c)*YXZ.
hence, as current input pointer is already offset by alpha: y,x,z is at input[( y*X + x)*Z + z], where
X = width, Z = depth.
*/
T x00 = linear_interpolate(dis, input[(y0*width+ x0)*depth+z0], input[(y0*width+ x1)*depth+z0]);
T x10 = linear_interpolate(dis, input[(y1*width+ x0)*depth+z0], input[(y1*width+ x1)*depth+z0]);
T x01 = linear_interpolate(dis, input[(y0*width+ x0)*depth+z1], input[(y0*width+ x1)*depth+z1]);
T x11 = linear_interpolate(dis, input[(y1*width+ x0)*depth+z1], input[(y1*width+ x1)*depth+z1]);
// linear interpol of y values = bilinear interpol of f(x,y)
dis = y - y0;
T xy0 = linear_interpolate(dis, x00, x10);
T xy1 = linear_interpolate(dis, x01, x11);
// linear interpol of z value = trilinear interpol of f(x,y,z)
dis = z - z0;
T xyz = linear_interpolate(dis, xy0, xy1);
return xyz;
}
// Gradient counterpart of trilinear_interpolate: for point (y, x, z) it
// emits the eight partial derivatives g000..g111 of the interpolated value
// w.r.t. the eight surrounding grid values (digit order matches the z,y,x
// naming used below), plus the clamped corner indices x0/x1, y0/y1, z0/z1.
// Out-of-bounds points produce zero gradients and corner indices of -1,
// which the caller uses to skip the atomicAdd scatter.
template <typename T>
__device__ void trilinear_interpolate_gradient(const int height, const int width, const int depth, T y, T x, T z,
T& g000, T& g001, T& g010, T& g100, T& g011, T& g101, T& g110, T& g111,
int& x0, int& x1, int& y0, int& y1, int& z0, int&z1, const int index /* index for debug only*/)
{
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width || z < -1.0 || z > depth) {
// empty
g000 = g001 = g010 = g100 = g011 = g101 = g110 = g111 = 0.;
x0 = x1 = y0 = y1 = z0 = z1 = -1;
return;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
if (z <= 0)
z = 0;
// Corner clamping mirrors the forward pass exactly (must stay in sync).
y0 = (int)y;
x0 = (int)x;
z0 = (int)z;
if (y0 >= height - 1) {
y1 = y0 = height - 1;
y = (T)y0;
} else {
y1 = y0 + 1;
}
if (x0 >= width - 1) {
x1 = x0 = width - 1;
x = (T)x0;
} else {
x1 = x0 + 1;
}
if (z0 >= depth - 1) {
z1 = z0 = depth - 1;
z = (T)z0;
} else {
z1 = z0 + 1;
}
// forward calculations are added as hints
T dis_x = x - x0;
//T x00 = linear_interpolate(dis, input[(y0*width+ x0)*depth+z0], input[(y0*width+ x1)*depth+z0]); // v000, v100
//T x10 = linear_interpolate(dis, input[(y1*width+ x0)*depth+z0], input[(y1*width+ x1)*depth+z0]); // v010, v110
//T x01 = linear_interpolate(dis, input[(y0*width+ x0)*depth+z1], input[(y0*width+ x1)*depth+z1]); // v001, v101
//T x11 = linear_interpolate(dis, input[(y1*width+ x0)*depth+z1], input[(y1*width+ x1)*depth+z1]); // v011, v111
// linear interpol of y values = bilinear interpol of f(x,y)
T dis_y = y - y0;
//T xy0 = linear_interpolate(dis, x00, x10);
//T xy1 = linear_interpolate(dis, x01, x11);
// linear interpol of z value = trilinear interpol of f(x,y,z)
T dis_z = z - z0;
//T xyz = linear_interpolate(dis, xy0, xy1);
/* need: grad_i := d(xyz)/d(v_i) with v_i = input_value_i for all i = 0,..,7 (eight input values --> eight-entry gradient)
d(lin_interp(dis,x,y))/dx = (-dis +1) and d(lin_interp(dis,x,y))/dy = dis --> derivatives are indep of x,y.
notation: gxyz = gradient for d(trilin_interp)/d(input_value_at_xyz)
below grads were calculated by hand
save time by reusing (1-dis_x) = 1-x+x0 = x1-x =: dis_x1 */
T dis_x1 = (1-dis_x), dis_y1 = (1-dis_y), dis_z1 = (1-dis_z);
g000 = dis_z1 * dis_y1 * dis_x1;
g001 = dis_z * dis_y1 * dis_x1;
g010 = dis_z1 * dis_y * dis_x1;
g100 = dis_z1 * dis_y1 * dis_x;
g011 = dis_z * dis_y * dis_x1;
g101 = dis_z * dis_y1 * dis_x;
g110 = dis_z1 * dis_y * dis_x;
g111 = dis_z * dis_y * dis_x;
return;
}
// 3D ROIAlign forward kernel: one thread per pooled output cell
// (n, c, ph, pw, pd).  Each cell averages roi_bin_grid_h*w*d trilinearly
// interpolated samples taken inside its bin of the ROI.
template <typename T>
__global__ void RoIAlignForward(const int nthreads, const T* input, const T spatial_scale, const int channels,
const int height, const int width, const int depth, const int pooled_height, const int pooled_width,
const int pooled_depth, const int sampling_ratio, const bool aligned, const T* rois, T* output)
{
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw, pd) is an element in the pooled output
int pd = index % pooled_depth;
int pw = (index / pooled_depth) % pooled_width;
int ph = (index / pooled_depth / pooled_width) % pooled_height;
int c = (index / pooled_depth / pooled_width / pooled_height) % channels;
int n = index / pooled_depth / pooled_width / pooled_height / channels;
// rois rows are (batch_idx, x1, y1, x2, y2, z1, z2) --> tensor of shape (n_rois, 7)
const T* offset_rois = rois + n * 7;
int roi_batch_ind = offset_rois[0];
// aligned==False means legacy version, True means pixel shift by -0.5.
T offset = aligned ? (T)0.5 : (T)0.0;
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_rois[1] * spatial_scale - offset;
T roi_start_h = offset_rois[2] * spatial_scale - offset;
T roi_end_w = offset_rois[3] * spatial_scale - offset;
T roi_end_h = offset_rois[4] * spatial_scale - offset;
T roi_start_d = offset_rois[5] * spatial_scale - offset;
T roi_end_d = offset_rois[6] * spatial_scale - offset;
// Force malformed ROIs to be 1x1
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_depth = max(roi_end_d - roi_start_d, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T bin_size_d = static_cast<T>(roi_depth) / static_cast<T>(pooled_depth);
// Advance to the (batch, channel) slab this thread samples from.
const T* offset_input =
input + (roi_batch_ind * channels + c) * height * width * depth;
// We use roi_bin_grid to sample the grid and mimic integral
// roi_bin_grid == nr of sampling points per bin >= 1
int roi_bin_grid_h =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
int roi_bin_grid_d =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_depth / pooled_depth);
// We do average (integral) pooling inside a bin
const T n_voxels = roi_bin_grid_h * roi_bin_grid_w * roi_bin_grid_d; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5, always in the middle of two grid pointsk
for (int ix = 0; ix < roi_bin_grid_w; ix++)
{
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
for (int iz = 0; iz < roi_bin_grid_d; iz++)
{
const T z = roi_start_d + pd * bin_size_d +
static_cast<T>(iz + .5f) * bin_size_d / static_cast<T>(roi_bin_grid_d);
T val = trilinear_interpolate(offset_input, height, width, depth, y, x, z, index);
output_val += val;
} // z iterator and calc+add value
} // x iterator
} // y iterator
output_val /= n_voxels;
output[index] = output_val;
}
}
/* 3D RoIAlign backward kernel.
 * Mirrors RoIAlignForward: one pooled output element per `index`. For every
 * sampling point the trilinear-interpolation gradients w.r.t. its 8 corner
 * voxels are computed and scattered into grad_input with atomicAdd (many bins
 * may touch the same voxel).
 * NOTE(review): with T == at::Half, atomicAdd on T* requires hardware/ATen
 * support for half atomics -- confirm the minimum compute capability. */
template <typename T>
__global__ void RoIAlignBackward(const int nthreads, const T* grad_output, const T spatial_scale, const int channels,
const int height, const int width, const int depth, const int pooled_height, const int pooled_width,
const int pooled_depth, const int sampling_ratio, const bool aligned, T* grad_input, const T* rois,
const int n_stride, const int c_stride, const int h_stride, const int w_stride, const int d_stride)
{
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw, pd) is an element in the pooled output
int pd = index % pooled_depth;
int pw = (index / pooled_depth) % pooled_width;
int ph = (index / pooled_depth / pooled_width) % pooled_height;
int c = (index / pooled_depth / pooled_width / pooled_height) % channels;
int n = index / pooled_depth / pooled_width / pooled_height / channels;
// rois rows are (batch_idx, x1, y1, x2, y2, z1, z2) --> tensor of shape (n_rois, 7)
const T* offset_rois = rois + n * 7;
int roi_batch_ind = offset_rois[0];
// aligned==False means legacy version, True means pixel shift by -0.5.
T offset = aligned ? (T)0.5 : (T)0.0;
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_rois[1] * spatial_scale - offset;
T roi_start_h = offset_rois[2] * spatial_scale - offset;
T roi_end_w = offset_rois[3] * spatial_scale - offset;
T roi_end_h = offset_rois[4] * spatial_scale - offset;
T roi_start_d = offset_rois[5] * spatial_scale - offset;
T roi_end_d = offset_rois[6] * spatial_scale - offset;
// Force malformed ROIs to be at least 1x1x1 (must match the forward pass)
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T roi_depth = max(roi_end_d - roi_start_d, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T bin_size_d = static_cast<T>(roi_depth) / static_cast<T>(pooled_depth);
// offset: index b,c,y,x,z of tensor of shape (B,C,Y,X,Z) is
// b*C*Y*X*Z + c * Y*X*Z + y * X*Z + x *Z + z = (b*C+c)Y*X*Z + ...
T* offset_grad_input =
grad_input + ((roi_batch_ind * channels + c) * height * width * depth);
// We need to index the gradient using the tensor strides to access the correct values.
int output_offset = n * n_stride + c * c_stride;
const T* offset_grad_output = grad_output + output_offset;
const T grad_output_this_bin = offset_grad_output[ph * h_stride + pw * w_stride + pd * d_stride];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
int roi_bin_grid_d = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_depth / pooled_depth);
// We do average (integral) pooling inside a bin, so each sample's gradient is scaled by 1/n_voxels
const T n_voxels = roi_bin_grid_h * roi_bin_grid_w * roi_bin_grid_d; // e.g. = 8 for a 2x2x2 grid
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++)
{
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
for (int iz = 0; iz < roi_bin_grid_d; iz++)
{
const T z = roi_start_d + pd * bin_size_d +
static_cast<T>(iz + .5f) * bin_size_d / static_cast<T>(roi_bin_grid_d);
T g000, g001, g010, g100, g011, g101, g110, g111; // will hold the current partial derivatives
int x0, x1, y0, y1, z0, z1;
/* notation: gxyz = gradient at xyz, where x,y,z need to lie on feature-map grid (i.e., =x0,x1 etc.) */
// trilinear_interpolate_gradient is defined earlier in this file (out of view here);
// it also returns the corner coordinates x0..z1 (negative when out of bounds).
trilinear_interpolate_gradient(height, width, depth, y, x, z,
g000, g001, g010, g100, g011, g101, g110, g111,
x0, x1, y0, y1, z0, z1, index);
/* chain rule: derivatives (i.e., the gradient) of trilin_interpolate(v1,v2,v3,v4,...) (div by n_voxels
as we actually need gradient of whole roi_align) are multiplied with gradient so far*/
g000 *= grad_output_this_bin / n_voxels;
g001 *= grad_output_this_bin / n_voxels;
g010 *= grad_output_this_bin / n_voxels;
g100 *= grad_output_this_bin / n_voxels;
g011 *= grad_output_this_bin / n_voxels;
g101 *= grad_output_this_bin / n_voxels;
g110 *= grad_output_this_bin / n_voxels;
g111 *= grad_output_this_bin / n_voxels;
// skip samples whose interpolation cell fell outside the feature map
if (x0 >= 0 && x1 >= 0 && y0 >= 0 && y1 >= 0 && z0 >= 0 && z1 >= 0)
{ // atomicAdd(address, content) reads content under address, adds content to it, while: no other thread
// can interfere with the memory at address during this operation (thread lock, therefore "atomic").
atomicAdd(offset_grad_input + (y0 * width + x0) * depth + z0, static_cast<T>(g000));
atomicAdd(offset_grad_input + (y0 * width + x0) * depth + z1, static_cast<T>(g001));
atomicAdd(offset_grad_input + (y1 * width + x0) * depth + z0, static_cast<T>(g010));
atomicAdd(offset_grad_input + (y0 * width + x1) * depth + z0, static_cast<T>(g100));
atomicAdd(offset_grad_input + (y1 * width + x0) * depth + z1, static_cast<T>(g011));
atomicAdd(offset_grad_input + (y0 * width + x1) * depth + z1, static_cast<T>(g101));
atomicAdd(offset_grad_input + (y1 * width + x1) * depth + z0, static_cast<T>(g110));
atomicAdd(offset_grad_input + (y1 * width + x1) * depth + z1, static_cast<T>(g111));
} // if
} // iz
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
/*----------- wrapper functions ----------------*/
at::Tensor ROIAlign_forward_cuda(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale,
const int pooled_height, const int pooled_width, const int pooled_depth,
const int sampling_ratio, const bool aligned) {
/*
Host wrapper launching the 3D RoIAlign forward kernel.
input: feature-map tensor, shape (batch, n_channels, y, x, z) -- must be 5-D (size(4) is read below)
rois: (n_rois, 7) rows of (batch_idx, x1, y1, x2, y2, z1, z2) -- the kernel reads 7 values per roi
returns: pooled tensor of shape (n_rois, n_channels, pooled_height, pooled_width, pooled_depth)
*/
AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ROIAlign_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
// keep all work on input's device for the scope of this function
at::cuda::CUDAGuard device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto depth = input.size(4);
at::Tensor output = at::zeros(
{num_rois, channels, pooled_height, pooled_width, pooled_depth}, input.options());
auto output_size = num_rois * channels * pooled_height * pooled_width * pooled_depth;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// one thread per output element, 512-thread blocks, grid capped at 4096 blocks
// (the kernel's grid-stride loop covers the remainder)
dim3 grid(std::min(
at::cuda::ATenCeilDiv(static_cast<int64_t>(output_size), static_cast<int64_t>(512)), static_cast<int64_t>(4096)));
dim3 block(512);
// nothing to do for empty output (no rois); still surface any pending CUDA error
if (output.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return output;
}
// NOTE(review): input.type() is the deprecated dispatch argument; newer ATen prefers input.scalar_type()
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ROIAlign forward in 3d", [&] {
RoIAlignForward<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
depth,
pooled_height,
pooled_width,
pooled_depth,
sampling_ratio,
aligned,
rois.contiguous().data_ptr<scalar_t>(),
output.data_ptr<scalar_t>());
});
AT_CUDA_CHECK(cudaGetLastError()); // catch launch-configuration errors
return output;
}
/* Host wrapper launching the 3D RoIAlign backward kernel.
 * grad: upstream gradient of shape (n_rois, channels, pooled_h, pooled_w, pooled_d);
 *       may be non-contiguous -- its strides are forwarded to the kernel.
 * rois: (n_rois, 7) rows of (batch_idx, x1, y1, x2, y2, z1, z2).
 * returns: grad_input of shape (batch_size, channels, height, width, depth). */
at::Tensor ROIAlign_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int pooled_depth,
const int batch_size,
const int channels,
const int height,
const int width,
const int depth,
const int sampling_ratio,
const bool aligned)
{
AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ROIAlign_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::cuda::CUDAGuard device_guard(grad.device());
// zero-initialized: the kernel accumulates into it with atomicAdd
at::Tensor grad_input =
at::zeros({batch_size, channels, height, width, depth}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// one thread per grad element, 512-thread blocks, grid capped at 4096 blocks
dim3 grid(std::min(
at::cuda::ATenCeilDiv(
static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
// strides let the kernel read grad correctly even when it is non-contiguous
int n_stride = grad.stride(0);
int c_stride = grad.stride(1);
int h_stride = grad.stride(2);
int w_stride = grad.stride(3);
int d_stride = grad.stride(4);
// NOTE(review): grad.type() is the deprecated dispatch argument; newer ATen prefers grad.scalar_type()
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "ROIAlign backward 3D", [&] {
RoIAlignBackward<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
depth,
pooled_height,
pooled_width,
pooled_depth,
sampling_ratio,
aligned,
grad_input.data_ptr<scalar_t>(),
rois.contiguous().data_ptr<scalar_t>(),
n_stride,
c_stride,
h_stride,
w_stride,
d_stride);
});
AT_CUDA_CHECK(cudaGetLastError()); // catch launch-configuration errors
return grad_input;
}
c47e01c547f75652e5149b8a15c78a1056bf8803.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include "con.h"
/* Wall-clock timestamp in seconds (microsecond resolution) via gettimeofday. */
double my_timer()
{
    struct timeval tv;
    gettimeofday(&tv, 0);
    return tv.tv_sec + tv.tv_usec / 1000000.0;
}
__global__ void convolutionRowsGPU(float *h_Dst, float *h_Src, float *h_Kernel, int kernelR, int *size, int *thread, int index);
__global__ void convolutionColumnsGPU(float *h_Dst, float *h_Src, float *h_Kernel,int kernelR, int *size, int *thread, int index);
void convolutionRowsCPU(float *h_Dst, float *h_Src, float *h_Kernel, int kernelR, int *size, int *thread, int index);
void convolutionColumnsCPU(float *h_Dst, float *h_Src, float *h_Kernel,int kernelR, int *size, int *thread, int index);
/* Driver for the statically-fused batched 1D convolution benchmark (HIP build).
 * Reads per-task sizes from rand.txt, builds BT_NUM batches of TK_NUM square
 * images, runs row+column convolution on GPU and CPU, and verifies the two
 * results against each other. Returns 0 unconditionally. */
int main(){
float **h_Kernel, **h_Input, **d_Buffer, **h_OutputGPU;
float **d_Output, **d_Kernel, **d_Input;
float **h_OutputCPU, **h_Buffer;
int *num_thread, *num_thread_dev;
int *num_size;
int pos_task[BT_NUM][TK_NUM];
int *pos_task_dev[BT_NUM];
FILE *fp;
int i, j;
double start_timer, end_timer;
hipSetDevice(0);
//printf("Initializing data...\n");
// one pointer per batch for every host/device buffer
h_Kernel = (float **)malloc(BT_NUM * sizeof(float*));
h_Input = (float **)malloc(BT_NUM * sizeof(float*));
d_Buffer = (float **)malloc(BT_NUM * sizeof(float*));
h_OutputGPU = (float **)malloc(BT_NUM * sizeof(float*));
d_Output = (float **)malloc(BT_NUM * sizeof(float*));
d_Kernel = (float **)malloc(BT_NUM * sizeof(float*));
d_Input = (float **)malloc(BT_NUM * sizeof(float*));
h_OutputCPU = (float **)malloc(BT_NUM * sizeof(float*));
h_Buffer = (float **)malloc(BT_NUM * sizeof(float*));
num_thread = (int*)malloc(NUM_TASK * sizeof(int));
num_size = (int*)malloc(BT_NUM * sizeof(int));
// NOTE(review): fopen/fscanf results are unchecked; a missing rand.txt crashes below.
// "%1d" reads one digit (0-9) per task.
fp = fopen("rand.txt", "r");
for(i = 0; i < NUM_TASK; i++)
fscanf(fp, "%1d", &num_thread[i]);
fclose(fp);
// scale digit to a thread count / image edge length (multiple of 32)
for(i = 0; i < NUM_TASK; i++)
num_thread[i] *= 32;
for(i = 0; i < BT_NUM; i++){
num_size[i] = 0;
}
// num_size[i] = total floats of batch i (sum of td*td per task);
// pos_task[i][j] = flat offset of task j inside batch i's buffers
for(i = 0; i < BT_NUM; i++){
for(j = 0; j < TK_NUM; j++){
num_size[i] += (num_thread[i*TK_NUM+j])*
(num_thread[i*TK_NUM+j]);
pos_task[i][j] = 0;
if(j > 0) pos_task[i][j] += pos_task[i][j-1] + (num_thread[i*TK_NUM+j-1])*
(num_thread[i*TK_NUM+j-1]);
}
}
// pinned host buffers for GPU I/O, device buffers, and plain host buffers for the CPU reference
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(hipHostMalloc(&h_Kernel[i], KERNEL_LENGTH*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipHostMalloc(&h_Input[i], num_size[i]*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&d_Buffer[i], num_size[i] * sizeof(float)));
checkCudaErrors(hipHostMalloc(&h_OutputGPU[i], num_size[i]*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&d_Output[i], num_size[i] * sizeof(float)));
checkCudaErrors(hipMalloc(&d_Input[i], num_size[i] * sizeof(float)));
checkCudaErrors(hipMalloc(&d_Kernel[i], KERNEL_LENGTH * sizeof(float)));
h_OutputCPU[i] = (float*)malloc(num_size[i]*sizeof(float));
h_Buffer[i] = (float*)malloc(num_size[i]*sizeof(float));
checkCudaErrors(hipMalloc(&pos_task_dev[i], TK_NUM*sizeof(int)));
}
checkCudaErrors(hipMalloc(&num_thread_dev, NUM_TASK*sizeof(int)));
printf("Inputs are generating\n");
// deterministic synthetic inputs: ramp kernel, alternating 0/1 rows
for(i = 0; i < BT_NUM;i++){
for (j = 0; j < KERNEL_LENGTH; j++){
h_Kernel[i][j] = (float)j/KERNEL_LENGTH;
}
}
for(i = 0; i < BT_NUM;i++){
for (j = 0; j < num_size[i]; j++){
h_Input[i][j] = (float)((j/imageW)%2);
}
}
//mem. copy
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(hipMemcpy(d_Kernel[i], h_Kernel[i], KERNEL_LENGTH*sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_Input[i], h_Input[i], num_size[i]*sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(pos_task_dev[i], pos_task[i], TK_NUM*sizeof(int), hipMemcpyHostToDevice));
}
checkCudaErrors(hipMemcpy(num_thread_dev, num_thread, NUM_TASK*sizeof(int), hipMemcpyHostToDevice));
checkCudaErrors(hipDeviceSynchronize());
printf("Convolution CUDA static fusion program is running\n");
start_timer = my_timer();
// separable convolution: rows pass into d_Buffer, then columns pass into d_Output
for(i = 0; i < BT_NUM; i++){
hipLaunchKernelGGL(( convolutionRowsGPU), dim3(TK_NUM), dim3(TDK_NUM), 0, 0, d_Buffer[i], d_Input[i], d_Kernel[i], KERNEL_RADIUS, pos_task_dev[i], num_thread_dev, i);
}
checkCudaErrors(hipDeviceSynchronize());
for(i = 0; i < BT_NUM; i++){
hipLaunchKernelGGL(( convolutionColumnsGPU), dim3(TK_NUM), dim3(TDK_NUM), 0, 0, d_Output[i], d_Buffer[i], d_Kernel[i],KERNEL_RADIUS, pos_task_dev[i], num_thread_dev, i);
}
checkCudaErrors(hipDeviceSynchronize());
end_timer = my_timer();
printf("Convolution CUDA static fusion elapsed Time: %lf Sec.\n", end_timer - start_timer);
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(hipMemcpy(h_OutputGPU[i], d_Output[i], num_size[i]*sizeof(float), hipMemcpyDeviceToHost));
}
checkCudaErrors(hipDeviceSynchronize());
printf("CPU program running\n");
start_timer = my_timer();
// CPU reference computation, same two passes
for(i = 0; i < BT_NUM; i++){
convolutionRowsCPU(h_Buffer[i], h_Input[i], h_Kernel[i], KERNEL_RADIUS, pos_task[i], num_thread, i);
}
for(i = 0; i < BT_NUM; i++){
convolutionColumnsCPU(h_OutputCPU[i], h_Buffer[i], h_Kernel[i],KERNEL_RADIUS, pos_task[i], num_thread, i);
}
end_timer = my_timer();
//printf("CPU elapsed time:%lf\n", end_timer - start_timer);
/*output result*/
printf("verifying\n");
// NOTE(review): abs() on floats relies on a C++ overload being in scope; fabsf would be unambiguous
int flag = 0;
for(i = 0; i < BT_NUM; i++){
for(j = 0; j < num_size[i]; j++){
if(abs(h_OutputCPU[i][j]- h_OutputGPU[i][j])> 0.1){
printf("Error:%f, %f, %d, %d\n", h_OutputCPU[i][j], h_OutputGPU[i][j], i, j);
flag = 1;
break;
}
}
}
if(!flag) printf("verify successfully\n");
//free mem.
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(hipFree(d_Buffer[i]));
checkCudaErrors(hipHostFree(h_Input[i]));
checkCudaErrors(hipHostFree(h_Kernel[i]));
checkCudaErrors(hipHostFree(h_OutputGPU[i]));
checkCudaErrors(hipFree(d_Kernel[i]));
checkCudaErrors(hipFree(d_Output[i]));
checkCudaErrors(hipFree(d_Input[i]));
free(h_OutputCPU[i]);
free(h_Buffer[i]);
checkCudaErrors(hipFree(pos_task_dev[i]));
}
free(d_Buffer);
free(h_Input);
free(h_Kernel);
free(d_Kernel);
free(d_Output);
free(d_Input);
free(h_OutputGPU);
free(num_thread);
free(num_size);
free(h_OutputCPU);
free(h_Buffer);
checkCudaErrors(hipFree(num_thread_dev));
return 0;
}
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
/* Row convolution on the GPU. Block bk handles task bk of batch `index`: a
 * td x td image stored at flat offset size[bk], td = thread[index*TK_NUM+bk].
 * Thread y convolves element (x, y) along the y direction with the
 * (2*kernelR+1)-tap kernel; threads with y >= td are idle padding. */
__global__ void convolutionRowsGPU(
float *h_Dst,
float *h_Src,
float *h_Kernel,
int kernelR,
int *size,
int *thread,
int index
)
{
    const int lane = threadIdx.x;                   /* y coordinate inside the task image */
    const int task = blockIdx.x;                    /* one fused task per block */
    const int td = thread[index*TK_NUM + task];     /* image edge length of this task */
    if (lane >= td)
        return;                                     /* padding thread: nothing to do */
    const int base = size[task];                    /* flat offset of this task's image */
    for (int col = 0; col < td; col++)              /* (td*td)/td == td in the original */
    {
        float acc = 0;
        for (int k = -kernelR; k <= kernelR; k++)
        {
            /* since 0 <= lane < td: (col*td+lane)%td == lane, /td == col */
            const int d = lane + k;
            if (d >= 0 && d < td)
                acc += h_Src[col * td + d + base] * h_Kernel[kernelR - k];
        }
        h_Dst[col * td + lane + base] = acc;
    }
}
/* CPU reference for the row convolution; mirrors convolutionRowsGPU over all
 * TK_NUM tasks of batch `index`. Task t is a td x td image at offset size[t];
 * element (x, y) is convolved along y with the (2*kernelR+1)-tap kernel. */
void convolutionRowsCPU(
float *h_Dst,
float *h_Src,
float *h_Kernel,
int kernelR,
int *size,
int *thread,
int index
)
{
    for (int t = 0; t < TK_NUM; t++) {
        const int td = thread[index*TK_NUM + t];    /* image edge length of task t */
        const int base = size[t];                   /* flat offset of task t's image */
        for (int y = 0; y < td; y++) {
            for (int x = 0; x < td; x++) {
                float acc = 0;
                for (int k = -kernelR; k <= kernelR; k++) {
                    /* since 0 <= y < td: (x*td+y)%td == y, /td == x */
                    const int d = y + k;
                    if (d >= 0 && d < td)
                        acc += h_Src[x * td + d + base] * h_Kernel[kernelR - k];
                }
                h_Dst[x * td + y + base] = acc;
            }
        }
    }
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
/* Column convolution on the GPU. Block bk handles task bk of batch `index`
 * (a td x td image at flat offset size[bk]); thread y convolves element (x, y)
 * along the x direction. Threads with y >= td are idle padding.
 * (The no-op "#if 1" guard from the original was dropped.) */
__global__ void convolutionColumnsGPU(
float *h_Dst,
float *h_Src,
float *h_Kernel,
int kernelR,
int *size,
int *thread,
int index
)
{
    const int lane = threadIdx.x;
    const int task = blockIdx.x;
    const int td = thread[index*TK_NUM + task];     /* image edge length of this task */
    if (lane >= td)
        return;
    const int base = size[task];                    /* flat offset of this task's image */
    for (int col = 0; col < td; col++)              /* (td*td)/td == td in the original */
    {
        float acc = 0;
        for (int k = -kernelR; k <= kernelR; k++)
        {
            /* since 0 <= lane < td: (col*td+lane)/td == col, %td == lane */
            const int d = col + k;
            if (d >= 0 && d < td)
                acc += h_Src[d * td + lane + base] * h_Kernel[kernelR - k];
        }
        h_Dst[col * td + lane + base] = acc;
    }
}
/* CPU reference for the column convolution; mirrors convolutionColumnsGPU over
 * all TK_NUM tasks of batch `index`. Task t is a td x td image at offset
 * size[t]; element (x, y) is convolved along x with the (2*kernelR+1)-tap
 * kernel. Fix: the original assigned `d` twice in a row with equivalent
 * expressions -- the dead first assignment was removed. */
void convolutionColumnsCPU(
float *h_Dst,
float *h_Src,
float *h_Kernel,
int kernelR,
int *size,
int *thread,
int index
)
{
int x, k, d, y, t;
float sum;
int td;
for(t = 0; t < TK_NUM; t++){
td = thread[index*TK_NUM+t];   /* image edge length of task t */
for(y = 0; y < td; y++){
for(x = 0; x < td; x++)
{
sum = 0;
for (k = -kernelR; k <= kernelR; k++)
{
/* neighbour along the x (slow) axis; (x*td+y)/td == x since y < td */
d = ((x*td+y)/td) + k;
if (d >= 0 && d < td)
sum += h_Src[d * td + (x*td+y)%td + size[t]] * h_Kernel[kernelR - k];
}
h_Dst[(x*td+y)/td * td + (x*td+y)%td + size[t]] = sum;
}
}
}
}
| c47e01c547f75652e5149b8a15c78a1056bf8803.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include "con.h"
/* Wall-clock timestamp in seconds (microsecond resolution) via gettimeofday. */
double my_timer()
{
    struct timeval tv;
    gettimeofday(&tv, 0);
    return tv.tv_sec + tv.tv_usec / 1000000.0;
}
__global__ void convolutionRowsGPU(float *h_Dst, float *h_Src, float *h_Kernel, int kernelR, int *size, int *thread, int index);
__global__ void convolutionColumnsGPU(float *h_Dst, float *h_Src, float *h_Kernel,int kernelR, int *size, int *thread, int index);
void convolutionRowsCPU(float *h_Dst, float *h_Src, float *h_Kernel, int kernelR, int *size, int *thread, int index);
void convolutionColumnsCPU(float *h_Dst, float *h_Src, float *h_Kernel,int kernelR, int *size, int *thread, int index);
/* Driver for the statically-fused batched 1D convolution benchmark (CUDA build).
 * Reads per-task sizes from rand.txt, builds BT_NUM batches of TK_NUM square
 * images, runs row+column convolution on GPU and CPU, and verifies the two
 * results against each other. Returns 0 unconditionally. */
int main(){
float **h_Kernel, **h_Input, **d_Buffer, **h_OutputGPU;
float **d_Output, **d_Kernel, **d_Input;
float **h_OutputCPU, **h_Buffer;
int *num_thread, *num_thread_dev;
int *num_size;
int pos_task[BT_NUM][TK_NUM];
int *pos_task_dev[BT_NUM];
FILE *fp;
int i, j;
double start_timer, end_timer;
cudaSetDevice(0);
//printf("Initializing data...\n");
// one pointer per batch for every host/device buffer
h_Kernel = (float **)malloc(BT_NUM * sizeof(float*));
h_Input = (float **)malloc(BT_NUM * sizeof(float*));
d_Buffer = (float **)malloc(BT_NUM * sizeof(float*));
h_OutputGPU = (float **)malloc(BT_NUM * sizeof(float*));
d_Output = (float **)malloc(BT_NUM * sizeof(float*));
d_Kernel = (float **)malloc(BT_NUM * sizeof(float*));
d_Input = (float **)malloc(BT_NUM * sizeof(float*));
h_OutputCPU = (float **)malloc(BT_NUM * sizeof(float*));
h_Buffer = (float **)malloc(BT_NUM * sizeof(float*));
num_thread = (int*)malloc(NUM_TASK * sizeof(int));
num_size = (int*)malloc(BT_NUM * sizeof(int));
// NOTE(review): fopen/fscanf results are unchecked; a missing rand.txt crashes below.
// "%1d" reads one digit (0-9) per task.
fp = fopen("rand.txt", "r");
for(i = 0; i < NUM_TASK; i++)
fscanf(fp, "%1d", &num_thread[i]);
fclose(fp);
// scale digit to a thread count / image edge length (multiple of 32)
for(i = 0; i < NUM_TASK; i++)
num_thread[i] *= 32;
for(i = 0; i < BT_NUM; i++){
num_size[i] = 0;
}
// num_size[b] = total floats of batch b (sum of td*td per task);
// pos_task[b][t] = flat offset of task t inside batch b's buffers
for(i = 0; i < BT_NUM; i++){
for(j = 0; j < TK_NUM; j++){
num_size[i] += (num_thread[i*TK_NUM+j])*
(num_thread[i*TK_NUM+j]);
pos_task[i][j] = 0;
if(j > 0) pos_task[i][j] += pos_task[i][j-1] + (num_thread[i*TK_NUM+j-1])*
(num_thread[i*TK_NUM+j-1]);
}
}
// pinned host buffers for GPU I/O, device buffers, and plain host buffers for the CPU reference
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(cudaHostAlloc(&h_Kernel[i], KERNEL_LENGTH*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaHostAlloc(&h_Input[i], num_size[i]*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&d_Buffer[i], num_size[i] * sizeof(float)));
checkCudaErrors(cudaHostAlloc(&h_OutputGPU[i], num_size[i]*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&d_Output[i], num_size[i] * sizeof(float)));
checkCudaErrors(cudaMalloc(&d_Input[i], num_size[i] * sizeof(float)));
checkCudaErrors(cudaMalloc(&d_Kernel[i], KERNEL_LENGTH * sizeof(float)));
h_OutputCPU[i] = (float*)malloc(num_size[i]*sizeof(float));
h_Buffer[i] = (float*)malloc(num_size[i]*sizeof(float));
checkCudaErrors(cudaMalloc(&pos_task_dev[i], TK_NUM*sizeof(int)));
}
checkCudaErrors(cudaMalloc(&num_thread_dev, NUM_TASK*sizeof(int)));
printf("Inputs are generating\n");
// deterministic synthetic inputs: ramp kernel, alternating 0/1 rows
for(i = 0; i < BT_NUM;i++){
for (j = 0; j < KERNEL_LENGTH; j++){
h_Kernel[i][j] = (float)j/KERNEL_LENGTH;
}
}
for(i = 0; i < BT_NUM;i++){
for (j = 0; j < num_size[i]; j++){
h_Input[i][j] = (float)((j/imageW)%2);
}
}
//mem. copy
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(cudaMemcpy(d_Kernel[i], h_Kernel[i], KERNEL_LENGTH*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_Input[i], h_Input[i], num_size[i]*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(pos_task_dev[i], pos_task[i], TK_NUM*sizeof(int), cudaMemcpyHostToDevice));
}
checkCudaErrors(cudaMemcpy(num_thread_dev, num_thread, NUM_TASK*sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaDeviceSynchronize());
printf("Convolution CUDA static fusion program is running\n");
start_timer = my_timer();
// separable convolution: rows pass into d_Buffer, then columns pass into d_Output
for(i = 0; i < BT_NUM; i++){
convolutionRowsGPU<<<TK_NUM, TDK_NUM>>>(d_Buffer[i], d_Input[i], d_Kernel[i], KERNEL_RADIUS, pos_task_dev[i], num_thread_dev, i);
}
checkCudaErrors(cudaDeviceSynchronize());
for(i = 0; i < BT_NUM; i++){
convolutionColumnsGPU<<<TK_NUM, TDK_NUM>>>(d_Output[i], d_Buffer[i], d_Kernel[i],KERNEL_RADIUS, pos_task_dev[i], num_thread_dev, i);
}
checkCudaErrors(cudaDeviceSynchronize());
end_timer = my_timer();
printf("Convolution CUDA static fusion elapsed Time: %lf Sec.\n", end_timer - start_timer);
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(cudaMemcpy(h_OutputGPU[i], d_Output[i], num_size[i]*sizeof(float), cudaMemcpyDeviceToHost));
}
checkCudaErrors(cudaDeviceSynchronize());
printf("CPU program running\n");
start_timer = my_timer();
// CPU reference computation, same two passes
for(i = 0; i < BT_NUM; i++){
convolutionRowsCPU(h_Buffer[i], h_Input[i], h_Kernel[i], KERNEL_RADIUS, pos_task[i], num_thread, i);
}
for(i = 0; i < BT_NUM; i++){
convolutionColumnsCPU(h_OutputCPU[i], h_Buffer[i], h_Kernel[i],KERNEL_RADIUS, pos_task[i], num_thread, i);
}
end_timer = my_timer();
//printf("CPU elapsed time:%lf\n", end_timer - start_timer);
/*output result*/
printf("verifying\n");
// NOTE(review): abs() on floats relies on a C++ overload being in scope; fabsf would be unambiguous
int flag = 0;
for(i = 0; i < BT_NUM; i++){
for(j = 0; j < num_size[i]; j++){
if(abs(h_OutputCPU[i][j]- h_OutputGPU[i][j])> 0.1){
printf("Error:%f, %f, %d, %d\n", h_OutputCPU[i][j], h_OutputGPU[i][j], i, j);
flag = 1;
break;
}
}
}
if(!flag) printf("verify successfully\n");
//free mem.
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(cudaFree(d_Buffer[i]));
checkCudaErrors(cudaFreeHost(h_Input[i]));
checkCudaErrors(cudaFreeHost(h_Kernel[i]));
checkCudaErrors(cudaFreeHost(h_OutputGPU[i]));
checkCudaErrors(cudaFree(d_Kernel[i]));
checkCudaErrors(cudaFree(d_Output[i]));
checkCudaErrors(cudaFree(d_Input[i]));
free(h_OutputCPU[i]);
free(h_Buffer[i]);
checkCudaErrors(cudaFree(pos_task_dev[i]));
}
free(d_Buffer);
free(h_Input);
free(h_Kernel);
free(d_Kernel);
free(d_Output);
free(d_Input);
free(h_OutputGPU);
free(num_thread);
free(num_size);
free(h_OutputCPU);
free(h_Buffer);
checkCudaErrors(cudaFree(num_thread_dev));
return 0;
}
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
/* Row convolution on the GPU. Block bk handles task bk of batch `index`: a
 * td x td image stored at flat offset size[bk], td = thread[index*TK_NUM+bk].
 * Thread y convolves element (x, y) along the y direction with the
 * (2*kernelR+1)-tap kernel; threads with y >= td are idle padding. */
__global__ void convolutionRowsGPU(
float *h_Dst,
float *h_Src,
float *h_Kernel,
int kernelR,
int *size,
int *thread,
int index
)
{
    const int lane = threadIdx.x;                   /* y coordinate inside the task image */
    const int task = blockIdx.x;                    /* one fused task per block */
    const int td = thread[index*TK_NUM + task];     /* image edge length of this task */
    if (lane >= td)
        return;                                     /* padding thread: nothing to do */
    const int base = size[task];                    /* flat offset of this task's image */
    for (int col = 0; col < td; col++)              /* (td*td)/td == td in the original */
    {
        float acc = 0;
        for (int k = -kernelR; k <= kernelR; k++)
        {
            /* since 0 <= lane < td: (col*td+lane)%td == lane, /td == col */
            const int d = lane + k;
            if (d >= 0 && d < td)
                acc += h_Src[col * td + d + base] * h_Kernel[kernelR - k];
        }
        h_Dst[col * td + lane + base] = acc;
    }
}
/* CPU reference for the row convolution; mirrors convolutionRowsGPU over all
 * TK_NUM tasks of batch `index`. Task t is a td x td image at offset size[t];
 * element (x, y) is convolved along y with the (2*kernelR+1)-tap kernel. */
void convolutionRowsCPU(
float *h_Dst,
float *h_Src,
float *h_Kernel,
int kernelR,
int *size,
int *thread,
int index
)
{
    for (int t = 0; t < TK_NUM; t++) {
        const int td = thread[index*TK_NUM + t];    /* image edge length of task t */
        const int base = size[t];                   /* flat offset of task t's image */
        for (int y = 0; y < td; y++) {
            for (int x = 0; x < td; x++) {
                float acc = 0;
                for (int k = -kernelR; k <= kernelR; k++) {
                    /* since 0 <= y < td: (x*td+y)%td == y, /td == x */
                    const int d = y + k;
                    if (d >= 0 && d < td)
                        acc += h_Src[x * td + d + base] * h_Kernel[kernelR - k];
                }
                h_Dst[x * td + y + base] = acc;
            }
        }
    }
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
/* Column convolution on the GPU. Block bk handles task bk of batch `index`
 * (a td x td image at flat offset size[bk]); thread y convolves element (x, y)
 * along the x direction. Threads with y >= td are idle padding.
 * (The no-op "#if 1" guard from the original was dropped.) */
__global__ void convolutionColumnsGPU(
float *h_Dst,
float *h_Src,
float *h_Kernel,
int kernelR,
int *size,
int *thread,
int index
)
{
    const int lane = threadIdx.x;
    const int task = blockIdx.x;
    const int td = thread[index*TK_NUM + task];     /* image edge length of this task */
    if (lane >= td)
        return;
    const int base = size[task];                    /* flat offset of this task's image */
    for (int col = 0; col < td; col++)              /* (td*td)/td == td in the original */
    {
        float acc = 0;
        for (int k = -kernelR; k <= kernelR; k++)
        {
            /* since 0 <= lane < td: (col*td+lane)/td == col, %td == lane */
            const int d = col + k;
            if (d >= 0 && d < td)
                acc += h_Src[d * td + lane + base] * h_Kernel[kernelR - k];
        }
        h_Dst[col * td + lane + base] = acc;
    }
}
/* CPU reference for the column convolution; mirrors convolutionColumnsGPU over
 * all TK_NUM tasks of batch `index`. Task t is a td x td image at offset
 * size[t]; element (x, y) is convolved along x with the (2*kernelR+1)-tap
 * kernel. Fix: the original assigned `d` twice in a row with equivalent
 * expressions -- the dead first assignment was removed. */
void convolutionColumnsCPU(
float *h_Dst,
float *h_Src,
float *h_Kernel,
int kernelR,
int *size,
int *thread,
int index
)
{
int x, k, d, y, t;
float sum;
int td;
for(t = 0; t < TK_NUM; t++){
td = thread[index*TK_NUM+t];   /* image edge length of task t */
for(y = 0; y < td; y++){
for(x = 0; x < td; x++)
{
sum = 0;
for (k = -kernelR; k <= kernelR; k++)
{
/* neighbour along the x (slow) axis; (x*td+y)/td == x since y < td */
d = ((x*td+y)/td) + k;
if (d >= 0 && d < td)
sum += h_Src[d * td + (x*td+y)%td + size[t]] * h_Kernel[kernelR - k];
}
h_Dst[(x*td+y)/td * td + (x*td+y)%td + size[t]] = sum;
}
}
}
}
|
1d21d17d7749d5c6ec4221b88bded9b4b872aa6b.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2012-2013 The Ohio State University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "../include/common.h"
#include "../include/inviJoin.h"
#include "../include/gpuCudaLib.h"
#include "scanImpl.cu"
#ifdef HAS_GMM
#include "gmm.h"
#endif
/* Histogram pass of the hash build: for each of the dNum dimension-table join
 * keys (stored as ints in `dim`), atomically bump the bucket counter
 * num[key % HSIZE]. Grid-stride loop over the keys. */
__global__ static void count_hash_num(char *dim, long dNum,int *num){
    const int step = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < dNum; i += step) {
        const int key = ((int *)dim)[i];
        atomicAdd(num + key % HSIZE, 1);
    }
}
/* Scatter pass of the hash build: append (joinKey, dimId) int pairs into the
 * per-bucket regions of `bucket`. psum holds the running insert cursor per
 * bucket (bucket start offsets on entry) and is advanced atomically, so it is
 * consumed by this kernel. Dim row ids are stored 1-based so that 0 can mean
 * "no match" in the probe phase. */
__global__ static void build_hash_table(char *dim, long dNum, int *psum, char * bucket){
    const int step = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < dNum; i += step) {
        const int key = ((int *) dim)[i];
        const int slot = atomicAdd(&psum[key % HSIZE], 1) * 2; /* 2 ints per entry */
        ((int*)bucket)[slot] = key;
        ((int*)bucket)[slot + 1] = i + 1; /* 1-based dim row id */
    }
}
/* Probe phase of the invisible join: for each fact-table key, scan its hash
 * bucket and record the 1-based dim row id of the first matching dim key in
 * factFilter[i] (0 = no match; factFilter is expected to be pre-zeroed).
 * Fixes: the bucket base psum[hKey] is loop-invariant and is now read once
 * per fact row instead of once per bucket entry, and the row index is `long`
 * so fNum may exceed INT_MAX.
 * NOTE(review): assumes psum holds the pristine bucket start offsets, not the
 * copy advanced by build_hash_table -- confirm with the caller. Negative keys
 * would make hKey negative and index out of bounds; keys are presumed >= 0. */
__global__ static void count_join_result(int* hashNum, int* psum, char* bucket, char* fact, long fNum, int * factFilter){
    int stride = blockDim.x * gridDim.x;
    long offset = blockIdx.x*blockDim.x + threadIdx.x;
    for(long i=offset;i<fNum;i+=stride){
        int fKey = ((int *)fact)[i];
        int hKey = fKey % HSIZE;
        int keyNum = hashNum[hKey];   /* entries in this bucket */
        int pSum = psum[hKey];        /* bucket base: loop-invariant, hoisted */
        for(int j=0;j<keyNum;j++){
            int dimKey = ((int *)bucket)[2*pSum + 2*j];
            if(dimKey == fKey){
                factFilter[i] = ((int *)bucket)[2*pSum + 2*j + 1]; /* 1-based dim row id */
                break;
            }
        }
    }
}
/* Materialize a variable-width fact column of the join result. Thread tid
 * copies its strided slice of matching fact rows (filter[i] != 0);
 * resPsum[tid] is the precomputed count of matches written by earlier
 * threads, so output regions are disjoint. */
__global__ void static joinFact_other(int *resPsum, char * fact, int attrSize, long num, int * filter, char * result){
    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    long outPos = resPsum[tid] * attrSize; /* byte cursor into result */
    for (long row = tid; row < num; row += step) {
        if (filter[row] == 0)
            continue;
        memcpy(result + outPos, fact + row*attrSize, attrSize);
        outPos += attrSize;
    }
}
/* Materialize a 4-byte fact column of the join result. Thread tid copies its
 * strided slice of matching fact rows into result, starting at its disjoint
 * slot resPsum[tid]. attrSize is unused (fixed int width), kept for a uniform
 * signature with joinFact_other. */
__global__ void static joinFact_int(int *resPsum, char * fact, int attrSize, long num, int * filter, char * result){
    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    long outSlot = resPsum[tid]; /* element cursor into result */
    for (long row = tid; row < num; row += step) {
        if (filter[row] == 0)
            continue;
        ((int*)result)[outSlot] = ((int *)fact)[row];
        outSlot++;
    }
}
/* Materialize a 4-byte dimension column of the join result: for each matching
 * fact row, factF holds the 1-based dim row id recorded by the probe phase;
 * copy that dim value into the thread's disjoint output slice starting at
 * resPsum[tid]. attrSize is unused (fixed int width). */
__global__ void static joinDim_int(int *resPsum, char * dim, int attrSize, long num, int *factF,int * filter, char * result){
    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    long outSlot = resPsum[tid]; /* element cursor into result */
    for (long row = tid; row < num; row += step) {
        if (filter[row] == 0)
            continue;
        const int dimId = factF[row];                 /* 1-based row id from the probe */
        ((int*)result)[outSlot] = ((int*)dim)[dimId-1];
        outSlot++;
    }
}
/* Materialize a variable-width dimension column of the join result: for each
 * matching fact row, copy the attrSize-byte dim value addressed by the
 * 1-based row id in factF into the thread's disjoint output slice starting at
 * byte offset resPsum[tid] * attrSize. */
__global__ void static joinDim_other(int *resPsum, char * dim, int attrSize, long num, int* factF,int * filter, char * result){
    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    long outPos = resPsum[tid] * attrSize; /* byte cursor into result */
    for (long row = tid; row < num; row += step) {
        if (filter[row] == 0)
            continue;
        const int dimId = factF[row];                 /* 1-based row id from the probe */
        memcpy(result + outPos, dim + (dimId-1)* attrSize, attrSize);
        outPos += attrSize;
    }
}
/* AND together the per-dimension match filters: result[i] = 1 iff fact row i
 * matched every one of the dNum dimension tables. Each thread also tallies
 * the matches of its strided slice into count[tid] (input for a later prefix
 * sum) and accumulates the grand total into *totalCount atomically. */
__global__ void static merge(int ** filter, long fNum, int dNum,int * result, int *count, int * totalCount){
    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    int matched = 0;
    for (long i = tid; i < fNum; i += step) {
        int hit = 1;
        for (int j = 0; j < dNum && hit; j++)  /* stop at the first non-match */
            hit = (filter[j][i] != 0);
        result[i] = hit;
        matched += hit;
    }
    count[tid] = matched;
    atomicAdd(totalCount, matched);
}
/*
 * Decide how many passes are needed to build a dimension hash table whose
 * join-key column occupies `size` bytes on device 0.
 *
 * The factor of 3 covers the key column itself plus the 2x-sized
 * (key, rowId) bucket array allocated in inviJoin.
 *
 * NOTE(review): gpuMem is an int; if getGpuGlobalMem() reports total device
 * memory in bytes this truncates on GPUs with >= 2 GB -- confirm the unit
 * and width of getGpuGlobalMem's return value.
 *
 * @param size  bytes of join-key data (tupleNum * sizeof(int))
 * @param pass  out: number of build passes (1 == fits in a single pass)
 */
static void buildHashPlan(long size, int * pass){
    int gpuMem = getGpuGlobalMem(0);
    *pass = 3*size / gpuMem + 1;
}
/*
 * Placeholder join planner: multi-pass joins are not implemented, so the
 * plan is always a single pass regardless of table sizes or GPU memory.
 *
 * @param jNode  join description (not yet consulted by the planner)
 * @param pass   out: number of passes to execute the join in (always 1)
 */
static void joinPlan(struct joinNode *jNode, int * pass){
    (void)jNode;                /* silence unused-parameter warning */
    (void)getGpuGlobalMem(0);   /* query kept for parity with buildHashPlan;
                                   result unused until multi-pass is added */
    *pass = 1;
}
/*
 * inviJoin: star-join one fact table against jNode->dimNum dimension tables
 * on the GPU ("invisible join").
 *
 * Phases:
 *   1. per dimension: build a (key, 1-based rowId) hash table from its join
 *      column (count_hash_num -> scanImpl -> build_hash_table);
 *   2. per dimension: probe with the fact join column (count_join_result),
 *      yielding a filter that maps each fact row to its matching dimension
 *      row id (0 = no match);
 *   3. AND-merge the per-dimension filters (merge) and prefix-sum the
 *      per-thread match counts to obtain each thread's output offset;
 *   4. materialize the requested fact and dimension output columns
 *      (joinFact_* / joinDim_* kernels).
 *
 * Result columns stay on the GPU or are copied back to host memory
 * according to jNode->keepInGpu[].
 *
 * Fixes vs. previous version:
 *   - joinDim_* launches now pass gpuFilter[i] (the per-dimension filter);
 *     the old code indexed gpuFilter[k] with the output-attribute counter
 *     k >= factOutputNum, reading past the dimNum-entry host array.
 *   - filterSize/size are long to avoid truncating size_t products.
 *   - gpuCount, gpuFilterAddr, the host bookkeeping arrays and the timing
 *     events are now released.
 *
 * @param jNode  join description (tables, key columns, output columns)
 * @param pp     statistics accumulator (kernel/total time, ms)
 * @return       newly allocated result table
 */
struct tableNode * inviJoin(struct joinNode *jNode, struct statistic *pp){
    struct tableNode * res = NULL;
    char ** gpu_fact;
    char ** gpu_hash;
    int ** gpuHashPsum;
    int ** gpuHashNum;
    int ** gpuFilter;
    struct timespec start,end;
    float gpuTime;
    hipEvent_t startGPU, stopGPU;
    hipEventCreate(&startGPU);
    hipEventCreate(&stopGPU);
    clock_gettime(CLOCK_REALTIME,&start);

    /* Launch configuration shared by every kernel below; threadNum is the
     * number of logical threads and sizes the per-thread count/psum arrays. */
    dim3 grid(1024);
    dim3 block(256);
    int blockNum = jNode->factTable->tupleNum / block.x + 1;
    if(blockNum < 1024)
        grid = blockNum;
    int threadNum = grid.x * block.x;

    /* Result-table skeleton; per-column placement follows keepInGpu[]. */
    res = (struct tableNode *) malloc(sizeof(struct tableNode));
    res->tupleSize = jNode->tupleSize;
    res->totalAttr = jNode->totalAttr;
    res->attrType = (int *) malloc(sizeof(int) * res->totalAttr);
    res->attrSize = (int *) malloc(sizeof(int) * res->totalAttr);
    res->dataPos = (int *) malloc(sizeof(int) * res->totalAttr);
    res->attrTotalSize = (int *) malloc(sizeof(int) * res->totalAttr);
    res->dataFormat = (int *) malloc(res->totalAttr * sizeof(int));
    res->content = (char **) malloc(sizeof(char *) * jNode->totalAttr );
    for(int i=0;i<res->totalAttr;i++){
        res->attrType[i] = jNode->attrType[i];
        res->attrSize[i] = jNode->attrSize[i];
        if(jNode->keepInGpu[i] == 1){
            res->dataPos[i] = GPU;
        }else{
            res->dataPos[i] = MEM;
        }
        res->dataFormat[i] = UNCOMPRESSED;
    }

    /* ---- Phase 1: build one hash table per dimension table. ---- */
    gpuHashPsum = (int **) malloc(sizeof(int *) * jNode->dimNum);
    gpuHashNum = (int **) malloc(sizeof(int *) * jNode->dimNum);
    gpu_hash = (char **)malloc(sizeof(char *) * jNode->dimNum);
    for(int k=0;k<jNode->dimNum;k++){
        char *gpu_dim;
        long primaryKeySize = sizeof(int) * jNode->dimTable[k]->tupleNum;
        int dimIndex = jNode->dimIndex[k];
        CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&(gpuHashNum[k]),HSIZE * sizeof(int)));
        CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&(gpuHashPsum[k]), sizeof(int) * HSIZE));
        CUDA_SAFE_CALL_NO_SYNC(hipMemset(gpuHashNum[k],0,sizeof(int) * HSIZE));
        int pass = 0;
        int dimInGpu = 0;
        buildHashPlan(primaryKeySize,&pass);
        if (pass != 1){
            printf("Hash Table too large! not supported yet!");
            exit(-1);
        }
        /* Bucket array holds (key, rowId) pairs -> twice the key column. */
        CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&(gpu_hash[k]), 2 * primaryKeySize));
        if(jNode->dimTable[k]->dataPos[dimIndex] == MEM){
            CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&gpu_dim,primaryKeySize));
            CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpu_dim,jNode->dimTable[k]->content[dimIndex], primaryKeySize,hipMemcpyHostToDevice));
        }else if(jNode->dimTable[k]->dataPos[dimIndex] == GPU){
            dimInGpu = 1 ;
            gpu_dim = jNode->dimTable[k]->content[dimIndex];
        }
        hipEventRecord(startGPU,0);
        GMM_CALL(cudaReference(0, HINT_READ));
        GMM_CALL(cudaReference(2, HINT_DEFAULT));
        hipLaunchKernelGGL(( count_hash_num), dim3(grid),dim3(block), 0, 0, gpu_dim,jNode->dimTable[k]->tupleNum,gpuHashNum[k]);
        CUDA_SAFE_CALL_NO_SYNC(hipDeviceSynchronize());
        hipEventRecord(stopGPU,0);
        hipEventSynchronize(stopGPU);
        hipEventElapsedTime(&gpuTime,startGPU,stopGPU);
        pp->kernel += gpuTime;
        /* Exclusive prefix sum of bucket counts -> bucket start offsets. */
        scanImpl(gpuHashNum[k],HSIZE,gpuHashPsum[k], pp);
        /* build_hash_table advances its psum copy; keep the original intact
         * for the probe phase. */
        int * gpu_psum ;
        CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpu_psum, sizeof(int) * HSIZE));
        CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpu_psum, gpuHashPsum[k], sizeof(int) * HSIZE, hipMemcpyDeviceToDevice));
        hipEventRecord(startGPU,0);
        GMM_CALL(cudaReference(0, HINT_READ));
        GMM_CALL(cudaReference(2, HINT_DEFAULT));
        GMM_CALL(cudaReference(3, HINT_WRITE));
        hipLaunchKernelGGL(( build_hash_table), dim3(grid),dim3(block), 0, 0, gpu_dim,jNode->dimTable[k]->tupleNum,gpu_psum,gpu_hash[k]);
        CUDA_SAFE_CALL_NO_SYNC(hipDeviceSynchronize());
        CUDA_SAFE_CALL_NO_SYNC(hipEventRecord(stopGPU,0));
        hipEventSynchronize(stopGPU);
        hipEventElapsedTime(&gpuTime,startGPU,stopGPU);
        pp->kernel += gpuTime;
        if(dimInGpu == 0)
            CUDA_SAFE_CALL_NO_SYNC(hipFree(gpu_dim));
        CUDA_SAFE_CALL_NO_SYNC(hipFree(gpu_psum));
    }

    /* ---- Phase 2: probe the fact table once per dimension. ---- */
    /* long: tupleNum * sizeof(int) is a size_t product; int truncated it. */
    long filterSize = jNode->factTable->tupleNum * sizeof(int);
    int *factInGpu = (int *) malloc(sizeof(int) * jNode->dimNum);
    gpuFilter = (int **) malloc(sizeof(int *) * jNode->dimNum);
    gpu_fact = (char **) malloc(sizeof(char *) * jNode->dimNum);
    for(int k=0;k<jNode->dimNum;k++){
        int index = jNode->factIndex[k];
        if(jNode->factTable->dataPos[index] == MEM){
            factInGpu[k] = 0;
            CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&(gpu_fact[k]), filterSize));
            CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpu_fact[k],jNode->factTable->content[index], filterSize, hipMemcpyHostToDevice));
        }else if(jNode->factTable->dataPos[index] == GPU){
            factInGpu[k] = 1;
            gpu_fact[k] = jNode->factTable->content[index];
        }
        CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&(gpuFilter[k]), filterSize));
        CUDA_SAFE_CALL_NO_SYNC(hipMemset(gpuFilter[k],0,filterSize));
    }
    hipEventRecord(startGPU,0);
    for(int k=0;k<jNode->dimNum;k++) {
        GMM_CALL(cudaReference(0, HINT_READ));
        GMM_CALL(cudaReference(1, HINT_READ));
        GMM_CALL(cudaReference(2, HINT_READ));
        GMM_CALL(cudaReference(3, HINT_READ));
        GMM_CALL(cudaReference(5, HINT_WRITE));
        hipLaunchKernelGGL(( count_join_result), dim3(grid),dim3(block), 0, 0, gpuHashNum[k], gpuHashPsum[k], gpu_hash[k], gpu_fact[k], jNode->factTable->tupleNum, gpuFilter[k]);
    }
    CUDA_SAFE_CALL_NO_SYNC(hipDeviceSynchronize());
    hipEventRecord(stopGPU,0);
    hipEventSynchronize(stopGPU);
    hipEventElapsedTime(&gpuTime,startGPU,stopGPU);
    pp->kernel += gpuTime;
    /* Hash tables and fact key columns are no longer needed. */
    for(int k=0;k<jNode->dimNum;k++){
        CUDA_SAFE_CALL_NO_SYNC(hipFree(gpu_hash[k]));
        CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuHashPsum[k]));
        CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuHashNum[k]));
        if(factInGpu[k] == 0)
            CUDA_SAFE_CALL_NO_SYNC(hipFree(gpu_fact[k]));
    }
    free(gpu_hash);
    free(gpuHashPsum);
    free(gpuHashNum);
    free(gpu_fact);
    free(factInGpu);

    /* ---- Phase 3: AND-merge the filters and prefix-sum the counts. ---- */
    int * gpuFinalFilter;
    int * gpuCount, *gpuTotalCount;
    int * gpuResPsum;
    int ** gpuFilterAddr;
    CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **) &gpuFilterAddr, sizeof(int *) * jNode->dimNum));
    for(int k=0;k<jNode->dimNum;k++){
        CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(&(gpuFilterAddr[k]), &(gpuFilter[k]), sizeof(int *), hipMemcpyHostToDevice));
    }
    CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuFinalFilter,sizeof(int) * jNode->factTable->tupleNum));
    CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuCount,sizeof(int) * threadNum));
    CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuTotalCount,sizeof(int)));
    CUDA_SAFE_CALL_NO_SYNC(hipMemset(gpuTotalCount,0,sizeof(int)));
    CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuResPsum,sizeof(int) * threadNum));
    hipEventRecord(startGPU,0);
    GMM_CALL(cudaReference(0, HINT_READ|HINT_PTARRAY|HINT_PTAREAD));
    GMM_CALL(cudaReference(3, HINT_WRITE));
    GMM_CALL(cudaReference(4, HINT_WRITE));
    GMM_CALL(cudaReference(5, HINT_DEFAULT));
    hipLaunchKernelGGL(( merge), dim3(grid),dim3(block), 0, 0, gpuFilterAddr,jNode->factTable->tupleNum,jNode->dimNum,gpuFinalFilter, gpuCount,gpuTotalCount);
    CUDA_SAFE_CALL_NO_SYNC(hipDeviceSynchronize());
    hipEventRecord(stopGPU,0);
    hipEventSynchronize(stopGPU);
    hipEventElapsedTime(&gpuTime,startGPU,stopGPU);
    pp->kernel += gpuTime;
    int totalCount = 0;
    CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(&totalCount,gpuTotalCount,sizeof(int),hipMemcpyDeviceToHost));
    CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuTotalCount));
    res->tupleNum = totalCount;
    for(int i=0;i<res->totalAttr;i++){
        res->attrTotalSize[i] = totalCount * res->attrSize[i];
    }
    /* Per-thread output offsets for the materialization kernels. */
    scanImpl(gpuCount,threadNum,gpuResPsum, pp);

    /* ---- Phase 4: materialize output columns. ----
     * gpu_fact/factInGpu are re-purposed, now indexed by output attribute
     * (fact outputs first, then each dimension's outputs). */
    gpu_fact = (char **) malloc(sizeof(char *) * jNode->totalAttr);
    factInGpu = (int *) malloc(sizeof(int) * jNode->totalAttr);
    char **gpuResult = (char **) malloc(sizeof(char *) * jNode->totalAttr);
    int *attrSize = (int *) malloc(sizeof(int) * jNode->totalAttr);
    int *attrType = (int *) malloc(sizeof(int) * jNode->totalAttr);
    for(int i=0; i< jNode->factOutputNum;i++){
        int index = jNode->factOutputIndex[i];
        int aSize = jNode->factTable->attrSize[index];
        long size = aSize * jNode->factTable->tupleNum;  /* long: avoid overflow */
        attrSize[i] = aSize;
        attrType[i] = jNode->factTable->attrType[index];
        CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&(gpuResult[i]),aSize * totalCount));
        if(jNode->factTable->dataPos[index] == MEM ){
            factInGpu[i] = 0;
            CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&(gpu_fact[i]),size));
            CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpu_fact[i], jNode->factTable->content[index],aSize * jNode->factTable->tupleNum, hipMemcpyHostToDevice));
        }else if(jNode->factTable->dataPos[index] == GPU){
            factInGpu[i] = 1;
            gpu_fact[i] = jNode->factTable->content[index];
        }
    }
    int k = jNode->factOutputNum;
    for(int i=0;i<jNode->dimNum;i++){
        for(int j=0;j<jNode->dimOutputNum[i]; j++){
            int index = jNode->dimOutputIndex[i][j];
            int aSize = jNode->dimTable[i]->attrSize[index];
            attrSize[k] = aSize;
            attrType[k] = jNode->dimTable[i]->attrType[index];
            CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&(gpuResult[k]),aSize * totalCount));
            if(jNode->dimTable[i]->dataPos[index] == MEM){
                factInGpu[k] = 0;
                CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&(gpu_fact[k]),aSize*jNode->dimTable[i]->tupleNum));
                CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpu_fact[k],jNode->dimTable[i]->content[index], aSize*jNode->dimTable[i]->tupleNum,hipMemcpyHostToDevice));
            }else if (jNode->dimTable[i]->dataPos[index] ==GPU){
                factInGpu[k] = 1;
                gpu_fact[k] = jNode->dimTable[i]->content[index];
            }
            k++;
        }
    }
    hipEventRecord(startGPU,0);
    for(int i=0;i<jNode->factOutputNum;i++){
        if(attrType[i] != STRING) {
            GMM_CALL(cudaReference(0, HINT_READ));
            GMM_CALL(cudaReference(1, HINT_READ));
            GMM_CALL(cudaReference(4, HINT_READ));
            GMM_CALL(cudaReference(5, HINT_WRITE));
            hipLaunchKernelGGL(( joinFact_int), dim3(grid),dim3(block), 0, 0, gpuResPsum,gpu_fact[i],attrSize[i],jNode->factTable->tupleNum,gpuFinalFilter,gpuResult[i]);
        } else {
            GMM_CALL(cudaReference(0, HINT_READ));
            GMM_CALL(cudaReference(1, HINT_READ));
            GMM_CALL(cudaReference(4, HINT_READ));
            GMM_CALL(cudaReference(5, HINT_WRITE));
            hipLaunchKernelGGL(( joinFact_other), dim3(grid),dim3(block), 0, 0, gpuResPsum,gpu_fact[i],attrSize[i],jNode->factTable->tupleNum,gpuFinalFilter,gpuResult[i]);
        }
    }
    k = jNode->factOutputNum;
    for(int i=0;i<jNode->dimNum;i++){
        for(int j=0;j<jNode->dimOutputNum[i];j++){
            /* BUGFIX: the per-dimension rowId filter is gpuFilter[i] (dim
             * index); the old gpuFilter[k] read past the dimNum-entry array. */
            if (attrType[k] != STRING) {
                GMM_CALL(cudaReference(0, HINT_READ));
                GMM_CALL(cudaReference(1, HINT_READ));
                GMM_CALL(cudaReference(4, HINT_READ));
                GMM_CALL(cudaReference(5, HINT_READ));
                GMM_CALL(cudaReference(6, HINT_WRITE));
                hipLaunchKernelGGL(( joinDim_int), dim3(grid),dim3(block), 0, 0, gpuResPsum,gpu_fact[k],attrSize[k],jNode->factTable->tupleNum,gpuFilter[i],gpuFinalFilter,gpuResult[k]);
            } else {
                GMM_CALL(cudaReference(0, HINT_READ));
                GMM_CALL(cudaReference(1, HINT_READ));
                GMM_CALL(cudaReference(4, HINT_READ));
                GMM_CALL(cudaReference(5, HINT_READ));
                GMM_CALL(cudaReference(6, HINT_WRITE));
                hipLaunchKernelGGL(( joinDim_other), dim3(grid),dim3(block), 0, 0, gpuResPsum,gpu_fact[k],attrSize[k],jNode->factTable->tupleNum,gpuFilter[i],gpuFinalFilter,gpuResult[k]);
            }
            k++;
        }
    }
    CUDA_SAFE_CALL_NO_SYNC(hipDeviceSynchronize());
    hipEventRecord(stopGPU,0);
    hipEventSynchronize(stopGPU);
    hipEventElapsedTime(&gpuTime,startGPU,stopGPU);
    pp->kernel += gpuTime;
    for(int i=0;i<jNode->dimNum;i++){
        CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuFilter[i]));
    }
    for(int i=0;i<jNode->totalAttr;i++){
        if(factInGpu[i] == 0)
            CUDA_SAFE_CALL_NO_SYNC(hipFree(gpu_fact[i]));
    }
    /* Hand result columns to the caller: keep on GPU or copy back to host. */
    for(int i=0;i<jNode->factOutputNum;i++){
        int pos = jNode->factOutputPos[i];
        if(res->dataPos[pos] == MEM){
            res->content[pos] = (char *) malloc(res->tupleNum * res->attrSize[pos]);
            CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(res->content[pos], gpuResult[i], res->tupleNum * res->attrSize[pos],hipMemcpyDeviceToHost));
            CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuResult[i]));
        }else if(res->dataPos[pos] == GPU){
            res->content[pos] = gpuResult[i];
        }
    }
    for(int i=0;i<jNode->dimOutputTotal;i++){
        int pos = jNode->dimOutputPos[i];
        if(res->dataPos[pos] == MEM){
            res->content[pos] = (char *) malloc(res->tupleNum * res->attrSize[pos]);
            CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(res->content[pos], gpuResult[i+jNode->factOutputNum], res->tupleNum * res->attrSize[pos],hipMemcpyDeviceToHost));
            CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuResult[i+jNode->factOutputNum]));
        }else if(res->dataPos[pos] == GPU){
            res->content[pos] = gpuResult[i+jNode->factOutputNum];
        }
    }
    /* Release remaining device and host bookkeeping (previously leaked). */
    CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuFinalFilter));
    CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuResPsum));
    CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuCount));
    CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuFilterAddr));
    free(gpuFilter);
    free(gpu_fact);
    free(factInGpu);
    free(gpuResult);
    free(attrSize);
    free(attrType);
    hipEventDestroy(startGPU);
    hipEventDestroy(stopGPU);
    clock_gettime(CLOCK_REALTIME,&end);
    double timeE = (end.tv_sec - start.tv_sec)* BILLION + end.tv_nsec - start.tv_nsec;
    pp->total += timeE / (1000 * 1000);
    return res;
}
| 1d21d17d7749d5c6ec4221b88bded9b4b872aa6b.cu | /*
Copyright (c) 2012-2013 The Ohio State University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <string.h>
#include <cuda.h>
#include "../include/common.h"
#include "../include/inviJoin.h"
#include "../include/gpuCudaLib.h"
#include "scanImpl.cu"
#ifdef HAS_GMM
#include "gmm.h"
#endif
__global__ static void count_hash_num(char *dim, long dNum,int *num){
    // Histogram pass of the hash build: for every dimension join key, bump
    // the counter of the bucket (key mod HSIZE) it will be placed in.
    const int step = blockDim.x * gridDim.x;
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (int row = tid; row < dNum; row += step) {
        int key = ((int *)dim)[row];
        atomicAdd(&num[key % HSIZE], 1);
    }
}
__global__ static void build_hash_table(char *dim, long dNum, int *psum, char * bucket){
    // Scatter pass of the hash build: place each (key, 1-based rowId) pair
    // into its bucket. psum holds the running write cursor of every bucket
    // (initialized to the prefix sums) and is advanced atomically, so this
    // kernel consumes its psum argument.
    const int step = blockDim.x * gridDim.x;
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (int row = tid; row < dNum; row += step) {
        int key = ((int *)dim)[row];
        int slot = atomicAdd(&psum[key % HSIZE], 1) * 2;
        ((int *)bucket)[slot] = key;
        ((int *)bucket)[slot + 1] = row + 1;   // 1-based: 0 means "no match"
    }
}
/*
 * Probe pass: for every fact row, scan the hash bucket of its join key and,
 * on a key match, record the matching dimension row id (1-based) in
 * factFilter[i]; rows without a match keep their 0 initialization.
 *
 * Fixes: the loop index is now long (an int index truncated `offset` and
 * could never cover fNum > INT_MAX rows), and the bucket start psum[hKey]
 * is hoisted out of the inner loop since it is loop-invariant.
 */
__global__ static void count_join_result(int* hashNum, int* psum, char* bucket, char* fact, long fNum, int * factFilter){
    int stride = blockDim.x * gridDim.x;
    long offset = blockIdx.x*blockDim.x + threadIdx.x;
    for(long i=offset;i<fNum;i+=stride){
        int fKey = ((int *)fact)[i];
        int hKey = fKey % HSIZE;
        int keyNum = hashNum[hKey];
        int pSum = psum[hKey];          /* bucket start offset, invariant */
        for(int j=0;j<keyNum;j++){
            int dimKey = ((int *)bucket)[2*j + 2*pSum];
            if(dimKey == fKey){
                factFilter[i] = ((int *)bucket)[2*j + 2*pSum + 1];  /* 1-based dim row id */
                break;
            }
        }
    }
}
__global__ void static joinFact_other(int *resPsum, char * fact, int attrSize, long num, int * filter, char * result){
int startIndex = blockIdx.x*blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
long localOffset = resPsum[startIndex] * attrSize;
for(long i=startIndex;i<num;i+=stride){
if(filter[i] != 0){
memcpy(result + localOffset, fact + i*attrSize, attrSize);
localOffset += attrSize;
}
}
}
__global__ void static joinFact_int(int *resPsum, char * fact, int attrSize, long num, int * filter, char * result){
int startIndex = blockIdx.x*blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
long localCount = resPsum[startIndex];
for(long i=startIndex;i<num;i+=stride){
if(filter[i] != 0){
((int*)result)[localCount] = ((int *)fact)[i];
localCount ++;
}
}
}
__global__ void static joinDim_int(int *resPsum, char * dim, int attrSize, long num, int *factF,int * filter, char * result){
int startIndex = blockIdx.x*blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
long localCount = resPsum[startIndex];
for(long i=startIndex;i<num;i+=stride){
if( filter[i] != 0){
int dimId = factF[i];
((int*)result)[localCount] = ((int*)dim)[dimId-1];
localCount ++;
}
}
}
__global__ void static joinDim_other(int *resPsum, char * dim, int attrSize, long num, int* factF,int * filter, char * result){
int startIndex = blockIdx.x*blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
long localOffset = resPsum[startIndex] * attrSize;
for(long i=startIndex;i<num;i+=stride){
if( filter[i] != 0){
int dimId = factF[i];
memcpy(result + localOffset, dim + (dimId-1)* attrSize, attrSize);
localOffset += attrSize;
}
}
}
__global__ void static merge(int ** filter, long fNum, int dNum,int * result, int *count, int * totalCount){
int startIndex = blockIdx.x*blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int lcount = 0;
for(long i=startIndex; i<fNum; i+=stride){
int tmp = 1;
for(int j=0;j<dNum;j++){
if(filter[j][i] == 0){
tmp = 0;
break;
}
}
lcount += tmp;
result[i] = tmp;
}
count[startIndex] = lcount;
atomicAdd(totalCount,lcount);
}
static void buildHashPlan(long size, int * pass){
int gpuMem = getGpuGlobalMem(0);
*pass = 3*size / gpuMem + 1;
}
static void joinPlan(struct joinNode *jNode, int * pass){
int gpuMem = getGpuGlobalMem(0);
*pass = 1;
}
struct tableNode * inviJoin(struct joinNode *jNode, struct statistic *pp){
struct tableNode * res = NULL;
char ** gpu_fact;
char ** gpu_hash;
int ** gpuHashPsum;
int ** gpuHashNum;
int ** gpuFilter;
struct timespec start,end;
float gpuTime;
cudaEvent_t startGPU, stopGPU;
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
clock_gettime(CLOCK_REALTIME,&start);
dim3 grid(1024);
dim3 block(256);
int blockNum = jNode->factTable->tupleNum / block.x + 1;
if(blockNum < 1024)
grid = blockNum;
int threadNum = grid.x * block.x;
res = (struct tableNode *) malloc(sizeof(struct tableNode));
res->tupleSize = jNode->tupleSize;
res->totalAttr = jNode->totalAttr;
res->attrType = (int *) malloc(sizeof(int) * res->totalAttr);
res->attrSize = (int *) malloc(sizeof(int) * res->totalAttr);
res->dataPos = (int *) malloc(sizeof(int) * res->totalAttr);
res->attrTotalSize = (int *) malloc(sizeof(int) * res->totalAttr);
res->dataFormat = (int *) malloc(res->totalAttr * sizeof(int));
res->content = (char **) malloc(sizeof(char *) * jNode->totalAttr );
for(int i=0;i<res->totalAttr;i++){
res->attrType[i] = jNode->attrType[i];
res->attrSize[i] = jNode->attrSize[i];
if(jNode->keepInGpu[i] == 1){
res->dataPos[i] = GPU;
}else{
res->dataPos[i] = MEM;
}
res->dataFormat[i] = UNCOMPRESSED;
}
gpuHashPsum = (int **) malloc(sizeof(int *) * jNode->dimNum);
gpuHashNum = (int **) malloc(sizeof(int *) * jNode->dimNum);
gpu_hash = (char **)malloc(sizeof(char *) * jNode->dimNum);
for(int k=0;k<jNode->dimNum;k++){
char *gpu_dim;
long primaryKeySize = sizeof(int) * jNode->dimTable[k]->tupleNum;
int dimIndex = jNode->dimIndex[k];
/*
* build hash table on GPU
*/
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&(gpuHashNum[k]),HSIZE * sizeof(int)));
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&(gpuHashPsum[k]), sizeof(int) * HSIZE));
CUDA_SAFE_CALL_NO_SYNC(cudaMemset(gpuHashNum[k],0,sizeof(int) * HSIZE));
int pass = 0;
int dimInGpu = 0;
buildHashPlan(primaryKeySize,&pass);
if (pass != 1){
printf("Hash Table too large! not supported yet!");
exit(-1);
}
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&(gpu_hash[k]), 2 * primaryKeySize));
if(jNode->dimTable[k]->dataPos[dimIndex] == MEM){
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&gpu_dim,primaryKeySize));
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpu_dim,jNode->dimTable[k]->content[dimIndex], primaryKeySize,cudaMemcpyHostToDevice));
}else if(jNode->dimTable[k]->dataPos[dimIndex] == GPU){
dimInGpu = 1 ;
gpu_dim = jNode->dimTable[k]->content[dimIndex];
}
cudaEventRecord(startGPU,0);
GMM_CALL(cudaReference(0, HINT_READ));
GMM_CALL(cudaReference(2, HINT_DEFAULT));
count_hash_num<<<grid,block>>>(gpu_dim,jNode->dimTable[k]->tupleNum,gpuHashNum[k]);
CUDA_SAFE_CALL_NO_SYNC(cudaDeviceSynchronize());
cudaEventRecord(stopGPU,0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&gpuTime,startGPU,stopGPU);
pp->kernel += gpuTime;
//printf("GPU count hash result time:%lf\n",gpuTime);
scanImpl(gpuHashNum[k],HSIZE,gpuHashPsum[k], pp);
int * gpu_psum ;
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpu_psum, sizeof(int) * HSIZE));
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpu_psum, gpuHashPsum[k], sizeof(int) * HSIZE, cudaMemcpyDeviceToDevice));
cudaEventRecord(startGPU,0);
GMM_CALL(cudaReference(0, HINT_READ));
GMM_CALL(cudaReference(2, HINT_DEFAULT));
GMM_CALL(cudaReference(3, HINT_WRITE));
build_hash_table<<<grid,block>>>(gpu_dim,jNode->dimTable[k]->tupleNum,gpu_psum,gpu_hash[k]);
CUDA_SAFE_CALL_NO_SYNC(cudaDeviceSynchronize());
CUDA_SAFE_CALL_NO_SYNC(cudaEventRecord(stopGPU,0));
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&gpuTime,startGPU,stopGPU);
pp->kernel += gpuTime;
//printf("GPU build hash table time:%lf\n",gpuTime);
if(dimInGpu == 0)
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpu_dim));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpu_psum));
}
int filterSize = jNode->factTable->tupleNum * sizeof(int);
int *factInGpu = (int *) malloc(sizeof(int) * jNode->dimNum);
gpuFilter = (int **) malloc(sizeof(int *) * jNode->dimNum);
gpu_fact = (char **) malloc(sizeof(char *) * jNode->dimNum);
for(int k=0;k<jNode->dimNum;k++){
int index = jNode->factIndex[k];
if(jNode->factTable->dataPos[index] == MEM){
factInGpu[k] = 0;
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&(gpu_fact[k]), filterSize));
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpu_fact[k],jNode->factTable->content[index], filterSize, cudaMemcpyHostToDevice));
}else if(jNode->factTable->dataPos[index] == GPU){
factInGpu[k] = 1;
gpu_fact[k] = jNode->factTable->content[index];
}
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&(gpuFilter[k]), filterSize));
CUDA_SAFE_CALL_NO_SYNC(cudaMemset(gpuFilter[k],0,filterSize));
}
cudaEventRecord(startGPU,0);
for(int k=0;k<jNode->dimNum;k++) {
GMM_CALL(cudaReference(0, HINT_READ));
GMM_CALL(cudaReference(1, HINT_READ));
GMM_CALL(cudaReference(2, HINT_READ));
GMM_CALL(cudaReference(3, HINT_READ));
GMM_CALL(cudaReference(5, HINT_WRITE));
count_join_result<<<grid,block>>>(gpuHashNum[k], gpuHashPsum[k], gpu_hash[k], gpu_fact[k], jNode->factTable->tupleNum, gpuFilter[k]);
}
CUDA_SAFE_CALL_NO_SYNC(cudaDeviceSynchronize());
cudaEventRecord(stopGPU,0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&gpuTime,startGPU,stopGPU);
//printf("GPU generate filter time:%lf\n",gpuTime);
pp->kernel += gpuTime;
for(int k=0;k<jNode->dimNum;k++){
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpu_hash[k]));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuHashPsum[k]));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuHashNum[k]));
if(factInGpu[k] == 0)
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpu_fact[k]));
}
free(gpu_hash);
free(gpuHashPsum);
free(gpuHashNum);
free(gpu_fact);
free(factInGpu);
int * gpuFinalFilter;
int * gpuCount, *gpuTotalCount;
int * gpuResPsum;
int ** gpuFilterAddr;
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **) &gpuFilterAddr, sizeof(int *) * jNode->dimNum));
for(int k=0;k<jNode->dimNum;k++){
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(&(gpuFilterAddr[k]), &(gpuFilter[k]), sizeof(int *), cudaMemcpyHostToDevice));
}
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuFinalFilter,sizeof(int) * jNode->factTable->tupleNum));
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuCount,sizeof(int) * threadNum));
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuTotalCount,sizeof(int)));
CUDA_SAFE_CALL_NO_SYNC(cudaMemset(gpuTotalCount,0,sizeof(int)));
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuResPsum,sizeof(int) * threadNum));
cudaEventRecord(startGPU,0);
GMM_CALL(cudaReference(0, HINT_READ|HINT_PTARRAY|HINT_PTAREAD));
GMM_CALL(cudaReference(3, HINT_WRITE));
GMM_CALL(cudaReference(4, HINT_WRITE));
GMM_CALL(cudaReference(5, HINT_DEFAULT));
merge<<<grid,block>>>(gpuFilterAddr,jNode->factTable->tupleNum,jNode->dimNum,gpuFinalFilter, gpuCount,gpuTotalCount);
CUDA_SAFE_CALL_NO_SYNC(cudaDeviceSynchronize());
cudaEventRecord(stopGPU,0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&gpuTime,startGPU,stopGPU);
//printf("GPU merge filter time:%lf\n",gpuTime);
pp->kernel += gpuTime;
int totalCount = 0;
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(&totalCount,gpuTotalCount,sizeof(int),cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuTotalCount));
res->tupleNum = totalCount;
for(int i=0;i<res->totalAttr;i++){
res->attrTotalSize[i] = totalCount * res->attrSize[i];
}
scanImpl(gpuCount,threadNum,gpuResPsum, pp);
gpu_fact = (char **) malloc(sizeof(char *) * jNode->totalAttr);
factInGpu = (int *) malloc(sizeof(int) * jNode->totalAttr);
char **gpuResult = (char **) malloc(sizeof(char *) * jNode->totalAttr);
int *attrSize = (int *) malloc(sizeof(int) * jNode->totalAttr);
int *attrType = (int *) malloc(sizeof(int) * jNode->totalAttr);
for(int i=0; i< jNode->factOutputNum;i++){
int index = jNode->factOutputIndex[i];
int aSize = jNode->factTable->attrSize[index];
int size = aSize * jNode->factTable->tupleNum;
attrSize[i] = aSize;
attrType[i] = jNode->factTable->attrType[index];
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&(gpuResult[i]),aSize * totalCount));
if(jNode->factTable->dataPos[index] == MEM ){
factInGpu[i] = 0;
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&(gpu_fact[i]),size));
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpu_fact[i], jNode->factTable->content[index],aSize * jNode->factTable->tupleNum, cudaMemcpyHostToDevice));
}else if(jNode->factTable->dataPos[index] == GPU){
factInGpu[i] = 1;
gpu_fact[i] = jNode->factTable->content[index];
}
}
int k = jNode->factOutputNum;
for(int i=0;i<jNode->dimNum;i++){
for(int j=0;j<jNode->dimOutputNum[i]; j++){
int index = jNode->dimOutputIndex[i][j];
int aSize = jNode->dimTable[i]->attrSize[index];
attrSize[k] = aSize;
attrType[k] = jNode->dimTable[i]->attrType[index];
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&(gpuResult[k]),aSize * totalCount));
if(jNode->dimTable[i]->dataPos[index] == MEM){
factInGpu[k] = 0;
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&(gpu_fact[k]),aSize*jNode->dimTable[i]->tupleNum));
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpu_fact[k],jNode->dimTable[i]->content[index], aSize*jNode->dimTable[i]->tupleNum,cudaMemcpyHostToDevice));
}else if (jNode->dimTable[i]->dataPos[index] ==GPU){
factInGpu[k] = 1;
gpu_fact[k] = jNode->dimTable[i]->content[index];
}
k++;
}
}
cudaEventRecord(startGPU,0);
for(int i=0;i<jNode->factOutputNum;i++){
if(attrType[i] != STRING) {
GMM_CALL(cudaReference(0, HINT_READ));
GMM_CALL(cudaReference(1, HINT_READ));
GMM_CALL(cudaReference(4, HINT_READ));
GMM_CALL(cudaReference(5, HINT_WRITE));
joinFact_int<<<grid,block>>>(gpuResPsum,gpu_fact[i],attrSize[i],jNode->factTable->tupleNum,gpuFinalFilter,gpuResult[i]);
} else {
GMM_CALL(cudaReference(0, HINT_READ));
GMM_CALL(cudaReference(1, HINT_READ));
GMM_CALL(cudaReference(4, HINT_READ));
GMM_CALL(cudaReference(5, HINT_WRITE));
joinFact_other<<<grid,block>>>(gpuResPsum,gpu_fact[i],attrSize[i],jNode->factTable->tupleNum,gpuFinalFilter,gpuResult[i]);
}
}
k = jNode->factOutputNum;
for(int i=0;i<jNode->dimNum;i++){
for(int j=0;j<jNode->dimOutputNum[i];j++){
if (attrType[k] != STRING) {
GMM_CALL(cudaReference(0, HINT_READ));
GMM_CALL(cudaReference(1, HINT_READ));
GMM_CALL(cudaReference(4, HINT_READ));
GMM_CALL(cudaReference(5, HINT_READ));
GMM_CALL(cudaReference(6, HINT_WRITE));
joinDim_int<<<grid,block>>>(gpuResPsum,gpu_fact[k],attrSize[k],jNode->factTable->tupleNum,gpuFilter[k],gpuFinalFilter,gpuResult[k]);
} else {
GMM_CALL(cudaReference(0, HINT_READ));
GMM_CALL(cudaReference(1, HINT_READ));
GMM_CALL(cudaReference(4, HINT_READ));
GMM_CALL(cudaReference(5, HINT_READ));
GMM_CALL(cudaReference(6, HINT_WRITE));
joinDim_other<<<grid,block>>>(gpuResPsum,gpu_fact[k],attrSize[k],jNode->factTable->tupleNum,gpuFilter[k],gpuFinalFilter,gpuResult[k]);
}
k++;
}
}
CUDA_SAFE_CALL_NO_SYNC(cudaDeviceSynchronize());
cudaEventRecord(stopGPU,0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&gpuTime,startGPU,stopGPU);
pp->kernel += gpuTime;
//printf("GPU filter fact result time:%lf\n",gpuTime);
for(int i=0;i<jNode->dimNum;i++){
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuFilter[i]));
}
for(int i=0;i<jNode->totalAttr;i++){
if(factInGpu[i] == 0)
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpu_fact[i]));
}
for(int i=0;i<jNode->factOutputNum;i++){
int pos = jNode->factOutputPos[i];
if(res->dataPos[pos] == MEM){
res->content[pos] = (char *) malloc(res->tupleNum * res->attrSize[pos]);
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(res->content[pos], gpuResult[i], res->tupleNum * res->attrSize[pos],cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuResult[i]));
}else if(res->dataPos[pos] == GPU){
res->content[pos] = gpuResult[i];
}
}
for(int i=0;i<jNode->dimOutputTotal;i++){
int pos = jNode->dimOutputPos[i];
if(res->dataPos[pos] == MEM){
res->content[pos] = (char *) malloc(res->tupleNum * res->attrSize[pos]);
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(res->content[pos], gpuResult[i+jNode->factOutputNum], res->tupleNum * res->attrSize[pos],cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuResult[i+jNode->factOutputNum]));
}else if(res->dataPos[pos] == GPU){
res->content[pos] = gpuResult[i+jNode->factOutputNum];
}
}
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuFinalFilter));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuResPsum));
clock_gettime(CLOCK_REALTIME,&end);
double timeE = (end.tv_sec - start.tv_sec)* BILLION + end.tv_nsec - start.tv_nsec;
pp->total += timeE / (1000 * 1000);
return res;
}
|
6b46d3d214b9a5f6b7ba90ba5e40bb08df4f2b50.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
/*
 * C[i][j] = dot(row i of A, column j of B), with A (rows x col_a) and
 * B (col_a x col_b), all row-major.
 *
 * Fix: the accumulator was declared int, silently truncating every float
 * product; it is now float.
 *
 * NOTE(review): there is no bounds guard and the signature carries no row
 * count, so the grid must be sized exactly -- the ceil-div launch in cudaMul
 * over-runs C whenever the dimensions do not divide the block size evenly.
 */
__global__ void matrix_mul(float *A, float *B, float *C, int col_a, int col_b) {
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    int j = threadIdx.y + blockDim.y * blockIdx.y;
    float sum = 0.0f;   /* was int: integer accumulation loses fractions */
    for(int k = 0; k < col_a; k++) {
        sum += A[i*col_a + k]*B[k*col_b+j];
    }
    C[i*col_b+j] = sum;
}
/*
 * Multiply the block of rows of A owned by OpenMP thread `rank` with the
 * device-resident matrix dB (n x k) and copy the product rows back into C.
 * Rows are block-distributed over num_thread workers; the first m%num_thread
 * workers receive one extra row.
 *
 * Fixes:
 *  - `dim3 Block(...)` was declared but every later use said `block`
 *    (undeclared identifier -> compile error); the declaration is renamed.
 *  - dA was sized with sizeof(int) instead of sizeof(float).
 *
 * @param A,C        host matrices (m x n input rows, m x k output rows)
 * @param dB         device pointer to B (n x k), uploaded by the caller
 * @param rank       this worker's index in [0, num_thread)
 * @param blocksize  total threads per 2D block (expected a power of two)
 */
void cudaMul(float *A, float *dB, float *C, int m, int n, int k, int rank, int num_thread, int blocksize){
    int q = m/num_thread;
    int r = m%num_thread;
    int first,count;
    count = q;
    if(rank < r) {
        count = q + 1;
        first = count * rank;
    }
    else {
        first = count * rank + r;
    }
    float * dA,*dC;
    hipMalloc((void**)&dA, sizeof(float)*count*n);   /* was sizeof(int): wrong element type */
    hipMalloc((void**)&dC,sizeof(float)*count*k);
    hipMemcpy(dA,A+first*n,sizeof(float)*count*n,hipMemcpyHostToDevice);
    /* Choose a near-square 2D block with blockx*blocky == blocksize. */
    int blockx = pow(2,(int)(log2(blocksize))/2);
    int blocky;
    if(blockx*blockx == blocksize) blocky = blockx;
    else blocky = 2*blockx;
    dim3 block(blockx, blocky);   /* was `Block`: every later use says `block` */
    dim3 grid((count+block.x-1)/block.x,(k+block.y-1)/block.y);
    hipLaunchKernelGGL(( matrix_mul), dim3(grid),dim3(block), 0, 0, dA,dB,dC,n,k);
    hipMemcpy(C+first*k,dC,sizeof(float)*count*k,hipMemcpyDeviceToHost);
    hipFree(dA);
    hipFree(dC);
}
int main(int argc , char *argv[]) {
if (argc < 5)
{
printf("lack of initial arguments !\n");
return 0;
}
int m,n,k,blocksize,num_thread;
// m , n , k , num_thread()
m = atoi(argv[1]);
n = atoi(argv[2]);
k = atoi(argv[3]);
blocksize = atoi(argv[4]);
num_thread = atoi(argv[5]);
float *A = (float *)malloc(sizeof(float) * m * n);
float *B = (float *)malloc(sizeof(float) * n * k);
float *C = (float *)malloc(sizeof(float) * m * k);
for (int i = 0; i < m*n; i++) {
srand(i);
A[i] = rand() % 10;
}
for (int i = 0; i < n*k; i++) {
srand(i);
B[i] = rand() % 10;
}
float *dB;
hipMalloc((void**)&dB, sizeof(float)*n*k);
struct timeval start, end;
gettimeofday( &start, NULL );
hipMemcpy(dB,B,sizeof(float)*n*k,hipMemcpyHostToDevice);
#pragma omp parallel num_threads(num_thread)
{
cudaMul(A,dB,C,m,n,k,omp_get_thread_num(),num_thread,blocksize);
}
hipFree(dB);
gettimeofday( &end, NULL );
int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
printf("block (%d,%d)\nnumber of threads %d\n",blocksize,blocksize,num_thread);
printf("total time is %f ms\n", timeuse/(float)1000);
FILE *a, *b, *c;
a = fopen("matrixA.m", "wb");
b = fopen("matrixB.m", "wb");
c = fopen("matrixC.m", "wb");
fprintf(a, "A = [ \n");
for (int i = 0; i < m; i++)
{
for (int j = 0; j < n; j++)
fprintf(a, "%f ", A[i * n + j]);
fprintf(a, "\n");
}
fprintf(a, "];");
fprintf(b, "B = [ \n");
for (int i = 0; i < n; i++)
{
for (int j = 0; j < k; j++)
fprintf(b, "%f ", B[i * k + j]);
fprintf(b, "\n");
}
fprintf(b, "];");
fprintf(c, "C = [ \n");
for (int i = 0; i < m; i++)
{
for (int j = 0; j < k; j++)
fprintf(c, "%f ", C[i * k + j]);
fprintf(c, "\n");
}
fprintf(c, "];");
free(A);
free(B);
free(C);
return 0;
} | 6b46d3d214b9a5f6b7ba90ba5e40bb08df4f2b50.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
__global__ void matrix_mul(float *A, float *B, float *C, int col_a, int col_b) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int sum = 0;
for(int k = 0; k < col_a; k++) {
sum += A[i*col_a + k]*B[k*col_b+j];
}
C[i*col_b+j] = sum;
}
void cudaMul(float *A, float *dB, float *C, int m, int n, int k, int rank, int num_thread, int blocksize){
int q = m/num_thread;
int r = m%num_thread;
int first,count;
count = q;
if(rank < r) {
count = q + 1;
first = count * rank;
}
else {
first = count * rank + r;
}
float * dA,*dC;
cudaMalloc((void**)&dA, sizeof(int)*count*n);
cudaMalloc((void**)&dC,sizeof(float)*count*k);
cudaMemcpy(dA,A+first*n,sizeof(float)*count*n,cudaMemcpyHostToDevice);
int blockx = pow(2,(int)(log2(blocksize))/2);
int blocky;
if(blockx*blockx == blocksize) blocky = blockx;
else blocky = 2*blockx;
dim3 Block(blockx, blocky);
dim3 grid((count+block.x-1)/block.x,(k+block.y-1)/block.y);
matrix_mul<<<grid,block>>>(dA,dB,dC,n,k);
cudaMemcpy(C+first*k,dC,sizeof(float)*count*k,cudaMemcpyDeviceToHost);
cudaFree(dA);
cudaFree(dC);
}
int main(int argc , char *argv[]) {
if (argc < 5)
{
printf("lack of initial arguments !\n");
return 0;
}
int m,n,k,blocksize,num_thread;
// m , n , k , num_thread(运行线程数量) 初始化
m = atoi(argv[1]);
n = atoi(argv[2]);
k = atoi(argv[3]);
blocksize = atoi(argv[4]);
num_thread = atoi(argv[5]);
float *A = (float *)malloc(sizeof(float) * m * n);
float *B = (float *)malloc(sizeof(float) * n * k);
float *C = (float *)malloc(sizeof(float) * m * k);
for (int i = 0; i < m*n; i++) {
srand(i);
A[i] = rand() % 10;
}
for (int i = 0; i < n*k; i++) {
srand(i);
B[i] = rand() % 10;
}
float *dB;
cudaMalloc((void**)&dB, sizeof(float)*n*k);
struct timeval start, end;
gettimeofday( &start, NULL );
cudaMemcpy(dB,B,sizeof(float)*n*k,cudaMemcpyHostToDevice);
#pragma omp parallel num_threads(num_thread)
{
cudaMul(A,dB,C,m,n,k,omp_get_thread_num(),num_thread,blocksize);
}
cudaFree(dB);
gettimeofday( &end, NULL );
int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
printf("block (%d,%d)\nnumber of threads %d\n",blocksize,blocksize,num_thread);
printf("total time is %f ms\n", timeuse/(float)1000);
FILE *a, *b, *c;
a = fopen("matrixA.m", "wb");
b = fopen("matrixB.m", "wb");
c = fopen("matrixC.m", "wb");
fprintf(a, "A = [ \n");
for (int i = 0; i < m; i++)
{
for (int j = 0; j < n; j++)
fprintf(a, "%f ", A[i * n + j]);
fprintf(a, "\n");
}
fprintf(a, "];");
fprintf(b, "B = [ \n");
for (int i = 0; i < n; i++)
{
for (int j = 0; j < k; j++)
fprintf(b, "%f ", B[i * k + j]);
fprintf(b, "\n");
}
fprintf(b, "];");
fprintf(c, "C = [ \n");
for (int i = 0; i < m; i++)
{
for (int j = 0; j < k; j++)
fprintf(c, "%f ", C[i * k + j]);
fprintf(c, "\n");
}
fprintf(c, "];");
free(A);
free(B);
free(C);
return 0;
} |
410dfd3ff87844915db386bb8be9360c28c4ff94.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "global.h"
#include "kernels.hip"
#include "swat_cuda.h"
struct timeval start_t1[10], end_t1[10];
#define TIMER_START(t) gettimeofday(&(start_t1[t]), NULL)
#define TIMER_END(t) gettimeofday(&(end_t1[t]), NULL)
#define MICRO_SECONDS(t) ((end_t1[t].tv_sec - start_t1[t].tv_sec)*1e6 + (end_t1[t].tv_usec - start_t1[t].tv_usec))
double swat_kernel_t = 0;
CigarOp makeElement(int state, int segment_length)
{
char o = '\0';
switch(state) {
case MSTATE:
o = 'M';
break;
case ISTATE:
o = 'I';
break;
case DSTATE:
o = 'D';
break;
case 'S':
o = 'S';
break;
}
return CigarOp(o, segment_length);
}
int swat(const byte a[], unsigned int an, const int a_offset[], const int ans[], const int n_a,
const byte b[], unsigned int bn, const int b_offset[], const int bns[], const int n_b,
float openPenalty, float extensionPenalty, float match, float mismatch, vector< vector<CigarOp> >& Cigars, int *alignment_offset, int max_len)
{
hipError_t cudaRes0, cudaRes1;
int blockSize = 128;
int subSequenceNum = n_b;
char *pathFlagD;
char *extFlagD;
int maxElemNum = (max_len + 1);
int maxMatrix = maxElemNum * 101;
int *alignment_offsetD;
for(int i = 0; i < n_a; i++) {
if(ans[i] > MAX_LEN) {
printf("an MAX: %d\n", ans[i]);
}
}
for(int i = 0; i < n_b; i++) {
if(bns[i] > MAX_LEN) {
printf("bn MAX: %d\n", bns[i]);
}
}
cudaRes0 = hipMalloc((void **)&alignment_offsetD, sizeof(int) * subSequenceNum);
if(cudaRes0 != hipSuccess) {
printf("Allocate sequence buffer on device error, %d\n",
cudaRes0);
return 1;
}
char *seq1D, *seq2D;
cudaRes0 = hipMalloc((void **)&seq1D, sizeof(char) * an);
cudaRes1 = hipMalloc((void **)&seq2D, sizeof(char) * bn);
if(cudaRes0 != hipSuccess ||
cudaRes1 != hipSuccess) {
printf("Allocate sequence buffer on device error, %d, %d\n",
cudaRes0,
cudaRes1);
return 1;
}
int *seq1_offD, *seq2_offD;
cudaRes0 = hipMalloc((void **)&seq1_offD, sizeof(int) * subSequenceNum);
cudaRes1 = hipMalloc((void **)&seq2_offD, sizeof(int) * subSequenceNum);
if(cudaRes0 != hipSuccess ||
cudaRes1 != hipSuccess) {
printf("Allocate sequence buffer on device error, %d, %d\n",
cudaRes0,
cudaRes1);
return 1;
}
int *rowNumsD, *columnNumsD;
cudaRes0 = hipMalloc((void **)&rowNumsD, sizeof(int) * subSequenceNum);
cudaRes1 = hipMalloc((void **)&columnNumsD, sizeof(int) * subSequenceNum);
if(cudaRes0 != hipSuccess ||
cudaRes1 != hipSuccess) {
printf("Allocate sequence buffer on device error, %d, %d\n",
cudaRes0,
cudaRes1);
return 1;
}
cudaRes0 = hipMalloc((void **)&pathFlagD, sizeof(char) * maxMatrix * subSequenceNum);
cudaRes1 = hipMalloc((void **)&extFlagD, sizeof(char) * maxMatrix * subSequenceNum);
if((cudaRes0 != hipSuccess) ||
(cudaRes1 != hipSuccess)) {
printf("cuda allocate DP matrices on device error: %d, %d\n",
cudaRes0,
cudaRes1);
return 1;
}
SEQ_ALIGN *element, *elementD;
element = new SEQ_ALIGN[100 * subSequenceNum];
cudaRes0 = hipMalloc((void **)&elementD, sizeof(SEQ_ALIGN) * 100 * subSequenceNum);
if(cudaRes0 != hipSuccess) {
printf("Allocate maxInfo on device error!\n");
return 1;
}
int *n_element, *n_elementD;
n_element = new int[subSequenceNum];
cudaRes0 = hipMalloc((void **)&n_elementD, sizeof(int) * subSequenceNum);
if(cudaRes0 != hipSuccess) {
printf("Allocate maxInfo on device error!\n");
return 1;
}
//Initialize DP matrices
hipMemset(pathFlagD, 0, maxMatrix * sizeof(char) * subSequenceNum);
hipMemset(extFlagD, 0, maxMatrix * sizeof(char) * subSequenceNum);
cudaRes0 = hipGetLastError();
if(cudaRes0 != hipSuccess) {
printf("hipMemset Error: %d %s\n", cudaRes0, hipGetErrorString(cudaRes0));
}
//copy input sequences to device
hipMemcpy(seq1D, a, an * sizeof(char), hipMemcpyHostToDevice);
hipMemcpy(seq2D, b, bn * sizeof(char), hipMemcpyHostToDevice);
hipMemcpy(seq1_offD, a_offset, subSequenceNum * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(seq2_offD, b_offset, subSequenceNum * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(rowNumsD, ans, subSequenceNum * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(columnNumsD, bns, subSequenceNum * sizeof(int), hipMemcpyHostToDevice);
cudaRes0 = hipGetLastError();
if(cudaRes0 != hipSuccess) {
printf("hipMemcpy Error: %d %s\n", cudaRes0, hipGetErrorString(cudaRes0));
}
TIMER_START(0);
hipLaunchKernelGGL(( MatchString) , dim3(subSequenceNum), dim3(blockSize), 0, 0,
pathFlagD,
extFlagD,
seq1D,
seq2D,
seq1_offD,
seq2_offD,
openPenalty,
extensionPenalty,
match,
mismatch,
rowNumsD,
columnNumsD,
elementD,
n_elementD,
alignment_offsetD,
maxMatrix
);
hipDeviceSynchronize();
TIMER_END(0);
swat_kernel_t += MICRO_SECONDS(0);
cudaRes0 = hipGetLastError();
if(cudaRes0 != 0)
printf("Error: %d %s\n", cudaRes0, hipGetErrorString(cudaRes0));
//copy matrix score structure back
hipMemcpy(element, elementD, sizeof(SEQ_ALIGN) * 100 * subSequenceNum, hipMemcpyDeviceToHost);
hipMemcpy(n_element, n_elementD, sizeof(int) * subSequenceNum, hipMemcpyDeviceToHost);
hipMemcpy(alignment_offset, alignment_offsetD, sizeof(int) * subSequenceNum, hipMemcpyDeviceToHost);
for(int k = 0; k < subSequenceNum; k++) {
vector<CigarOp> lce;
for(int t = n_element[k] - 1; t >= 0; t--) {
lce.push_back(makeElement(element[t + k * 100].o, element[t + k * 100].length));
}
Cigars.push_back(lce);
}
hipFree(seq1D);
hipFree(seq2D);
hipFree(pathFlagD);
hipFree(extFlagD);
hipFree(alignment_offsetD);
free(element);
hipFree(elementD);
free(n_element);
hipFree(n_elementD);
return 0;
}
| 410dfd3ff87844915db386bb8be9360c28c4ff94.cu | #include "global.h"
#include "kernels.cu"
#include "swat_cuda.h"
struct timeval start_t1[10], end_t1[10];
#define TIMER_START(t) gettimeofday(&(start_t1[t]), NULL)
#define TIMER_END(t) gettimeofday(&(end_t1[t]), NULL)
#define MICRO_SECONDS(t) ((end_t1[t].tv_sec - start_t1[t].tv_sec)*1e6 + (end_t1[t].tv_usec - start_t1[t].tv_usec))
double swat_kernel_t = 0;
CigarOp makeElement(int state, int segment_length)
{
char o = '\0';
switch(state) {
case MSTATE:
o = 'M';
break;
case ISTATE:
o = 'I';
break;
case DSTATE:
o = 'D';
break;
case 'S':
o = 'S';
break;
}
return CigarOp(o, segment_length);
}
int swat(const byte a[], unsigned int an, const int a_offset[], const int ans[], const int n_a,
const byte b[], unsigned int bn, const int b_offset[], const int bns[], const int n_b,
float openPenalty, float extensionPenalty, float match, float mismatch, vector< vector<CigarOp> >& Cigars, int *alignment_offset, int max_len)
{
cudaError_t cudaRes0, cudaRes1;
int blockSize = 128;
int subSequenceNum = n_b;
char *pathFlagD;
char *extFlagD;
int maxElemNum = (max_len + 1);
int maxMatrix = maxElemNum * 101;
int *alignment_offsetD;
for(int i = 0; i < n_a; i++) {
if(ans[i] > MAX_LEN) {
printf("an MAX: %d\n", ans[i]);
}
}
for(int i = 0; i < n_b; i++) {
if(bns[i] > MAX_LEN) {
printf("bn MAX: %d\n", bns[i]);
}
}
cudaRes0 = cudaMalloc((void **)&alignment_offsetD, sizeof(int) * subSequenceNum);
if(cudaRes0 != cudaSuccess) {
printf("Allocate sequence buffer on device error, %d\n",
cudaRes0);
return 1;
}
char *seq1D, *seq2D;
cudaRes0 = cudaMalloc((void **)&seq1D, sizeof(char) * an);
cudaRes1 = cudaMalloc((void **)&seq2D, sizeof(char) * bn);
if(cudaRes0 != cudaSuccess ||
cudaRes1 != cudaSuccess) {
printf("Allocate sequence buffer on device error, %d, %d\n",
cudaRes0,
cudaRes1);
return 1;
}
int *seq1_offD, *seq2_offD;
cudaRes0 = cudaMalloc((void **)&seq1_offD, sizeof(int) * subSequenceNum);
cudaRes1 = cudaMalloc((void **)&seq2_offD, sizeof(int) * subSequenceNum);
if(cudaRes0 != cudaSuccess ||
cudaRes1 != cudaSuccess) {
printf("Allocate sequence buffer on device error, %d, %d\n",
cudaRes0,
cudaRes1);
return 1;
}
int *rowNumsD, *columnNumsD;
cudaRes0 = cudaMalloc((void **)&rowNumsD, sizeof(int) * subSequenceNum);
cudaRes1 = cudaMalloc((void **)&columnNumsD, sizeof(int) * subSequenceNum);
if(cudaRes0 != cudaSuccess ||
cudaRes1 != cudaSuccess) {
printf("Allocate sequence buffer on device error, %d, %d\n",
cudaRes0,
cudaRes1);
return 1;
}
cudaRes0 = cudaMalloc((void **)&pathFlagD, sizeof(char) * maxMatrix * subSequenceNum);
cudaRes1 = cudaMalloc((void **)&extFlagD, sizeof(char) * maxMatrix * subSequenceNum);
if((cudaRes0 != cudaSuccess) ||
(cudaRes1 != cudaSuccess)) {
printf("cuda allocate DP matrices on device error: %d, %d\n",
cudaRes0,
cudaRes1);
return 1;
}
SEQ_ALIGN *element, *elementD;
element = new SEQ_ALIGN[100 * subSequenceNum];
cudaRes0 = cudaMalloc((void **)&elementD, sizeof(SEQ_ALIGN) * 100 * subSequenceNum);
if(cudaRes0 != cudaSuccess) {
printf("Allocate maxInfo on device error!\n");
return 1;
}
int *n_element, *n_elementD;
n_element = new int[subSequenceNum];
cudaRes0 = cudaMalloc((void **)&n_elementD, sizeof(int) * subSequenceNum);
if(cudaRes0 != cudaSuccess) {
printf("Allocate maxInfo on device error!\n");
return 1;
}
//Initialize DP matrices
cudaMemset(pathFlagD, 0, maxMatrix * sizeof(char) * subSequenceNum);
cudaMemset(extFlagD, 0, maxMatrix * sizeof(char) * subSequenceNum);
cudaRes0 = cudaGetLastError();
if(cudaRes0 != cudaSuccess) {
printf("cudaMemset Error: %d %s\n", cudaRes0, cudaGetErrorString(cudaRes0));
}
//copy input sequences to device
cudaMemcpy(seq1D, a, an * sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(seq2D, b, bn * sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(seq1_offD, a_offset, subSequenceNum * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(seq2_offD, b_offset, subSequenceNum * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(rowNumsD, ans, subSequenceNum * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(columnNumsD, bns, subSequenceNum * sizeof(int), cudaMemcpyHostToDevice);
cudaRes0 = cudaGetLastError();
if(cudaRes0 != cudaSuccess) {
printf("cudaMemcpy Error: %d %s\n", cudaRes0, cudaGetErrorString(cudaRes0));
}
TIMER_START(0);
MatchString <<< subSequenceNum, blockSize>>>(
pathFlagD,
extFlagD,
seq1D,
seq2D,
seq1_offD,
seq2_offD,
openPenalty,
extensionPenalty,
match,
mismatch,
rowNumsD,
columnNumsD,
elementD,
n_elementD,
alignment_offsetD,
maxMatrix
);
cudaThreadSynchronize();
TIMER_END(0);
swat_kernel_t += MICRO_SECONDS(0);
cudaRes0 = cudaGetLastError();
if(cudaRes0 != 0)
printf("Error: %d %s\n", cudaRes0, cudaGetErrorString(cudaRes0));
//copy matrix score structure back
cudaMemcpy(element, elementD, sizeof(SEQ_ALIGN) * 100 * subSequenceNum, cudaMemcpyDeviceToHost);
cudaMemcpy(n_element, n_elementD, sizeof(int) * subSequenceNum, cudaMemcpyDeviceToHost);
cudaMemcpy(alignment_offset, alignment_offsetD, sizeof(int) * subSequenceNum, cudaMemcpyDeviceToHost);
for(int k = 0; k < subSequenceNum; k++) {
vector<CigarOp> lce;
for(int t = n_element[k] - 1; t >= 0; t--) {
lce.push_back(makeElement(element[t + k * 100].o, element[t + k * 100].length));
}
Cigars.push_back(lce);
}
cudaFree(seq1D);
cudaFree(seq2D);
cudaFree(pathFlagD);
cudaFree(extFlagD);
cudaFree(alignment_offsetD);
free(element);
cudaFree(elementD);
free(n_element);
cudaFree(n_elementD);
return 0;
}
|
0ca76d1d1a2eb6f5130e2bce80c1e658a4cb6bf2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.numIterations1 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/* Matrix transpose with Cuda
* Host code.
* This example transposes arbitrary-size matrices. It compares a naive
* transpose kernel that suffers from non-coalesced writes, to an optimized
* transpose with fully coalesced memory access and no bank conflicts. On
* a G80 GPU, the optimized transpose can be more than 10x faster for large
* matrices.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil_inline.h>
#define BLOCK_DIM 16
// This kernel is optimized to ensure all global reads and writes are coalesced,
// and to avoid bank conflicts in shared memory. This kernel is up to 11x faster
// than the naive kernel below. Note that the shared memory array is sized to
// (BLOCK_DIM+1)*BLOCK_DIM. This pads each row of the 2D block in shared memory
// so that bank conflicts do not occur when threads address the array column-wise.
__global__ void transpose(float *odata, float *idata, int width, int height)
{
__shared__ float block[BLOCK_DIM][BLOCK_DIM+1];
// read the matrix tile into shared memory
// load one element per thread from device memory (idata) and store it
// in transposed order in block[][]
unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
if((xIndex < width) && (yIndex < height))
{
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
// synchronise to ensure all writes to block[][] have completed
__syncthreads();
// write the transposed matrix tile to global memory (odata) in linear order
xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
if((xIndex < height) && (yIndex < width))
{
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
}
// This naive transpose kernel suffers from completely non-coalesced writes.
// It can be up to 10x slower than the kernel above for large matrices.
__global__ void transpose_naive(float *odata, float* idata, int width, int height)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
if (xIndex < width && yIndex < height)
{
unsigned int index_in = xIndex + width * yIndex;
unsigned int index_out = yIndex + height * xIndex;
odata[index_out] = idata[index_in];
}
}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C" void computeGold( float* reference, float* idata,
const unsigned int size_x, const unsigned int size_y );
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
// size of the matrix
#ifdef __DEVICE_EMULATION__
const unsigned int size_x = 32;
const unsigned int size_y = 128;
#else
const unsigned int size_x = 256;
const unsigned int size_y = 4096;
#endif
// size of memory required to store the matrix
const unsigned int mem_size = sizeof(float) * size_x * size_y;
unsigned int timer;
cutCreateTimer(&timer);
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
hipSetDevice( cutGetMaxGflopsDeviceId() );
// allocate host memory
float* h_idata = (float*) malloc(mem_size);
// initalize the memory
srand(15235911);
for( unsigned int i = 0; i < (size_x * size_y); ++i)
{
h_idata[i] = (float) i; // rand();
}
// allocate device memory
float* d_idata;
float* d_odata;
cutilSafeCall( hipMalloc( (void**) &d_idata, mem_size));
cutilSafeCall( hipMalloc( (void**) &d_odata, mem_size));
// copy host memory to device
cutilSafeCall( hipMemcpy( d_idata, h_idata, mem_size,
hipMemcpyHostToDevice) );
// setup execution parameters
dim3 grid(size_x / BLOCK_DIM, size_y / BLOCK_DIM, 1);
dim3 threads(BLOCK_DIM, BLOCK_DIM, 1);
// warmup so we don't time CUDA startup
hipLaunchKernelGGL(( transpose_naive), dim3(grid), dim3(threads) , 0, 0, d_odata, d_idata, size_x, size_y);
hipLaunchKernelGGL(( transpose), dim3(grid), dim3(threads) , 0, 0, d_odata, d_idata, size_x, size_y);
// synchronize here, so we make sure that we don't count any time from the asynchronize kernel launches.
hipDeviceSynchronize();
int numIterations = 1;
printf("Transposing a %d by %d matrix of floats...\n", size_x, size_y);
// execute the kernel
cutStartTimer(timer);
for (int i = 0; i < numIterations; ++i)
{
hipLaunchKernelGGL(( transpose_naive), dim3(grid), dim3(threads) , 0, 0, d_odata, d_idata, size_x, size_y);
}
hipDeviceSynchronize();
cutStopTimer(timer);
float naiveTime = cutGetTimerValue(timer);
// execute the kernel
cutResetTimer(timer);
cutStartTimer(timer);
for (int i = 0; i < numIterations; ++i)
{
hipLaunchKernelGGL(( transpose), dim3(grid), dim3(threads) , 0, 0, d_odata, d_idata, size_x, size_y);
}
hipDeviceSynchronize();
cutStopTimer(timer);
float optimizedTime = cutGetTimerValue(timer);
printf("Naive transpose average time: %0.3f ms\n", naiveTime / numIterations);
printf("Optimized transpose average time: %0.3f ms\n\n", optimizedTime / numIterations);
// check if kernel execution generated and error
cutilCheckMsg("Kernel execution failed");
// copy result from device to host
float* h_odata = (float*) malloc(mem_size);
cutilSafeCall( hipMemcpy( h_odata, d_odata, mem_size,
hipMemcpyDeviceToHost) );
// compute reference solution
float* reference = (float*) malloc( mem_size);
computeGold( reference, h_idata, size_x, size_y);
// check result
CUTBoolean res = cutComparef( reference, h_odata, size_x * size_y);
printf( "Test %s\n", (1 == res) ? "PASSED" : "FAILED");
// cleanup memory
free(h_idata);
free(h_odata);
free( reference);
cutilSafeCall(hipFree(d_idata));
cutilSafeCall(hipFree(d_odata));
cutilCheckError( cutDeleteTimer(timer));
hipDeviceReset();
}
| 0ca76d1d1a2eb6f5130e2bce80c1e658a4cb6bf2.cu | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.numIterations1 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/* Matrix transpose with Cuda
* Host code.
* This example transposes arbitrary-size matrices. It compares a naive
* transpose kernel that suffers from non-coalesced writes, to an optimized
* transpose with fully coalesced memory access and no bank conflicts. On
* a G80 GPU, the optimized transpose can be more than 10x faster for large
* matrices.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil_inline.h>
#define BLOCK_DIM 16
// This kernel is optimized to ensure all global reads and writes are coalesced,
// and to avoid bank conflicts in shared memory. This kernel is up to 11x faster
// than the naive kernel below. Note that the shared memory array is sized to
// (BLOCK_DIM+1)*BLOCK_DIM. This pads each row of the 2D block in shared memory
// so that bank conflicts do not occur when threads address the array column-wise.
__global__ void transpose(float *odata, float *idata, int width, int height)
{
__shared__ float block[BLOCK_DIM][BLOCK_DIM+1];
// read the matrix tile into shared memory
// load one element per thread from device memory (idata) and store it
// in transposed order in block[][]
unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
if((xIndex < width) && (yIndex < height))
{
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
// synchronise to ensure all writes to block[][] have completed
__syncthreads();
// write the transposed matrix tile to global memory (odata) in linear order
xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
if((xIndex < height) && (yIndex < width))
{
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
}
// This naive transpose kernel suffers from completely non-coalesced writes.
// It can be up to 10x slower than the kernel above for large matrices.
__global__ void transpose_naive(float *odata, float* idata, int width, int height)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
if (xIndex < width && yIndex < height)
{
unsigned int index_in = xIndex + width * yIndex;
unsigned int index_out = yIndex + height * xIndex;
odata[index_out] = idata[index_in];
}
}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C" void computeGold( float* reference, float* idata,
const unsigned int size_x, const unsigned int size_y );
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
// size of the matrix
#ifdef __DEVICE_EMULATION__
const unsigned int size_x = 32;
const unsigned int size_y = 128;
#else
const unsigned int size_x = 256;
const unsigned int size_y = 4096;
#endif
// size of memory required to store the matrix
const unsigned int mem_size = sizeof(float) * size_x * size_y;
unsigned int timer;
cutCreateTimer(&timer);
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
cudaSetDevice( cutGetMaxGflopsDeviceId() );
// allocate host memory
float* h_idata = (float*) malloc(mem_size);
// initalize the memory
srand(15235911);
for( unsigned int i = 0; i < (size_x * size_y); ++i)
{
h_idata[i] = (float) i; // rand();
}
// allocate device memory
float* d_idata;
float* d_odata;
cutilSafeCall( cudaMalloc( (void**) &d_idata, mem_size));
cutilSafeCall( cudaMalloc( (void**) &d_odata, mem_size));
// copy host memory to device
cutilSafeCall( cudaMemcpy( d_idata, h_idata, mem_size,
cudaMemcpyHostToDevice) );
// setup execution parameters
dim3 grid(size_x / BLOCK_DIM, size_y / BLOCK_DIM, 1);
dim3 threads(BLOCK_DIM, BLOCK_DIM, 1);
// warmup so we don't time CUDA startup
transpose_naive<<< grid, threads >>>(d_odata, d_idata, size_x, size_y);
transpose<<< grid, threads >>>(d_odata, d_idata, size_x, size_y);
// synchronize here, so we make sure that we don't count any time from the asynchronize kernel launches.
cudaThreadSynchronize();
int numIterations = 1;
printf("Transposing a %d by %d matrix of floats...\n", size_x, size_y);
// execute the kernel
cutStartTimer(timer);
for (int i = 0; i < numIterations; ++i)
{
transpose_naive<<< grid, threads >>>(d_odata, d_idata, size_x, size_y);
}
cudaThreadSynchronize();
cutStopTimer(timer);
float naiveTime = cutGetTimerValue(timer);
// execute the kernel
cutResetTimer(timer);
cutStartTimer(timer);
for (int i = 0; i < numIterations; ++i)
{
transpose<<< grid, threads >>>(d_odata, d_idata, size_x, size_y);
}
cudaThreadSynchronize();
cutStopTimer(timer);
float optimizedTime = cutGetTimerValue(timer);
printf("Naive transpose average time: %0.3f ms\n", naiveTime / numIterations);
printf("Optimized transpose average time: %0.3f ms\n\n", optimizedTime / numIterations);
// check if kernel execution generated and error
cutilCheckMsg("Kernel execution failed");
// copy result from device to host
float* h_odata = (float*) malloc(mem_size);
cutilSafeCall( cudaMemcpy( h_odata, d_odata, mem_size,
cudaMemcpyDeviceToHost) );
// compute reference solution
float* reference = (float*) malloc( mem_size);
computeGold( reference, h_idata, size_x, size_y);
// check result
CUTBoolean res = cutComparef( reference, h_odata, size_x * size_y);
printf( "Test %s\n", (1 == res) ? "PASSED" : "FAILED");
// cleanup memory
free(h_idata);
free(h_odata);
free( reference);
cutilSafeCall(cudaFree(d_idata));
cutilSafeCall(cudaFree(d_odata));
cutilCheckError( cutDeleteTimer(timer));
cudaThreadExit();
}
|
7e9ba2cf24f6db78374f6fd310860c85709ab205.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_erff.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
vec_erff), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vec_erff), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vec_erff), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7e9ba2cf24f6db78374f6fd310860c85709ab205.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_erff.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_erff<<<gridBlock,threadBlock>>>(n,result,x);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_erff<<<gridBlock,threadBlock>>>(n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_erff<<<gridBlock,threadBlock>>>(n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
77d709b50a018e1f6f91c4363d647e00b86cc23c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "detectNet.h"
#include "cudaUtility.h"
template<typename T>
__global__ void gpuDetectionOverlay( T* input, T* output, int width, int height, detectNet::Detection* detections, int numDetections, float4* colors )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= width || y >= height )
return;
const T px_in = input[ y * width + x ];
T px_out = px_in;
const float fx = x;
const float fy = y;
for( int n=0; n < numDetections; n++ )
{
const detectNet::Detection det = detections[n];
// check if this pixel is inside the bounding box
if( fx >= det.Left && fx <= det.Right && fy >= det.Top && fy <= det.Bottom )
{
const float4 color = colors[det.ClassID];
const float alpha = color.w / 255.0f;
const float ialph = 1.0f - alpha;
px_out.x = alpha * color.x + ialph * px_out.x;
px_out.y = alpha * color.y + ialph * px_out.y;
px_out.z = alpha * color.z + ialph * px_out.z;
}
}
output[y * width + x] = px_out;
}
template<typename T>
__global__ void gpuDetectionOverlayBox( T* input, T* output, int imgWidth, int imgHeight, int x0, int y0, int boxWidth, int boxHeight, const float4 color )
{
const int box_x = blockIdx.x * blockDim.x + threadIdx.x;
const int box_y = blockIdx.y * blockDim.y + threadIdx.y;
if( box_x >= boxWidth || box_y >= boxHeight )
return;
const int x = box_x + x0;
const int y = box_y + y0;
if( x >= imgWidth || y >= imgHeight )
return;
T px = input[ y * imgWidth + x ];
const float alpha = color.w / 255.0f;
const float ialph = 1.0f - alpha;
px.x = alpha * color.x + ialph * px.x;
px.y = alpha * color.y + ialph * px.y;
px.z = alpha * color.z + ialph * px.z;
output[y * imgWidth + x] = px;
}
template<typename T>
hipError_t launchDetectionOverlay( T* input, T* output, uint32_t width, uint32_t height, detectNet::Detection* detections, int numDetections, float4* colors )
{
if( !input || !output || width == 0 || height == 0 || !detections || numDetections == 0 || !colors )
return hipErrorInvalidValue;
// if input and output are the same image, then we can use the faster method
// which draws 1 box per kernel, but doesn't copy pixels that aren't inside boxes
if( input == output )
{
for( int n=0; n < numDetections; n++ )
{
const int boxWidth = (int)detections[n].Width();
const int boxHeight = (int)detections[n].Height();
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(boxWidth,blockDim.x), iDivUp(boxHeight,blockDim.y));
hipLaunchKernelGGL(( gpuDetectionOverlayBox<T>), dim3(gridDim), dim3(blockDim), 0, 0, input, output, width, height, (int)detections[n].Left, (int)detections[n].Top, boxWidth, boxHeight, colors[detections[n].ClassID]);
}
}
else
{
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y));
hipLaunchKernelGGL(( gpuDetectionOverlay<T>), dim3(gridDim), dim3(blockDim), 0, 0, input, output, width, height, detections, numDetections, colors);
}
return hipGetLastError();
}
hipError_t cudaDetectionOverlay( void* input, void* output, uint32_t width, uint32_t height, imageFormat format, detectNet::Detection* detections, int numDetections, float4* colors )
{
if( format == IMAGE_RGB8 )
return launchDetectionOverlay<uchar3>((uchar3*)input, (uchar3*)output, width, height, detections, numDetections, colors);
else if( format == IMAGE_RGBA8 )
return launchDetectionOverlay<uchar4>((uchar4*)input, (uchar4*)output, width, height, detections, numDetections, colors);
else if( format == IMAGE_RGB32F )
return launchDetectionOverlay<float3>((float3*)input, (float3*)output, width, height, detections, numDetections, colors);
else if( format == IMAGE_RGBA32F )
return launchDetectionOverlay<float4>((float4*)input, (float4*)output, width, height, detections, numDetections, colors);
else
return hipErrorInvalidValue;
}
| 77d709b50a018e1f6f91c4363d647e00b86cc23c.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "detectNet.h"
#include "cudaUtility.h"
template<typename T>
__global__ void gpuDetectionOverlay( T* input, T* output, int width, int height, detectNet::Detection* detections, int numDetections, float4* colors )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= width || y >= height )
return;
const T px_in = input[ y * width + x ];
T px_out = px_in;
const float fx = x;
const float fy = y;
for( int n=0; n < numDetections; n++ )
{
const detectNet::Detection det = detections[n];
// check if this pixel is inside the bounding box
if( fx >= det.Left && fx <= det.Right && fy >= det.Top && fy <= det.Bottom )
{
const float4 color = colors[det.ClassID];
const float alpha = color.w / 255.0f;
const float ialph = 1.0f - alpha;
px_out.x = alpha * color.x + ialph * px_out.x;
px_out.y = alpha * color.y + ialph * px_out.y;
px_out.z = alpha * color.z + ialph * px_out.z;
}
}
output[y * width + x] = px_out;
}
template<typename T>
__global__ void gpuDetectionOverlayBox( T* input, T* output, int imgWidth, int imgHeight, int x0, int y0, int boxWidth, int boxHeight, const float4 color )
{
const int box_x = blockIdx.x * blockDim.x + threadIdx.x;
const int box_y = blockIdx.y * blockDim.y + threadIdx.y;
if( box_x >= boxWidth || box_y >= boxHeight )
return;
const int x = box_x + x0;
const int y = box_y + y0;
if( x >= imgWidth || y >= imgHeight )
return;
T px = input[ y * imgWidth + x ];
const float alpha = color.w / 255.0f;
const float ialph = 1.0f - alpha;
px.x = alpha * color.x + ialph * px.x;
px.y = alpha * color.y + ialph * px.y;
px.z = alpha * color.z + ialph * px.z;
output[y * imgWidth + x] = px;
}
template<typename T>
cudaError_t launchDetectionOverlay( T* input, T* output, uint32_t width, uint32_t height, detectNet::Detection* detections, int numDetections, float4* colors )
{
if( !input || !output || width == 0 || height == 0 || !detections || numDetections == 0 || !colors )
return cudaErrorInvalidValue;
// if input and output are the same image, then we can use the faster method
// which draws 1 box per kernel, but doesn't copy pixels that aren't inside boxes
if( input == output )
{
for( int n=0; n < numDetections; n++ )
{
const int boxWidth = (int)detections[n].Width();
const int boxHeight = (int)detections[n].Height();
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(boxWidth,blockDim.x), iDivUp(boxHeight,blockDim.y));
gpuDetectionOverlayBox<T><<<gridDim, blockDim>>>(input, output, width, height, (int)detections[n].Left, (int)detections[n].Top, boxWidth, boxHeight, colors[detections[n].ClassID]);
}
}
else
{
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y));
gpuDetectionOverlay<T><<<gridDim, blockDim>>>(input, output, width, height, detections, numDetections, colors);
}
return cudaGetLastError();
}
cudaError_t cudaDetectionOverlay( void* input, void* output, uint32_t width, uint32_t height, imageFormat format, detectNet::Detection* detections, int numDetections, float4* colors )
{
if( format == IMAGE_RGB8 )
return launchDetectionOverlay<uchar3>((uchar3*)input, (uchar3*)output, width, height, detections, numDetections, colors);
else if( format == IMAGE_RGBA8 )
return launchDetectionOverlay<uchar4>((uchar4*)input, (uchar4*)output, width, height, detections, numDetections, colors);
else if( format == IMAGE_RGB32F )
return launchDetectionOverlay<float3>((float3*)input, (float3*)output, width, height, detections, numDetections, colors);
else if( format == IMAGE_RGBA32F )
return launchDetectionOverlay<float4>((float4*)input, (float4*)output, width, height, detections, numDetections, colors);
else
return cudaErrorInvalidValue;
}
|
87661adbc5c64b27725c675d698d56726cf86c3b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hipsparse.h>
#include <rocblas.h>
using namespace std;
#ifdef SINGLE
typedef float real2;
#else
typedef double real2;
#endif
extern int *rowStarts, *col, NN, solverIterMax, solverIter, bigNumber;
extern double solverTol, solverNorm;
extern real2 *u, *val, *F;
time_t start, end;
//-------------------------------------------------------------------------
void CUSPARSEsolver()
//-------------------------------------------------------------------------
{
int *d_col, *d_row;
real2 a, b, r0, r1;
real2 *d_val, *d_x;
real2 *d_r, *d_p, *d_Ax;
int i, k;
real2 *val_real2, *F_real2;
//-------------------------------------------------------------------------------
// Converting val and F values from double to real2
val_real2 = new real2[rowStarts[NN]];
for(i=0; i<rowStarts[NN]; i++) {
val_real2[i] = real2(val[i]);
}
F_real2 = new real2[NN];
for(i=0; i<NN; i++) {
F_real2[i] = real2(F[i])*bigNumber;
}
//-------------------------------------------------------------------------------
hipsparseHandle_t handle = 0;
hipsparseStatus_t status;
status = hipsparseCreate(&handle);
if (status != HIPSPARSE_STATUS_SUCCESS) {
fprintf( stderr, "!!!! CUSPARSE initialization error\n" );
}
hipsparseMatDescr_t descr = 0;
status = hipsparseCreateMatDescr(&descr);
if (status != HIPSPARSE_STATUS_SUCCESS) {
fprintf( stderr, "!!!! CUSPARSE hipsparseCreateMatDescr error\n" );
}
hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
for(i=0; i<NN; i++) {
u[i] = 0.0;
}
hipMalloc((void**)&d_col, (rowStarts[NN])*sizeof(int));
hipMalloc((void**)&d_row, (NN+1)*sizeof(int));
hipMalloc((void**)&d_val, (rowStarts[NN])*sizeof(real2));
hipMalloc((void**)&d_x, NN*sizeof(real2));
hipMalloc((void**)&d_r, NN*sizeof(real2));
hipMalloc((void**)&d_p, NN*sizeof(real2));
hipMalloc((void**)&d_Ax, NN*sizeof(real2));
hipMemcpy(d_col, col, (rowStarts[NN])*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_row, rowStarts, (NN+1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_val, val_real2, (rowStarts[NN])*sizeof(real2), hipMemcpyHostToDevice);
hipMemcpy(d_x, u, NN*sizeof(real2), hipMemcpyHostToDevice);
hipMemcpy(d_r, F_real2, NN*sizeof(real2), hipMemcpyHostToDevice);
#ifdef SINGLE
hipsparseScsrmv(handle,HIPSPARSE_OPERATION_NON_TRANSPOSE, NN, NN, 1.0, descr, d_val, d_row, d_col, d_x, 0.0, d_Ax);
hipblasSaxpy(NN, -1.0, d_Ax, 1, d_r, 1);
r1 = hipblasSdot(NN, d_r, 1, d_r, 1);
#else
hipsparseDcsrmv(handle,HIPSPARSE_OPERATION_NON_TRANSPOSE, NN, NN, 1.0, descr, d_val, d_row, d_col, d_x, 0.0, d_Ax);
hipblasDaxpy(NN, -1.0, d_Ax, 1, d_r, 1);
r1 = hipblasDdot(NN, d_r, 1, d_r, 1);
#endif
r0=0;
k = 1;
while (r1 > solverTol*solverTol && k <= solverIterMax) {
if (k > 1) {
b = r1 / r0;
#ifdef SINGLE
hipblasSscal(NN, b, d_p, 1);
hipblasSaxpy(NN, 1.0, d_r, 1, d_p, 1);
#else
hipblasDscal(NN, b, d_p, 1);
hipblasDaxpy(NN, 1.0, d_r, 1, d_p, 1);
#endif
} else {
#ifdef SINGLE
hipblasScopy(NN, d_r, 1, d_p, 1);
#else
hipblasDcopy(NN, d_r, 1, d_p, 1);
#endif
}
#ifdef SINGLE
hipsparseScsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, NN, NN, 1.0, descr, d_val, d_row, d_col, d_p, 0.0, d_Ax);
a = r1 / hipblasSdot(NN, d_p, 1, d_Ax, 1);
hipblasSaxpy(NN, a, d_p, 1, d_x, 1);
hipblasSaxpy(NN, -a, d_Ax, 1, d_r, 1);
r0 = r1;
r1 = hipblasSdot(NN, d_r, 1, d_r, 1);
#else
hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, NN, NN, 1.0, descr, d_val, d_row, d_col, d_p, 0.0, d_Ax);
a = r1 / hipblasDdot(NN, d_p, 1, d_Ax, 1);
hipblasDaxpy(NN, a, d_p, 1, d_x, 1);
hipblasDaxpy(NN, -a, d_Ax, 1, d_r, 1);
r0 = r1;
r1 = hipblasDdot(NN, d_r, 1, d_r, 1);
#endif
hipDeviceSynchronize();
k++;
}
//-------------------------------------------------------------------------------
// Writes CG solution answers
hipMemcpy(u, d_x, (NN)*sizeof(real2), hipMemcpyDeviceToHost);
cout << endl;
//cout << endl;
//for(i=0; i<NN; i++) {
// printf("%f \n", u[i]);
//}
//cout << endl;
cout <<"number of iterations: "<< k << endl;
//-------------------------------------------------------------------------------
hipsparseDestroy(handle);
hipFree(d_col);
hipFree(d_row);
hipFree(d_val);
hipFree(d_x);
hipFree(d_r);
hipFree(d_p);
hipFree(d_Ax);
}
| 87661adbc5c64b27725c675d698d56726cf86c3b.cu | #include <stdio.h>
#include <iostream>
#include <cusparse.h>
#include <cublas.h>
using namespace std;
#ifdef SINGLE
typedef float real2;
#else
typedef double real2;
#endif
extern int *rowStarts, *col, NN, solverIterMax, solverIter, bigNumber;
extern double solverTol, solverNorm;
extern real2 *u, *val, *F;
time_t start, end;
//-------------------------------------------------------------------------
void CUSPARSEsolver()
//-------------------------------------------------------------------------
{
int *d_col, *d_row;
real2 a, b, r0, r1;
real2 *d_val, *d_x;
real2 *d_r, *d_p, *d_Ax;
int i, k;
real2 *val_real2, *F_real2;
//-------------------------------------------------------------------------------
// Converting val and F values from double to real2
val_real2 = new real2[rowStarts[NN]];
for(i=0; i<rowStarts[NN]; i++) {
val_real2[i] = real2(val[i]);
}
F_real2 = new real2[NN];
for(i=0; i<NN; i++) {
F_real2[i] = real2(F[i])*bigNumber;
}
//-------------------------------------------------------------------------------
cusparseHandle_t handle = 0;
cusparseStatus_t status;
status = cusparseCreate(&handle);
if (status != CUSPARSE_STATUS_SUCCESS) {
fprintf( stderr, "!!!! CUSPARSE initialization error\n" );
}
cusparseMatDescr_t descr = 0;
status = cusparseCreateMatDescr(&descr);
if (status != CUSPARSE_STATUS_SUCCESS) {
fprintf( stderr, "!!!! CUSPARSE cusparseCreateMatDescr error\n" );
}
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
for(i=0; i<NN; i++) {
u[i] = 0.0;
}
cudaMalloc((void**)&d_col, (rowStarts[NN])*sizeof(int));
cudaMalloc((void**)&d_row, (NN+1)*sizeof(int));
cudaMalloc((void**)&d_val, (rowStarts[NN])*sizeof(real2));
cudaMalloc((void**)&d_x, NN*sizeof(real2));
cudaMalloc((void**)&d_r, NN*sizeof(real2));
cudaMalloc((void**)&d_p, NN*sizeof(real2));
cudaMalloc((void**)&d_Ax, NN*sizeof(real2));
cudaMemcpy(d_col, col, (rowStarts[NN])*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_row, rowStarts, (NN+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_val, val_real2, (rowStarts[NN])*sizeof(real2), cudaMemcpyHostToDevice);
cudaMemcpy(d_x, u, NN*sizeof(real2), cudaMemcpyHostToDevice);
cudaMemcpy(d_r, F_real2, NN*sizeof(real2), cudaMemcpyHostToDevice);
#ifdef SINGLE
cusparseScsrmv(handle,CUSPARSE_OPERATION_NON_TRANSPOSE, NN, NN, 1.0, descr, d_val, d_row, d_col, d_x, 0.0, d_Ax);
cublasSaxpy(NN, -1.0, d_Ax, 1, d_r, 1);
r1 = cublasSdot(NN, d_r, 1, d_r, 1);
#else
cusparseDcsrmv(handle,CUSPARSE_OPERATION_NON_TRANSPOSE, NN, NN, 1.0, descr, d_val, d_row, d_col, d_x, 0.0, d_Ax);
cublasDaxpy(NN, -1.0, d_Ax, 1, d_r, 1);
r1 = cublasDdot(NN, d_r, 1, d_r, 1);
#endif
r0=0;
k = 1;
while (r1 > solverTol*solverTol && k <= solverIterMax) {
if (k > 1) {
b = r1 / r0;
#ifdef SINGLE
cublasSscal(NN, b, d_p, 1);
cublasSaxpy(NN, 1.0, d_r, 1, d_p, 1);
#else
cublasDscal(NN, b, d_p, 1);
cublasDaxpy(NN, 1.0, d_r, 1, d_p, 1);
#endif
} else {
#ifdef SINGLE
cublasScopy(NN, d_r, 1, d_p, 1);
#else
cublasDcopy(NN, d_r, 1, d_p, 1);
#endif
}
#ifdef SINGLE
cusparseScsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, NN, NN, 1.0, descr, d_val, d_row, d_col, d_p, 0.0, d_Ax);
a = r1 / cublasSdot(NN, d_p, 1, d_Ax, 1);
cublasSaxpy(NN, a, d_p, 1, d_x, 1);
cublasSaxpy(NN, -a, d_Ax, 1, d_r, 1);
r0 = r1;
r1 = cublasSdot(NN, d_r, 1, d_r, 1);
#else
cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, NN, NN, 1.0, descr, d_val, d_row, d_col, d_p, 0.0, d_Ax);
a = r1 / cublasDdot(NN, d_p, 1, d_Ax, 1);
cublasDaxpy(NN, a, d_p, 1, d_x, 1);
cublasDaxpy(NN, -a, d_Ax, 1, d_r, 1);
r0 = r1;
r1 = cublasDdot(NN, d_r, 1, d_r, 1);
#endif
cudaThreadSynchronize();
k++;
}
//-------------------------------------------------------------------------------
// Writes CG solution answers
cudaMemcpy(u, d_x, (NN)*sizeof(real2), cudaMemcpyDeviceToHost);
cout << endl;
//cout << endl;
//for(i=0; i<NN; i++) {
// printf("%f \n", u[i]);
//}
//cout << endl;
cout <<"number of iterations: "<< k << endl;
//-------------------------------------------------------------------------------
cusparseDestroy(handle);
cudaFree(d_col);
cudaFree(d_row);
cudaFree(d_val);
cudaFree(d_x);
cudaFree(d_r);
cudaFree(d_p);
cudaFree(d_Ax);
}
|
9c1d610c87db424d6aa3a910a8c701cf8d9f0c9b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by jing on 2018/7/1.
//
// add vscode sup
#include "cuckoo.h"
#include <assert.h>
#include <device_launch_parameters.h>
#include "api.h"
// Supported operations
#define ADD (0)
#define DELETE (1)
#define SEARCH (2)
#define debug_num 30
#define single_BUCKET 15629
/// hash table
__constant__ cuckoo table;
#define get_table_length(i) get_table_bucket_length(i)
#define get_table_bucket_length(i) (table.Lsize[i]/BUCKET_SIZE)
/// Lsize0 is the biggest
#define Lock_pos(num,hash) ((num) * (get_table_length(0)) + hash)
#define parameter_of_hash_function_a(num) (table.hash_fun[num].x)
#define parameter_of_hash_function_b(num) (table.hash_fun[num].y)
/// hash functiong
__device__ __forceinline__ TYPE
get_next_loc(TYPE k,
TYPE num_table)
{
return ( k^ parameter_of_hash_function_a(num_table)
+ parameter_of_hash_function_b(num_table)
) % PRIME_uint
% get_table_length(num_table);
}
/// for debug
__device__ void pbucket(bucket *b,int num,int hash,int t_size)
{
printf("table.%d,%d/%d \n",num,hash,t_size);
for(int i=0;i<BUCKET_SIZE;i++){
if(i%8==0) printf("\n\t");
printf("%d,%d ",b->key[i],b->value[i]);
}
printf("\n");
}
__global__ void
cuckoo_insert(TYPE* key, /// key to insert
TYPE* value, /// value to insert
TYPE size, /// insert size
int* resize) /// insert error?
{
// insertok=0;
// inserterror=0;
*resize = 0;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
/// for every k
#if head_info_debug
if(tid==0) {
printf(">>>insert kernel:\n>>>size:%d \n", size);
printf(">>>s_size:t1:%d, t2:%d, t3:%d, t4:%d\n",
table.Lsize[0], table.Lsize[1], table.Lsize[2], table.Lsize[3]);
}
#endif
/// warp cooperation
int lan_id = threadIdx.x & 0x0000001f;
int warp_num_in_block = threadIdx.x >> 5;
volatile __shared__ int warp[(THREAD_NUM) >> 5];
/// 0 work_over,1 work to be done ,2 work error
/// this can bu used to accelerate resize by long chain
int is_active=0;
/// work kv
TYPE work_k, work_v;
/// leader thread num
int lead_thread_num;
/// keep kv every thread
TYPE myk, myv;
/// add when evict ,set to 0 when exist or null
TYPE evict_time_of_one_thread = 0;
/// for insert
int hash;
TYPE operator_hash_table_num = 0;
/// using for ballot & CAS
int tmp;
/// ((size+31)>>5)<<5 :keep a warp to active
while ( tid < (((size + 31) >> 5) << 5) ) {
#if MAX_ITERATOR_over_to_break_insert
if(*resize==1)
break;
#endif
evict_time_of_one_thread = 0;
/// read data to insert
if (tid < size) {
is_active = 1;/// mark for work
myk = key[tid];
myv = value[tid];
}
/// if key==0 , not need insert
is_active= (myk==0) ? 0:is_active;
/// while have work to do
while (__any(is_active == 1)) {
/// reisze break line:122 can be here , some kv may disappear
work_k = myk;
work_v = myv;
/// step1 start voting ==================================
/// if there is one active thread , work
if (is_active == 1 ){//&& warp[warp_num_in_block] != lan_id) {
warp[warp_num_in_block] = lan_id;
}
lead_thread_num = warp[warp_num_in_block];
/// step2 broadcast ====================================
work_k = __shfl(work_k, lead_thread_num);
work_v = __shfl(work_v, lead_thread_num);
#if insert_debug
if(lan_id==0 && work_k==debug_num)
printf("2-kv: %d ,%d \n",work_k,work_v);
#endif
/// step3 insert to the table. ===========================
operator_hash_table_num++;
operator_hash_table_num %= TABLE_NUM;
hash = get_next_loc(work_k, operator_hash_table_num);
/// step3.1 lock & un compress TODO: compress
/// lock ,otherwise revote
if (lan_id == lead_thread_num) {
/// TODO: different length need to sum ,tmp using double length
/// tmp 0: free 1: busy
tmp = atomicCAS(&(table.Lock[Lock_pos(operator_hash_table_num, hash)]), 0, 1);
}//end if
tmp = __shfl(tmp, lead_thread_num);
if (tmp == 1){
// revoting
continue;
}
#if insert_debug
if(lan_id==0 && work_k==debug_num)
printf("cas-kv: %d ,%d ,cas:%d \n",work_k,work_v,tmp);
#endif
/// block
bucket *b = &(table.table[operator_hash_table_num][hash]);
/// step3.2 check exist & insert
tmp = __ballot(b->key[lan_id] == work_k);
if (tmp != 0) { /// update
if (lan_id == lead_thread_num) {
/// update value
//printf("update value,%d %d->%d ,tmpk %d\n",myk,b->value[__ffs(tmp) - 1],myv,b->key[__ffs(tmp) - 1]);
b->value[__ffs(tmp) - 1] = myv;
is_active = 0;
evict_time_of_one_thread = 0;
}// end if ,upadte
/// TODO: the lock free: one thread / all thread
table.Lock[Lock_pos(operator_hash_table_num, hash)] = 0;
continue;
}//end check update
/// step3.3 check null & insert
tmp = __ballot(b->key[lan_id] == 0);
#if insert_debug
if(lan_id==0 && work_k==debug_num) {
printf("start-null-kv: %d ,%d ,ballot:%x \n", work_k, work_v, tmp);
pbucket(b, operator_hash_table_num, hash, get_table_length(operator_hash_table_num));
}
#endif
if (tmp != 0) {
/// set kv
if (lan_id == __ffs(tmp) - 1) {
b->key[lan_id] = work_k;
b->value[lan_id] = work_v;
}// insert
/// mark active false
if (lan_id == lead_thread_num) {
evict_time_of_one_thread = 0;
is_active = 0;
table.Lock[Lock_pos(operator_hash_table_num, hash)] = 0;
}
/// insert ok ,
continue;
}/// null insert over
#if insert_debug
if(lan_id==0 && work_k==debug_num)
printf("evict-kv: %d ,%d ,ballot:%x \n",work_k,work_v,tmp);
#endif
/// step3.4 other,we need cuckoo evict
if (lan_id == lead_thread_num) {
/// choose pos:lan_id evict ,TODO: choose rand?
myk = b->key[lan_id];
myv = b->value[lan_id];
b->key[lan_id] = work_k;
b->value[lan_id] = work_v;
evict_time_of_one_thread++;
/// when one always get leader , mark rehash
/// check long chain
if (evict_time_of_one_thread >= MAX_ITERATOR) {
#if record_cannot_insert_num
atomicAdd(resize,1);
printf(">>>evict_time_of_one_thread,need resize:%d,tid:%d\n",*resize,tid);
#else
*resize=1;
printf("need resize\n");
#endif
evict_time_of_one_thread = 0;
is_active = 2;
}
} // evict
table.Lock[Lock_pos(operator_hash_table_num, hash)] = 0;
}//while any owrk
#if MAX_ITERATOR_over_to_break_insert
key[tid] = (is_active==2) ? myk : 0;
key[tid] = (is_active==2) ? myv : 0;
#endif
tid += BLOCK_NUM * THREAD_NUM;
}//while size
}//cucukoo insert
__global__ void
cuckoo_search(TYPE* key, /// key to s
TYPE* value, /// value to key
TYPE size) /// s size
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
/// for every k
#if head_info_debug
if(tid==0) {
printf(">>>search kernel:\n>>>size:%d \n", size);
printf(">>>s_size:t1:%d, t2:%d, t3:%d, t4:%d\n",
table.Lsize[0], table.Lsize[1], table.Lsize[2], table.Lsize[3]);
}
#endif
int lan_id = threadIdx.x & 0x0000001f;
int warp_num_in_block = threadIdx.x >> 5;
volatile __shared__ int warp[( THREAD_NUM)>>5 ];
TYPE myk;
TYPE myv;
int is_active;
TYPE work_k = 0;
/// for search
int hash;
int operator_hash_table_num;
int ballot;
bucket *b;
/// ((size+31)>>5)<<5 :keep a warp to active
while ( tid < (((size + 31) >> 5) << 5) ) {
if(tid<size) {
myk = key[tid];
is_active = 1;/// mark for work
}
is_active = (myk==0) ? 0 : is_active;
/// while have work to do
while (__any(is_active != 0)) {
operator_hash_table_num=0;
/// step1 start voting ==================================
if (is_active != 0)
warp[warp_num_in_block] = lan_id;
#if search_debug
if(lan_id==0)
printf("voting: %d\t",warp[warp_num_in_block] );
#endif
work_k = myk;
/// step2 broadcast ====================================
work_k=__shfl(work_k, warp[warp_num_in_block]);
/// step3 find in 5 table ===========================
/// find null or too long
for (int i = 0; i < TABLE_NUM; i++) {
operator_hash_table_num = i;
hash = get_next_loc(work_k, operator_hash_table_num);
b=&table.table[operator_hash_table_num][hash];
ballot=__ballot(b->key[lan_id]==work_k);
/// find it
if(ballot!=0){
if(lan_id==warp[warp_num_in_block]){
myv=b->value[__ffs(ballot)-1];
#if search_debug
printf("find %d: %d\n",key[tid],value[tid]);
#endif
is_active=0;
}
break;
}
}/// end for
/// can not find
if(lan_id==warp[warp_num_in_block]){
if(is_active==1) myv=2;
//printf("cannot find k: %d ,tid:%d ",myk,tid);
//pbucket(b,operator_hash_table_num,hash,get_table_length(operator_hash_table_num));
is_active=0;
}
}
value[tid]=myv;
tid += BLOCK_NUM * THREAD_NUM;
}
}//cuckoo_search
/// del and return value
__global__ void
cuckoo_delete(TYPE* key, /// key to del
TYPE* value, /// value to return
TYPE size) /// size
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
/// for every k
#if head_info_debug
if(tid==0) {
printf(">>>delete kernel:\n>>>size:%d \n", size);
printf(">>>s_size:t1:%d, t2:%d, t3:%d, t4:%d\n",
table.Lsize[0], table.Lsize[1], table.Lsize[2], table.Lsize[3]);
}
#endif
int lan_id = threadIdx.x & 0x0000001f;
int warp_num_in_block = threadIdx.x >> 5;
volatile __shared__ int warp[( THREAD_NUM)>>5 ];
TYPE myk;
TYPE myv;
int is_active;
TYPE work_k = 0;
/// for search
int hash;
int operator_hash_table_num;
int ballot;
bucket *b;
/// ((size+31)>>5)<<5 :keep a warp to active
while ( tid < (((size + 31) >> 5) << 5) ) {
if(tid<size) {
myk = key[tid];
is_active = 1;/// mark for work
}
is_active = (myk==0)? 0 : is_active;
/// while have work to do
while (__any(is_active != 0)) {
operator_hash_table_num=0;
/// step1 start voting ==================================
if (is_active != 0)
warp[warp_num_in_block] = lan_id;
#if search_debug
if(lan_id==0)
printf("voting: %d\t",warp[warp_num_in_block] );
#endif
work_k = myk;
/// step2 broadcast ====================================
work_k=__shfl(work_k, warp[warp_num_in_block]);
/// step3 find in 5 table ===========================
/// find null or too long
for (int i = 0; i < TABLE_NUM; i++) {
operator_hash_table_num = i;
hash = get_next_loc(work_k, operator_hash_table_num);
b=&table.table[operator_hash_table_num][hash];
ballot=__ballot(b->key[lan_id]==work_k);
/// find it
if(ballot!=0){
if(lan_id==warp[warp_num_in_block]){
myv=b->value[__ffs(ballot)-1];
#if search_debug
printf("find %d: %d\n",key[tid],value[tid]);
#endif
///step3.1 if find, set to zero ===========================
b->key[__ffs(ballot)-1]=0;
is_active=0;
}
break;
}
}/// end for
/// can not find
if(lan_id==warp[warp_num_in_block]){
is_active=0;
}
}
value[tid]=myv;
tid += BLOCK_NUM * THREAD_NUM;
}
}//cuckoo_delete
void __global__
cuckoo_resize_up(bucket* old_table, /// new table has been set to table
int old_size,
TYPE num_table_to_resize) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int warp_num_in_all = tid >> 5;
int lan_id = tid & 0x1f;
/// take kv to insert
TYPE key, value;
int hash;
/// hold old one bucket to op
bucket *b;
///step1 ======================
bucket *new_table = table.table[num_table_to_resize];
///step2 warpbucket ======================
old_size /= BUCKET_SIZE;
while (warp_num_in_all < old_size) {
///step2.1 bucket ======================
b = &old_table[warp_num_in_all];
///step2.2 bucket======================
key = b->key[lan_id];
value = b->value[lan_id];
if (key != 0) {
/// how to use tid & hash fun
hash = get_next_loc(key, num_table_to_resize);
new_table[hash].key[lan_id] = key;
new_table[hash].value[lan_id] = value;
}
tid += BLOCK_NUM * THREAD_NUM;
warp_num_in_all = tid >> 5;
}
}//cuckoo_resize_up
void __global__
cuckoo_resize_down(bucket* old_table, /// small
int old_size,
int num_table_to_resize) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
#if head_info_debug
if(tid==0) {
printf(">>>down_size kernel: %d->%d\n",old_size,table.Lsize[num_table_to_resize]);
printf(">>>s_size:t1:%d, t2:%d, t3:%d, t4:%d\n",
table.Lsize[0], table.Lsize[1], table.Lsize[2], table.Lsize[3]);
}
#endif
bucket *b=NULL;
bucket *des_b=NULL;
/// take kv to insert
TYPE key, value;
/// insert position
int hash;
int new_bucket_size = table.Lsize[num_table_to_resize] / BUCKET_SIZE;
/// warp coopration
int warp_num_in_all = tid >> 5;
int warp_num_in_block = threadIdx.x >> 5;
int lan_id = tid & 0x1f;
int is_active;
int ballot;
/// in block , for voting
volatile __shared__ int warp[(THREAD_NUM) >> 5];
///step1 ======================
///
bucket *new_table = table.table[num_table_to_resize];
/// end ,next : old->new
#if down_size_debug
if (tid==0)
printf("step start \n");
#endif
///step2 warp2bucket->bucket ======================
/// tid tid+new_bucket_size bucket tid bucket
/// PROBLEM new_bucket_size * 2 = old_size (api.cpp line 47)
/// old_size downsize
/// PROBLEM: bucketbucket
///
///
/// 1. add
/// 2. kv scanshared
/// 3. warp
/// one thread one block
while (warp_num_in_all < new_bucket_size) { /// new size is smaller
///step2.1 bucket ======================
/// warp_num_in_all is hash_value
des_b = &new_table[warp_num_in_all];
#if down_size_debug
if (tid==0)
printf("step2.1 start \n");
#endif
///step2.2 bucket ======================
/// bucketkv
b = &old_table[warp_num_in_all];
key = b->key[lan_id];
value = b->value[lan_id];
#if down_size_debug
if(tid==0){
printf("old table1\n");
pbucket(b,0,0,0);
}
if(warp_num_in_all==0)
printf("b1-%d: %d,%d\n",lan_id,key,value);
#endif
int crose_lan_id=31-lan_id;
/// kvbucket
b = &old_table[warp_num_in_all + new_bucket_size];
if (key == 0) {
key = b->key[crose_lan_id];
value = b->value[crose_lan_id];
}
///bucket
#if down_size_debug
if(tid==0){
printf("old table2\n");
pbucket(b,0,0,0);
}
if(warp_num_in_all==0)
printf("b1-%d: %d,%d\n",lan_id,key,value);
#endif
///step2.3 kv=====================
des_b->key[lan_id] = key;
des_b->value[lan_id] = value;
#if down_size_debug || down_size_cas_insert_debug
if(tid==0)
printf("write\n");
if(tid==0)
pbucket(des_b,num_table_to_resize,tid,get_table_length(num_table_to_resize));
#endif
is_active=0;
///step2.4 bucketkv ======================
if (key != b->key[crose_lan_id] ///
&& b->key[crose_lan_id] !=0) ///
{
key = b->key[crose_lan_id];
value = b->value[crose_lan_id];
is_active = 1;
}
#if down_size_debug || down_size_cas_insert_debug
if(warp_num_in_block==0)
printf("b1-%d: %d,%d - %d\n",lan_id,key,value,is_active);
#endif
///step2.5 bucketkv======================
/// PROBLEM: how about skip step2.5 use step3 directly
/// scan
///
ballot = __ballot(des_b->key[lan_id] == 0);
#if down_size_debug
if( tid==0 && ballot == 0 )
printf("step 2.5 , full\n");
#endif
while (__any(des_b->key[lan_id] == 0)) {
#if down_size_debug
if(tid==0)
printf("step 2.5 \n");
#endif
if(!__any(is_active==1)) break;
#if down_size_debug
if(tid==0)
pbucket(des_b,num_table_to_resize,tid,get_table_length(num_table_to_resize));
#endif
///
ballot = __ballot(des_b->key[lan_id] == 0);
/// use hash as tmp to decrease register
///
hash = __ffs(ballot) - 1;
///
if (is_active == 1)
warp[warp_num_in_block] = lan_id;
/// insert
if (warp[warp_num_in_block] == lan_id) {
des_b->key[hash] = key;
des_b->value[hash] = value;
is_active=0;
}
}
///step 3 ======================
#if down_size_debug || down_size_cas_insert_debug
if(tid==0)
printf("after2.5 start3\n");
if(tid==0)
pbucket(des_b,num_table_to_resize,tid,get_table_length(num_table_to_resize));
#endif
/// key value has kv to insert
TYPE work_k,work_v;
int operator_hash_table_num=0;
int lead_thread_num;
#if down_size_cas_insert_debug
if(warp_num_in_all==0) {
printf("b1-%d: %d,%d - %d\n",lan_id,key,value,is_active);
}
#endif
while (__any(is_active != 0)) {
/// using logic of cuckoo_insert (__global__)
/// how to reuse the code ?
/// TODO , check too long evict
work_k = key;
work_v = value;
/// step3.1 start voting ==================================
if (is_active != 0)//&& warp[warp_num_in_block]!=lan_id )
warp[warp_num_in_block] = lan_id;
/// leader is lead_thread_num
lead_thread_num = warp[warp_num_in_block];
/// step3.2 broadcast ====================================
work_k = __shfl(work_k, lead_thread_num);
work_v = __shfl(work_v, lead_thread_num);
/// step3.3 insert to the table. ===========================
operator_hash_table_num ++;
/// donot insert to table:num_table_to_resize full
if (operator_hash_table_num==num_table_to_resize ) {
operator_hash_table_num++;
}
operator_hash_table_num %= TABLE_NUM;
hash = get_next_loc(work_k, operator_hash_table_num);
/// step3.4 lock TODO: compress ===========================
/// using ballot as tmp to decrease register
/// lock ,otherwise revote
if (lan_id == lead_thread_num) {
/// TODO: different length need to sum ,tmp using double length
ballot = atomicCAS(&(table.Lock[Lock_pos(operator_hash_table_num,hash)]), 0, 1);
}//end if
ballot = __shfl(ballot, lead_thread_num);
if (ballot == 1)
continue;
b = &(table.table[operator_hash_table_num][hash]);
#if down_size_cas_insert_debug
ballot=__ballot(is_active==1);
if(warp_num_in_block==0 && lan_id==0){
printf("\n\nactive ballot:%x kv %d,%d lead_thread_num:%d\n",
ballot,work_k,work_v,lead_thread_num );
pbucket(b,operator_hash_table_num,hash,get_table_length(operator_hash_table_num));
}
#endif
/// step3.5 check exist & insert
ballot = __ballot(b->key[lan_id] == work_k);
if (ballot != 0) { /// update
if (lan_id == lead_thread_num) {
b->value[__ffs(ballot) - 1] = value;
is_active = 0;
#if down_size_cas_insert_debug
if(warp_num_in_block==0) {
printf("exit after insert \n");
pbucket(b, operator_hash_table_num, hash, get_table_length(operator_hash_table_num));
}
#endif
table.Lock[Lock_pos(operator_hash_table_num,hash)] = 0;
}// end if ,upadte
continue;
}//end check update
/// step3.6 check null & insert
ballot = __ballot(b->key[lan_id] == 0);
#if down_size_cas_insert_debug
if(warp_num_in_block==0) printf("%d,",lan_id);
if(tid==0){
printf("\n\nnull ballot:%x kv %d,%d lead_thread_num:%d \n",
ballot,work_k,work_v,lead_thread_num);
}
#endif
if (ballot != 0) {
/// set kv
if (lan_id == __ffs(ballot) - 1) {
b->key[lan_id] = work_k;
b->value[lan_id] = work_v;
/// free
table.Lock[Lock_pos(operator_hash_table_num,hash)] = 0;
#if down_size_cas_insert_debug
if(warp_num_in_block==0) {
printf("null after insert \n");
pbucket(b, operator_hash_table_num, hash, get_table_length(operator_hash_table_num));
}
#endif
}// insert
/// mark active false
if (lan_id == lead_thread_num)
is_active = 0;
continue;
}/// null insert over
/// step3.7 other,we need cuckoo evict
if (lan_id == lead_thread_num){
key = b->key[lan_id];
value = b->value[lan_id];
b->key[lan_id] = work_k;
b->value[lan_id] = work_v;
#if down_size_cas_insert_debug
if(warp_num_in_block==0) {
printf("evict after insert \n");
pbucket(b, operator_hash_table_num, hash, get_table_length(operator_hash_table_num));
}
#endif
table.Lock[Lock_pos(operator_hash_table_num,hash)] = 0;
} // evict
}
/// TODO:auto configure ,what should be add to tid
tid += BLOCK_NUM * THREAD_NUM;
warp_num_in_all = tid >> 5;
}
}//cuckoo_resize_down
int choose_block_num(TYPE size);
void GPU_cuckoo_resize_up(int num_table_to_resize,
TYPE old_size,
bucket* new_table,
cuckoo *h_table)
{
checkCudaErrors(hipGetLastError());
TYPE new_size=old_size*2;
/// set table & size it needed
bucket* old_table=h_table->table[num_table_to_resize];
h_table->Lsize[num_table_to_resize]=new_size;
h_table->table[num_table_to_resize]=new_table;
hipMemcpyToSymbol(table,h_table,sizeof(cuckoo));
/// TODO: auto configure
/// kernel Configuration
dim3 block=choose_block_num(old_size);
/// kernel launch
GpuTimer timer;
timer.Start();
hipLaunchKernelGGL(( cuckoo_resize_up), dim3(block),dim3(THREAD_NUM), 0, 0, old_table,old_size,num_table_to_resize);
timer.Stop();
double diff = timer.Elapsed()*1000000;
printf("kernel <<<rehash>>>the time is %.2lf us, ( %.2f Mops)s\n",
(double)diff, (double)(new_size) / diff);
}//GPU_cuckoo_resize_up
void GPU_cuckoo_resize_down(int num_table_to_resize,
TYPE old_size,
bucket* new_table,
cuckoo *h_table)
{
/// bucket to size : << 5
int new_size=((get_table_bucket_size(num_table_to_resize)+1)/2) << 5;
//printf("down_size : %d : szie%d->%d.",num_table_to_resize,old_size,new_size);
/// set table & size it needed
bucket* old_table=h_table->table[num_table_to_resize];
h_table->Lsize[num_table_to_resize]=new_size;
h_table->table[num_table_to_resize]=new_table;
hipMemcpyToSymbol(table,h_table,sizeof(cuckoo));
dim3 block=choose_block_num(old_size);
/// kernel launch
hipLaunchKernelGGL(( cuckoo_resize_down), dim3(block),dim3(THREAD_NUM), 0, 0, old_table,old_size,num_table_to_resize);
}//GPU_cuckoo_resize_down
/// show table by key,value
__global__ void show_table() {
if (blockIdx.x * blockDim.x + threadIdx.x > 0) return;
/// i is the table num
for (int i = 0; i < TABLE_NUM; i++) {
printf("\n\n\ntable:%d\n", i);
/// j is the bucket num
for (int j = 0; j < get_table_length(i); j++) {
printf("bucket:%d\n", j);
/// t is every slot(one bucket has 32 slot)
for (int t = 0; t < BUCKET_SIZE; t++) {
/// 8 slot a line
if (t % 8 == 0) printf("\n\t\t");
printf(" %d,%d ", table.table[i][j].key[t], table.table[i][j].value[t]);
}
printf("\n");
}
}
}
void GPU_show_table(){
hipLaunchKernelGGL(( show_table), dim3(1),dim3(1), 0, 0, );
}
void gpu_lp_insert(TYPE* key,
TYPE* value,
TYPE size,
int* resize)
{
dim3 block=choose_block_num(size);
GpuTimer time;
time.Start();
hipLaunchKernelGGL(( cuckoo_insert) , dim3(block), dim3(THREAD_NUM) , 0, 0, key, value, size, resize);
time.Stop();
double diff = time.Elapsed() * 1000000;
printf("kernel <<<insert>>>the time is %.2lf us ( %.2f Mops)\n",
(double) diff, (double) (size) / diff);
}//gpu_lp_insert
void gpu_lp_search(TYPE* key,
TYPE* ans,
TYPE size){
dim3 block=choose_block_num(size);
GpuTimer time;
time.Start();
hipLaunchKernelGGL(( cuckoo_search), dim3(block),dim3(THREAD_NUM), 0, 0, key,ans,size);
time.Stop();
double diff = time.Elapsed() * 1000000;
printf("kernel <<<search>>>the time is %.2lf us, ( %.2f Mops)s\n",
(double)diff, (double)(size) / diff);
// checkCudaErrors(hipGetLastError());
}
void gpu_lp_delete(TYPE* key,
TYPE* ans,
TYPE size){
dim3 block=choose_block_num(size);
GpuTimer time;
time.Start();
hipLaunchKernelGGL(( cuckoo_delete), dim3(block),dim3(THREAD_NUM), 0, 0, key,ans,size);
time.Stop();
double diff = time.Elapsed() * 1000000;
printf("delete <<<delete>>>the time is %.2lf us, ( %.2f Mops)s\n",
(double)diff, (double)(size) / diff);
// checkCudaErrors(hipGetLastError());
}
void gpu_lp_set_table(cuckoo *h_table) {
//printf("seting table\n");
hipMemcpyToSymbol(table,h_table,sizeof(cuckoo));
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
}
int choose_block_num(TYPE size){
unsigned int real_block=(size+THREAD_NUM-1)/THREAD_NUM;
/// BLOCK_NUM
int block=real_block>BLOCK_NUM ? BLOCK_NUM : real_block;
///
block=block<1?1:block;
return block;
}
| 9c1d610c87db424d6aa3a910a8c701cf8d9f0c9b.cu | //
// Created by jing on 2018/7/1.
//
// add vscode sup
#include "cuckoo.h"
#include <assert.h>
#include <device_launch_parameters.h>
#include "api.h"
// Supported operations
#define ADD (0)
#define DELETE (1)
#define SEARCH (2)
#define debug_num 30
#define single_BUCKET 15629
/// hash table
__constant__ cuckoo table;
#define get_table_length(i) get_table_bucket_length(i)
#define get_table_bucket_length(i) (table.Lsize[i]/BUCKET_SIZE)
/// Lsize0 is the biggest
#define Lock_pos(num,hash) ((num) * (get_table_length(0)) + hash)
#define parameter_of_hash_function_a(num) (table.hash_fun[num].x)
#define parameter_of_hash_function_b(num) (table.hash_fun[num].y)
/// hash functiong
__device__ __forceinline__ TYPE
get_next_loc(TYPE k,
TYPE num_table)
{
return ( k^ parameter_of_hash_function_a(num_table)
+ parameter_of_hash_function_b(num_table)
) % PRIME_uint
% get_table_length(num_table);
}
/// for debug
__device__ void pbucket(bucket *b,int num,int hash,int t_size)
{
printf("table.%d,%d/%d \n",num,hash,t_size);
for(int i=0;i<BUCKET_SIZE;i++){
if(i%8==0) printf("\n\t");
printf("%d,%d ",b->key[i],b->value[i]);
}
printf("\n");
}
__global__ void
cuckoo_insert(TYPE* key, /// key to insert
TYPE* value, /// value to insert
TYPE size, /// insert size
int* resize) /// insert error?
{
// insertok=0;
// inserterror=0;
*resize = 0;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
/// for every k
#if head_info_debug
if(tid==0) {
printf(">>>insert kernel:\n>>>size:%d \n", size);
printf(">>>s_size:t1:%d, t2:%d, t3:%d, t4:%d\n",
table.Lsize[0], table.Lsize[1], table.Lsize[2], table.Lsize[3]);
}
#endif
/// warp cooperation
int lan_id = threadIdx.x & 0x0000001f;
int warp_num_in_block = threadIdx.x >> 5;
volatile __shared__ int warp[(THREAD_NUM) >> 5];
/// 0 work_over,1 work to be done ,2 work error
/// this can bu used to accelerate resize by long chain
int is_active=0;
/// work kv
TYPE work_k, work_v;
/// leader thread num
int lead_thread_num;
/// keep kv every thread
TYPE myk, myv;
/// add when evict ,set to 0 when exist or null
TYPE evict_time_of_one_thread = 0;
/// for insert
int hash;
TYPE operator_hash_table_num = 0;
/// using for ballot & CAS
int tmp;
/// ((size+31)>>5)<<5 :keep a warp to active
while ( tid < (((size + 31) >> 5) << 5) ) {
#if MAX_ITERATOR_over_to_break_insert
if(*resize==1)
break;
#endif
evict_time_of_one_thread = 0;
/// read data to insert
if (tid < size) {
is_active = 1;/// mark for work
myk = key[tid];
myv = value[tid];
}
/// if key==0 , not need insert
is_active= (myk==0) ? 0:is_active;
/// while have work to do
while (__any(is_active == 1)) {
/// reisze break line:122 can be here , some kv may disappear
work_k = myk;
work_v = myv;
/// step1 start voting ==================================
/// if there is one active thread , work
if (is_active == 1 ){//&& warp[warp_num_in_block] != lan_id) {
warp[warp_num_in_block] = lan_id;
}
lead_thread_num = warp[warp_num_in_block];
/// step2 broadcast ====================================
work_k = __shfl(work_k, lead_thread_num);
work_v = __shfl(work_v, lead_thread_num);
#if insert_debug
if(lan_id==0 && work_k==debug_num)
printf("2-kv: %d ,%d \n",work_k,work_v);
#endif
/// step3 insert to the table. ===========================
operator_hash_table_num++;
operator_hash_table_num %= TABLE_NUM;
hash = get_next_loc(work_k, operator_hash_table_num);
/// step3.1 lock & un compress TODO: compress
/// lock ,otherwise revote
if (lan_id == lead_thread_num) {
/// TODO: different length need to sum ,tmp using double length
/// tmp 0: free 1: busy
tmp = atomicCAS(&(table.Lock[Lock_pos(operator_hash_table_num, hash)]), 0, 1);
}//end if
tmp = __shfl(tmp, lead_thread_num);
if (tmp == 1){
// revoting
continue;
}
#if insert_debug
if(lan_id==0 && work_k==debug_num)
printf("cas-kv: %d ,%d ,cas:%d \n",work_k,work_v,tmp);
#endif
/// block
bucket *b = &(table.table[operator_hash_table_num][hash]);
/// step3.2 check exist & insert
tmp = __ballot(b->key[lan_id] == work_k);
if (tmp != 0) { /// update
if (lan_id == lead_thread_num) {
/// update value
//printf("update value,%d %d->%d ,tmpk %d\n",myk,b->value[__ffs(tmp) - 1],myv,b->key[__ffs(tmp) - 1]);
b->value[__ffs(tmp) - 1] = myv;
is_active = 0;
evict_time_of_one_thread = 0;
}// end if ,upadte
/// TODO: the lock free: one thread / all thread
table.Lock[Lock_pos(operator_hash_table_num, hash)] = 0;
continue;
}//end check update
/// step3.3 check null & insert
tmp = __ballot(b->key[lan_id] == 0);
#if insert_debug
if(lan_id==0 && work_k==debug_num) {
printf("start-null-kv: %d ,%d ,ballot:%x \n", work_k, work_v, tmp);
pbucket(b, operator_hash_table_num, hash, get_table_length(operator_hash_table_num));
}
#endif
if (tmp != 0) {
/// set kv
if (lan_id == __ffs(tmp) - 1) {
b->key[lan_id] = work_k;
b->value[lan_id] = work_v;
}// insert
/// mark active false
if (lan_id == lead_thread_num) {
evict_time_of_one_thread = 0;
is_active = 0;
table.Lock[Lock_pos(operator_hash_table_num, hash)] = 0;
}
/// insert ok ,
continue;
}/// null insert over
#if insert_debug
if(lan_id==0 && work_k==debug_num)
printf("evict-kv: %d ,%d ,ballot:%x \n",work_k,work_v,tmp);
#endif
/// step3.4 other,we need cuckoo evict
if (lan_id == lead_thread_num) {
/// choose pos:lan_id evict ,TODO: choose rand?
myk = b->key[lan_id];
myv = b->value[lan_id];
b->key[lan_id] = work_k;
b->value[lan_id] = work_v;
evict_time_of_one_thread++;
/// when one always get leader , mark rehash
/// check long chain
if (evict_time_of_one_thread >= MAX_ITERATOR) {
#if record_cannot_insert_num
atomicAdd(resize,1);
printf(">>>evict_time_of_one_thread,need resize:%d,tid:%d\n",*resize,tid);
#else
*resize=1;
printf("need resize\n");
#endif
evict_time_of_one_thread = 0;
is_active = 2;
}
} // evict
table.Lock[Lock_pos(operator_hash_table_num, hash)] = 0;
}//while any owrk
#if MAX_ITERATOR_over_to_break_insert
key[tid] = (is_active==2) ? myk : 0;
key[tid] = (is_active==2) ? myv : 0;
#endif
tid += BLOCK_NUM * THREAD_NUM;
}//while size
}//cucukoo insert
__global__ void
cuckoo_search(TYPE* key, /// key to s
TYPE* value, /// value to key
TYPE size) /// s size
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
/// for every k
#if head_info_debug
if(tid==0) {
printf(">>>search kernel:\n>>>size:%d \n", size);
printf(">>>s_size:t1:%d, t2:%d, t3:%d, t4:%d\n",
table.Lsize[0], table.Lsize[1], table.Lsize[2], table.Lsize[3]);
}
#endif
int lan_id = threadIdx.x & 0x0000001f;
int warp_num_in_block = threadIdx.x >> 5;
volatile __shared__ int warp[( THREAD_NUM)>>5 ];
TYPE myk;
TYPE myv;
int is_active;
TYPE work_k = 0;
/// for search
int hash;
int operator_hash_table_num;
int ballot;
bucket *b;
/// ((size+31)>>5)<<5 :keep a warp to active
while ( tid < (((size + 31) >> 5) << 5) ) {
if(tid<size) {
myk = key[tid];
is_active = 1;/// mark for work
}
is_active = (myk==0) ? 0 : is_active;
/// while have work to do
while (__any(is_active != 0)) {
operator_hash_table_num=0;
/// step1 start voting ==================================
if (is_active != 0)
warp[warp_num_in_block] = lan_id;
#if search_debug
if(lan_id==0)
printf("voting: %d\t",warp[warp_num_in_block] );
#endif
work_k = myk;
/// step2 broadcast ====================================
work_k=__shfl(work_k, warp[warp_num_in_block]);
/// step3 find in 5 table ===========================
/// find null or too long
for (int i = 0; i < TABLE_NUM; i++) {
operator_hash_table_num = i;
hash = get_next_loc(work_k, operator_hash_table_num);
b=&table.table[operator_hash_table_num][hash];
ballot=__ballot(b->key[lan_id]==work_k);
/// find it
if(ballot!=0){
if(lan_id==warp[warp_num_in_block]){
myv=b->value[__ffs(ballot)-1];
#if search_debug
printf("find %d: %d\n",key[tid],value[tid]);
#endif
is_active=0;
}
break;
}
}/// end for
/// can not find
if(lan_id==warp[warp_num_in_block]){
if(is_active==1) myv=2;
//printf("cannot find k: %d ,tid:%d ",myk,tid);
//pbucket(b,operator_hash_table_num,hash,get_table_length(operator_hash_table_num));
is_active=0;
}
}
value[tid]=myv;
tid += BLOCK_NUM * THREAD_NUM;
}
}//cuckoo_search
/// del and return value
__global__ void
cuckoo_delete(TYPE* key, /// key to del
TYPE* value, /// value to return
TYPE size) /// size
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
/// for every k
#if head_info_debug
if(tid==0) {
printf(">>>delete kernel:\n>>>size:%d \n", size);
printf(">>>s_size:t1:%d, t2:%d, t3:%d, t4:%d\n",
table.Lsize[0], table.Lsize[1], table.Lsize[2], table.Lsize[3]);
}
#endif
int lan_id = threadIdx.x & 0x0000001f;
int warp_num_in_block = threadIdx.x >> 5;
volatile __shared__ int warp[( THREAD_NUM)>>5 ];
TYPE myk;
TYPE myv;
int is_active;
TYPE work_k = 0;
/// for search
int hash;
int operator_hash_table_num;
int ballot;
bucket *b;
/// ((size+31)>>5)<<5 :keep a warp to active
while ( tid < (((size + 31) >> 5) << 5) ) {
if(tid<size) {
myk = key[tid];
is_active = 1;/// mark for work
}
is_active = (myk==0)? 0 : is_active;
/// while have work to do
while (__any(is_active != 0)) {
operator_hash_table_num=0;
/// step1 start voting ==================================
if (is_active != 0)
warp[warp_num_in_block] = lan_id;
#if search_debug
if(lan_id==0)
printf("voting: %d\t",warp[warp_num_in_block] );
#endif
work_k = myk;
/// step2 broadcast ====================================
work_k=__shfl(work_k, warp[warp_num_in_block]);
/// step3 find in 5 table ===========================
/// find null or too long
for (int i = 0; i < TABLE_NUM; i++) {
operator_hash_table_num = i;
hash = get_next_loc(work_k, operator_hash_table_num);
b=&table.table[operator_hash_table_num][hash];
ballot=__ballot(b->key[lan_id]==work_k);
/// find it
if(ballot!=0){
if(lan_id==warp[warp_num_in_block]){
myv=b->value[__ffs(ballot)-1];
#if search_debug
printf("find %d: %d\n",key[tid],value[tid]);
#endif
///step3.1 if find, set to zero ===========================
b->key[__ffs(ballot)-1]=0;
is_active=0;
}
break;
}
}/// end for
/// can not find
if(lan_id==warp[warp_num_in_block]){
is_active=0;
}
}
value[tid]=myv;
tid += BLOCK_NUM * THREAD_NUM;
}
}//cuckoo_delete
void __global__
cuckoo_resize_up(bucket* old_table, /// new table has been set to table
int old_size,
TYPE num_table_to_resize) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int warp_num_in_all = tid >> 5;
int lan_id = tid & 0x1f;
/// take kv to insert
TYPE key, value;
int hash;
/// hold old one bucket to op
bucket *b;
///step1 取新表 ======================
bucket *new_table = table.table[num_table_to_resize];
///step2 每个warp处理一个bucket ======================
old_size /= BUCKET_SIZE;
while (warp_num_in_all < old_size) {
///step2.1 获取自己的bucket ======================
b = &old_table[warp_num_in_all];
///step2.2 对bucket中各插入对应的位置======================
key = b->key[lan_id];
value = b->value[lan_id];
if (key != 0) {
/// how to use tid & hash fun
hash = get_next_loc(key, num_table_to_resize);
new_table[hash].key[lan_id] = key;
new_table[hash].value[lan_id] = value;
}
tid += BLOCK_NUM * THREAD_NUM;
warp_num_in_all = tid >> 5;
}
}//cuckoo_resize_up
void __global__
cuckoo_resize_down(bucket* old_table, /// small
int old_size,
int num_table_to_resize) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
#if head_info_debug
if(tid==0) {
printf(">>>down_size kernel: %d->%d\n",old_size,table.Lsize[num_table_to_resize]);
printf(">>>s_size:t1:%d, t2:%d, t3:%d, t4:%d\n",
table.Lsize[0], table.Lsize[1], table.Lsize[2], table.Lsize[3]);
}
#endif
bucket *b=NULL;
bucket *des_b=NULL;
/// take kv to insert
TYPE key, value;
/// insert position
int hash;
int new_bucket_size = table.Lsize[num_table_to_resize] / BUCKET_SIZE;
/// warp coopration
int warp_num_in_all = tid >> 5;
int warp_num_in_block = threadIdx.x >> 5;
int lan_id = tid & 0x1f;
int is_active;
int ballot;
/// in block , for voting
volatile __shared__ int warp[(THREAD_NUM) >> 5];
///step1 置换新表 ======================
/// 与新表对应的表长已设置好
bucket *new_table = table.table[num_table_to_resize];
/// end ,next : old->new
#if down_size_debug
if (tid==0)
printf("step start \n");
#endif
///step2 每个warp处理2个bucket->一个bucket ======================
/// 分别将 旧表 tid tid+new_bucket_size 两个bucket插入到新表的 tid bucket中
/// PROBLEM: 这里默认 new_bucket_size * 2 = old_size (api.cpp line 47)
/// 方法 部分条件下可将old_size 设置为偶数,这样只有在多次downsize之后才会不符合上述条件
/// PROBLEM: 将两个bucket映射到一个bucket,在元素较多的情况下势必造成部分
/// 溢出,除了将溢出部分插入到其他表,我们还需要合理安排两个到一个的映射关系使之高
/// 效转换。
/// 方法1. 逐个查询,使用原子add
/// 方法2. 对空位置和非空kv scan,直接得到相应位置,:需要shared或其他数组支持
/// 方法3. 首先进行简单插入,然后使用warp通信找到空位置插入
/// one thread one block
while (warp_num_in_all < new_bucket_size) { /// new size is smaller
///step2.1 获取新表的bucket ======================
/// warp_num_in_all is hash_value
des_b = &new_table[warp_num_in_all];
#if down_size_debug
if (tid==0)
printf("step2.1 start \n");
#endif
///step2.2 获取第一个旧表的bucket ======================
/// 读入第一个bucket中kv到变量
b = &old_table[warp_num_in_all];
key = b->key[lan_id];
value = b->value[lan_id];
#if down_size_debug
if(tid==0){
printf("old table1\n");
pbucket(b,0,0,0);
}
if(warp_num_in_all==0)
printf("b1-%d: %d,%d\n",lan_id,key,value);
#endif
int crose_lan_id=31-lan_id;
/// 空kv再此读入第二个bucket 交叉读取
b = &old_table[warp_num_in_all + new_bucket_size];
if (key == 0) {
key = b->key[crose_lan_id];
value = b->value[crose_lan_id];
}
///到这里,第一个bucket全部会被读入后面接着写入,第二个部分还未读入
#if down_size_debug
if(tid==0){
printf("old table2\n");
pbucket(b,0,0,0);
}
if(warp_num_in_all==0)
printf("b1-%d: %d,%d\n",lan_id,key,value);
#endif
///step2.3 将不为空的kv插入新表=====================
des_b->key[lan_id] = key;
des_b->value[lan_id] = value;
#if down_size_debug || down_size_cas_insert_debug
if(tid==0)
printf("write\n");
if(tid==0)
pbucket(des_b,num_table_to_resize,tid,get_table_length(num_table_to_resize));
#endif
is_active=0;
///step2.4 读取第二个bucket中未存入的kv ======================
if (key != b->key[crose_lan_id] /// 从未写入过
&& b->key[crose_lan_id] !=0) /// 存在值
{
key = b->key[crose_lan_id];
value = b->value[crose_lan_id];
is_active = 1;
}
#if down_size_debug || down_size_cas_insert_debug
if(warp_num_in_block==0)
printf("b1-%d: %d,%d - %d\n",lan_id,key,value,is_active);
#endif
///step2.5 对新bucket还有的空位进行插入kv======================
/// PROBLEM: how about skip step2.5 use step3 directly
/// 如果空位置比较少会比较快,否则可能使用scan会更快
/// 如果还有空位
ballot = __ballot(des_b->key[lan_id] == 0);
#if down_size_debug
if( tid==0 && ballot == 0 )
printf("step 2.5 , full\n");
#endif
while (__any(des_b->key[lan_id] == 0)) {
#if down_size_debug
if(tid==0)
printf("step 2.5 \n");
#endif
if(!__any(is_active==1)) break;
#if down_size_debug
if(tid==0)
pbucket(des_b,num_table_to_resize,tid,get_table_length(num_table_to_resize));
#endif
/// 找出空位
ballot = __ballot(des_b->key[lan_id] == 0);
/// use hash as tmp to decrease register
/// 选一个空位
hash = __ffs(ballot) - 1;
/// 选一个线程
if (is_active == 1)
warp[warp_num_in_block] = lan_id;
/// insert
if (warp[warp_num_in_block] == lan_id) {
des_b->key[hash] = key;
des_b->value[hash] = value;
is_active=0;
}
}
///step 3 如果位将第二个表中元素全部插入完成,插入到其他表中======================
#if down_size_debug || down_size_cas_insert_debug
if(tid==0)
printf("after2.5 start3\n");
if(tid==0)
pbucket(des_b,num_table_to_resize,tid,get_table_length(num_table_to_resize));
#endif
/// key value has kv to insert
TYPE work_k,work_v;
int operator_hash_table_num=0;
int lead_thread_num;
#if down_size_cas_insert_debug
if(warp_num_in_all==0) {
printf("b1-%d: %d,%d - %d\n",lan_id,key,value,is_active);
}
#endif
while (__any(is_active != 0)) {
/// using logic of cuckoo_insert (__global__)
/// how to reuse the code ?
/// TODO , check too long evict
work_k = key;
work_v = value;
/// step3.1 start voting ==================================
if (is_active != 0)//&& warp[warp_num_in_block]!=lan_id )
warp[warp_num_in_block] = lan_id;
/// leader is lead_thread_num
lead_thread_num = warp[warp_num_in_block];
/// step3.2 broadcast ====================================
work_k = __shfl(work_k, lead_thread_num);
work_v = __shfl(work_v, lead_thread_num);
/// step3.3 insert to the table. ===========================
operator_hash_table_num ++;
/// donot insert to table:num_table_to_resize full
if (operator_hash_table_num==num_table_to_resize ) {
operator_hash_table_num++;
}
operator_hash_table_num %= TABLE_NUM;
hash = get_next_loc(work_k, operator_hash_table_num);
/// step3.4 lock TODO: compress ===========================
/// using ballot as tmp to decrease register
/// lock ,otherwise revote
if (lan_id == lead_thread_num) {
/// TODO: different length need to sum ,tmp using double length
ballot = atomicCAS(&(table.Lock[Lock_pos(operator_hash_table_num,hash)]), 0, 1);
}//end if
ballot = __shfl(ballot, lead_thread_num);
if (ballot == 1)
continue;
b = &(table.table[operator_hash_table_num][hash]);
#if down_size_cas_insert_debug
ballot=__ballot(is_active==1);
if(warp_num_in_block==0 && lan_id==0){
printf("\n\nactive ballot:%x kv %d,%d lead_thread_num:%d\n",
ballot,work_k,work_v,lead_thread_num );
pbucket(b,operator_hash_table_num,hash,get_table_length(operator_hash_table_num));
}
#endif
/// step3.5 check exist & insert
ballot = __ballot(b->key[lan_id] == work_k);
if (ballot != 0) { /// update
if (lan_id == lead_thread_num) {
b->value[__ffs(ballot) - 1] = value;
is_active = 0;
#if down_size_cas_insert_debug
if(warp_num_in_block==0) {
printf("exit after insert \n");
pbucket(b, operator_hash_table_num, hash, get_table_length(operator_hash_table_num));
}
#endif
table.Lock[Lock_pos(operator_hash_table_num,hash)] = 0;
}// end if ,upadte
continue;
}//end check update
/// step3.6 check null & insert
ballot = __ballot(b->key[lan_id] == 0);
#if down_size_cas_insert_debug
if(warp_num_in_block==0) printf("%d,",lan_id);
if(tid==0){
printf("\n\nnull ballot:%x kv %d,%d lead_thread_num:%d \n",
ballot,work_k,work_v,lead_thread_num);
}
#endif
if (ballot != 0) {
/// set kv
if (lan_id == __ffs(ballot) - 1) {
b->key[lan_id] = work_k;
b->value[lan_id] = work_v;
/// free
table.Lock[Lock_pos(operator_hash_table_num,hash)] = 0;
#if down_size_cas_insert_debug
if(warp_num_in_block==0) {
printf("null after insert \n");
pbucket(b, operator_hash_table_num, hash, get_table_length(operator_hash_table_num));
}
#endif
}// insert
/// mark active false
if (lan_id == lead_thread_num)
is_active = 0;
continue;
}/// null insert over
/// step3.7 other,we need cuckoo evict
if (lan_id == lead_thread_num){
key = b->key[lan_id];
value = b->value[lan_id];
b->key[lan_id] = work_k;
b->value[lan_id] = work_v;
#if down_size_cas_insert_debug
if(warp_num_in_block==0) {
printf("evict after insert \n");
pbucket(b, operator_hash_table_num, hash, get_table_length(operator_hash_table_num));
}
#endif
table.Lock[Lock_pos(operator_hash_table_num,hash)] = 0;
} // evict
}
/// TODO:auto configure ,what should be add to tid
tid += BLOCK_NUM * THREAD_NUM;
warp_num_in_all = tid >> 5;
}
}//cuckoo_resize_down
int choose_block_num(TYPE size);
void GPU_cuckoo_resize_up(int num_table_to_resize,
TYPE old_size,
bucket* new_table,
cuckoo *h_table)
{
checkCudaErrors(cudaGetLastError());
TYPE new_size=old_size*2;
/// set table & size it needed
bucket* old_table=h_table->table[num_table_to_resize];
h_table->Lsize[num_table_to_resize]=new_size;
h_table->table[num_table_to_resize]=new_table;
cudaMemcpyToSymbol(table,h_table,sizeof(cuckoo));
/// TODO: auto configure
/// kernel Configuration
dim3 block=choose_block_num(old_size);
/// kernel launch
GpuTimer timer;
timer.Start();
cuckoo_resize_up<<<block,THREAD_NUM>>>(old_table,old_size,num_table_to_resize);
timer.Stop();
double diff = timer.Elapsed()*1000000;
printf("kernel <<<rehash>>>:the time is %.2lf us, ( %.2f Mops)s\n",
(double)diff, (double)(new_size) / diff);
}//GPU_cuckoo_resize_up
void GPU_cuckoo_resize_down(int num_table_to_resize,
TYPE old_size,
bucket* new_table,
cuckoo *h_table)
{
/// bucket to size : << 5
int new_size=((get_table_bucket_size(num_table_to_resize)+1)/2) << 5;
//printf("down_size : %d : szie%d->%d.",num_table_to_resize,old_size,new_size);
/// set table & size it needed
bucket* old_table=h_table->table[num_table_to_resize];
h_table->Lsize[num_table_to_resize]=new_size;
h_table->table[num_table_to_resize]=new_table;
cudaMemcpyToSymbol(table,h_table,sizeof(cuckoo));
dim3 block=choose_block_num(old_size);
/// kernel launch
cuckoo_resize_down<<<block,THREAD_NUM>>>(old_table,old_size,num_table_to_resize);
}//GPU_cuckoo_resize_down
/// show table by key,value
__global__ void show_table() {
if (blockIdx.x * blockDim.x + threadIdx.x > 0) return;
/// i is the table num
for (int i = 0; i < TABLE_NUM; i++) {
printf("\n\n\ntable:%d\n", i);
/// j is the bucket num
for (int j = 0; j < get_table_length(i); j++) {
printf("bucket:%d\n", j);
/// t is every slot(one bucket has 32 slot)
for (int t = 0; t < BUCKET_SIZE; t++) {
/// 8 slot a line
if (t % 8 == 0) printf("\n\t\t");
printf(" %d,%d ", table.table[i][j].key[t], table.table[i][j].value[t]);
}
printf("\n");
}
}
}
void GPU_show_table(){
show_table<<<1,1>>>();
}
void gpu_lp_insert(TYPE* key,
TYPE* value,
TYPE size,
int* resize)
{
dim3 block=choose_block_num(size);
GpuTimer time;
time.Start();
cuckoo_insert <<< block, THREAD_NUM >>> (key, value, size, resize);
time.Stop();
double diff = time.Elapsed() * 1000000;
printf("kernel <<<insert>>>:the time is %.2lf us ( %.2f Mops)\n",
(double) diff, (double) (size) / diff);
}//gpu_lp_insert
void gpu_lp_search(TYPE* key,
TYPE* ans,
TYPE size){
dim3 block=choose_block_num(size);
GpuTimer time;
time.Start();
cuckoo_search<<<block,THREAD_NUM>>>(key,ans,size);
time.Stop();
double diff = time.Elapsed() * 1000000;
printf("kernel <<<search>>>:the time is %.2lf us, ( %.2f Mops)s\n",
(double)diff, (double)(size) / diff);
// checkCudaErrors(cudaGetLastError());
}
void gpu_lp_delete(TYPE* key,
TYPE* ans,
TYPE size){
dim3 block=choose_block_num(size);
GpuTimer time;
time.Start();
cuckoo_delete<<<block,THREAD_NUM>>>(key,ans,size);
time.Stop();
double diff = time.Elapsed() * 1000000;
printf("delete <<<delete>>>:the time is %.2lf us, ( %.2f Mops)s\n",
(double)diff, (double)(size) / diff);
// checkCudaErrors(cudaGetLastError());
}
void gpu_lp_set_table(cuckoo *h_table) {
//printf("seting table\n");
cudaMemcpyToSymbol(table,h_table,sizeof(cuckoo));
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
}
int choose_block_num(TYPE size){
unsigned int real_block=(size+THREAD_NUM-1)/THREAD_NUM;
/// 不能超过 BLOCK_NUM
int block=real_block>BLOCK_NUM ? BLOCK_NUM : real_block;
///
block=block<1?1:block;
return block;
}
|
99a8e81e7ae0d700d1e43f6083cd7efb76821b4d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "randomNumbering.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
hiprandState_t *globalState = NULL;
hipMalloc(&globalState, XSIZE*YSIZE);
int *degreeCount = NULL;
hipMalloc(°reeCount, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int limit = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
randomNumbering), dim3(gridBlock),dim3(threadBlock), 0, 0, globalState,degreeCount,n,limit);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
randomNumbering), dim3(gridBlock),dim3(threadBlock), 0, 0, globalState,degreeCount,n,limit);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
randomNumbering), dim3(gridBlock),dim3(threadBlock), 0, 0, globalState,degreeCount,n,limit);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 99a8e81e7ae0d700d1e43f6083cd7efb76821b4d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "randomNumbering.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
curandState *globalState = NULL;
cudaMalloc(&globalState, XSIZE*YSIZE);
int *degreeCount = NULL;
cudaMalloc(°reeCount, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int limit = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
randomNumbering<<<gridBlock,threadBlock>>>(globalState,degreeCount,n,limit);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
randomNumbering<<<gridBlock,threadBlock>>>(globalState,degreeCount,n,limit);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
randomNumbering<<<gridBlock,threadBlock>>>(globalState,degreeCount,n,limit);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2c2c082db6a98be14c6ff9c5a12a17f5eac41f27.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/primitive/include/copy_nd.h"
#include "oneflow/core/primitive/common/copy_nd.h"
#include "oneflow/core/stream/cuda/cuda_stream_context.h"
#include <hip/hip_runtime.h>
namespace oneflow {
namespace primitive {
namespace {
template<size_t num_dims, size_t movement_size, typename IndexType>
__global__ void CopyNdKernel(CopyNdKernelParams<num_dims, IndexType> params) {
using T = typename std::aligned_storage<movement_size, movement_size>::type;
const T* src = reinterpret_cast<const T*>(params.src);
T* dst = reinterpret_cast<T*>(params.dst);
IndexType copy_index[num_dims];
IndexType src_index[num_dims];
IndexType dst_index[num_dims];
CUDA_1D_KERNEL_LOOP_T(IndexType, i, params.count) {
params.copy_index_helper.OffsetToNdIndex(i, copy_index);
#pragma unroll
for (size_t j = 0; j < num_dims; ++j) {
src_index[j] = params.src_pos[j] + copy_index[j];
dst_index[j] = params.dst_pos[j] + copy_index[j];
}
const IndexType src_offset = params.src_index_helper.NdIndexToOffset(src_index);
const IndexType dst_offset = params.dst_index_helper.NdIndexToOffset(dst_index);
dst[dst_offset] = src[src_offset];
}
}
template<size_t num_dims, size_t movement_size, typename IndexType>
void LaunchKernel(StreamContext* stream_ctx, CopyNdKernelParams<num_dims, IndexType> params) {
hipStream_t cuda_stream =
CHECK_NOTNULL(dynamic_cast<CudaStreamContext*>(stream_ctx))->cuda_stream();
hipLaunchKernelGGL(( CopyNdKernel<num_dims, movement_size, IndexType>)
, dim3(BlocksNum4ThreadsNum(params.count)), dim3(kCudaThreadsNumPerBlock), 0, cuda_stream, params);
}
class CopyNdImpl : public CopyNd {
public:
OF_DISALLOW_COPY_AND_MOVE(CopyNdImpl);
CopyNdImpl() = default;
~CopyNdImpl() override = default;
void Launch(StreamContext* stream_ctx, DataType data_type, size_t num_dims, void* dst,
const int64_t* dst_dims, const int64_t* dst_pos, const void* src,
const int64_t* src_dims, const int64_t* src_pos,
const int64_t* extent) const override {
SimplifyThenLaunch(stream_ctx, data_type, num_dims, dst, dst_dims, dst_pos, src, src_dims,
src_pos, extent);
}
};
class CopyNdFactoryImpl : public CopyNdFactory {
public:
OF_DISALLOW_COPY_AND_MOVE(CopyNdFactoryImpl);
CopyNdFactoryImpl() = default;
~CopyNdFactoryImpl() override = default;
std::unique_ptr<CopyNd> New(size_t max_num_dims) override {
if (max_num_dims <= kMaxNumDims) {
return std::unique_ptr<CopyNd>(new CopyNdImpl());
} else {
return nullptr;
}
}
};
REGISTER_PRIMITIVE_FACTORY(DeviceType::kGPU, CopyNdFactory, CopyNdFactoryImpl);
} // namespace
} // namespace primitive
} // namespace oneflow
| 2c2c082db6a98be14c6ff9c5a12a17f5eac41f27.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/primitive/include/copy_nd.h"
#include "oneflow/core/primitive/common/copy_nd.h"
#include "oneflow/core/stream/cuda/cuda_stream_context.h"
#include <cuda_runtime.h>
namespace oneflow {
namespace primitive {
namespace {
template<size_t num_dims, size_t movement_size, typename IndexType>
__global__ void CopyNdKernel(CopyNdKernelParams<num_dims, IndexType> params) {
using T = typename std::aligned_storage<movement_size, movement_size>::type;
const T* src = reinterpret_cast<const T*>(params.src);
T* dst = reinterpret_cast<T*>(params.dst);
IndexType copy_index[num_dims];
IndexType src_index[num_dims];
IndexType dst_index[num_dims];
CUDA_1D_KERNEL_LOOP_T(IndexType, i, params.count) {
params.copy_index_helper.OffsetToNdIndex(i, copy_index);
#pragma unroll
for (size_t j = 0; j < num_dims; ++j) {
src_index[j] = params.src_pos[j] + copy_index[j];
dst_index[j] = params.dst_pos[j] + copy_index[j];
}
const IndexType src_offset = params.src_index_helper.NdIndexToOffset(src_index);
const IndexType dst_offset = params.dst_index_helper.NdIndexToOffset(dst_index);
dst[dst_offset] = src[src_offset];
}
}
template<size_t num_dims, size_t movement_size, typename IndexType>
void LaunchKernel(StreamContext* stream_ctx, CopyNdKernelParams<num_dims, IndexType> params) {
cudaStream_t cuda_stream =
CHECK_NOTNULL(dynamic_cast<CudaStreamContext*>(stream_ctx))->cuda_stream();
CopyNdKernel<num_dims, movement_size, IndexType>
<<<BlocksNum4ThreadsNum(params.count), kCudaThreadsNumPerBlock, 0, cuda_stream>>>(params);
}
class CopyNdImpl : public CopyNd {
public:
OF_DISALLOW_COPY_AND_MOVE(CopyNdImpl);
CopyNdImpl() = default;
~CopyNdImpl() override = default;
void Launch(StreamContext* stream_ctx, DataType data_type, size_t num_dims, void* dst,
const int64_t* dst_dims, const int64_t* dst_pos, const void* src,
const int64_t* src_dims, const int64_t* src_pos,
const int64_t* extent) const override {
SimplifyThenLaunch(stream_ctx, data_type, num_dims, dst, dst_dims, dst_pos, src, src_dims,
src_pos, extent);
}
};
class CopyNdFactoryImpl : public CopyNdFactory {
public:
OF_DISALLOW_COPY_AND_MOVE(CopyNdFactoryImpl);
CopyNdFactoryImpl() = default;
~CopyNdFactoryImpl() override = default;
std::unique_ptr<CopyNd> New(size_t max_num_dims) override {
if (max_num_dims <= kMaxNumDims) {
return std::unique_ptr<CopyNd>(new CopyNdImpl());
} else {
return nullptr;
}
}
};
REGISTER_PRIMITIVE_FACTORY(DeviceType::kGPU, CopyNdFactory, CopyNdFactoryImpl);
} // namespace
} // namespace primitive
} // namespace oneflow
|
624e0d084eafbc3570999bf5b7ca0624d3fef5ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <primitiv/config.h>
#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {
__global__ void set_const_dev(float k, std::uint32_t size, half *py) {
const std::uint32_t i = IDX;
if (i < size) py[i] = ::__float2half(k);
}
} // namespace
namespace primitiv {
namespace devices {
void CUDA16::reset_tensor_impl(float k, Tensor &x) {
const std::uint32_t size = x.shape().size();
const std::uint32_t num_blocks = GRID_SIZE(size, dim1_x_);
CUDA_CALL(::hipSetDevice(dev_id_));
hipLaunchKernelGGL(( ::set_const_dev), dim3(num_blocks), dim3(dim1_x_), 0, 0, k, size, MDATA(half, x));
}
} // namespace devices
} // namespace primitiv
| 624e0d084eafbc3570999bf5b7ca0624d3fef5ef.cu | #include <primitiv/config.h>
#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {
__global__ void set_const_dev(float k, std::uint32_t size, half *py) {
const std::uint32_t i = IDX;
if (i < size) py[i] = ::__float2half(k);
}
} // namespace
namespace primitiv {
namespace devices {
void CUDA16::reset_tensor_impl(float k, Tensor &x) {
const std::uint32_t size = x.shape().size();
const std::uint32_t num_blocks = GRID_SIZE(size, dim1_x_);
CUDA_CALL(::cudaSetDevice(dev_id_));
::set_const_dev<<<num_blocks, dim1_x_>>>(k, size, MDATA(half, x));
}
} // namespace devices
} // namespace primitiv
|
5b151f9dbf082f8b3c5f9709d30bbe81497bdf9e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef CUDARECURSIVEGAUSSIAN_KERNEL_CU
#define CUDARECURSIVEGAUSSIAN_KERNEL_CU
#include <helper_math.h>
#include <stdio.h>
#include <iostream>
#define BLOCK_DIM 64
__global__ void d_recursiveGaussianY(int *d_src, int *d_dest, int depth, int height, int width,
float b0, float b1, float b2, float b3, float B, int order,
float M11, float M12, float M13,
float M21, float M22, float M23,
float M31, float M32, float M33)
{
float wP1 = 0.f, wP2 = 0.f, wP3 = 0.f;
int y = 0;
float outF1 = 0.f, outF2 = 0.f, outF3 = 0.f;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int yy = blockIdx.y*width*height;
if(x > width)
return;
d_src += x + yy;
d_dest += x + yy;
wP1 = (float)*d_src/sqrt(B); wP2 = wP1; wP3 = wP1;
switch (order)
{
case 0:
{
for(y=0;y<height;y++)
{
float xC = (float)*d_src;
float wC = (float)(xC - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += width; d_dest += width;
wP3 = wP2; wP2 = wP1; wP1 = wC;
}
d_src -= width;
d_dest -= width;
float up = (float)*d_src/(1.0+b1+b2+b3);
float vp = (float)up/(1.0+b1+b2+b3);
float out = 0.f;
out = (float)M11*(*d_dest-up) + M12*(*(d_dest-width)-up) + M13*(*(d_dest-2*width)-up)+vp;
outF1 = (float)M21*(*d_dest-up) + M22*(*(d_dest-width)-up) + M23*(*(d_dest-2*width)-up)+vp;
outF2 = (float)M31*(*d_dest-up) + M32*(*(d_dest-width)-up) + M33*(*(d_dest-2*width)-up)+vp;
out *= B; outF1 *= B; outF2 *= B;
outF3 = outF2; outF2 = outF1; outF1 = out;
*d_dest = (int)out;
d_src -= width;
d_dest -= width;
for(y=height-1-1;y>=0;y--)
{
float wC = (float)*d_dest;
out = (float)(B*wC - b1*outF1 - b2*outF2 - b3*outF3)/b0;
*d_dest = (int)out;
d_src -= width; d_dest -= width;
outF3 = outF2; outF2 = outF1; outF1 = out;
}
} break;
case 1:
{
float xP1 = (float)*(d_src);
float xF1 = (float)*(d_src + 1*width);
wP1 = (float)(xF1 - xP1)/2.0*(1.0+b1+b2+b3); wP3 = wP2 = wP1;
float wC = ((xF1- xP1)/2.0 - b1*wP1 - b2*wP1 - b3*wP1)/b0;
*d_dest = (int)wC;
d_src += width; d_dest += width;
wP3 = wP2; wP2 = wP1; wP1 = wC;
for(y=0;y<height-1-1;y++)
{
xP1 = (float)*(d_src - width);
xF1 = (float)*(d_src + width);
wC = (float)((xF1- xP1)/2.0 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += width; d_dest += width;
wP3 = wP2; wP2 = wP1; wP1 = wC;
}
xP1 = (float)*(d_src - width);
xF1 = (float)*d_src;
wC = (float)((xF1- xP1)/2.0 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
wP3 = wP2; wP2 = wP1; wP1 = wC;
float up = (*d_src - *(d_src-width))/2.0*(1.0+b1+b2+b3);
float vp = up/(1.0+b1+b2+b3);
float out = 0.f;
out = (float)M11*(*d_dest-up) + M12*(*(d_dest-width)-up) + M13*(*(d_dest-2*width)-up)+vp;
outF1 = (float)M21*(*d_dest-up) + M22*(*(d_dest-width)-up) + M23*(*(d_dest-2*width)-up)+vp;
outF2 = (float)M31*(*d_dest-up) + M32*(*(d_dest-width)-up) + M33*(*(d_dest-2*width)-up)+vp;
out *= B; outF1 *= B; outF2 *= B;
outF3 = outF2; outF2 = outF1; outF1 = out;
*d_dest = (int)out;
d_src -= width;
d_dest -= width;
for(y=height-2;y>=0;y--)
{
float wC = (float)*d_dest;
out = (float)(B*wC - b1*outF1 - b2*outF2 - b3*outF3)/b0;
*d_dest = (int)out;
d_src -= width; d_dest -= width;
outF3 = outF2; outF2 = outF1; outF1 = out;
}
} break;
case 2:
{
float xP1 = (float)*d_src;
float xC = (float)*d_src;
float xF1 = (float)*(d_src+width);
wP1 = 0.0/(1.0+b1+b2+b3); wP3 = wP2 = wP1;
float wC = (float)((xF1 - 2*xC + xP1) - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += width; d_dest += width;
wP3 = wP2; wP2 = wP1; wP1 = wC;
for(y=0;y<height-2;y++)
{
xC = (float)*d_src;
xP1 = (float)*(d_src-width);
xF1 = (float)*(d_src+width);
wC = (float)(xF1 - 2*xC + xP1 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += width; d_dest += width;
wP3 = wP2; wP2 = wP1; wP1 = wC;
}
xC = (float)*d_src;
xP1 = (float)*(d_src-width);
xF1 = (float)*d_src;
wC = (float)(xF1 - 2*xC + xP1 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
wP3 = wP2; wP2 = wP1; wP1 = wC;
float up = 0;
float vp = up/(1.0+b1+b2+b3);
float out = 0.f;
out = (float)M11*(*d_dest-up) + M12*(*(d_dest-width)-up) + M13*(*(d_dest-2*width)-up)+vp;
outF1 = (float)M21*(*d_dest-up) + M22*(*(d_dest-width)-up) + M23*(*(d_dest-2*width)-up)+vp;
outF2 = (float)M31*(*d_dest-up) + M32*(*(d_dest-width)-up) + M33*(*(d_dest-2*width)-up)+vp;
out *= B; outF1 *= B; outF2 *= B;
outF3 = outF2; outF2 = outF1; outF1 = out;
*d_dest = (int)out;
d_src -= width;
d_dest -= width;
for(y=height-2;y>=0;y--)
{
float wC = (float)*d_dest;
out = (float)(B*wC - b1*outF1 - b2*outF2 - b3*outF3)/b0;
*d_dest = (int)out;
d_src -= width; d_dest -= width;
outF3 = outF2; outF2 = outF1; outF1 = out;
}
} break;
}
}
extern "C" void Call_d_recursiveGaussianY(int *d_src, int *d_dest, int width, int height, int depth,
float b0, float b1, float b2, float b3, float B, int order, int n, int nthread,
float M11, float M12, float M13,
float M21, float M22, float M23,
float M31, float M32, float M33)
{
hipLaunchKernelGGL(( d_recursiveGaussianY), dim3(dim3(n, depth)), dim3(nthread), 0, 0, d_src, d_dest, depth, height, width,
b0, b1, b2, b3, B, order,
M11, M12, M13,
M21, M22, M23,
M31, M32, M33);
}
__global__ void d_recursiveGaussianX(int *d_src, int *d_dest, int depth, int height, int width,
float b0, float b1, float b2, float b3, float B, int order,
float M11, float M12, float M13,
float M21, float M22, float M23,
float M31, float M32, float M33)
{
float wP1 = 0.f, wP2 = 0.f, wP3 = 0.f;
int y = 0;
float outF1 = 0.f, outF2 = 0.f, outF3 = 0.f;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int yy = blockIdx.y*width*height;
if(x > width)
return;
d_src += x*width + yy;
d_dest += x*width + yy;
wP1 = (float)*d_src/sqrt(B); wP2 = wP1; wP3 = wP1;
switch (order)
{
case 0:
{
for(y=0;y<width;y++)
{
float xC = (float)*d_src;
float wC = (float)(xC - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += 1; d_dest += 1;
wP3 = wP2; wP2 = wP1; wP1 = wC;
}
d_src -= 1;
d_dest -= 1;
float up = (float)*d_src/(1.0+b1+b2+b3);
float vp = (float)up/(1.0+b1+b2+b3);
float out = 0.f;
out = (float)M11*(*d_dest-up) + M12*(*(d_dest-1)-up) + M13*(*(d_dest-2*1)-up)+vp;
outF1 = (float)M21*(*d_dest-up) + M22*(*(d_dest-1)-up) + M23*(*(d_dest-2*1)-up)+vp;
outF2 = (float)M31*(*d_dest-up) + M32*(*(d_dest-1)-up) + M33*(*(d_dest-2*1)-up)+vp;
out *= B; outF1 *= B; outF2 *= B;
outF3 = outF2; outF2 = outF1; outF1 = out;
*d_dest = (int)out;
d_src -= 1;
d_dest -= 1;
for(y=width-1-1;y>=0;y--)
{
float wC = (float)*d_dest;
out = (float)(B*wC - b1*outF1 - b2*outF2 - b3*outF3)/b0;
*d_dest = (int)out;
d_src -= 1; d_dest -= 1;
outF3 = outF2; outF2 = outF1; outF1 = out;
}
} break;
case 1:
{
float xP1 = (float)*(d_src);
float xF1 = (float)*(d_src + 1*1);
wP1 = (float)(xF1 - xP1)/2.0*(1.0+b1+b2+b3); wP3 = wP2 = wP1;
float wC = ((xF1- xP1)/2.0 - b1*wP1 - b2*wP1 - b3*wP1)/b0;
*d_dest = (int)wC;
d_src += 1; d_dest += 1;
wP3 = wP2; wP2 = wP1; wP1 = wC;
for(y=0;y<width-2;y++)
{
xP1 = (float)*(d_src - 1);
xF1 = (float)*(d_src + 1);
wC = (float)((xF1- xP1)/2.0 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += 1; d_dest += 1;
wP3 = wP2; wP2 = wP1; wP1 = wC;
}
xP1 = (float)*(d_src - 1);
xF1 = (float)*d_src;
wC = (float)((xF1- xP1)/2.0 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
wP3 = wP2; wP2 = wP1; wP1 = wC;
float up = (*d_src - *(d_src-1))/2.0*(1.0+b1+b2+b3);
//float up = 0;
float vp = up/(1.0+b1+b2+b3);
float out = 0.f;
out = (float)M11*(*d_dest-up) + M12*(*(d_dest-1)-up) + M13*(*(d_dest-2*1)-up)+vp;
outF1 = (float)M21*(*d_dest-up) + M22*(*(d_dest-1)-up) + M23*(*(d_dest-2*1)-up)+vp;
outF2 = (float)M31*(*d_dest-up) + M32*(*(d_dest-1)-up) + M33*(*(d_dest-2*1)-up)+vp;
out *= B; outF1 *= B; outF2 *= B;
outF3 = outF2; outF2 = outF1; outF1 = out;
*d_dest = (int)out;
d_src -= 1;
d_dest -= 1;
for(y=width-2;y>=0;y--)
{
float wC = (float)*d_dest;
out = (float)(B*wC - b1*outF1 - b2*outF2 - b3*outF3)/b0;
*d_dest = (int)out;
d_src -= 1; d_dest -= 1;
outF3 = outF2; outF2 = outF1; outF1 = out;
}
} break;
case 2:
{
float xP1 = (float)*d_src;
float xC = (float)*d_src;
float xF1 = (float)*(d_src+1);
wP1 = 0.0/(1.0+b1+b2+b3); wP3 = wP2 = wP1;
float wC = (int)((xF1 - 2*xC + xP1) - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += 1; d_dest += 1;
wP3 = wP2; wP2 = wP1; wP1 = wC;
for(y=0;y<width-2;y++)
{
xC = (float)*d_src;
xP1 = (float)*(d_src-1);
xF1 = (float)*(d_src+1);
wC = (float)(xF1 - 2*xC + xP1 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += 1; d_dest += 1;
wP3 = wP2; wP2 = wP1; wP1 = wC;
}
xC = (float)*d_src;
xP1 = (float)*(d_src-1);
xF1 = (float)*d_src;
wC = (float)(xF1 - 2*xC + xP1 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
wP3 = wP2; wP2 = wP1; wP1 = wC;
float up = 0;
float vp = up/(1.0+b1+b2+b3);
float out = 0.f;
out = (float)M11*(*d_dest-up) + M12*(*(d_dest-1)-up) + M13*(*(d_dest-2*1)-up)+vp;
outF1 = (float)M21*(*d_dest-up) + M22*(*(d_dest-1)-up) + M23*(*(d_dest-2*1)-up)+vp;
outF2 = (float)M31*(*d_dest-up) + M32*(*(d_dest-1)-up) + M33*(*(d_dest-2*1)-up)+vp;
out *= B; outF1 *= B; outF2 *= B;
outF3 = outF2; outF2 = outF1; outF1 = out;
*d_dest = (int)out;
d_src -= 1;
d_dest -= 1;
for(y=width-2;y>=0;y--)
{
float wC = (float)*d_dest;
out = (float)(B*wC - b1*outF1 - b2*outF2 - b3*outF3)/b0;
*d_dest = (int)out;
d_src -= 1; d_dest -= 1;
outF3 = outF2; outF2 = outF1; outF1 = out;
}
} break;
}
}
extern "C" void Call_d_recursiveGaussianX(int *d_src, int *d_dest, int width, int height, int depth,
float b0, float b1, float b2, float b3, float B, int order, int n, int nthread,
float M11, float M12, float M13,
float M21, float M22, float M23,
float M31, float M32, float M33)
{
hipLaunchKernelGGL(( d_recursiveGaussianX), dim3(dim3(n, depth)), dim3(nthread), 0, 0, d_src, d_dest, depth, height, width,
b0, b1, b2, b3, B, order,
M11, M12, M13,
M21, M22, M23,
M31, M32, M33);
}
__global__ void d_recursiveGaussianZ(int* d_src, int* d_dest, int depth, int height, int width,
float b0, float b1, float b2, float b3, float B, int order,
float M11, float M12, float M13,
float M21, float M22, float M23,
float M31, float M32, float M33)
{
float wP1 = 0.f, wP2 = 0.f, wP3 = 0.f;
int y = 0;
float outF1 = 0.f, outF2 = 0.f, outF3 = 0.f;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int yy = blockIdx.y*width;
if(x > width)
return;
d_src += x + yy;
d_dest += x + yy;
wP1 = (float)*d_src/sqrt(B); wP2 = wP1; wP3 = wP1;
switch (order)
{
case 0:
{
for(y=0;y<depth;y++)
{
float xC = (float)*d_src;
float wC = (float)(xC - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += width*height; d_dest += width*height;
wP3 = wP2; wP2 = wP1; wP1 = wC;
}
d_src -= width*height;
d_dest -= width*height;
float up = (float)*d_src/(1.0+b1+b2+b3);
float vp = (float)up/(1.0+b1+b2+b3);
float out = 0.f;
out = (float)M11*(*d_dest-up) + M12*(*(d_dest-width*height)-up) + M13*(*(d_dest-2*width*height)-up)+vp;
outF1 = (float)M21*(*d_dest-up) + M22*(*(d_dest-width*height)-up) + M23*(*(d_dest-2*width*height)-up)+vp;
outF2 = (float)M31*(*d_dest-up) + M32*(*(d_dest-width*height)-up) + M33*(*(d_dest-2*width*height)-up)+vp;
out *= B; outF1 *= B; outF2 *= B;
outF3 = outF2; outF2 = outF1; outF1 = out;
*d_dest = (int)out;
d_src -= width*height;
d_dest -= width*height;
for(y=depth-1-1;y>=0;y--)
{
float wC = (float)*d_dest;
out = (float)(B*wC - b1*outF1 - b2*outF2 - b3*outF3)/b0;
*d_dest = (int)out;
d_src -= width*height; d_dest -= width*height;
outF3 = outF2; outF2 = outF1; outF1 = out;
}
} break;
case 1:
{
float xP1 = (float)*(d_src);
float xF1 = (float)*(d_src + 1*width*height);
wP1 = (float)(xF1 - xP1)/2.0*(1.0+b1+b2+b3); wP3 = wP2 = wP1;
float wC = ((xF1- xP1)/2.0 - b1*wP1 - b2*wP1 - b3*wP1)/b0;
*d_dest = (int)wC;
d_src += width*height; d_dest += width*height;
wP3 = wP2; wP2 = wP1; wP1 = wC;
for(y=0;y<depth-2;y++)
{
xP1 = (float)*(d_src - width*height);
xF1 = (float)*(d_src + width*height);
wC = (float)((xF1- xP1)/2.0 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += width*height; d_dest += width*height;
wP3 = wP2; wP2 = wP1; wP1 = wC;
}
xP1 = (float)*(d_src - width*height);
xF1 = (float)*d_src;
wC = (float)((xF1- xP1)/2.0 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
wP3 = wP2; wP2 = wP1; wP1 = wC;
float up = (*d_src - *(d_src-width*height))/2.0*(1.0+b1+b2+b3);
//float up = 0;
float vp = up/(1.0+b1+b2+b3);
float out = 0.f;
out = (float)M11*(*d_dest-up) + M12*(*(d_dest-width*height)-up) + M13*(*(d_dest-2*width*height)-up)+vp;
outF1 = (float)M21*(*d_dest-up) + M22*(*(d_dest-width*height)-up) + M23*(*(d_dest-2*width*height)-up)+vp;
outF2 = (float)M31*(*d_dest-up) + M32*(*(d_dest-width*height)-up) + M33*(*(d_dest-2*width*height)-up)+vp;
out *= B; outF1 *= B; outF2 *= B;
outF3 = outF2; outF2 = outF1; outF1 = out;
*d_dest = (int)out;
d_src -= width*height;
d_dest -= width*height;
for(y=depth-2;y>=0;y--)
{
float wC = (float)*d_dest;
out = (float)(B*wC - b1*outF1 - b2*outF2 - b3*outF3)/b0;
*d_dest = (int)out;
d_src -= width*height; d_dest -= width*height;
outF3 = outF2; outF2 = outF1; outF1 = out;
}
} break;
case 2:
{
float xP1 = (float)*d_src;
float xC = (float)*d_src;
float xF1 = (float)*(d_src+width*height);
wP1 = 0.0/(1.0+b1+b2+b3); wP3 = wP2 = wP1;
float wC = (float)((xF1 - 2*xC + xP1) - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += width*height; d_dest += width*height;
wP3 = wP2; wP2 = wP1; wP1 = wC;
for(y=0;y<depth-1-1;y++)
{
xC = (float)*d_src;
xP1 = (float)*(d_src-width*height);
xF1 = (float)*(d_src+width*height);
wC = (float)(xF1 - 2*xC + xP1 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += width*height; d_dest += width*height;
wP3 = wP2; wP2 = wP1; wP1 = wC;
}
xC = (float)*d_src;
xP1 = (float)*(d_src-width*height);
xF1 = (float)*d_src;
wC = (float)(xF1 - 2*xC + xP1 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
wP3 = wP2; wP2 = wP1; wP1 = wC;
float up = 0;
float vp = up/(1.0+b1+b2+b3);
float out = 0.f;
out = (float)M11*(*d_dest-up) + M12*(*(d_dest-width*height)-up) + M13*(*(d_dest-2*width*height)-up)+vp;
outF1 = (float)M21*(*d_dest-up) + M22*(*(d_dest-width*height)-up) + M23*(*(d_dest-2*width*height)-up)+vp;
outF2 = (float)M31*(*d_dest-up) + M32*(*(d_dest-width*height)-up) + M33*(*(d_dest-2*width*height)-up)+vp;
out *= B; outF1 *= B; outF2 *= B;
outF3 = outF2; outF2 = outF1; outF1 = out;
*d_dest = (int)out;
d_src -= width*height;
d_dest -= width*height;
for(y=depth-2;y>=0;y--)
{
float wC = (float)*d_dest;
out = (float)(B*wC - b1*outF1 - b2*outF2 - b3*outF3)/b0;
*d_dest = (int)out;
d_src -= width*height; d_dest -= width*height;
outF3 = outF2; outF2 = outF1; outF1 = out;
}
} break;
}
}
extern "C" void Call_d_recursiveGaussianZ(int *d_src, int *d_dest, int width, int height, int depth,
float b0, float b1, float b2, float b3, float B, int order, int n, int nthread,
float M11, float M12, float M13,
float M21, float M22, float M23,
float M31, float M32, float M33)
{
hipLaunchKernelGGL(( d_recursiveGaussianZ), dim3(dim3(n, height)), dim3(nthread), 0, 0, d_src, d_dest, depth, height, width,
b0, b1, b2, b3, B, order,
M11, M12, M13,
M21, M22, M23,
M31, M32, M33);
}
#endif // CUDARECURSIVEGAUSSIAN_KERNEL_CU
| 5b151f9dbf082f8b3c5f9709d30bbe81497bdf9e.cu | #ifndef CUDARECURSIVEGAUSSIAN_KERNEL_CU
#define CUDARECURSIVEGAUSSIAN_KERNEL_CU
#include <helper_math.h>
#include <stdio.h>
#include <iostream>
#define BLOCK_DIM 64
__global__ void d_recursiveGaussianY(int *d_src, int *d_dest, int depth, int height, int width,
float b0, float b1, float b2, float b3, float B, int order,
float M11, float M12, float M13,
float M21, float M22, float M23,
float M31, float M32, float M33)
{
float wP1 = 0.f, wP2 = 0.f, wP3 = 0.f;
int y = 0;
float outF1 = 0.f, outF2 = 0.f, outF3 = 0.f;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int yy = blockIdx.y*width*height;
if(x > width)
return;
d_src += x + yy;
d_dest += x + yy;
wP1 = (float)*d_src/sqrt(B); wP2 = wP1; wP3 = wP1;
switch (order)
{
case 0:
{
for(y=0;y<height;y++)
{
float xC = (float)*d_src;
float wC = (float)(xC - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += width; d_dest += width;
wP3 = wP2; wP2 = wP1; wP1 = wC;
}
d_src -= width;
d_dest -= width;
float up = (float)*d_src/(1.0+b1+b2+b3);
float vp = (float)up/(1.0+b1+b2+b3);
float out = 0.f;
out = (float)M11*(*d_dest-up) + M12*(*(d_dest-width)-up) + M13*(*(d_dest-2*width)-up)+vp;
outF1 = (float)M21*(*d_dest-up) + M22*(*(d_dest-width)-up) + M23*(*(d_dest-2*width)-up)+vp;
outF2 = (float)M31*(*d_dest-up) + M32*(*(d_dest-width)-up) + M33*(*(d_dest-2*width)-up)+vp;
out *= B; outF1 *= B; outF2 *= B;
outF3 = outF2; outF2 = outF1; outF1 = out;
*d_dest = (int)out;
d_src -= width;
d_dest -= width;
for(y=height-1-1;y>=0;y--)
{
float wC = (float)*d_dest;
out = (float)(B*wC - b1*outF1 - b2*outF2 - b3*outF3)/b0;
*d_dest = (int)out;
d_src -= width; d_dest -= width;
outF3 = outF2; outF2 = outF1; outF1 = out;
}
} break;
case 1:
{
float xP1 = (float)*(d_src);
float xF1 = (float)*(d_src + 1*width);
wP1 = (float)(xF1 - xP1)/2.0*(1.0+b1+b2+b3); wP3 = wP2 = wP1;
float wC = ((xF1- xP1)/2.0 - b1*wP1 - b2*wP1 - b3*wP1)/b0;
*d_dest = (int)wC;
d_src += width; d_dest += width;
wP3 = wP2; wP2 = wP1; wP1 = wC;
for(y=0;y<height-1-1;y++)
{
xP1 = (float)*(d_src - width);
xF1 = (float)*(d_src + width);
wC = (float)((xF1- xP1)/2.0 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += width; d_dest += width;
wP3 = wP2; wP2 = wP1; wP1 = wC;
}
xP1 = (float)*(d_src - width);
xF1 = (float)*d_src;
wC = (float)((xF1- xP1)/2.0 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
wP3 = wP2; wP2 = wP1; wP1 = wC;
float up = (*d_src - *(d_src-width))/2.0*(1.0+b1+b2+b3);
float vp = up/(1.0+b1+b2+b3);
float out = 0.f;
out = (float)M11*(*d_dest-up) + M12*(*(d_dest-width)-up) + M13*(*(d_dest-2*width)-up)+vp;
outF1 = (float)M21*(*d_dest-up) + M22*(*(d_dest-width)-up) + M23*(*(d_dest-2*width)-up)+vp;
outF2 = (float)M31*(*d_dest-up) + M32*(*(d_dest-width)-up) + M33*(*(d_dest-2*width)-up)+vp;
out *= B; outF1 *= B; outF2 *= B;
outF3 = outF2; outF2 = outF1; outF1 = out;
*d_dest = (int)out;
d_src -= width;
d_dest -= width;
for(y=height-2;y>=0;y--)
{
float wC = (float)*d_dest;
out = (float)(B*wC - b1*outF1 - b2*outF2 - b3*outF3)/b0;
*d_dest = (int)out;
d_src -= width; d_dest -= width;
outF3 = outF2; outF2 = outF1; outF1 = out;
}
} break;
case 2:
{
float xP1 = (float)*d_src;
float xC = (float)*d_src;
float xF1 = (float)*(d_src+width);
wP1 = 0.0/(1.0+b1+b2+b3); wP3 = wP2 = wP1;
float wC = (float)((xF1 - 2*xC + xP1) - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += width; d_dest += width;
wP3 = wP2; wP2 = wP1; wP1 = wC;
for(y=0;y<height-2;y++)
{
xC = (float)*d_src;
xP1 = (float)*(d_src-width);
xF1 = (float)*(d_src+width);
wC = (float)(xF1 - 2*xC + xP1 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += width; d_dest += width;
wP3 = wP2; wP2 = wP1; wP1 = wC;
}
xC = (float)*d_src;
xP1 = (float)*(d_src-width);
xF1 = (float)*d_src;
wC = (float)(xF1 - 2*xC + xP1 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
wP3 = wP2; wP2 = wP1; wP1 = wC;
float up = 0;
float vp = up/(1.0+b1+b2+b3);
float out = 0.f;
out = (float)M11*(*d_dest-up) + M12*(*(d_dest-width)-up) + M13*(*(d_dest-2*width)-up)+vp;
outF1 = (float)M21*(*d_dest-up) + M22*(*(d_dest-width)-up) + M23*(*(d_dest-2*width)-up)+vp;
outF2 = (float)M31*(*d_dest-up) + M32*(*(d_dest-width)-up) + M33*(*(d_dest-2*width)-up)+vp;
out *= B; outF1 *= B; outF2 *= B;
outF3 = outF2; outF2 = outF1; outF1 = out;
*d_dest = (int)out;
d_src -= width;
d_dest -= width;
for(y=height-2;y>=0;y--)
{
float wC = (float)*d_dest;
out = (float)(B*wC - b1*outF1 - b2*outF2 - b3*outF3)/b0;
*d_dest = (int)out;
d_src -= width; d_dest -= width;
outF3 = outF2; outF2 = outF1; outF1 = out;
}
} break;
}
}
/* Host wrapper: launches the Y-direction pass of the recursive Gaussian.
 * Grid is (n, depth): gridDim.y selects the slice, n blocks of `nthread`
 * threads cover the columns of one slice.
 * b0..b3/B are the recursive (IIR) filter coefficients, order selects
 * 0 = smoothing, 1 = first derivative, 2 = second derivative, and
 * M11..M33 is the boundary-correction matrix for the backward sweep.
 * NOTE(review): the extents are forwarded as (depth, height, width) even
 * though this wrapper receives (width, height, depth) - presumably to match
 * the kernel's parameter order; confirm against d_recursiveGaussianY's
 * signature. */
extern "C" void Call_d_recursiveGaussianY(int *d_src, int *d_dest, int width, int height, int depth,
float b0, float b1, float b2, float b3, float B, int order, int n, int nthread,
float M11, float M12, float M13,
float M21, float M22, float M23,
float M31, float M32, float M33)
{
d_recursiveGaussianY<<<dim3(n, depth), nthread>>>(d_src, d_dest, depth, height, width,
b0, b1, b2, b3, B, order,
M11, M12, M13,
M21, M22, M23,
M31, M32, M33);
}
/* X-direction pass of the recursive (IIR) Gaussian filter.
 * One thread processes one row (stride 1 between samples): a forward causal
 * sweep writes intermediate results into d_dest, then an anti-causal backward
 * sweep whose start values are corrected with the 3x3 matrix M11..M33.
 * order 0 = smoothing (raw samples), 1 = first derivative (central
 * differences), 2 = second derivative (discrete Laplacian).
 * b0..b3/B are the recursive filter coefficients. */
__global__ void d_recursiveGaussianX(int *d_src, int *d_dest, int depth, int height, int width,
float b0, float b1, float b2, float b3, float B, int order,
float M11, float M12, float M13,
float M21, float M22, float M23,
float M31, float M32, float M33)
{
float wP1 = 0.f, wP2 = 0.f, wP3 = 0.f;
int y = 0;
float outF1 = 0.f, outF2 = 0.f, outF3 = 0.f;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int yy = blockIdx.y*width*height;
// FIX: was `x > width`, which let x == width process one row out of bounds.
// NOTE(review): x selects a row, so the bound should arguably be `height`
// (rows per slice); `width` is only equivalent for square slices - confirm.
if(x >= width)
return;
d_src += x*width + yy;
d_dest += x*width + yy;
wP1 = (float)*d_src/sqrt(B); wP2 = wP1; wP3 = wP1;
switch (order)
{
case 0:
{
// forward causal pass over the row
for(y=0;y<width;y++)
{
float xC = (float)*d_src;
float wC = (float)(xC - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += 1; d_dest += 1;
wP3 = wP2; wP2 = wP1; wP1 = wC;
}
d_src -= 1;
d_dest -= 1;
// boundary correction for the anti-causal pass (M is the correction matrix)
float up = (float)*d_src/(1.0+b1+b2+b3);
float vp = (float)up/(1.0+b1+b2+b3);
float out = 0.f;
out = (float)M11*(*d_dest-up) + M12*(*(d_dest-1)-up) + M13*(*(d_dest-2*1)-up)+vp;
outF1 = (float)M21*(*d_dest-up) + M22*(*(d_dest-1)-up) + M23*(*(d_dest-2*1)-up)+vp;
outF2 = (float)M31*(*d_dest-up) + M32*(*(d_dest-1)-up) + M33*(*(d_dest-2*1)-up)+vp;
out *= B; outF1 *= B; outF2 *= B;
outF3 = outF2; outF2 = outF1; outF1 = out;
*d_dest = (int)out;
d_src -= 1;
d_dest -= 1;
// backward anti-causal pass
for(y=width-1-1;y>=0;y--)
{
float wC = (float)*d_dest;
out = (float)(B*wC - b1*outF1 - b2*outF2 - b3*outF3)/b0;
*d_dest = (int)out;
d_src -= 1; d_dest -= 1;
outF3 = outF2; outF2 = outF1; outF1 = out;
}
} break;
case 1:
{
// first derivative: feed central differences of the row into the filter
float xP1 = (float)*(d_src);
float xF1 = (float)*(d_src + 1*1);
wP1 = (float)(xF1 - xP1)/2.0*(1.0+b1+b2+b3); wP3 = wP2 = wP1;
float wC = ((xF1- xP1)/2.0 - b1*wP1 - b2*wP1 - b3*wP1)/b0;
*d_dest = (int)wC;
d_src += 1; d_dest += 1;
wP3 = wP2; wP2 = wP1; wP1 = wC;
for(y=0;y<width-2;y++)
{
xP1 = (float)*(d_src - 1);
xF1 = (float)*(d_src + 1);
wC = (float)((xF1- xP1)/2.0 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += 1; d_dest += 1;
wP3 = wP2; wP2 = wP1; wP1 = wC;
}
// last sample: one-sided difference at the row boundary
xP1 = (float)*(d_src - 1);
xF1 = (float)*d_src;
wC = (float)((xF1- xP1)/2.0 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
wP3 = wP2; wP2 = wP1; wP1 = wC;
float up = (*d_src - *(d_src-1))/2.0*(1.0+b1+b2+b3);
//float up = 0;
float vp = up/(1.0+b1+b2+b3);
float out = 0.f;
out = (float)M11*(*d_dest-up) + M12*(*(d_dest-1)-up) + M13*(*(d_dest-2*1)-up)+vp;
outF1 = (float)M21*(*d_dest-up) + M22*(*(d_dest-1)-up) + M23*(*(d_dest-2*1)-up)+vp;
outF2 = (float)M31*(*d_dest-up) + M32*(*(d_dest-1)-up) + M33*(*(d_dest-2*1)-up)+vp;
out *= B; outF1 *= B; outF2 *= B;
outF3 = outF2; outF2 = outF1; outF1 = out;
*d_dest = (int)out;
d_src -= 1;
d_dest -= 1;
for(y=width-2;y>=0;y--)
{
float wC = (float)*d_dest;
out = (float)(B*wC - b1*outF1 - b2*outF2 - b3*outF3)/b0;
*d_dest = (int)out;
d_src -= 1; d_dest -= 1;
outF3 = outF2; outF2 = outF1; outF1 = out;
}
} break;
case 2:
{
// second derivative: feed the discrete Laplacian of the row into the filter
float xP1 = (float)*d_src;
float xC = (float)*d_src;
float xF1 = (float)*(d_src+1);
wP1 = 0.0/(1.0+b1+b2+b3); wP3 = wP2 = wP1;
// FIX: was `(int)(...)/b0`, which truncated the numerator to an integer
// before dividing; every sibling kernel/case uses a (float) cast here.
float wC = (float)((xF1 - 2*xC + xP1) - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += 1; d_dest += 1;
wP3 = wP2; wP2 = wP1; wP1 = wC;
for(y=0;y<width-2;y++)
{
xC = (float)*d_src;
xP1 = (float)*(d_src-1);
xF1 = (float)*(d_src+1);
wC = (float)(xF1 - 2*xC + xP1 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += 1; d_dest += 1;
wP3 = wP2; wP2 = wP1; wP1 = wC;
}
xC = (float)*d_src;
xP1 = (float)*(d_src-1);
xF1 = (float)*d_src;
wC = (float)(xF1 - 2*xC + xP1 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
wP3 = wP2; wP2 = wP1; wP1 = wC;
float up = 0;
float vp = up/(1.0+b1+b2+b3);
float out = 0.f;
out = (float)M11*(*d_dest-up) + M12*(*(d_dest-1)-up) + M13*(*(d_dest-2*1)-up)+vp;
outF1 = (float)M21*(*d_dest-up) + M22*(*(d_dest-1)-up) + M23*(*(d_dest-2*1)-up)+vp;
outF2 = (float)M31*(*d_dest-up) + M32*(*(d_dest-1)-up) + M33*(*(d_dest-2*1)-up)+vp;
out *= B; outF1 *= B; outF2 *= B;
outF3 = outF2; outF2 = outF1; outF1 = out;
*d_dest = (int)out;
d_src -= 1;
d_dest -= 1;
for(y=width-2;y>=0;y--)
{
float wC = (float)*d_dest;
out = (float)(B*wC - b1*outF1 - b2*outF2 - b3*outF3)/b0;
*d_dest = (int)out;
d_src -= 1; d_dest -= 1;
outF3 = outF2; outF2 = outF1; outF1 = out;
}
} break;
}
}
/* Host wrapper: launches the X-direction pass of the recursive Gaussian.
 * Grid is (n, depth): gridDim.y selects the slice, n blocks of `nthread`
 * threads cover the rows of one slice (each thread filters one row).
 * The extents are forwarded as (depth, height, width), matching the kernel's
 * (depth, height, width) parameter order. See Call_d_recursiveGaussianY for
 * the meaning of b0..b3, B, order and M11..M33. */
extern "C" void Call_d_recursiveGaussianX(int *d_src, int *d_dest, int width, int height, int depth,
float b0, float b1, float b2, float b3, float B, int order, int n, int nthread,
float M11, float M12, float M13,
float M21, float M22, float M23,
float M31, float M32, float M33)
{
d_recursiveGaussianX<<<dim3(n, depth), nthread>>>(d_src, d_dest, depth, height, width,
b0, b1, b2, b3, B, order,
M11, M12, M13,
M21, M22, M23,
M31, M32, M33);
}
/* Z-direction pass of the recursive (IIR) Gaussian filter.
 * One thread processes one column through the volume (stride width*height
 * between samples): a forward causal sweep, then an anti-causal backward
 * sweep whose start values are corrected with the 3x3 matrix M11..M33.
 * order 0 = smoothing, 1 = first derivative (central differences),
 * 2 = second derivative (discrete Laplacian).
 * Launched with gridDim.y = height, so yy = blockIdx.y*width selects the
 * row and x the column within slice 0; together they address every
 * (x, y) column of the volume. */
__global__ void d_recursiveGaussianZ(int* d_src, int* d_dest, int depth, int height, int width,
float b0, float b1, float b2, float b3, float B, int order,
float M11, float M12, float M13,
float M21, float M22, float M23,
float M31, float M32, float M33)
{
float wP1 = 0.f, wP2 = 0.f, wP3 = 0.f;
int y = 0;
float outF1 = 0.f, outF2 = 0.f, outF3 = 0.f;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int yy = blockIdx.y*width;
// FIX: was `x > width`, which let x == width process a column out of bounds.
if(x >= width)
return;
d_src += x + yy;
d_dest += x + yy;
wP1 = (float)*d_src/sqrt(B); wP2 = wP1; wP3 = wP1;
switch (order)
{
case 0:
{
// forward causal pass along the column
for(y=0;y<depth;y++)
{
float xC = (float)*d_src;
float wC = (float)(xC - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += width*height; d_dest += width*height;
wP3 = wP2; wP2 = wP1; wP1 = wC;
}
d_src -= width*height;
d_dest -= width*height;
// boundary correction for the anti-causal pass
float up = (float)*d_src/(1.0+b1+b2+b3);
float vp = (float)up/(1.0+b1+b2+b3);
float out = 0.f;
out = (float)M11*(*d_dest-up) + M12*(*(d_dest-width*height)-up) + M13*(*(d_dest-2*width*height)-up)+vp;
outF1 = (float)M21*(*d_dest-up) + M22*(*(d_dest-width*height)-up) + M23*(*(d_dest-2*width*height)-up)+vp;
outF2 = (float)M31*(*d_dest-up) + M32*(*(d_dest-width*height)-up) + M33*(*(d_dest-2*width*height)-up)+vp;
out *= B; outF1 *= B; outF2 *= B;
outF3 = outF2; outF2 = outF1; outF1 = out;
*d_dest = (int)out;
d_src -= width*height;
d_dest -= width*height;
// backward anti-causal pass
for(y=depth-1-1;y>=0;y--)
{
float wC = (float)*d_dest;
out = (float)(B*wC - b1*outF1 - b2*outF2 - b3*outF3)/b0;
*d_dest = (int)out;
d_src -= width*height; d_dest -= width*height;
outF3 = outF2; outF2 = outF1; outF1 = out;
}
} break;
case 1:
{
// first derivative: feed central differences along z into the filter
float xP1 = (float)*(d_src);
float xF1 = (float)*(d_src + 1*width*height);
wP1 = (float)(xF1 - xP1)/2.0*(1.0+b1+b2+b3); wP3 = wP2 = wP1;
float wC = ((xF1- xP1)/2.0 - b1*wP1 - b2*wP1 - b3*wP1)/b0;
*d_dest = (int)wC;
d_src += width*height; d_dest += width*height;
wP3 = wP2; wP2 = wP1; wP1 = wC;
for(y=0;y<depth-2;y++)
{
xP1 = (float)*(d_src - width*height);
xF1 = (float)*(d_src + width*height);
wC = (float)((xF1- xP1)/2.0 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += width*height; d_dest += width*height;
wP3 = wP2; wP2 = wP1; wP1 = wC;
}
// last sample: one-sided difference at the volume boundary
xP1 = (float)*(d_src - width*height);
xF1 = (float)*d_src;
wC = (float)((xF1- xP1)/2.0 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
wP3 = wP2; wP2 = wP1; wP1 = wC;
float up = (*d_src - *(d_src-width*height))/2.0*(1.0+b1+b2+b3);
//float up = 0;
float vp = up/(1.0+b1+b2+b3);
float out = 0.f;
out = (float)M11*(*d_dest-up) + M12*(*(d_dest-width*height)-up) + M13*(*(d_dest-2*width*height)-up)+vp;
outF1 = (float)M21*(*d_dest-up) + M22*(*(d_dest-width*height)-up) + M23*(*(d_dest-2*width*height)-up)+vp;
outF2 = (float)M31*(*d_dest-up) + M32*(*(d_dest-width*height)-up) + M33*(*(d_dest-2*width*height)-up)+vp;
out *= B; outF1 *= B; outF2 *= B;
outF3 = outF2; outF2 = outF1; outF1 = out;
*d_dest = (int)out;
d_src -= width*height;
d_dest -= width*height;
for(y=depth-2;y>=0;y--)
{
float wC = (float)*d_dest;
out = (float)(B*wC - b1*outF1 - b2*outF2 - b3*outF3)/b0;
*d_dest = (int)out;
d_src -= width*height; d_dest -= width*height;
outF3 = outF2; outF2 = outF1; outF1 = out;
}
} break;
case 2:
{
// second derivative: feed the discrete Laplacian along z into the filter
float xP1 = (float)*d_src;
float xC = (float)*d_src;
float xF1 = (float)*(d_src+width*height);
wP1 = 0.0/(1.0+b1+b2+b3); wP3 = wP2 = wP1;
float wC = (float)((xF1 - 2*xC + xP1) - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += width*height; d_dest += width*height;
wP3 = wP2; wP2 = wP1; wP1 = wC;
for(y=0;y<depth-1-1;y++)
{
xC = (float)*d_src;
xP1 = (float)*(d_src-width*height);
xF1 = (float)*(d_src+width*height);
wC = (float)(xF1 - 2*xC + xP1 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
d_src += width*height; d_dest += width*height;
wP3 = wP2; wP2 = wP1; wP1 = wC;
}
xC = (float)*d_src;
xP1 = (float)*(d_src-width*height);
xF1 = (float)*d_src;
wC = (float)(xF1 - 2*xC + xP1 - b1*wP1 - b2*wP2 - b3*wP3)/b0;
*d_dest = (int)wC;
wP3 = wP2; wP2 = wP1; wP1 = wC;
float up = 0;
float vp = up/(1.0+b1+b2+b3);
float out = 0.f;
out = (float)M11*(*d_dest-up) + M12*(*(d_dest-width*height)-up) + M13*(*(d_dest-2*width*height)-up)+vp;
outF1 = (float)M21*(*d_dest-up) + M22*(*(d_dest-width*height)-up) + M23*(*(d_dest-2*width*height)-up)+vp;
outF2 = (float)M31*(*d_dest-up) + M32*(*(d_dest-width*height)-up) + M33*(*(d_dest-2*width*height)-up)+vp;
out *= B; outF1 *= B; outF2 *= B;
outF3 = outF2; outF2 = outF1; outF1 = out;
*d_dest = (int)out;
d_src -= width*height;
d_dest -= width*height;
for(y=depth-2;y>=0;y--)
{
float wC = (float)*d_dest;
out = (float)(B*wC - b1*outF1 - b2*outF2 - b3*outF3)/b0;
*d_dest = (int)out;
d_src -= width*height; d_dest -= width*height;
outF3 = outF2; outF2 = outF1; outF1 = out;
}
} break;
}
}
/* Host wrapper: launches the Z-direction pass of the recursive Gaussian.
 * Grid is (n, height): gridDim.y selects the row, n blocks of `nthread`
 * threads cover the columns of that row; each thread filters one column
 * through the whole volume (stride width*height).
 * The extents are forwarded as (depth, height, width), matching the kernel's
 * (depth, height, width) parameter order. See Call_d_recursiveGaussianY for
 * the meaning of b0..b3, B, order and M11..M33. */
extern "C" void Call_d_recursiveGaussianZ(int *d_src, int *d_dest, int width, int height, int depth,
float b0, float b1, float b2, float b3, float B, int order, int n, int nthread,
float M11, float M12, float M13,
float M21, float M22, float M23,
float M31, float M32, float M33)
{
d_recursiveGaussianZ<<<dim3(n, height), nthread>>>(d_src, d_dest, depth, height, width,
b0, b1, b2, b3, B, order,
M11, M12, M13,
M21, M22, M23,
M31, M32, M33);
}
#endif // CUDARECURSIVEGAUSSIAN_KERNEL_CU
|
b11c5767b47a3a8b34261d45c3ebd76095e62b55.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/gather_tree_kernel.h"
#include <algorithm>
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
/* Backtracks beam-search parent pointers into contiguous output sequences.
 * ids/parents/out all have shape [max_length, batch_size, beam_size]
 * (flattened row-major); one thread handles one (batch, beam) pair and
 * walks from the last time step back to step 0, following parents.
 * NOTE(review): only `parent < beam_size` is enforced - a negative parent
 * index would read out of bounds; presumably upstream guarantees
 * non-negative parents. */
template <typename T>
__global__ void GatherTree(const T *ids_data,
const T *parents_data,
T *out_data,
const int64_t max_length,
const int64_t batch_size,
const int64_t beam_size) {
CUDA_KERNEL_LOOP(i, batch_size * beam_size) {
int batch = i / beam_size;
int beam = i % beam_size;
// index of this (batch, beam) slot at the final time step
auto idx =
(max_length - 1) * batch_size * beam_size + batch * beam_size + beam;
out_data[idx] = ids_data[idx];
auto parent = parents_data[idx];
// walk backwards through time, following the parent chain
for (int step = max_length - 2; step >= 0; step--) {
PADDLE_ENFORCE((parent < beam_size),
"The parents must be less than beam size, but received"
"parents %ld is greater than or equal to beam size %ld. ",
parent,
beam_size);
idx = step * batch_size * beam_size + batch * beam_size;
out_data[idx + beam] = ids_data[idx + parent];
parent = parents_data[idx + parent];
}
}
}
/* Host entry: validates inputs, sizes the grid from the device's physical
 * thread count, and launches GatherTree over batch_size*beam_size threads.
 * Expects ids/parents with dims [max_length, batch_size, beam_size]. */
template <typename T, typename Context>
void GatherTreeKernel(const Context &dev_ctx,
const DenseTensor &ids,
const DenseTensor &parents,
DenseTensor *out) {
const auto *ids_data = ids.data<T>();
const auto *parents_data = parents.data<T>();
T *out_data = dev_ctx.template Alloc<T>(out);
PADDLE_ENFORCE_NOT_NULL(ids_data,
phi::errors::InvalidArgument(
"Input(Ids) of gather_tree should not be null."));
PADDLE_ENFORCE_NOT_NULL(
parents_data,
phi::errors::InvalidArgument(
"Input(Parents) of gather_tree should not be null."));
auto &ids_dims = ids.dims();
int64_t max_length = ids_dims[0];
int64_t batch_size = ids_dims[1];
int64_t beam_size = ids_dims[2];
const int block = 512;
// FIX: hipify left bare `::min`/`::max` here; the original CUDA source
// (and <algorithm>, included above) provide std::min/std::max.
int max_threads =
std::min(static_cast<int64_t>(dev_ctx.GetMaxPhysicalThreadCount()),
batch_size * beam_size);
const int grid = std::max(max_threads / block, 1);
hipLaunchKernelGGL(( GatherTree), dim3(grid), dim3(block), 0, 0,
ids_data, parents_data, out_data, max_length, batch_size, beam_size);
}
} // namespace phi
PD_REGISTER_KERNEL(
gather_tree, GPU, ALL_LAYOUT, phi::GatherTreeKernel, int, int64_t) {}
| b11c5767b47a3a8b34261d45c3ebd76095e62b55.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/gather_tree_kernel.h"
#include <algorithm>
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
/* CUDA version: backtracks beam-search parent pointers into contiguous
 * output sequences. ids/parents/out all have shape
 * [max_length, batch_size, beam_size] (flattened row-major); one thread
 * handles one (batch, beam) pair and walks from the last time step back to
 * step 0, following parents.
 * NOTE(review): only `parent < beam_size` is enforced - a negative parent
 * index would read out of bounds; presumably upstream guarantees
 * non-negative parents. */
template <typename T>
__global__ void GatherTree(const T *ids_data,
const T *parents_data,
T *out_data,
const int64_t max_length,
const int64_t batch_size,
const int64_t beam_size) {
CUDA_KERNEL_LOOP(i, batch_size * beam_size) {
int batch = i / beam_size;
int beam = i % beam_size;
// index of this (batch, beam) slot at the final time step
auto idx =
(max_length - 1) * batch_size * beam_size + batch * beam_size + beam;
out_data[idx] = ids_data[idx];
auto parent = parents_data[idx];
// walk backwards through time, following the parent chain
for (int step = max_length - 2; step >= 0; step--) {
PADDLE_ENFORCE((parent < beam_size),
"The parents must be less than beam size, but received"
"parents %ld is greater than or equal to beam size %ld. ",
parent,
beam_size);
idx = step * batch_size * beam_size + batch * beam_size;
out_data[idx + beam] = ids_data[idx + parent];
parent = parents_data[idx + parent];
}
}
}
/* Host entry: validates inputs, sizes the grid from the device's physical
 * thread count, and launches GatherTree over batch_size*beam_size threads.
 * Expects ids/parents with dims [max_length, batch_size, beam_size]. */
template <typename T, typename Context>
void GatherTreeKernel(const Context &dev_ctx,
const DenseTensor &ids,
const DenseTensor &parents,
DenseTensor *out) {
const auto *ids_data = ids.data<T>();
const auto *parents_data = parents.data<T>();
T *out_data = dev_ctx.template Alloc<T>(out);
PADDLE_ENFORCE_NOT_NULL(ids_data,
phi::errors::InvalidArgument(
"Input(Ids) of gather_tree should not be null."));
PADDLE_ENFORCE_NOT_NULL(
parents_data,
phi::errors::InvalidArgument(
"Input(Parents) of gather_tree should not be null."));
auto &ids_dims = ids.dims();
int64_t max_length = ids_dims[0];
int64_t batch_size = ids_dims[1];
int64_t beam_size = ids_dims[2];
const int block = 512;
// cap the launch at one thread per (batch, beam) pair
int max_threads =
std::min(static_cast<int64_t>(dev_ctx.GetMaxPhysicalThreadCount()),
batch_size * beam_size);
const int grid = std::max(max_threads / block, 1);
GatherTree<<<grid, block>>>(
ids_data, parents_data, out_data, max_length, batch_size, beam_size);
}
} // namespace phi
PD_REGISTER_KERNEL(
gather_tree, GPU, ALL_LAYOUT, phi::GatherTreeKernel, int, int64_t) {}
|
05bd8dc96d4e4c923437a29fc3073523fd59bf93.hip | // !!! This is a file automatically generated by hipify!!!
#include<string.h>
#include<malloc.h>
#include<stdio.h>
#include<omp.h>
#include "graph.h"
#include "timer.h"
//#include "algorithm.h"
#include "hip/hip_runtime.h"
// The number of partitioning the outer chunk must be greater or equal to 1
#define ITERATE_IN_OUTER 2
#define NUM_THREADS 2
#define PAGERANK_COEFFICIENT 0.85f
#define PAGERANK_THRESHOLD 0.005f
#ifdef __CUDA_RUNTIME_H__
#define HANDLE_ERROR(err) if (err != hipSuccess) { \
printf("CUDA Error in %s at line %d: %s\n", \
__FILE__, __LINE__, hipGetErrorString(hipGetLastError()));\
exit(1);\
}
#endif // #ifdef __CUDA_RUNTIME_H__
/* Scatter phase of PageRank over the cross-partition ("outer") edges.
 * Each thread walks the edge list with a grid-stride loop; for every edge
 * whose source has a non-zero out-degree it atomically accumulates
 * values[src]/out_degree[src] into add_values[dest]. */
static __global__ void pr_kernel_outer(
const int edge_num,
const int * const edge_src,
const int * const edge_dest,
const int * const out_degree,
const float * const values,
float * const add_values)
{
const int total_threads = blockDim.x * gridDim.x;
const int first = threadIdx.x + blockIdx.x * blockDim.x;
for (int e = first; e < edge_num; e += total_threads)
{
const int src = edge_src[e];
const int dst = edge_dest[e];
const int deg = out_degree[src];
// dangling sources (deg == 0) contribute nothing
if (deg)
{
atomicAdd(&add_values[dst], values[src] / deg);
}
}
}
/* PageRank over partition-local ("inner") edges, fused with the convergence
 * check: first scatter values[src]/out_degree[src] into add_values[dest],
 * then recompute each touched destination's value and raise *continue_flag
 * if any change exceeds PAGERANK_THRESHOLD.
 * NOTE(review): __syncthreads() only synchronizes within a block - the check
 * phase can read add_values entries still being updated by other blocks, so
 * the flag is a heuristic rather than an exact convergence test; confirm
 * this is the intended trade-off. */
static __global__ void pr_kernel_inner(
const int edge_num,
const int * const edge_src,
const int * const edge_dest,
const int * const out_degree,
const float * const values,
float * const add_values,
int * const continue_flag)
{
// total thread number & thread index of this thread
int n = blockDim.x * gridDim.x;
int index = threadIdx.x + blockIdx.x * blockDim.x;
int flag=0;
float sum=0.0f;
// scatter phase (grid-stride loop over the inner edge list)
for (int i = index; i < edge_num; i+=n)
{
int src=edge_src[i];
int dest=edge_dest[i];
if (out_degree[src])
{
sum=values[src]/out_degree[src];
atomicAdd(&add_values[dest],sum);
}
}
__syncthreads();
//check
float new_value=0.0f;
for (int i = index; i < edge_num; i+=n)
{
// apply the damping formula and compare against the old value
new_value=add_values[edge_dest[i]]*PAGERANK_COEFFICIENT+1.0f - PAGERANK_COEFFICIENT;
if (fabs(new_value-values[edge_dest[i]])>PAGERANK_THRESHOLD)
{
flag=1;
}
}
// all racing writers store the same value, so this write is benign
if (flag==1) *continue_flag=1;
}
/* Writes the merged accumulator back into the value array for every vertex
 * that appears as a destination of an outer edge, then clears that
 * accumulator slot for the next iteration.
 * NOTE(review): vertices reached by several outer edges are processed by
 * several threads; the value written is identical, but the read/clear pair
 * is racy - presumably harmless, confirm. */
static __global__ void kernel_extract_values(
int const edge_num,
int const vertex_num,
int * const edge_dest,
float * const add_value,
float * const value
)
{
const int total_threads = blockDim.x * gridDim.x;
const int first = threadIdx.x + blockIdx.x * blockDim.x;
for (int e = first; e < edge_num; e += total_threads)
{
const int dst = edge_dest[e];
// arrays are sized vertex_num+1, so <= is the in-bounds test
if (dst <= vertex_num)
{
value[dst] = add_value[dst];
add_value[dst] = 0.0;
}
}
}
/* Merges the per-GPU partial PageRank sums for replicated vertices
 * (copy_num[i] > 1): sums the gpu_num accumulators, applies the damping
 * formula and stores the result into value_gpu[i]. Work is split across
 * NUM_THREADS OpenMP threads in a strided fashion.
 * NOTE(review): `flag` is passed BY VALUE, so the convergence signal set
 * here never reaches the caller - pr_gpu's local flag is unaffected.
 * Making it a pointer would fix this but changes the interface; flagged
 * for a coordinated change. */
void merge_value_on_cpu(
int const vertex_num,
int const gpu_num,
float * const *h_add_value,
float * const value_gpu ,
int *copy_num,
int flag)
{
int i;
omp_set_num_threads(NUM_THREADS);
#pragma omp parallel private(i)
{
// FIX: `id` and `new_value` were declared outside the parallel region and
// therefore shared - a data race. Both are now thread-private.
int id = omp_get_thread_num();
for (i = id; i < vertex_num; i = i + NUM_THREADS)
{
if (copy_num[i] > 1)
{
float new_value = 0.0f;
for (int j = 0; j < gpu_num; ++j)
{
new_value += h_add_value[j][i];
}
new_value = PAGERANK_COEFFICIENT*new_value + 1.0 - PAGERANK_COEFFICIENT;
// FIX: was fabs(new_value - value_gpu[i] > PAGERANK_THRESHOLD), which
// compared BEFORE taking fabs (so negative changes never set the flag).
if (fabs(new_value - value_gpu[i]) > PAGERANK_THRESHOLD)
flag = 1;
value_gpu[i] = new_value;
}
}
}
}
/* Final gather: recomputes the damped PageRank value of every replicated
 * vertex (copy_num[i] > 1) from the per-GPU accumulators and writes it into
 * value_gpu. Same merge as merge_value_on_cpu but without the convergence
 * check; work is split across NUM_THREADS OpenMP threads in strides. */
void Gather_result_pr(
int const vertex_num,
int const gpu_num,
int * const copy_num,
float * const *h_add_value,
float * const value_gpu
)
{
int i;
omp_set_num_threads(NUM_THREADS);
#pragma omp parallel private(i)
{
// FIX: `id` and `new_value` were declared outside the parallel region and
// therefore shared - a data race. Both are now thread-private.
int id = omp_get_thread_num();
for (i = id; i < vertex_num; i = i + NUM_THREADS)
{
if (copy_num[i] > 1)
{
float new_value = 0.0f;
for (int j = 0; j < gpu_num; ++j)
{
new_value += h_add_value[j][i];
}
new_value = PAGERANK_COEFFICIENT*new_value + 1.0 - PAGERANK_COEFFICIENT;
value_gpu[i] = new_value;
}
}
}
}
/* PageRank algorithm on a multi-GPU edge partition.
 * g[i] holds GPU i's partition (inner edges stay local, outer edges cross
 * partitions); value_gpu receives the final per-vertex ranks; out_degree and
 * copy_num are global per-vertex arrays (copy_num[i] > 1 marks a vertex
 * replicated on several GPUs). Each iteration: outer-edge scatter (chunked
 * over ITERATE_IN_OUTER streams, overlapped with D2H copies of the partial
 * sums), inner-edge scatter + convergence flag, CPU merge of replicated
 * vertices, then broadcast of the merged values back to every GPU.
 * NOTE(review): merge_value_on_cpu receives `flag` by value, so only the
 * per-GPU inner-kernel flags actually drive the loop; position_id is unused.
 */
void pr_gpu(Graph **g,int gpu_num,float *value_gpu,DataSize *dsize, int* out_degree, int *copy_num, int **position_id)
{
printf("PageRank is running on GPU...............\n");
printf("Start malloc edgelist...\n");
// per-GPU host/device buffers (one slot per GPU)
int **h_flag=(int **)malloc(sizeof(int *)*gpu_num);
int vertex_num=dsize->vertex_num;
int **d_edge_inner_src=(int **)malloc(sizeof(int *)*gpu_num);
int **d_edge_inner_dst=(int **)malloc(sizeof(int *)*gpu_num);
int **d_edge_outer_src=(int **)malloc(sizeof(int *)*gpu_num);
int **d_edge_outer_dst=(int **)malloc(sizeof(int *)*gpu_num);
float **h_value=(float **)malloc(sizeof(float *)* gpu_num);
float **h_add_value=(float **)malloc(sizeof(float *)*gpu_num);
float **d_value=(float **)malloc(sizeof(float *)*gpu_num);
//pr different
//float **d_tem_value=(float **)malloc(sizeof(float *)*gpu_num);
float **d_add_value=(float **)malloc(sizeof(float *)*gpu_num);
int **d_outdegree=(int **)malloc(sizeof(int *)*gpu_num);
int **d_flag=(int **)malloc(sizeof(int *)*gpu_num);
/* determine the size of outer vertex in one process*/
// chunk size: smallest outer-edge count across GPUs, split into
// ITERATE_IN_OUTER pieces; the remainder forms a final (larger) chunk
int tmp_per_size = min_num_outer_edge(g,gpu_num);
int outer_per_size=tmp_per_size/ITERATE_IN_OUTER;
int iterate_in_outer=ITERATE_IN_OUTER+1;
int *last_outer_per_size=(int *)malloc(sizeof(int)*gpu_num);
memset(last_outer_per_size,0,sizeof(int)*gpu_num);
for (int i = 0; i < gpu_num; ++i)
{
h_value[i]=(float *)malloc(sizeof(float)*(vertex_num+1));
h_add_value[i]=(float *)malloc(sizeof(float)*(vertex_num+1));
//memset 0.0 or 1.0
memset(h_value[i],0.0,sizeof(float)*(vertex_num+1));
h_flag[i]=(int *)malloc(sizeof(int));
}
/*Cuda Malloc*/
/* Malloc stream*/
// one stream per outer chunk plus one for the inner edges, per GPU
hipStream_t **stream;
hipEvent_t tmp_start,tmp_stop;
stream=(hipStream_t **)malloc(gpu_num*sizeof(hipStream_t*));
hipEvent_t * start_outer,*stop_outer,*start_inner,*stop_inner,*start_asyn,*stop_asyn;
start_outer=(hipEvent_t *)malloc(gpu_num*sizeof(hipEvent_t));
stop_outer=(hipEvent_t *)malloc(gpu_num*sizeof(hipEvent_t));
start_inner=(hipEvent_t *)malloc(gpu_num*sizeof(hipEvent_t));
stop_inner=(hipEvent_t *)malloc(gpu_num*sizeof(hipEvent_t));
start_asyn=(hipEvent_t *)malloc(gpu_num*sizeof(hipEvent_t));
stop_asyn=(hipEvent_t *)malloc(gpu_num*sizeof(hipEvent_t));
for (int i = 0; i < gpu_num; ++i)
{
hipSetDevice(i);
stream[i]=(hipStream_t *)malloc((iterate_in_outer+1)*sizeof(hipStream_t));
HANDLE_ERROR(hipEventCreate(&start_outer[i],0));
HANDLE_ERROR(hipEventCreate(&stop_outer[i],0));
HANDLE_ERROR(hipEventCreate(&start_inner[i],0));
HANDLE_ERROR(hipEventCreate(&stop_inner[i],0));
HANDLE_ERROR(hipEventCreate(&start_asyn[i],0));
HANDLE_ERROR(hipEventCreate(&stop_asyn[i],0));
for (int j = 0; j <= iterate_in_outer; ++j)
{
HANDLE_ERROR(hipStreamCreate(&stream[i][j]));
}
}
// upload edge lists (outer edges chunk-by-chunk on their own streams),
// initial values and out-degrees to every GPU
for (int i = 0; i < gpu_num; ++i)
{
hipSetDevice(i);
int out_size=g[i]->edge_outer_num;
int inner_size=g[i]->edge_num - out_size;
HANDLE_ERROR(hipMalloc((void **)&d_edge_outer_src[i],sizeof(int)*out_size));
HANDLE_ERROR(hipMalloc((void **)&d_edge_outer_dst[i],sizeof(int)*out_size));
if (outer_per_size!=0 && outer_per_size < out_size)
{
for (int j = 1; j < iterate_in_outer; ++j)
{
HANDLE_ERROR(hipMemcpyAsync((void *)(d_edge_outer_src[i]+(j-1)*outer_per_size),(void *)(g[i]->edge_outer_src+(j-1)*outer_per_size),sizeof(int)*outer_per_size,hipMemcpyHostToDevice, stream[i][j-1]));
HANDLE_ERROR(hipMemcpyAsync((void *)(d_edge_outer_dst[i]+(j-1)*outer_per_size),(void *)(g[i]->edge_outer_dst+(j-1)*outer_per_size),sizeof(int)*outer_per_size,hipMemcpyHostToDevice, stream[i][j-1]));
}
}
last_outer_per_size[i]=g[i]->edge_outer_num-outer_per_size * (iterate_in_outer-1);
if (last_outer_per_size[i]>0 && iterate_in_outer>1 )
{
HANDLE_ERROR(hipMemcpyAsync((void *)(d_edge_outer_src[i]+(iterate_in_outer-1)*outer_per_size),(void *)(g[i]->edge_outer_src+(iterate_in_outer-1)*outer_per_size),sizeof(int)*last_outer_per_size[i],hipMemcpyHostToDevice, stream[i][iterate_in_outer-1]));
HANDLE_ERROR(hipMemcpyAsync((void *)(d_edge_outer_dst[i]+(iterate_in_outer-1)*outer_per_size),(void *)(g[i]->edge_outer_dst+(iterate_in_outer-1)*outer_per_size),sizeof(int)*last_outer_per_size[i],hipMemcpyHostToDevice, stream[i][iterate_in_outer-1]));
}
HANDLE_ERROR(hipMalloc((void **)&d_edge_inner_src[i],sizeof(int)*inner_size));
HANDLE_ERROR(hipMalloc((void **)&d_edge_inner_dst[i],sizeof(int)*inner_size));
HANDLE_ERROR(hipMemcpyAsync((void *)d_edge_inner_src[i],(void *)g[i]->edge_inner_src,sizeof(int)*inner_size,hipMemcpyHostToDevice,stream[i][iterate_in_outer]));
HANDLE_ERROR(hipMemcpyAsync((void *)d_edge_inner_dst[i],(void *)g[i]->edge_inner_dst,sizeof(int)*inner_size,hipMemcpyHostToDevice,stream[i][iterate_in_outer]));
HANDLE_ERROR(hipMalloc((void **)&d_value[i],sizeof(float)*(vertex_num+1)));
HANDLE_ERROR(hipMemcpyAsync((void *)d_value[i],(void *)h_value[i],sizeof(float)*(vertex_num+1),hipMemcpyHostToDevice,stream[i][0]));
//pr different
HANDLE_ERROR(hipMalloc((void **)&d_add_value[i],sizeof(float)*(vertex_num+1)));
//"memset only works for bytes. If you're using the runtime API, you can use thrust::fill() instead"
//HANDLE_ERROR(hipMemset((void **)&d_add_value[i],0,sizeof(float)*(vertex_num+1)));
//HANDLE_ERROR(hipMalloc((void **)&d_tem_value[i],sizeof(float)*(vertex_num+1)));
//HANDLE_ERROR(hipMalloc((void **)&d_tem_value[i],sizeof(float)*(vertex_num+1)));
HANDLE_ERROR(hipMalloc((void **)&d_outdegree[i],sizeof(int)*(vertex_num+1)));
HANDLE_ERROR(hipMemcpyAsync(d_outdegree[i],out_degree, sizeof(int)*(vertex_num+1),hipMemcpyHostToDevice,stream[i][0]));
HANDLE_ERROR(hipMalloc((void **)&d_flag[i],sizeof(int)));
}
printf("Malloc is finished!\n");
/* Before While: Time Initialization */
float *outer_compute_time,*inner_compute_time,*compute_time,*total_compute_time,*extract_bitmap_time;
float gather_time=0.0;
float cpu_gather_time=0.0;
float total_time=0.0;
float record_time=0.0;
outer_compute_time=(float *)malloc(sizeof(float)*gpu_num);
inner_compute_time=(float *)malloc(sizeof(float)*gpu_num);
compute_time=(float *)malloc(sizeof(float)*gpu_num);
total_compute_time=(float *)malloc(sizeof(float)*gpu_num);
extract_bitmap_time=(float *)malloc(sizeof(float)*gpu_num);
memset(outer_compute_time,0,sizeof(float)*gpu_num);
memset(inner_compute_time,0,sizeof(float)*gpu_num);
memset(compute_time,0,sizeof(float)*gpu_num);
/* Before While: Variable Initialization */
int step=0;
int flag=0;
int inner_edge_num=0;
printf("Computing......\n");
// main iteration loop: scatter on GPUs, merge on CPU, broadcast back;
// stops on convergence (no flag set) or after 10 iterations
do
{
flag=0;
for (int i = 0; i <gpu_num; ++i)
{
memset(h_flag[i],0,sizeof(int));
hipSetDevice(i);
HANDLE_ERROR(hipMemcpyAsync(d_flag[i],h_flag[i],sizeof(int),hipMemcpyHostToDevice,stream[i][0]));
HANDLE_ERROR(hipEventRecord(start_outer[i], stream[i][0]));
//kernel of outer edgelist
if (outer_per_size!=0 && outer_per_size < g[i]->edge_outer_num)
{
for (int j = 1; j < iterate_in_outer; ++j)
{
hipLaunchKernelGGL(( pr_kernel_outer), dim3(208),dim3(128),0,stream[i][j-1],
outer_per_size,
d_edge_outer_src[i]+(j-1)*outer_per_size,
d_edge_outer_dst[i]+(j-1)*outer_per_size,
d_outdegree[i],
d_value[i],
d_add_value[i]);
//TODO didn't not realize overlap
HANDLE_ERROR(hipMemcpyAsync((void *)(h_add_value[i]),(void *)(d_add_value[i]),sizeof(float)*(vertex_num+1),hipMemcpyDeviceToHost,stream[i][j-1]));
}
}
last_outer_per_size[i]=g[i]->edge_outer_num-outer_per_size * (iterate_in_outer-1);
if (last_outer_per_size[i]>0 && iterate_in_outer>1 )
{
hipLaunchKernelGGL(( pr_kernel_outer), dim3(208),dim3(128),0,stream[i][iterate_in_outer-1],
last_outer_per_size[i],
d_edge_outer_src[i]+(iterate_in_outer-1)*outer_per_size,
d_edge_outer_dst[i]+(iterate_in_outer-1)*outer_per_size,
d_outdegree[i],
d_value[i],
d_add_value[i]);
//TODO didn't not realize
HANDLE_ERROR(hipMemcpyAsync((void *)(h_add_value[i]),(void *)(d_add_value[i]),sizeof(float)*(vertex_num+1),hipMemcpyDeviceToHost,stream[i][iterate_in_outer-1]));
}
HANDLE_ERROR(hipEventRecord(stop_outer[i], stream[i][iterate_in_outer-1]));
HANDLE_ERROR(hipEventRecord(start_inner[i], stream[i][iterate_in_outer]));
//inner+flag
inner_edge_num=g[i]->edge_num-g[i]->edge_outer_num;
if (inner_edge_num>0)
{
hipLaunchKernelGGL(( pr_kernel_inner), dim3(208),dim3(128),0,stream[i][iterate_in_outer],
inner_edge_num,
d_edge_inner_src[i],
d_edge_inner_dst[i],
d_outdegree[i],
d_value[i],
d_add_value[i],
d_flag[i]);
HANDLE_ERROR(hipMemcpyAsync(h_flag[i], d_flag[i],sizeof(int),hipMemcpyDeviceToHost,stream[i][iterate_in_outer]));
}
HANDLE_ERROR(hipEventRecord(stop_inner[i],stream[i][iterate_in_outer]));
}
//merge bitmap on gpu
// merge the partial sums of replicated vertices on the CPU
double t1=omp_get_wtime();
merge_value_on_cpu(vertex_num, gpu_num, h_add_value, value_gpu, copy_num, flag);
double t2=omp_get_wtime();
record_time=(t2-t1)*1000;
gather_time+=record_time;
// broadcast the merged values back and fold them into each GPU's d_value
for (int i = 0; i < gpu_num; ++i)
{
hipSetDevice(i);
//extract bitmap to the value
HANDLE_ERROR(hipMemcpyAsync(d_add_value[i], value_gpu,sizeof(float)*(vertex_num+1),hipMemcpyHostToDevice,stream[i][0]));
HANDLE_ERROR(hipEventRecord(start_asyn[i], stream[i][0]));
// d_value copy to the value of outer vertices
hipLaunchKernelGGL(( kernel_extract_values), dim3(208),dim3(128),0,stream[i][0],
g[i]->edge_outer_num,
vertex_num,
d_edge_outer_dst[i],
d_add_value[i],
d_value[i]
);
HANDLE_ERROR(hipEventRecord(stop_asyn[i], stream[i][0]));
}
for (int i = 0; i < gpu_num; ++i)
{
flag=flag||h_flag[i][0];
}
step++;
//collect time different stream
for (int i = 0; i < gpu_num; ++i)
{
hipSetDevice(i);
HANDLE_ERROR(hipEventSynchronize(stop_outer[i]));
HANDLE_ERROR(hipEventSynchronize(stop_inner[i]));
HANDLE_ERROR(hipEventSynchronize(stop_asyn[i]));
HANDLE_ERROR(hipEventElapsedTime(&record_time, start_outer[i], stop_outer[i]));
outer_compute_time[i]+=record_time;
HANDLE_ERROR(hipEventElapsedTime(&record_time, start_inner[i], stop_inner[i]));
inner_compute_time[i]+=record_time;
HANDLE_ERROR(hipEventElapsedTime(&record_time, start_asyn[i], stop_asyn[i]));
extract_bitmap_time[i]+=record_time;
total_compute_time[i]=outer_compute_time[i]+extract_bitmap_time[i]-inner_compute_time[i]>0?(outer_compute_time[i]+extract_bitmap_time[i]):inner_compute_time[i];
}
}while(flag && step<10);
//Todo to get the true value of inner vertice and outer vertice
for (int i = 0; i < gpu_num; ++i)
{
hipSetDevice(i);
hipMemcpyAsync((void *)h_value[i],(void *)d_value[i],sizeof(int)*(vertex_num+1),hipMemcpyDeviceToHost,stream[i][0]);
}
printf("Gather result on cpu....\n");
Gather_result_pr(vertex_num,gpu_num,copy_num,h_add_value,value_gpu);
printf("Time print\n");
//collect the information of time
float total_time_n=0.0;
for (int i = 0; i < gpu_num; ++i)
{
if(total_time_n<total_compute_time[i])
total_time_n=total_compute_time[i];
}
total_time=total_time_n>gather_time?total_time_n:gather_time;
printf("Total time of pr_gpu is %.3f ms\n",total_time);
printf("Elapsed time of pr_gpu is %.3f ms\n", total_time/step);
printf("-------------------------------------------------------\n");
printf("Detail:\n");
printf("\n");
for (int i = 0; i < gpu_num; ++i)
{
printf("GPU %d\n",i);
printf("Outer_Compute_Time(include pre-stage): %.3f ms\n", outer_compute_time[i]);
printf("Inner_Compute_Time: %.3f ms\n", inner_compute_time[i]);
printf("Total Compute_Time %.3f ms\n", total_compute_time[i]);
printf("Extract_Bitmap_Time %.3f ms\n", extract_bitmap_time[i]);
}
printf("CPU \n");
printf("CPU_Gather_Time: %.3f ms\n", gather_time);
printf("--------------------------------------------------------\n");
//clean
// NOTE(review): d_add_value/d_outdegree and the host h_value buffers are
// not freed here - presumably acceptable at program teardown, confirm.
for (int i = 0; i < gpu_num; ++i)
{
hipSetDevice(i);
//HANDLE_ERROR(hipEventDestroy(start[i]));
//HANDLE_ERROR(hipEventDestroy(stop[i]));
HANDLE_ERROR(hipFree(d_edge_outer_src[i]));
HANDLE_ERROR(hipFree(d_edge_outer_dst[i]));
HANDLE_ERROR(hipFree(d_edge_inner_src[i]));
HANDLE_ERROR(hipFree(d_edge_inner_dst[i]));
HANDLE_ERROR(hipFree(d_value[i]));
HANDLE_ERROR(hipFree(d_flag[i]));
HANDLE_ERROR(hipDeviceReset());
//error
//free(h_value[i]);
free(h_flag[i]);
free(stream[i]);
}
free(outer_compute_time);
free(inner_compute_time);
free(compute_time);
}
| 05bd8dc96d4e4c923437a29fc3073523fd59bf93.cu | #include<string.h>
#include<malloc.h>
#include<stdio.h>
#include<omp.h>
#include "graph.h"
#include "timer.h"
//#include "algorithm.h"
#include "cuda_runtime.h"
// The number of partitioning the outer chunk must be greater or equal to 1
#define ITERATE_IN_OUTER 2
#define NUM_THREADS 2
#define PAGERANK_COEFFICIENT 0.85f
#define PAGERANK_THRESHOLD 0.005f
#ifdef __CUDA_RUNTIME_H__
// Abort-on-error wrapper for CUDA runtime calls.
// Fixes over the previous version:
//  * report the string of the error actually returned by `call` -- the old
//    macro printed cudaGetErrorString(cudaGetLastError()), which can describe
//    a different error and clears the sticky error state as a side effect;
//  * the call expression is evaluated exactly once into a local;
//  * do { } while (0) makes the macro statement-safe (no dangling-else).
#define HANDLE_ERROR(call)                                                  \
    do {                                                                    \
        cudaError_t handle_error_err_ = (call);                             \
        if (handle_error_err_ != cudaSuccess) {                             \
            printf("CUDA Error in %s at line %d: %s\n",                     \
                   __FILE__, __LINE__,                                      \
                   cudaGetErrorString(handle_error_err_));                  \
            exit(1);                                                        \
        }                                                                   \
    } while (0)
#endif // #ifdef __CUDA_RUNTIME_H__
// Scatter phase for cross-partition ("outer") edges.
// Every edge e pushes values[src]/out_degree[src] onto its destination's
// accumulator via atomicAdd. A grid-stride loop makes the kernel correct for
// any launch configuration; edges with a zero out-degree source contribute
// nothing.
static __global__ void pr_kernel_outer(
    const int edge_num,
    const int * const edge_src,
    const int * const edge_dest,
    const int * const out_degree,
    const float * const values,
    float * const add_values)
{
    const int stride = blockDim.x * gridDim.x;
    for (int e = threadIdx.x + blockIdx.x * blockDim.x; e < edge_num; e += stride)
    {
        const int u = edge_src[e];
        if (out_degree[u])
        {
            const float contrib = values[u] / out_degree[u];
            atomicAdd(&add_values[edge_dest[e]], contrib);
        }
    }
}
// Scatter + convergence check for GPU-local ("inner") edges.
//
// Pass 1 (grid-stride): atomically accumulate values[src]/out_degree[src]
// into add_values[dest] for every edge.
// Pass 2: recompute each edge destination's PageRank value from the
// accumulator and raise *continue_flag if any destination moved by more than
// PAGERANK_THRESHOLD.
//
// NOTE(review): __syncthreads() is a block-level barrier only, so pass 2 can
// observe partial sums still being atomically accumulated by other blocks --
// confirm the flag is only treated as a heuristic "keep iterating" signal.
static __global__ void pr_kernel_inner(
    const int edge_num,
    const int * const edge_src,
    const int * const edge_dest,
    const int * const out_degree,
    const float * const values,
    float * const add_values,
    int * const continue_flag)
{
    // total thread number & thread index of this thread
    int n = blockDim.x * gridDim.x;
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int flag=0;   // per-thread "not converged" indicator
    float sum=0.0f;
    for (int i = index; i < edge_num; i+=n)
    {
        int src=edge_src[i];
        int dest=edge_dest[i];
        if (out_degree[src])  // dangling vertices contribute nothing
        {
            sum=values[src]/out_degree[src];
            atomicAdd(&add_values[dest],sum);
        }
    }
    __syncthreads();
    //check: apply the PageRank update formula and compare against the old value
    float new_value=0.0f;
    for (int i = index; i < edge_num; i+=n)
    {
        new_value=add_values[edge_dest[i]]*PAGERANK_COEFFICIENT+1.0f - PAGERANK_COEFFICIENT;
        if (fabs(new_value-values[edge_dest[i]])>PAGERANK_THRESHOLD)
        {
            flag=1;
        }
    }
    // Benign write race: every writer stores the same value (1).
    if (flag==1) *continue_flag=1;
}
// For every destination of an outer edge, copy the host-merged value from the
// accumulator array into the working value array and clear the accumulator
// slot for the next iteration.
// NOTE(review): edges sharing a destination make the read-then-zero on
// add_value[d] racy between threads -- confirm duplicate destinations are
// benign here (all writers store the same merged value).
static __global__ void kernel_extract_values(
    int const edge_num,
    int const vertex_num,
    int * const edge_dest,
    float * const add_value,
    float * const value
    )
{
    const int step = blockDim.x * gridDim.x;
    for (int e = threadIdx.x + blockIdx.x * blockDim.x; e < edge_num; e += step)
    {
        const int d = edge_dest[e];
        if (d <= vertex_num)  // arrays are sized vertex_num+1
        {
            value[d] = add_value[d];
            add_value[d] = 0.0;
        }
    }
}
// Merge per-GPU partial PageRank sums for replicated vertices on the CPU.
//
// For every vertex replicated on more than one GPU (copy_num[i] > 1), sums
// the partial contributions h_add_value[gpu][i] from all GPUs, applies the
// PageRank update formula, and writes the merged value into value_gpu[i].
//
// Fixes over the previous version:
//  * the convergence test was `fabs(new_value - value_gpu[i] > THRESHOLD)` --
//    the comparison was evaluated *inside* fabs, so the test was meaningless;
//  * `id` and `new_value` were declared before the parallel region, making
//    them shared across OpenMP threads (a data race); they are now
//    thread-local.
//
// NOTE(review): `flag` is passed by value, so the convergence signal raised
// here is never visible to the caller -- likely a latent bug; confirm whether
// the signature should take `int *flag` (that would also require changing the
// call site in pr_gpu).
void merge_value_on_cpu(
    int const vertex_num,
    int const gpu_num,
    float * const *h_add_value,
    float * const value_gpu ,
    int *copy_num,
    int flag)
{
    omp_set_num_threads(NUM_THREADS);
    #pragma omp parallel
    {
        // Thread-local: each thread handles vertices id, id+NUM_THREADS, ...
        int id = omp_get_thread_num();
        for (int i = id; i < vertex_num; i += NUM_THREADS)
        {
            if (copy_num[i] > 1)
            {
                float new_value = 0.0f;
                for (int j = 0; j < gpu_num; ++j)
                {
                    new_value += h_add_value[j][i];
                }
                new_value = PAGERANK_COEFFICIENT * new_value + 1.0f - PAGERANK_COEFFICIENT;
                if (fabs(new_value - value_gpu[i]) > PAGERANK_THRESHOLD)
                    flag = 1;  // see NOTE above: currently has no caller-visible effect
                value_gpu[i] = new_value;
            }
        }
    }
    (void)flag;
}
// Final CPU-side gather after the iteration loop: recompute the PageRank
// value of every replicated vertex (copy_num[i] > 1) from the per-GPU partial
// sums in h_add_value. Identical to merge_value_on_cpu but without the
// convergence check.
//
// Fix over the previous version: `id` and `new_value` were declared before
// the parallel region and therefore shared across OpenMP threads (a data
// race); both are now thread-local.
void Gather_result_pr(
    int const vertex_num,
    int const gpu_num,
    int * const copy_num,
    float * const *h_add_value,
    float * const value_gpu
    )
{
    omp_set_num_threads(NUM_THREADS);
    #pragma omp parallel
    {
        // Thread-local: each thread handles vertices id, id+NUM_THREADS, ...
        int id = omp_get_thread_num();
        for (int i = id; i < vertex_num; i += NUM_THREADS)
        {
            if (copy_num[i] > 1)
            {
                float new_value = 0.0f;
                for (int j = 0; j < gpu_num; ++j)
                {
                    new_value += h_add_value[j][i];
                }
                value_gpu[i] = PAGERANK_COEFFICIENT * new_value + 1.0f - PAGERANK_COEFFICIENT;
            }
        }
    }
}
/* PageRank algorithm on GPU.
 *
 * Multi-GPU PageRank driver.
 *   g           - per-GPU graph partitions ("outer" edges cross partitions,
 *                 "inner" edges are local to one partition)
 *   gpu_num     - number of GPUs to use
 *   value_gpu   - host array of (vertex_num+1) floats receiving merged ranks
 *   dsize       - dataset sizes; only vertex_num is read here
 *   out_degree  - host array of per-vertex out-degrees
 *   copy_num    - per-vertex replication count (>1 => merged on the CPU)
 *   position_id - unused in this routine (kept for interface compatibility)
 *
 * Per iteration and per GPU: outer edges are processed in ITERATE_IN_OUTER
 * chunks on their own streams (each chunk's partial sums copied back to the
 * host asynchronously), inner edges run on a separate stream and set a
 * convergence flag, replicated vertices are merged on the CPU and scattered
 * back to every GPU. Iterates until converged or 10 steps.
 *
 * Fixes over the previous version:
 *  * total_compute_time / extract_bitmap_time were malloc'd but never
 *    zeroed even though extract_bitmap_time[i] is accumulated with += ;
 *  * h_flag[i] was read before the event synchronize that guarantees its
 *    device->host copy completed (the flag-OR loop now runs after the
 *    per-GPU synchronization loop);
 *  * the final device->host copy of d_value used sizeof(int) for a float
 *    array (same size, but the types disagreed);
 *  * removed unused locals (tmp_start/tmp_stop, cpu_gather_time) and freed
 *    several previously leaked host/device allocations.
 *
 * NOTE(review): merge_value_on_cpu is invoked while the asynchronous copies
 * into h_add_value may still be in flight, and `flag` is passed to it by
 * value -- both look like latent bugs inherited from the original design;
 * confirm intent before tightening further.
 */
void pr_gpu(Graph **g,int gpu_num,float *value_gpu,DataSize *dsize, int* out_degree, int *copy_num, int **position_id)
{
    printf("PageRank is running on GPU...............\n");
    printf("Start malloc edgelist...\n");
    int **h_flag=(int **)malloc(sizeof(int *)*gpu_num);
    int vertex_num=dsize->vertex_num;
    int **d_edge_inner_src=(int **)malloc(sizeof(int *)*gpu_num);
    int **d_edge_inner_dst=(int **)malloc(sizeof(int *)*gpu_num);
    int **d_edge_outer_src=(int **)malloc(sizeof(int *)*gpu_num);
    int **d_edge_outer_dst=(int **)malloc(sizeof(int *)*gpu_num);
    float **h_value=(float **)malloc(sizeof(float *)* gpu_num);
    float **h_add_value=(float **)malloc(sizeof(float *)*gpu_num);
    float **d_value=(float **)malloc(sizeof(float *)*gpu_num);
    float **d_add_value=(float **)malloc(sizeof(float *)*gpu_num);
    int **d_outdegree=(int **)malloc(sizeof(int *)*gpu_num);
    int **d_flag=(int **)malloc(sizeof(int *)*gpu_num);
    /* determine the size of one outer-edge chunk per process */
    int tmp_per_size = min_num_outer_edge(g,gpu_num);
    int outer_per_size=tmp_per_size/ITERATE_IN_OUTER;
    int iterate_in_outer=ITERATE_IN_OUTER+1;
    int *last_outer_per_size=(int *)malloc(sizeof(int)*gpu_num);
    memset(last_outer_per_size,0,sizeof(int)*gpu_num);
    for (int i = 0; i < gpu_num; ++i)
    {
        h_value[i]=(float *)malloc(sizeof(float)*(vertex_num+1));
        h_add_value[i]=(float *)malloc(sizeof(float)*(vertex_num+1));
        memset(h_value[i],0,sizeof(float)*(vertex_num+1));
        h_flag[i]=(int *)malloc(sizeof(int));
    }
    /* Streams: one per outer chunk plus one extra for the inner edges. */
    cudaStream_t **stream;
    stream=(cudaStream_t **)malloc(gpu_num*sizeof(cudaStream_t*));
    cudaEvent_t * start_outer,*stop_outer,*start_inner,*stop_inner,*start_asyn,*stop_asyn;
    start_outer=(cudaEvent_t *)malloc(gpu_num*sizeof(cudaEvent_t));
    stop_outer=(cudaEvent_t *)malloc(gpu_num*sizeof(cudaEvent_t));
    start_inner=(cudaEvent_t *)malloc(gpu_num*sizeof(cudaEvent_t));
    stop_inner=(cudaEvent_t *)malloc(gpu_num*sizeof(cudaEvent_t));
    start_asyn=(cudaEvent_t *)malloc(gpu_num*sizeof(cudaEvent_t));
    stop_asyn=(cudaEvent_t *)malloc(gpu_num*sizeof(cudaEvent_t));
    for (int i = 0; i < gpu_num; ++i)
    {
        cudaSetDevice(i);
        stream[i]=(cudaStream_t *)malloc((iterate_in_outer+1)*sizeof(cudaStream_t));
        HANDLE_ERROR(cudaEventCreate(&start_outer[i],0));
        HANDLE_ERROR(cudaEventCreate(&stop_outer[i],0));
        HANDLE_ERROR(cudaEventCreate(&start_inner[i],0));
        HANDLE_ERROR(cudaEventCreate(&stop_inner[i],0));
        HANDLE_ERROR(cudaEventCreate(&start_asyn[i],0));
        HANDLE_ERROR(cudaEventCreate(&stop_asyn[i],0));
        for (int j = 0; j <= iterate_in_outer; ++j)
        {
            HANDLE_ERROR(cudaStreamCreate(&stream[i][j]));
        }
    }
    /* Upload edge lists (chunked for outer edges so copies overlap), values,
     * out-degrees and the per-GPU convergence flag. */
    for (int i = 0; i < gpu_num; ++i)
    {
        cudaSetDevice(i);
        int out_size=g[i]->edge_outer_num;
        int inner_size=g[i]->edge_num - out_size;
        HANDLE_ERROR(cudaMalloc((void **)&d_edge_outer_src[i],sizeof(int)*out_size));
        HANDLE_ERROR(cudaMalloc((void **)&d_edge_outer_dst[i],sizeof(int)*out_size));
        if (outer_per_size!=0 && outer_per_size < out_size)
        {
            for (int j = 1; j < iterate_in_outer; ++j)
            {
                HANDLE_ERROR(cudaMemcpyAsync((void *)(d_edge_outer_src[i]+(j-1)*outer_per_size),(void *)(g[i]->edge_outer_src+(j-1)*outer_per_size),sizeof(int)*outer_per_size,cudaMemcpyHostToDevice, stream[i][j-1]));
                HANDLE_ERROR(cudaMemcpyAsync((void *)(d_edge_outer_dst[i]+(j-1)*outer_per_size),(void *)(g[i]->edge_outer_dst+(j-1)*outer_per_size),sizeof(int)*outer_per_size,cudaMemcpyHostToDevice, stream[i][j-1]));
            }
        }
        // Remainder chunk: whatever is left after the evenly sized chunks.
        last_outer_per_size[i]=g[i]->edge_outer_num-outer_per_size * (iterate_in_outer-1);
        if (last_outer_per_size[i]>0 && iterate_in_outer>1 )
        {
            HANDLE_ERROR(cudaMemcpyAsync((void *)(d_edge_outer_src[i]+(iterate_in_outer-1)*outer_per_size),(void *)(g[i]->edge_outer_src+(iterate_in_outer-1)*outer_per_size),sizeof(int)*last_outer_per_size[i],cudaMemcpyHostToDevice, stream[i][iterate_in_outer-1]));
            HANDLE_ERROR(cudaMemcpyAsync((void *)(d_edge_outer_dst[i]+(iterate_in_outer-1)*outer_per_size),(void *)(g[i]->edge_outer_dst+(iterate_in_outer-1)*outer_per_size),sizeof(int)*last_outer_per_size[i],cudaMemcpyHostToDevice, stream[i][iterate_in_outer-1]));
        }
        HANDLE_ERROR(cudaMalloc((void **)&d_edge_inner_src[i],sizeof(int)*inner_size));
        HANDLE_ERROR(cudaMalloc((void **)&d_edge_inner_dst[i],sizeof(int)*inner_size));
        HANDLE_ERROR(cudaMemcpyAsync((void *)d_edge_inner_src[i],(void *)g[i]->edge_inner_src,sizeof(int)*inner_size,cudaMemcpyHostToDevice,stream[i][iterate_in_outer]));
        HANDLE_ERROR(cudaMemcpyAsync((void *)d_edge_inner_dst[i],(void *)g[i]->edge_inner_dst,sizeof(int)*inner_size,cudaMemcpyHostToDevice,stream[i][iterate_in_outer]));
        HANDLE_ERROR(cudaMalloc((void **)&d_value[i],sizeof(float)*(vertex_num+1)));
        HANDLE_ERROR(cudaMemcpyAsync((void *)d_value[i],(void *)h_value[i],sizeof(float)*(vertex_num+1),cudaMemcpyHostToDevice,stream[i][0]));
        HANDLE_ERROR(cudaMalloc((void **)&d_add_value[i],sizeof(float)*(vertex_num+1)));
        // NOTE(review): d_add_value is never explicitly zeroed before the
        // first iteration (cudaMemset was commented out in the original) --
        // confirm the first iteration's accumulators start from garbage or
        // are overwritten before use.
        HANDLE_ERROR(cudaMalloc((void **)&d_outdegree[i],sizeof(int)*(vertex_num+1)));
        HANDLE_ERROR(cudaMemcpyAsync(d_outdegree[i],out_degree, sizeof(int)*(vertex_num+1),cudaMemcpyHostToDevice,stream[i][0]));
        HANDLE_ERROR(cudaMalloc((void **)&d_flag[i],sizeof(int)));
    }
    printf("Malloc is finished!\n");
    /* Before While: Time Initialization */
    float *outer_compute_time,*inner_compute_time,*compute_time,*total_compute_time,*extract_bitmap_time;
    float gather_time=0.0;
    float total_time=0.0;
    float record_time=0.0;
    outer_compute_time=(float *)malloc(sizeof(float)*gpu_num);
    inner_compute_time=(float *)malloc(sizeof(float)*gpu_num);
    compute_time=(float *)malloc(sizeof(float)*gpu_num);
    total_compute_time=(float *)malloc(sizeof(float)*gpu_num);
    extract_bitmap_time=(float *)malloc(sizeof(float)*gpu_num);
    memset(outer_compute_time,0,sizeof(float)*gpu_num);
    memset(inner_compute_time,0,sizeof(float)*gpu_num);
    memset(compute_time,0,sizeof(float)*gpu_num);
    // Fix: these two were previously left uninitialized even though
    // extract_bitmap_time[i] is accumulated with += below.
    memset(total_compute_time,0,sizeof(float)*gpu_num);
    memset(extract_bitmap_time,0,sizeof(float)*gpu_num);
    /* Before While: Variable Initialization */
    int step=0;
    int flag=0;
    int inner_edge_num=0;
    printf("Computing......\n");
    do
    {
        flag=0;
        for (int i = 0; i <gpu_num; ++i)
        {
            memset(h_flag[i],0,sizeof(int));
            cudaSetDevice(i);
            HANDLE_ERROR(cudaMemcpyAsync(d_flag[i],h_flag[i],sizeof(int),cudaMemcpyHostToDevice,stream[i][0]));
            HANDLE_ERROR(cudaEventRecord(start_outer[i], stream[i][0]));
            // Outer edge list, one kernel + result copy per chunk/stream.
            if (outer_per_size!=0 && outer_per_size < g[i]->edge_outer_num)
            {
                for (int j = 1; j < iterate_in_outer; ++j)
                {
                    pr_kernel_outer<<<208,128,0,stream[i][j-1]>>>(
                        outer_per_size,
                        d_edge_outer_src[i]+(j-1)*outer_per_size,
                        d_edge_outer_dst[i]+(j-1)*outer_per_size,
                        d_outdegree[i],
                        d_value[i],
                        d_add_value[i]);
                    HANDLE_ERROR(cudaMemcpyAsync((void *)(h_add_value[i]),(void *)(d_add_value[i]),sizeof(float)*(vertex_num+1),cudaMemcpyDeviceToHost,stream[i][j-1]));
                }
            }
            last_outer_per_size[i]=g[i]->edge_outer_num-outer_per_size * (iterate_in_outer-1);
            if (last_outer_per_size[i]>0 && iterate_in_outer>1 )
            {
                pr_kernel_outer<<<208,128,0,stream[i][iterate_in_outer-1]>>>(
                    last_outer_per_size[i],
                    d_edge_outer_src[i]+(iterate_in_outer-1)*outer_per_size,
                    d_edge_outer_dst[i]+(iterate_in_outer-1)*outer_per_size,
                    d_outdegree[i],
                    d_value[i],
                    d_add_value[i]);
                HANDLE_ERROR(cudaMemcpyAsync((void *)(h_add_value[i]),(void *)(d_add_value[i]),sizeof(float)*(vertex_num+1),cudaMemcpyDeviceToHost,stream[i][iterate_in_outer-1]));
            }
            HANDLE_ERROR(cudaEventRecord(stop_outer[i], stream[i][iterate_in_outer-1]));
            HANDLE_ERROR(cudaEventRecord(start_inner[i], stream[i][iterate_in_outer]));
            // Inner edges + convergence flag on the dedicated inner stream.
            inner_edge_num=g[i]->edge_num-g[i]->edge_outer_num;
            if (inner_edge_num>0)
            {
                pr_kernel_inner<<<208,128,0,stream[i][iterate_in_outer]>>>(
                    inner_edge_num,
                    d_edge_inner_src[i],
                    d_edge_inner_dst[i],
                    d_outdegree[i],
                    d_value[i],
                    d_add_value[i],
                    d_flag[i]);
                HANDLE_ERROR(cudaMemcpyAsync(h_flag[i], d_flag[i],sizeof(int),cudaMemcpyDeviceToHost,stream[i][iterate_in_outer]));
            }
            HANDLE_ERROR(cudaEventRecord(stop_inner[i],stream[i][iterate_in_outer]));
        }
        // Merge replicated vertices on the CPU (see NOTE in the header about
        // h_add_value possibly still being in flight and flag-by-value).
        double t1=omp_get_wtime();
        merge_value_on_cpu(vertex_num, gpu_num, h_add_value, value_gpu, copy_num, flag);
        double t2=omp_get_wtime();
        record_time=(t2-t1)*1000;
        gather_time+=record_time;
        for (int i = 0; i < gpu_num; ++i)
        {
            cudaSetDevice(i);
            // Scatter the merged values back and fold them into d_value for
            // the destinations of outer edges.
            HANDLE_ERROR(cudaMemcpyAsync(d_add_value[i], value_gpu,sizeof(float)*(vertex_num+1),cudaMemcpyHostToDevice,stream[i][0]));
            HANDLE_ERROR(cudaEventRecord(start_asyn[i], stream[i][0]));
            kernel_extract_values<<<208,128,0,stream[i][0]>>>
                (
                 g[i]->edge_outer_num,
                 vertex_num,
                 d_edge_outer_dst[i],
                 d_add_value[i],
                 d_value[i]
                );
            HANDLE_ERROR(cudaEventRecord(stop_asyn[i], stream[i][0]));
        }
        step++;
        // Synchronize each GPU's events and collect per-stream timings.
        // stop_inner is recorded after the h_flag copy on the same stream, so
        // after this loop h_flag[i] is safe to read.
        for (int i = 0; i < gpu_num; ++i)
        {
            cudaSetDevice(i);
            HANDLE_ERROR(cudaEventSynchronize(stop_outer[i]));
            HANDLE_ERROR(cudaEventSynchronize(stop_inner[i]));
            HANDLE_ERROR(cudaEventSynchronize(stop_asyn[i]));
            HANDLE_ERROR(cudaEventElapsedTime(&record_time, start_outer[i], stop_outer[i]));
            outer_compute_time[i]+=record_time;
            HANDLE_ERROR(cudaEventElapsedTime(&record_time, start_inner[i], stop_inner[i]));
            inner_compute_time[i]+=record_time;
            HANDLE_ERROR(cudaEventElapsedTime(&record_time, start_asyn[i], stop_asyn[i]));
            extract_bitmap_time[i]+=record_time;
            total_compute_time[i]=outer_compute_time[i]+extract_bitmap_time[i]-inner_compute_time[i]>0?(outer_compute_time[i]+extract_bitmap_time[i]):inner_compute_time[i];
        }
        // Fix: read the per-GPU convergence flags only after the
        // synchronization above guarantees their copies completed.
        for (int i = 0; i < gpu_num; ++i)
        {
            flag=flag||h_flag[i][0];
        }
    }while(flag && step<10);
    // Fetch the final per-GPU values (float array: sizeof(float), the
    // original used sizeof(int) which only worked by size coincidence).
    for (int i = 0; i < gpu_num; ++i)
    {
        cudaSetDevice(i);
        cudaMemcpyAsync((void *)h_value[i],(void *)d_value[i],sizeof(float)*(vertex_num+1),cudaMemcpyDeviceToHost,stream[i][0]);
    }
    printf("Gather result on cpu....\n");
    Gather_result_pr(vertex_num,gpu_num,copy_num,h_add_value,value_gpu);
    printf("Time print\n");
    // Total time: slowest GPU vs. CPU gather, whichever dominates.
    float total_time_n=0.0;
    for (int i = 0; i < gpu_num; ++i)
    {
        if(total_time_n<total_compute_time[i])
            total_time_n=total_compute_time[i];
    }
    total_time=total_time_n>gather_time?total_time_n:gather_time;
    printf("Total time of pr_gpu is %.3f ms\n",total_time);
    printf("Elapsed time of pr_gpu is %.3f ms\n", total_time/step);
    printf("-------------------------------------------------------\n");
    printf("Detail:\n");
    printf("\n");
    for (int i = 0; i < gpu_num; ++i)
    {
        printf("GPU %d\n",i);
        printf("Outer_Compute_Time(include pre-stage): %.3f ms\n", outer_compute_time[i]);
        printf("Inner_Compute_Time: %.3f ms\n", inner_compute_time[i]);
        printf("Total Compute_Time %.3f ms\n", total_compute_time[i]);
        printf("Extract_Bitmap_Time %.3f ms\n", extract_bitmap_time[i]);
    }
    printf("CPU \n");
    printf("CPU_Gather_Time: %.3f ms\n", gather_time);
    printf("--------------------------------------------------------\n");
    // Cleanup: destroy events, free device memory, then reset each device.
    for (int i = 0; i < gpu_num; ++i)
    {
        cudaSetDevice(i);
        HANDLE_ERROR(cudaEventDestroy(start_outer[i]));
        HANDLE_ERROR(cudaEventDestroy(stop_outer[i]));
        HANDLE_ERROR(cudaEventDestroy(start_inner[i]));
        HANDLE_ERROR(cudaEventDestroy(stop_inner[i]));
        HANDLE_ERROR(cudaEventDestroy(start_asyn[i]));
        HANDLE_ERROR(cudaEventDestroy(stop_asyn[i]));
        HANDLE_ERROR(cudaFree(d_edge_outer_src[i]));
        HANDLE_ERROR(cudaFree(d_edge_outer_dst[i]));
        HANDLE_ERROR(cudaFree(d_edge_inner_src[i]));
        HANDLE_ERROR(cudaFree(d_edge_inner_dst[i]));
        HANDLE_ERROR(cudaFree(d_value[i]));
        HANDLE_ERROR(cudaFree(d_add_value[i]));
        HANDLE_ERROR(cudaFree(d_outdegree[i]));
        HANDLE_ERROR(cudaFree(d_flag[i]));
        HANDLE_ERROR(cudaDeviceReset());
        // NOTE(review): h_value[i] is intentionally left unfreed, matching
        // the original (`//free(h_value[i]); //error`) -- confirm ownership
        // before freeing it here.
        free(h_add_value[i]);
        free(h_flag[i]);
        free(stream[i]);
    }
    free(outer_compute_time);
    free(inner_compute_time);
    free(compute_time);
    free(total_compute_time);
    free(extract_bitmap_time);
    free(last_outer_per_size);
    free(stream);
    free(start_outer); free(stop_outer);
    free(start_inner); free(stop_inner);
    free(start_asyn); free(stop_asyn);
}
|
9b1f86ab2795e89a27e0a7d60397079a2e8b5ac4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "itf/engine/layer.hpp"
#include "itf/engine/vision_layers.hpp"
namespace itf {
// Element-wise binary step: out[i] = 1 when in[i] is strictly greater than
// threshold, 0 otherwise. One grid-stride loop over the n elements.
template <typename Dtype>
__global__ void ThresholdForward(const int n, const Dtype threshold,
    const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(i, n) {
    out[i] = (in[i] > threshold) ? Dtype(1) : Dtype(0);
  }
}
// GPU forward pass: binarize bottom[0] into top[0] element-wise using the
// layer's threshold_. Launches ThresholdForward over all count elements on
// the default stream and checks for launch errors afterwards.
template <typename Dtype>
void ThresholdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( ThresholdForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, threshold_, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FORWARD(ThresholdLayer);
} // namespace itf
| 9b1f86ab2795e89a27e0a7d60397079a2e8b5ac4.cu | #include <algorithm>
#include <vector>
#include "itf/engine/layer.hpp"
#include "itf/engine/vision_layers.hpp"
namespace itf {
// Element-wise binary step: out[i] = 1 when in[i] is strictly greater than
// threshold, 0 otherwise. One grid-stride loop over the n elements.
template <typename Dtype>
__global__ void ThresholdForward(const int n, const Dtype threshold,
    const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(i, n) {
    out[i] = (in[i] > threshold) ? Dtype(1) : Dtype(0);
  }
}
// GPU forward pass: binarize bottom[0] into top[0] element-wise using the
// layer's threshold_. Launches ThresholdForward over all count elements on
// the default stream and checks for launch errors afterwards.
template <typename Dtype>
void ThresholdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  ThresholdForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, threshold_, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FORWARD(ThresholdLayer);
} // namespace itf
|
0a620ad694882b77e56f04116e4d1d1d8338652e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ATen/ATen.h"
#include "ATen/AccumulateType.h"
#include "ATen/TensorUtils.h"
#include "ATen/hip/HIPContext.h"
#include "c10/util/Exception.h"
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHTensorMathReduce.cuh>
#include <THH/THHTensorSort.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
namespace at { namespace native {
namespace {
#ifdef __HIP_PLATFORM_HCC__
static const int WARP_SIZE = 64;
static const int BLOCKDIMY = 16;
#else
static const int WARP_SIZE = 32;
static const int BLOCKDIMY = 32;
#endif
// Deterministic embedding backward for small index counts ("feature" path).
//
// Accumulates grad rows into grad_weight rows selected by `indices`, skipping
// padding_idx. Launch contract (see the call site): block = (WARP_SIZE,
// BLOCKDIMY); grid.x tiles the feature dimension (stride) in WARP_SIZE
// chunks; dynamic shared memory = sizeof(accscalar_t)*WARP_SIZE*BLOCKDIMY
// partial sums followed by sizeof(int)*WARP_SIZE*BLOCKDIMY cached indices.
// Indices are narrowed to int, so rows must fit in 32 bits.
template
<typename scalar_t,
typename accscalar_t>
__global__ void embedding_backward_feature_kernel
(int64_t* indices,
 const scalar_t* __restrict__ grad,
 scalar_t* __restrict__ grad_weight,
 int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot
 int64_t stride,
 int padding_idx)
{
  extern __shared__ char buf[];
  // Shared layout: per-warp accumulator segment, then the cached index batch.
  accscalar_t* smem = (accscalar_t*)buf;
  accscalar_t* my_s = smem + WARP_SIZE*threadIdx.y;
  int* indices_batch = (int*)(buf + sizeof(accscalar_t)*WARP_SIZE*blockDim.y);
  const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size
  const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim
  for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y)
  {
    // Entire block cooperates to load a batch of 1024 indices to process
    int tid = threadIdx.x + threadIdx.y*blockDim.x;
    if(batch_start + tid < n)
      indices_batch[tid] = (int)indices[batch_start + tid];
    int batch_end = batch_start + blockDim.x*blockDim.y < n ?
                    batch_start + blockDim.x*blockDim.y : n;
    // Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32
    for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y)
    {
      // This does double duty: it makes sure indices_batch is ready, and it makes sure match-group
      // leaders are done with their accumulates before other warps start loading again.
      __syncthreads();
      int n_this_chunk = (batch_end - chunk_start) < blockDim.y ?
                         (batch_end - chunk_start) : blockDim.y;
      int src_row = chunk_start + threadIdx.y;
      int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight
      // All warps load their smem segments with incoming grad data
      if(src_row < n && f < s && dst_row != padding_idx)
        my_s[threadIdx.x] = static_cast<accscalar_t>(grad[src_row*stride + f]);
      __syncthreads();
      // To ensure determinism, we can't just have each warp add its grad data to its dst_row.
      // We need to check if any other warps pulled grad data targeting dst_row.
      // If so, we elect the first warp in each matching group as the leader.
      // Each leader warp serializes the accumulates targeting dst_row in shared memory,
      // then finishes by adding the accumulated buffer to dst_row in grad_weight.
      if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync
      {
        int match_found_this_thread =
          (dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]);
        if(threadIdx.x >= n_this_chunk)
          match_found_this_thread = 0;
#ifdef __HIP_PLATFORM_HCC__
        // HIP wavefronts are 64 lanes wide, so the ballot needs 64 bits.
        unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread);
#else
        unsigned int matchmask = WARP_BALLOT(match_found_this_thread);
#endif
        int first_remaining_peer = __ffs(matchmask) - 1;
        if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader
        {
          matchmask ^= (1 << first_remaining_peer);
          while(matchmask)
          {
            first_remaining_peer = __ffs(matchmask) - 1;
            my_s[threadIdx.x] += smem[threadIdx.x + WARP_SIZE*first_remaining_peer];
            matchmask ^= (1 << first_remaining_peer);
          }
          if(f < s)
            grad_weight[dst_row*stride + f] += static_cast<scalar_t>(my_s[threadIdx.x]);
        }
      }
    }
  }
}
// General embedding backward over *sorted* indices.
// `input`  - embedding indices sorted ascending;
// `indices`- permutation mapping each sorted position to its original grad row;
// `count`  - optional per-position occurrence counts (for scale_grad_by_freq),
//            may be null;
// `stride` - embedding row width.
// Each (blockIdx.x, threadIdx.y) pair owns one run of equal indices and
// serially accumulates all duplicates, so no atomics are needed.
template <typename scalar_t>
__global__ void embedding_backward_kernel(
  int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
  int64_t* count, int64_t numel, int64_t stride, int padding_idx) {
  using accscalar_t = acc_type<scalar_t, true>;
  int idx = blockIdx.x * 4 + threadIdx.y;
  // Each warp is responsible for an input into the LookupTable.
  // If the preceding input has the same as this input, then the warp
  // exits immediately. The warp also processes subsequent inputs with the
  // same value.
  //
  // Input Warp
  // 1 <warp 1>
  // 1 <warp 1> (<warp 2> exits without doing any work)
  // 5 <warp 3>
  // 8 <warp 4>
  // Number of values processed by each thread (grain size)
  const int SZ = 4;
  // Only the first position of each run of equal indices does work; rows
  // equal to padding_idx receive no gradient.
  if (idx < numel
      && (idx == 0 || input[idx] != input[idx - 1])
      && input[idx] != padding_idx) {
    do {
      const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
      const int weight_row = ((int) input[idx]) * stride;
      const int grad_row = ((int) indices[idx]) * stride;
      // Frequency scaling: divide by the number of occurrences when counts
      // were computed, otherwise use the raw gradient.
      const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
      accscalar_t gradient[SZ];
      accscalar_t weight[SZ];
      #pragma unroll
      for (int ii = 0; ii < SZ; ii++) {
        int feature_dim = start_feature + ii * WARP_SIZE;
        if (feature_dim < stride) {
          gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
          weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
        }
      }
      #pragma unroll
      for (int ii = 0; ii < SZ; ii++) {
        weight[ii] += gradient[ii] * scale;
      }
      #pragma unroll
      for (int ii = 0; ii < SZ; ii++) {
        int feature_dim = start_feature + ii * WARP_SIZE;
        if (feature_dim < stride) {
          grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
        }
      }
      idx++;
    } while (idx < numel && input[idx] == input[idx - 1]);
  }
}
/* Renormalize selected embedding rows in place.
 * One block per entry of `indices`: the block cooperatively computes the
 * norm_type-norm of that weight row (reduction via dynamic shared memory,
 * which must hold blockDim.x accscalar_t values) and, if it exceeds max_norm,
 * scales the row down to max_norm (with a 1e-7 guard against division by a
 * tiny norm). */
template <typename scalar_t, typename accscalar_t>
__global__ void renorm_kernel(
    scalar_t* weights, int64_t* indices, accscalar_t max_norm,
    accscalar_t norm_type, int64_t dim,
    int64_t weights_stride0, int64_t weights_stride1) {
  // Some casting hacks since dynamic shared memory and templates don't work together:
  extern __shared__ unsigned char smem[];
  auto sdata = reinterpret_cast<accscalar_t*>(smem);
  int tid = threadIdx.x;
  int base_index = indices[blockIdx.x] * weights_stride0;
  accscalar_t v = 0;
  // Strided partial sum of |x|, x^2, or x^norm_type over the row.
  for (int i = tid; i < dim; i += blockDim.x) {
    auto x = static_cast<accscalar_t>(weights[base_index + i * weights_stride1]);
    if (norm_type == 1) {
      v += std::abs(x);
    } else if (norm_type == 2) {
      v += x * x;
    } else {
      v += ::pow(x, norm_type);
    }
  }
  using Op = ReduceAdd<accscalar_t>;
  v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0);
  if (tid == 0) {
    // Final norm = (sum)^(1/norm_type), published through shared memory.
    sdata[0] = ::pow(v, static_cast<accscalar_t>(1.0 / norm_type));
  }
  __syncthreads();
  // now we renormalize the blocks that need it
  if (sdata[0] > max_norm) {
    auto factor = static_cast<scalar_t>(max_norm / (sdata[0] + 1e-7));
    for (int i = tid; i < dim; i += blockDim.x) {
      weights[base_index + i * weights_stride1] *= factor;
    }
  }
}
} // anonymous namespace
// Dense backward of embedding lookup on GPU (HIP build).
//
// Returns grad_weight of shape (num_weights, embedding_dim), the accumulated
// gradients of `grad_` routed through `indices`; rows equal to padding_idx
// get no gradient. Two strategies:
//  * <= 768 indices and no frequency scaling: the deterministic
//    "feature" kernel, one pass, no sorting;
//  * otherwise: sort the indices with thrust (keeping the original positions),
//    optionally build per-position duplicate counts via two inclusive scans,
//    then run the run-per-warp embedding_backward_kernel.
Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indices,
                                     int64_t num_weights, int64_t padding_idx,
                                     bool scale_grad_by_freq) {
  auto grad_arg = TensorArg(grad_, "grad", 1);
  auto indices_arg = TensorArg(indices, "indices", 1);
  checkScalarType("embedding_backward", indices_arg, kLong);
  checkSameGPU("embedding_backward", grad_arg, indices_arg);
  auto num_indices = indices.numel();
  // Flatten grad to (num_indices, embedding_dim) so each index maps to a row.
  auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)});
  auto grad_weight = at::zeros({num_weights, grad_.size(-1)}, grad_.options());
  int64_t stride = grad_weight.stride(0);
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  // Fast deterministic path for small index counts.
  if (num_indices <= 768 && !scale_grad_by_freq) {
    auto indices_contig = indices.contiguous();
    dim3 grid(THCCeilDiv(stride, (int64_t)WARP_SIZE));
    dim3 block(WARP_SIZE, BLOCKDIMY);
    AT_DISPATCH_FLOATING_TYPES_AND_HALF
      (grad.type(),
       "embedding_backward",
       [&]
       {
         using accscalar_t = acc_type<scalar_t, true>;
         // Dynamic smem: accumulator segment + cached index batch (must match
         // the layout expected by the kernel).
         hipLaunchKernelGGL(( embedding_backward_feature_kernel<scalar_t, accscalar_t>)
           , dim3(grid),
             dim3(block),
             sizeof(accscalar_t)*WARP_SIZE*BLOCKDIMY + sizeof(int)*WARP_SIZE*BLOCKDIMY,
             stream,
             indices_contig.data<int64_t>(),
             grad.data<scalar_t>(),
             grad_weight.data<scalar_t>(),
             static_cast<int>(num_indices),
             static_cast<int64_t>(stride),
             static_cast<int>(padding_idx));
       });
    THCudaCheck(hipGetLastError());
    return grad_weight;
  }
  auto sorted_indices = at::empty_like(indices);
  auto orig_indices = at::empty_like(indices);
  using device_ptr = thrust::device_ptr<int64_t>;
  // Sort the inputs into sorted with the corresponding indices; we
  // don't need a stable or multidimensional sort, so just use Thrust
  // directly
  {
    sorted_indices.copy_(indices);
    auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
    auto policy = thrust::hip::par(allocator).on(stream);
    // Fill sortedOrigIndices with sequential indices
    auto count_iter = thrust::counting_iterator<int64_t>(0);
    auto orig_data = device_ptr(orig_indices.data<int64_t>());
    thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
    // Sort; a stable sort is not required
    auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
    thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data,
                        ThrustLTOp<int64_t>());
  }
  Tensor count;
  if (scale_grad_by_freq) {
    count = at::empty_like(indices);
    auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
    auto policy = thrust::hip::par(allocator).on(stream);
    // Compute an increasing sequence per unique item in sortedIndices:
    // sorted: 2 5 5 5 7 7 8 9 9
    // count: 1 1 2 3 1 2 1 1 2
    auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
    auto count_data = device_ptr(count.data<int64_t>());
    thrust::inclusive_scan_by_key(
      policy,
      sorted_data,
      sorted_data + num_indices,
      thrust::make_constant_iterator(1),
      count_data
    );
    // Take the maximum of each count per unique key in reverse:
    // sorted: 2 5 5 5 7 7 8 9 9
    // count: 1 3 3 3 2 2 1 2 2
    thrust::inclusive_scan_by_key(
      policy,
      thrust::make_reverse_iterator(sorted_data + num_indices),
      thrust::make_reverse_iterator(sorted_data),
      thrust::make_reverse_iterator(count_data + num_indices),
      thrust::make_reverse_iterator(count_data + num_indices),
      thrust::equal_to<int64_t>(),
      thrust::maximum<int64_t>()
    );
  }
  // grid.x covers runs of 4 sorted indices (matching threadIdx.y below),
  // grid.y tiles the feature dimension in chunks of 32*SZ = 128.
  dim3 grid(THCCeilDiv(num_indices, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
  dim3 block(32, 4);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "embedding_backward", [&] {
    hipLaunchKernelGGL(( embedding_backward_kernel), dim3(grid), dim3(block), 0, stream,
      sorted_indices.data<int64_t>(),
      orig_indices.data<int64_t>(),
      grad.data<scalar_t>(),
      grad_weight.data<scalar_t>(),
      count.defined() ? count.data<int64_t>() : nullptr,
      num_indices,
      stride,
      padding_idx);
  });
  THCudaCheck(hipGetLastError());
  return grad_weight;
}
// In-place renorm of the embedding rows selected by `indices` (HIP build):
// every selected row whose norm_type-norm exceeds max_norm is scaled down to
// max_norm. Deduplicates the indices first so each row is processed by one
// block, then launches renorm_kernel with one 128-thread block per unique
// index. Returns `self`.
Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices,
                                double max_norm, double norm_type) {
  auto self_arg = TensorArg(self, "self", 1);
  auto indices_arg = TensorArg(indices, "indices", 1);
  checkDim("embedding_renorm_", self_arg, 2);
  checkSameGPU("embedding_renorm", self_arg, indices_arg);
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
  auto policy = thrust::hip::par(allocator).on(stream);
  using device_ptr = thrust::device_ptr<int64_t>;
  auto num_indices = indices.numel();
  auto indices_contig = indices.contiguous();
  auto indices_data = device_ptr(indices_contig.data<int64_t>());
  // FIXME: thrust::unique only removes consecutive elements that are equal.
  // We have race conditions when indices contain duplicates which are not
  // adjacent
  auto unique_indices = at::empty(indices.numel(), indices.options());
  auto unique_data = device_ptr(unique_indices.data<int64_t>());
  auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data);
  auto num_unique_indices = static_cast<int>(end - unique_data);
  // One block per unique row; 128 threads cooperate on the row reduction.
  dim3 grid(num_unique_indices);
  dim3 block(128);
  int dim = self.stride(0);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "embedding_backward", [&] {
    using accscalar_t = acc_type<scalar_t, true>;
    // Dynamic smem: one accscalar_t per thread for the block reduction.
    hipLaunchKernelGGL(( renorm_kernel), dim3(grid), dim3(block), 128 * sizeof(accscalar_t), stream,
      self.data<scalar_t>(),
      unique_indices.data<int64_t>(),
      static_cast<accscalar_t>(max_norm),
      static_cast<accscalar_t>(norm_type),
      dim, self.stride(0), self.stride(1));
  });
  THCudaCheck(hipGetLastError());
  return self;
}
}} // namespace at::native
| 0a620ad694882b77e56f04116e4d1d1d8338652e.cu | #include "ATen/ATen.h"
#include "ATen/AccumulateType.h"
#include "ATen/TensorUtils.h"
#include "ATen/cuda/CUDAContext.h"
#include "c10/util/Exception.h"
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCTensorMathReduce.cuh>
#include <THC/THCTensorSort.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
namespace at { namespace native {
namespace {
#ifdef __HIP_PLATFORM_HCC__
static const int WARP_SIZE = 64;
static const int BLOCKDIMY = 16;
#else
static const int WARP_SIZE = 32;
static const int BLOCKDIMY = 32;
#endif
// Deterministic embedding backward for small index counts ("feature" path).
//
// Accumulates grad rows into grad_weight rows selected by `indices`, skipping
// padding_idx. Launch contract (see the call site): block = (WARP_SIZE,
// BLOCKDIMY); grid.x tiles the feature dimension (stride) in WARP_SIZE
// chunks; dynamic shared memory = sizeof(accscalar_t)*WARP_SIZE*BLOCKDIMY
// partial sums followed by sizeof(int)*WARP_SIZE*BLOCKDIMY cached indices.
// Indices are narrowed to int, so rows must fit in 32 bits.
template
<typename scalar_t,
typename accscalar_t>
__global__ void embedding_backward_feature_kernel
(int64_t* indices,
 const scalar_t* __restrict__ grad,
 scalar_t* __restrict__ grad_weight,
 int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot
 int64_t stride,
 int padding_idx)
{
  extern __shared__ char buf[];
  // Shared layout: per-warp accumulator segment, then the cached index batch.
  accscalar_t* smem = (accscalar_t*)buf;
  accscalar_t* my_s = smem + WARP_SIZE*threadIdx.y;
  int* indices_batch = (int*)(buf + sizeof(accscalar_t)*WARP_SIZE*blockDim.y);
  const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size
  const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim
  for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y)
  {
    // Entire block cooperates to load a batch of 1024 indices to process
    int tid = threadIdx.x + threadIdx.y*blockDim.x;
    if(batch_start + tid < n)
      indices_batch[tid] = (int)indices[batch_start + tid];
    int batch_end = batch_start + blockDim.x*blockDim.y < n ?
                    batch_start + blockDim.x*blockDim.y : n;
    // Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32
    for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y)
    {
      // This does double duty: it makes sure indices_batch is ready, and it makes sure match-group
      // leaders are done with their accumulates before other warps start loading again.
      __syncthreads();
      int n_this_chunk = (batch_end - chunk_start) < blockDim.y ?
                         (batch_end - chunk_start) : blockDim.y;
      int src_row = chunk_start + threadIdx.y;
      int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight
      // All warps load their smem segments with incoming grad data
      if(src_row < n && f < s && dst_row != padding_idx)
        my_s[threadIdx.x] = static_cast<accscalar_t>(grad[src_row*stride + f]);
      __syncthreads();
      // To ensure determinism, we can't just have each warp add its grad data to its dst_row.
      // We need to check if any other warps pulled grad data targeting dst_row.
      // If so, we elect the first warp in each matching group as the leader.
      // Each leader warp serializes the accumulates targeting dst_row in shared memory,
      // then finishes by adding the accumulated buffer to dst_row in grad_weight.
      if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync
      {
        int match_found_this_thread =
          (dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]);
        if(threadIdx.x >= n_this_chunk)
          match_found_this_thread = 0;
#ifdef __HIP_PLATFORM_HCC__
        // HIP wavefronts are 64 lanes wide, so the ballot needs 64 bits.
        unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread);
#else
        unsigned int matchmask = WARP_BALLOT(match_found_this_thread);
#endif
        int first_remaining_peer = __ffs(matchmask) - 1;
        if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader
        {
          matchmask ^= (1 << first_remaining_peer);
          while(matchmask)
          {
            first_remaining_peer = __ffs(matchmask) - 1;
            my_s[threadIdx.x] += smem[threadIdx.x + WARP_SIZE*first_remaining_peer];
            matchmask ^= (1 << first_remaining_peer);
          }
          if(f < s)
            grad_weight[dst_row*stride + f] += static_cast<scalar_t>(my_s[threadIdx.x]);
        }
      }
    }
  }
}
// Backward kernel for the sort-based strategy: `input` holds the sorted
// embedding indices and `indices` the matching original positions (rows of
// grad_output). One warp-row (threadIdx.y) owns each run of equal indices and
// accumulates its gradient rows serially, so the result is deterministic
// without atomics.
//
// Launch: block = (32, 4); grid covers (numel/4, stride/128).
// `count`, when non-null, holds per-position frequency counts used to scale
// gradients by 1/frequency.
template <typename scalar_t>
__global__ void embedding_backward_kernel(
  int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
  int64_t* count, int64_t numel, int64_t stride, int padding_idx) {
  using accscalar_t = acc_type<scalar_t, true>;
  int idx = blockIdx.x * 4 + threadIdx.y;

  // Each warp is responsible for an input into the LookupTable.
  // If the preceding input has the same as this input, then the warp
  // exits immediately. The warp also processes subsequent inputs with the
  // same value.
  //
  // Input Warp
  // 1 <warp 1>
  // 1 <warp 1> (<warp 2> exits without doing any work)
  // 5 <warp 3>
  // 8 <warp 4>

  // Number of values proceessed by each thread (grain size)
  const int SZ = 4;

  if (idx < numel
      && (idx == 0 || input[idx] != input[idx - 1])
      && input[idx] != padding_idx) {
    do {
      const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
      // BUGFIX: compute row offsets in 64-bit arithmetic. The previous
      // "(int) input[idx] * stride" truncated the index and overflowed the
      // product whenever num_rows * stride exceeded 2^31.
      const int64_t weight_row = input[idx] * stride;
      const int64_t grad_row = indices[idx] * stride;
      const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;

      accscalar_t gradient[SZ];
      accscalar_t weight[SZ];

      #pragma unroll
      for (int ii = 0; ii < SZ; ii++) {
        int feature_dim = start_feature + ii * WARP_SIZE;
        if (feature_dim < stride) {
          gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
          weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
        }
      }

      #pragma unroll
      for (int ii = 0; ii < SZ; ii++) {
        weight[ii] += gradient[ii] * scale;
      }

      #pragma unroll
      for (int ii = 0; ii < SZ; ii++) {
        int feature_dim = start_feature + ii * WARP_SIZE;
        if (feature_dim < stride) {
          grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
        }
      }

      idx++;
    } while (idx < numel && input[idx] == input[idx - 1]);
  }
}
/* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */
// One block per (unique) index: block-reduces the p-norm of the selected
// embedding row in shared memory, then rescales the row in place when the
// norm exceeds max_norm. Requires blockDim.x * sizeof(accscalar_t) bytes of
// dynamic shared memory.
template <typename scalar_t, typename accscalar_t>
__global__ void renorm_kernel(
    scalar_t* weights, int64_t* indices, accscalar_t max_norm,
    accscalar_t norm_type, int64_t dim,
    int64_t weights_stride0, int64_t weights_stride1) {

  // Some casting hacks since dynamic shared memory and templates don't work together:
  extern __shared__ unsigned char smem[];
  auto sdata = reinterpret_cast<accscalar_t*>(smem);

  int tid = threadIdx.x;
  // BUGFIX: 64-bit row offset; the previous int overflowed for large
  // num_rows * stride.
  int64_t base_index = indices[blockIdx.x] * weights_stride0;

  accscalar_t v = 0;
  for (int i = tid; i < dim; i += blockDim.x) {
    auto x = static_cast<accscalar_t>(weights[base_index + i * weights_stride1]);
    if (norm_type == 1) {
      v += std::abs(x);
    } else if (norm_type == 2) {
      v += x * x;
    } else {
      // BUGFIX: the p-norm sums |x|^p; pow(x, p) is wrong (or NaN for
      // non-integer p) when x is negative.
      v += std::pow(std::abs(x), norm_type);
    }
  }

  using Op = ReduceAdd<accscalar_t>;
  v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0);

  if (tid == 0) {
    sdata[0] = std::pow(v, static_cast<accscalar_t>(1.0 / norm_type));
  }
  __syncthreads();

  // now we renormalize the blocks that need it
  if (sdata[0] > max_norm) {
    auto factor = static_cast<scalar_t>(max_norm / (sdata[0] + 1e-7));
    for (int i = tid; i < dim; i += blockDim.x) {
      weights[base_index + i * weights_stride1] *= factor;
    }
  }
}
} // anonymous namespace
// Dense embedding backward on GPU.
//
// Two strategies:
//  * num_indices <= 768 and no frequency scaling: the deterministic
//    "feature" kernel processes raw indices directly.
//  * otherwise: sort the indices (thrust), optionally compute per-index
//    frequency counts via two scan-by-key passes, then launch the sort-based
//    accumulation kernel.
// Returns a new (num_weights x embedding_dim) gradient tensor.
Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indices,
                                     int64_t num_weights, int64_t padding_idx,
                                     bool scale_grad_by_freq) {
  auto grad_arg = TensorArg(grad_, "grad", 1);
  auto indices_arg = TensorArg(indices, "indices", 1);
  checkScalarType("embedding_backward", indices_arg, kLong);
  checkSameGPU("embedding_backward", grad_arg, indices_arg);

  auto num_indices = indices.numel();
  // Flatten the incoming gradient to (num_indices, embedding_dim).
  auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)});
  auto grad_weight = at::zeros({num_weights, grad_.size(-1)}, grad_.options());
  int64_t stride = grad_weight.stride(0);
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  // Small-batch fast path: deterministic per-feature kernel, no sorting.
  if (num_indices <= 768 && !scale_grad_by_freq) {
    auto indices_contig = indices.contiguous();
    dim3 grid(THCCeilDiv(stride, (int64_t)WARP_SIZE));
    dim3 block(WARP_SIZE, BLOCKDIMY);

    AT_DISPATCH_FLOATING_TYPES_AND_HALF
      (grad.type(),
       "embedding_backward",
       [&]
       {
         using accscalar_t = acc_type<scalar_t, true>;
         embedding_backward_feature_kernel<scalar_t, accscalar_t>
           <<<grid,
              block,
              // smem: one accscalar_t staging row plus one int index per thread
              sizeof(accscalar_t)*WARP_SIZE*BLOCKDIMY + sizeof(int)*WARP_SIZE*BLOCKDIMY,
              stream>>>
           (indices_contig.data<int64_t>(),
            grad.data<scalar_t>(),
            grad_weight.data<scalar_t>(),
            static_cast<int>(num_indices),
            static_cast<int64_t>(stride),
            static_cast<int>(padding_idx));
       });
    THCudaCheck(cudaGetLastError());
    return grad_weight;
  }

  auto sorted_indices = at::empty_like(indices);
  auto orig_indices = at::empty_like(indices);
  using device_ptr = thrust::device_ptr<int64_t>;

  // Sort the inputs into sorted with the corresponding indices; we
  // don't need a stable or multidimensional sort, so just use Thrust
  // directly
  {
    sorted_indices.copy_(indices);

    auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
    auto policy = thrust::cuda::par(allocator).on(stream);

    // Fill sortedOrigIndices with sequential indices
    auto count_iter = thrust::counting_iterator<int64_t>(0);
    auto orig_data = device_ptr(orig_indices.data<int64_t>());
    thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);

    // Sort; a stable sort is not required
    auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
    thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data,
                        ThrustLTOp<int64_t>());
  }

  Tensor count;
  if (scale_grad_by_freq) {
    count = at::empty_like(indices);

    auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
    auto policy = thrust::cuda::par(allocator).on(stream);

    // Compute an increasing sequence per unique item in sortedIndices:
    // sorted: 2 5 5 5 7 7 8 9 9
    //  count: 1 1 2 3 1 2 1 1 2
    auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
    auto count_data = device_ptr(count.data<int64_t>());
    thrust::inclusive_scan_by_key(
      policy,
      sorted_data,
      sorted_data + num_indices,
      thrust::make_constant_iterator(1),
      count_data
    );

    // Take the maximum of each count per unique key in reverse:
    // sorted: 2 5 5 5 7 7 8 9 9
    //  count: 1 3 3 3 2 2 1 2 2
    thrust::inclusive_scan_by_key(
      policy,
      thrust::make_reverse_iterator(sorted_data + num_indices),
      thrust::make_reverse_iterator(sorted_data),
      thrust::make_reverse_iterator(count_data + num_indices),
      thrust::make_reverse_iterator(count_data + num_indices),
      thrust::equal_to<int64_t>(),
      thrust::maximum<int64_t>()
    );
  }

  dim3 grid(THCCeilDiv(num_indices, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
  dim3 block(32, 4);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "embedding_backward", [&] {
    embedding_backward_kernel<<<grid, block, 0, stream>>>(
      sorted_indices.data<int64_t>(),
      orig_indices.data<int64_t>(),
      grad.data<scalar_t>(),
      grad_weight.data<scalar_t>(),
      count.defined() ? count.data<int64_t>() : nullptr,
      num_indices,
      stride,
      padding_idx);
  });
  THCudaCheck(cudaGetLastError());
  return grad_weight;
}
// In-place renormalization of the embedding rows selected by `indices`:
// any selected row whose p-norm (norm_type) exceeds max_norm is rescaled to
// max_norm. One CUDA block handles one (deduplicated) index. Returns `self`.
Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices,
                                double max_norm, double norm_type) {
  auto self_arg = TensorArg(self, "self", 1);
  auto indices_arg = TensorArg(indices, "indices", 1);
  checkDim("embedding_renorm_", self_arg, 2);
  checkSameGPU("embedding_renorm", self_arg, indices_arg);

  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
  auto policy = thrust::cuda::par(allocator).on(stream);

  using device_ptr = thrust::device_ptr<int64_t>;

  auto num_indices = indices.numel();
  auto indices_contig = indices.contiguous();
  auto indices_data = device_ptr(indices_contig.data<int64_t>());

  // FIXME: thrust::unique only removes consecutive elements that are equal.
  // We have race conditions when indices contain duplicates which are not
  // adjacent
  auto unique_indices = at::empty(indices.numel(), indices.options());
  auto unique_data = device_ptr(unique_indices.data<int64_t>());
  auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data);
  auto num_unique_indices = static_cast<int>(end - unique_data);

  dim3 grid(num_unique_indices);
  dim3 block(128);
  // NOTE(review): row length taken from stride(0) -- presumably assumes a
  // contiguous 2-D weight matrix; confirm callers guarantee contiguity.
  int dim = self.stride(0);

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "embedding_backward", [&] {
    using accscalar_t = acc_type<scalar_t, true>;
    renorm_kernel<<<grid, block, 128 * sizeof(accscalar_t), stream>>>(
      self.data<scalar_t>(),
      unique_indices.data<int64_t>(),
      static_cast<accscalar_t>(max_norm),
      static_cast<accscalar_t>(norm_type),
      dim, self.stride(0), self.stride(1));
  });
  THCudaCheck(cudaGetLastError());

  return self;
}
}} // namespace at::native
|
e7ef495583585e103f8c20b7f2e5a8d53ac487a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/operators/audio/preemphasis_filter_op.h"
#include <vector>
#include "dali/pipeline/data/types.h"
namespace dali {
namespace detail {
// Per-sample arguments for the pre-emphasis kernel: device input/output
// pointers, the filter coefficient, and the flattened sample length.
template <typename OutputType, typename InputType>
struct SampleDescriptor {
  const InputType *in;   // device pointer to the input signal
  OutputType *out;       // device pointer to the output signal
  float coeff;           // pre-emphasis coefficient for this sample
  int64_t size;          // number of elements in the sample
};
using BorderType = PreemphasisFilter<GPUBackend>::BorderType;
// Applies the pre-emphasis filter out[k] = in[k] - coeff * in[k-1].
// blockIdx.y selects the sample; blockIdx.x/threadIdx.x grid-stride over the
// sample length. Element 0 has no left neighbor and is handled per
// border_type: Zero -> in[0]; Clamp -> in[0] - coeff*in[0];
// Reflect -> in[0] - coeff*in[1].
// NOTE(review): Reflect reads in[1] -- assumes samples have at least two
// elements; a size-1 sample would read out of bounds. Confirm upstream.
template <typename OutputType, typename InputType>
void __global__ PreemphasisFilterKernel(const SampleDescriptor<OutputType, InputType> *samples,
                                        BorderType border_type) {
  const auto &sample = samples[blockIdx.y];
  int64_t block_size = blockDim.x;
  int64_t block_start = block_size * blockIdx.x;
  int64_t grid_stride = block_size * gridDim.x;
  int64_t k = block_start + threadIdx.x;
  if (k >= sample.size)
    return;
  if (k == 0) {
    if (border_type == BorderType::Zero) {
      sample.out[k] = sample.in[k];
    } else {
      // BorderType::Reflect or BorderType::Clamp
      InputType border = (border_type == BorderType::Reflect) ? sample.in[1] : sample.in[0];
      sample.out[k] = sample.in[k] - sample.coeff * border;
    }
    k += grid_stride;  // thread 0 resumes its grid-stride walk past element 0
  }
  for (; k < sample.size; k += grid_stride)
    sample.out[k] = sample.in[k] - sample.coeff * sample.in[k-1];
}
} // namespace detail
// GPU implementation of the pre-emphasis filter operator. Keeps a persistent
// device scratch buffer for the per-sample descriptor array so it is not
// reallocated on every iteration.
class PreemphasisFilterGPU : public PreemphasisFilter<GPUBackend> {
 public:
  explicit PreemphasisFilterGPU(const OpSpec &spec) : PreemphasisFilter<GPUBackend>(spec) {
    // void is OK here, pointer sizes are the same size
    int64_t sz = max_batch_size_ * sizeof(detail::SampleDescriptor<void, void>);
    scratch_mem_.set_type(TypeTable::GetTypeInfo(DALI_UINT8));
    scratch_mem_.Resize({sz});
  }
  void RunImpl(workspace_t<GPUBackend> &ws) override;

 private:
  // Executes the operator for a concrete (output, input) type pair.
  template <typename OutputType, typename InputType>
  void RunImplTyped(workspace_t<GPUBackend> &ws);
  // Device buffer holding the SampleDescriptor array passed to the kernel.
  Tensor<GPUBackend> scratch_mem_;
};
// Builds one SampleDescriptor per sample on the host, copies the array to the
// device scratch buffer on the workspace stream, and launches the filter
// kernel with one grid row (blockIdx.y) per sample.
template <typename OutputType, typename InputType>
void PreemphasisFilterGPU::RunImplTyped(workspace_t<GPUBackend> &ws) {
  using SampleDesc = detail::SampleDescriptor<OutputType, InputType>;
  const auto &input = ws.InputRef<GPUBackend>(0);
  auto &output = ws.OutputRef<GPUBackend>(0);
  auto curr_batch_size = ws.GetInputBatchSize(0);

  std::vector<SampleDesc> samples_cpu(curr_batch_size);
  for (int sample_idx = 0; sample_idx < curr_batch_size; sample_idx++) {
    auto &sample = samples_cpu[sample_idx];
    sample.in = input.tensor<InputType>(sample_idx);
    sample.out = output.mutable_tensor<OutputType>(sample_idx);
    sample.size = volume(input.tensor_shape(sample_idx));
    sample.coeff = preemph_coeff_[sample_idx];
  }

  // Resize the scratch buffer for this iteration's actual batch size.
  int64_t sz = curr_batch_size * sizeof(SampleDesc);
  scratch_mem_.set_type(TypeTable::GetTypeInfo(DALI_UINT8));
  scratch_mem_.Resize({sz});
  auto sample_descs_gpu = reinterpret_cast<SampleDesc*>(scratch_mem_.mutable_data<uint8_t>());

  auto stream = ws.stream();
  // NOTE(review): async copy from a pageable, stack-scoped host vector --
  // presumably safe only if this stream is synchronized before the vector
  // dies; confirm the executor guarantees that.
  CUDA_CALL(
    hipMemcpyAsync(sample_descs_gpu, samples_cpu.data(), sz, hipMemcpyHostToDevice, stream));

  int block = 256;
  // At least 32 blocks per sample; fewer when the batch is large.
  auto blocks_per_sample = ::max(32, 1024 / curr_batch_size);
  dim3 grid(blocks_per_sample, curr_batch_size);
  hipLaunchKernelGGL(( detail::PreemphasisFilterKernel), dim3(grid), dim3(block), 0, stream, sample_descs_gpu, border_type_);
}
// Runtime type dispatch: maps the input/output type ids onto concrete
// template parameters and forwards to RunImplTyped.
void PreemphasisFilterGPU::RunImpl(workspace_t<GPUBackend> &ws) {
  const auto &input = ws.template InputRef<GPUBackend>(0);
  TYPE_SWITCH(input.type().id(), type2id, InputType, PREEMPH_TYPES, (
    TYPE_SWITCH(output_type_, type2id, OutputType, PREEMPH_TYPES, (
      RunImplTyped<OutputType, InputType>(ws);
    ), DALI_FAIL(make_string("Unsupported output type: ", output_type_))); // NOLINT
  ), DALI_FAIL(make_string("Unsupported input type: ", input.type().id()))); // NOLINT
}
DALI_REGISTER_OPERATOR(PreemphasisFilter, PreemphasisFilterGPU, GPU);
} // namespace dali
| e7ef495583585e103f8c20b7f2e5a8d53ac487a5.cu | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/operators/audio/preemphasis_filter_op.h"
#include <vector>
#include "dali/pipeline/data/types.h"
namespace dali {
namespace detail {
// Per-sample arguments for the pre-emphasis kernel: device input/output
// pointers, the filter coefficient, and the flattened sample length.
template <typename OutputType, typename InputType>
struct SampleDescriptor {
  const InputType *in;   // device pointer to the input signal
  OutputType *out;       // device pointer to the output signal
  float coeff;           // pre-emphasis coefficient for this sample
  int64_t size;          // number of elements in the sample
};
using BorderType = PreemphasisFilter<GPUBackend>::BorderType;
// Applies the pre-emphasis filter out[k] = in[k] - coeff * in[k-1].
// blockIdx.y selects the sample; blockIdx.x/threadIdx.x grid-stride over the
// sample length. Element 0 has no left neighbor and is handled per
// border_type: Zero -> in[0]; Clamp -> in[0] - coeff*in[0];
// Reflect -> in[0] - coeff*in[1].
// NOTE(review): Reflect reads in[1] -- assumes samples have at least two
// elements; a size-1 sample would read out of bounds. Confirm upstream.
template <typename OutputType, typename InputType>
void __global__ PreemphasisFilterKernel(const SampleDescriptor<OutputType, InputType> *samples,
                                        BorderType border_type) {
  const auto &sample = samples[blockIdx.y];
  int64_t block_size = blockDim.x;
  int64_t block_start = block_size * blockIdx.x;
  int64_t grid_stride = block_size * gridDim.x;
  int64_t k = block_start + threadIdx.x;
  if (k >= sample.size)
    return;
  if (k == 0) {
    if (border_type == BorderType::Zero) {
      sample.out[k] = sample.in[k];
    } else {
      // BorderType::Reflect or BorderType::Clamp
      InputType border = (border_type == BorderType::Reflect) ? sample.in[1] : sample.in[0];
      sample.out[k] = sample.in[k] - sample.coeff * border;
    }
    k += grid_stride;  // thread 0 resumes its grid-stride walk past element 0
  }
  for (; k < sample.size; k += grid_stride)
    sample.out[k] = sample.in[k] - sample.coeff * sample.in[k-1];
}
} // namespace detail
// GPU implementation of the pre-emphasis filter operator. Keeps a persistent
// device scratch buffer for the per-sample descriptor array so it is not
// reallocated on every iteration.
class PreemphasisFilterGPU : public PreemphasisFilter<GPUBackend> {
 public:
  explicit PreemphasisFilterGPU(const OpSpec &spec) : PreemphasisFilter<GPUBackend>(spec) {
    // void is OK here, pointer sizes are the same size
    int64_t sz = max_batch_size_ * sizeof(detail::SampleDescriptor<void, void>);
    scratch_mem_.set_type(TypeTable::GetTypeInfo(DALI_UINT8));
    scratch_mem_.Resize({sz});
  }
  void RunImpl(workspace_t<GPUBackend> &ws) override;

 private:
  // Executes the operator for a concrete (output, input) type pair.
  template <typename OutputType, typename InputType>
  void RunImplTyped(workspace_t<GPUBackend> &ws);
  // Device buffer holding the SampleDescriptor array passed to the kernel.
  Tensor<GPUBackend> scratch_mem_;
};
// Builds one SampleDescriptor per sample on the host, copies the array to the
// device scratch buffer on the workspace stream, and launches the filter
// kernel with one grid row (blockIdx.y) per sample.
template <typename OutputType, typename InputType>
void PreemphasisFilterGPU::RunImplTyped(workspace_t<GPUBackend> &ws) {
  using SampleDesc = detail::SampleDescriptor<OutputType, InputType>;
  const auto &input = ws.InputRef<GPUBackend>(0);
  auto &output = ws.OutputRef<GPUBackend>(0);
  auto curr_batch_size = ws.GetInputBatchSize(0);

  std::vector<SampleDesc> samples_cpu(curr_batch_size);
  for (int sample_idx = 0; sample_idx < curr_batch_size; sample_idx++) {
    auto &sample = samples_cpu[sample_idx];
    sample.in = input.tensor<InputType>(sample_idx);
    sample.out = output.mutable_tensor<OutputType>(sample_idx);
    sample.size = volume(input.tensor_shape(sample_idx));
    sample.coeff = preemph_coeff_[sample_idx];
  }

  // Resize the scratch buffer for this iteration's actual batch size.
  int64_t sz = curr_batch_size * sizeof(SampleDesc);
  scratch_mem_.set_type(TypeTable::GetTypeInfo(DALI_UINT8));
  scratch_mem_.Resize({sz});
  auto sample_descs_gpu = reinterpret_cast<SampleDesc*>(scratch_mem_.mutable_data<uint8_t>());

  auto stream = ws.stream();
  // NOTE(review): async copy from a pageable, stack-scoped host vector --
  // presumably safe only if this stream is synchronized before the vector
  // dies; confirm the executor guarantees that.
  CUDA_CALL(
    cudaMemcpyAsync(sample_descs_gpu, samples_cpu.data(), sz, cudaMemcpyHostToDevice, stream));

  int block = 256;
  // At least 32 blocks per sample; fewer when the batch is large.
  auto blocks_per_sample = std::max(32, 1024 / curr_batch_size);
  dim3 grid(blocks_per_sample, curr_batch_size);
  detail::PreemphasisFilterKernel<<<grid, block, 0, stream>>>(sample_descs_gpu, border_type_);
}
// Runtime type dispatch: maps the input/output type ids onto concrete
// template parameters and forwards to RunImplTyped.
void PreemphasisFilterGPU::RunImpl(workspace_t<GPUBackend> &ws) {
  const auto &input = ws.template InputRef<GPUBackend>(0);
  TYPE_SWITCH(input.type().id(), type2id, InputType, PREEMPH_TYPES, (
    TYPE_SWITCH(output_type_, type2id, OutputType, PREEMPH_TYPES, (
      RunImplTyped<OutputType, InputType>(ws);
    ), DALI_FAIL(make_string("Unsupported output type: ", output_type_))); // NOLINT
  ), DALI_FAIL(make_string("Unsupported input type: ", input.type().id()))); // NOLINT
}
DALI_REGISTER_OPERATOR(PreemphasisFilter, PreemphasisFilterGPU, GPU);
} // namespace dali
|
bd4a282bc769149588a8db98280b7ac4b6157a78.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2023, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <lbann-dev@llnl.gov>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_SOFTMAX_LAYER_INSTANTIATE
#include "lbann/layers/activations/softmax_impl.hpp"
#include "lbann/utils/gpu/helpers.hpp"
#ifdef LBANN_HAS_DNN_LIB
#include "lbann/utils/dnn_lib/softmax.hpp"
#endif // LBANN_HAS_DNN_LIB
#ifdef LBANN_HAS_DISTCONV
#include "lbann/layers/data_type_distconv_adapter.hpp"
#endif // LBANN_HAS_DISTCONV
namespace lbann {
namespace {
#ifdef LBANN_ENABLE_SOFTMAX_THRESHOLD
/** Functor to ensure values are above threshold value */
template <typename TensorDataType>
struct threshold_op
{
  // Clamps y from below at sqrt(min()) so softmax outputs stay away from
  // denormalized values.
  __forceinline__ __device__ TensorDataType
  operator()(const TensorDataType& y) const
  {
    return gpu_lib::max(y, gpu_lib::sqrt(gpu_lib::min<TensorDataType>()));
  }
};
#endif // LBANN_ENABLE_SOFTMAX_THRESHOLD
/** @brief Max functor */
template <class T>
struct max_op
{
  // BUGFIX: return T rather than the global DataType. Returning DataType
  // silently converted the result whenever T != DataType (e.g. double or
  // half tensors), losing precision in max reductions.
  __device__ __forceinline__ T operator()(const T& x1, const T& x2) const
  {
    return gpu_lib::max(x1, x2);
  }
};
/** @brief Kernel for max reduction on matrix columns
*
* Each CUDA block computes the max over a subset of matrix entries
* and outputs the result. This is repeated multiple times for
* column-wise max reduction.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param values (height x width) matrix
* @param max_values (nblocksx x width) matrix
*/
// Column-wise partial max reduction; see the Doxygen comment in the original
// header: block (bsize,1,1), grid ((height/bsize), width, 1); each block
// writes one partial max per column into max_values.
template <size_t bsize, typename TensorDataType>
__global__ void reduce_max_kernel(size_t height,
                                  size_t width,
                                  const TensorDataType* __restrict__ values,
                                  size_t values_ldim,
                                  TensorDataType* __restrict__ max_values)
{
  // Indices
  const size_t tid = threadIdx.x;
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t bidx = blockIdx.x;
  const size_t bidy = blockIdx.y;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nblocksx = gridDim.x;
  const size_t nblocksy = gridDim.y;

  for (size_t col = bidy; col < width; col += nblocksy) {

    // Find largest value for each thread
    TensorDataType thread_max_val{-gpu_lib::infinity<TensorDataType>()};
    for (size_t row = gidx; row < height; row += nthreadsx) {
      const auto& val = values[row + col * values_ldim];
      thread_max_val = gpu_lib::max(thread_max_val, val);
    }

    // Find largest value for each block
    // BUGFIX: reduce in TensorDataType. The old code instantiated
    // block_reduce with the global DataType, silently converting through
    // float when TensorDataType was double/half.
    const TensorDataType block_max_val =
      gpu_lib::block_reduce<bsize, 1, 1, TensorDataType, max_op<TensorDataType>>(
        thread_max_val);
    if (tid == 0) {
      max_values[bidx + col * nblocksx] = block_max_val;
    }
  }
}
/** @brief Compute exp(x-shift)
*
* Also compute sum(exp(x-shift)) for each matrix column.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*/
template <size_t bsize, typename TensorDataType>
__global__ void fp_exp_kernel(size_t height,
                              size_t width,
                              const TensorDataType* __restrict__ input,
                              size_t input_ldim,
                              TensorDataType* __restrict__ output,
                              size_t output_ldim,
                              const TensorDataType* __restrict__ shifts,
                              TensorDataType* __restrict__ sums)
{
  // Indices
  const size_t tid = threadIdx.x;
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t bidy = blockIdx.y;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nblocksy = gridDim.y;

  for (size_t col = bidy; col < width; col += nblocksy) {
    // Per-column shift (the column max) keeps exp() from overflowing.
    const auto& shift = shifts[col];

    // Exponentiate inputs and compute sum for each thread
    TensorDataType thread_sum{0};
    for (size_t row = gidx; row < height; row += nthreadsx) {
      const auto& x = input[row + col * input_ldim];
      auto& y = output[row + col * output_ldim];
      y = gpu_lib::exp(x - shift);
      thread_sum += y;
    }

    // Compute sum for each block
    const TensorDataType block_sum =
      gpu_lib::block_reduce<bsize, 1, 1>(thread_sum);
    if (tid == 0) {
      // sums must be zero-initialized by the caller; blocks accumulate into
      // it atomically across the grid.
      gpu_lib::atomic_add(&sums[col], block_sum);
    }
  }
}
/** @brief Compute layer output
*
* y = exp(x-shift) / sum(exp(x-shift))
*
* If @c LBANN_ENABLE_SOFTMAX_THRESHOLD is set, small values are
* thresholded to a minimum value to avoid denormalized floats.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param output On input, constains exp(x-shift). On output,
* contains the layer output.
* @param sums sum(exp(x-shift)) for each column
*/
// Divides each precomputed exp(x-shift) entry by its column's sum, producing
// the softmax output in place. 2-D grid-stride traversal: x covers rows
// (entries), y covers columns (samples).
template <typename TensorDataType>
__global__ void fp_output_kernel(size_t height,
                                 size_t width,
                                 TensorDataType* __restrict__ output,
                                 size_t output_ldim,
                                 const TensorDataType* __restrict__ sums)
{
  const size_t row0 = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t col0 = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t row_stride = blockDim.x * gridDim.x;
  const size_t col_stride = blockDim.y * gridDim.y;
  for (size_t col = col0; col < width; col += col_stride) {
    const auto& col_sum = sums[col];
    for (size_t row = row0; row < height; row += row_stride) {
      auto& y = output[row + col * output_ldim];
      y /= col_sum;
#ifdef LBANN_ENABLE_SOFTMAX_THRESHOLD
      // Keep outputs away from denormalized values.
      y = gpu_lib::max(y, gpu_lib::sqrt(gpu_lib::min<TensorDataType>()));
#endif // LBANN_ENABLE_SOFTMAX_THRESHOLD
    }
  }
}
/** @brief Compute dot(y,dy) for each matrix column
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*/
template <size_t bsize, typename TensorDataType>
__global__ void
bp_dot_product_kernel(size_t height,
                      size_t width,
                      const TensorDataType* __restrict__ output,
                      size_t output_ldim,
                      const TensorDataType* __restrict__ gradient_wrt_output,
                      size_t gradient_wrt_output_ldim,
                      TensorDataType* __restrict__ dot_products)
{
  // Indices
  const size_t tid = threadIdx.x;
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t bidy = blockIdx.y;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nblocksy = gridDim.y;

  for (size_t col = bidy; col < width; col += nblocksy) {

    // Compute dot product contribution for each thread
    TensorDataType thread_dot_product{0};
    for (size_t row = gidx; row < height; row += nthreadsx) {
      const auto& y = output[row + col * output_ldim];
      const auto& dy =
        gradient_wrt_output[row + col * gradient_wrt_output_ldim];
      thread_dot_product += y * dy;
    }

    // Compute dot product contribution for each block
    const TensorDataType block_dot_product =
      gpu_lib::block_reduce<bsize, 1, 1>(thread_dot_product);
    if (tid == 0) {
      // dot_products must be zero-initialized by the caller; blocks
      // accumulate atomically across the grid.
      gpu_lib::atomic_add(&dot_products[col], block_dot_product);
    }
  }
}
/** @brief Compute gradient w.r.t. input
*
* dx = y * (dy - dot(y,dy))
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param dot_products dot(y,dy) for each matrix column
*/
// Softmax input gradient: dx = y * (dy - dot(y,dy)), with the per-column
// dot products precomputed in dot_products. 2-D grid-stride traversal:
// x covers rows (entries), y covers columns (samples).
template <size_t bsize, typename TensorDataType>
__global__ void
bp_kernel(size_t height,
          size_t width,
          const TensorDataType* __restrict__ output,
          size_t output_ldim,
          const TensorDataType* __restrict__ gradient_wrt_output,
          size_t gradient_wrt_output_ldim,
          const TensorDataType* __restrict__ dot_products,
          TensorDataType* __restrict__ gradient_wrt_input,
          size_t gradient_wrt_input_ldim)
{
  const size_t row0 = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t col0 = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t row_stride = blockDim.x * gridDim.x;
  const size_t col_stride = blockDim.y * gridDim.y;
  for (size_t col = col0; col < width; col += col_stride) {
    const auto& y_dot_dy = dot_products[col];
    for (size_t row = row0; row < height; row += row_stride) {
      const auto& y = output[row + col * output_ldim];
      const auto& dy =
        gradient_wrt_output[row + col * gradient_wrt_output_ldim];
      gradient_wrt_input[row + col * gradient_wrt_input_ldim] =
        y * (dy - y_dot_dy);
    }
  }
}
#ifdef LBANN_HAS_DISTCONV
// Forwards the softmax forward pass to the Distconv backend adapter.
template <typename TensorDataType, data_layout Layout, El::Device Device>
void fp_compute_distconv(
  softmax_distconv_adapter<TensorDataType, Layout, Device>& dc)
{
  dc.m_softmax->forward(dc.get_prev_activations(), dc.get_activations());
}
// Forwards the softmax backward pass to the Distconv backend adapter.
template <typename TensorDataType, data_layout Layout, El::Device Device>
void bp_compute_distconv(
  softmax_distconv_adapter<TensorDataType, Layout, Device>& dc)
{
  dc.m_softmax->backward(dc.get_activations(),
                         dc.get_prev_error_signals(),
                         dc.get_error_signals());
}
#endif // LBANN_HAS_DISTCONV
} // namespace
// Data-parallel forward pass: delegates to the DNN library's softmax
// (or to Distconv when enabled). Optionally thresholds tiny outputs to
// avoid denormals.
template <typename TensorDataType>
void fp_compute_impl(
  softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l)
{
#ifdef LBANN_HAS_DISTCONV
  if (l.distconv_enabled()) {
    fp_compute_distconv(l.get_distconv_adapter());
    return;
  }
#endif // LBANN_HAS_DISTCONV
  const dnn_lib::ScalingParamType<TensorDataType> zero = 0.;
  const dnn_lib::ScalingParamType<TensorDataType> one = 1.;
  const auto& local_input =
    dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(
      l.get_local_prev_activations());
  auto& local_output =
    dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(
      l.get_local_activations());
  if (!local_input.IsEmpty()) {
    dnn_lib::softmax_forward(one,
                             l.m_tensors_dnn_desc.get_prev_activations(),
                             local_input,
                             zero,
                             l.m_tensors_dnn_desc.get_activations(),
                             local_output,
                             l.m_mode);
#ifdef LBANN_ENABLE_SOFTMAX_THRESHOLD
    gpu_lib::apply_entrywise_unary_operator<threshold_op>(local_output,
                                                          local_output);
#endif // LBANN_ENABLE_SOFTMAX_THRESHOLD
  }
}
// Data-parallel backward pass: delegates to the DNN library's softmax
// backward (or to Distconv when enabled).
template <typename TensorDataType>
void bp_compute_impl(
  softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l)
{
#ifdef LBANN_HAS_DISTCONV
  if (l.distconv_enabled()) {
    bp_compute_distconv(l.get_distconv_adapter());
    return;
  }
#endif // LBANN_HAS_DISTCONV
  const dnn_lib::ScalingParamType<TensorDataType> zero = 0.;
  const dnn_lib::ScalingParamType<TensorDataType> one = 1.;
  const auto& local_output =
    dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(
      l.get_local_activations());
  const auto& local_gradient_wrt_output =
    dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(
      l.get_local_prev_error_signals());
  auto& local_gradient_wrt_input =
    dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(
      l.get_local_error_signals());
  dnn_lib::softmax_backward(one,
                            l.m_tensors_dnn_desc.get_activations(),
                            local_output,
                            l.m_tensors_dnn_desc.get_prev_error_signals(),
                            local_gradient_wrt_output,
                            zero,
                            l.m_tensors_dnn_desc.get_error_signals(),
                            local_gradient_wrt_input,
                            l.m_mode);
}
// Model-parallel forward pass. The entries of each column are distributed
// across ranks, so the classic three-step numerically-stable softmax is done
// with explicit reductions: (1) column max (multi-pass local reduction +
// MPI allreduce), (2) exp(x-max) with column sums accumulated in the
// workspace row vector (allreduced), (3) divide by the sums.
template <typename TensorDataType>
void fp_compute_impl(
  softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>&
    l)
{
  if (l.m_mode != softmax_mode::INSTANCE) {
    LBANN_ERROR("Unsupported softmax mode");
  }

  // Setup workspace
  l.m_workspace->Empty(false);
  l.m_workspace->AlignWith(l.get_activations());
  l.m_workspace->Resize(1, l.get_activations().Width());

  // Local matrices
  const auto& local_input =
    dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(
      l.get_local_prev_activations());
  auto& local_output =
    dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(
      l.get_local_activations());
  auto& local_workspace =
    dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(
      l.m_workspace->Matrix());
  const size_t local_height = local_input.Height();
  const size_t local_width = local_input.Width();

  // GPU objects
  auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_input),
                                     gpu::get_sync_info(local_output),
                                     gpu::get_sync_info(local_workspace));
  // The comm templates will not convert the multisync, so cast the multisync
  // and use sync_info for comms.
  El::SyncInfo<El::Device::GPU> const& sync_info = multisync;

  // Find max value in each column
  gpu_lib::thrust::vector<TensorDataType> max_vals;
  if (local_output.IsEmpty()) {
    // No local entries: contribute -inf so the allreduce keeps other
    // ranks' maxima.
    max_vals.resize(local_width,
                    -std::numeric_limits<TensorDataType>::infinity());
  }
  else {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (local_height + block_size - 1) / block_size;
    grid_dims.y = local_width;
    max_vals.resize(grid_dims.x * local_width);
    hydrogen::gpu::LaunchKernel(reduce_max_kernel<block_size, TensorDataType>,
                                grid_dims,
                                block_dims,
                                0,
                                multisync,
                                local_height,
                                local_width,
                                local_input.LockedBuffer(),
                                local_input.LDim(),
                                max_vals.data().get());
    // Repeatedly reduce the per-block partial maxima until one value per
    // column remains.
    while (grid_dims.x > 1) {
      const size_t prev_height = grid_dims.x;
      grid_dims.x = (prev_height + block_size - 1) / block_size;
      gpu_lib::thrust::vector<TensorDataType> prev_vals(std::move(max_vals));
      max_vals.resize(grid_dims.x * local_width);
      hydrogen::gpu::LaunchKernel(reduce_max_kernel<block_size, TensorDataType>,
                                  grid_dims,
                                  block_dims,
                                  0,
                                  multisync,
                                  prev_height,
                                  local_width,
                                  prev_vals.data().get(),
                                  prev_height,
                                  max_vals.data().get());
    }
  }
  // Combine the per-rank column maxima.
  El::mpi::AllReduce(max_vals.data().get(),
                     max_vals.size(),
                     El::mpi::MAX,
                     l.m_workspace->RedundantComm(),
                     sync_info);

  // Compute exp(x-max_val) and sum(exp(x-max_val))
  // (workspace must be zeroed: fp_exp_kernel accumulates atomically)
  El::Zero(*l.m_workspace);
  if (!local_output.IsEmpty()) {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (local_height + block_size - 1) / block_size;
    grid_dims.y = local_width;
    hydrogen::gpu::LaunchKernel(fp_exp_kernel<block_size, TensorDataType>,
                                grid_dims,
                                block_dims,
                                0,
                                multisync,
                                local_height,
                                local_width,
                                local_input.LockedBuffer(),
                                local_input.LDim(),
                                local_output.Buffer(),
                                local_output.LDim(),
                                max_vals.data().get(),
                                local_workspace.Buffer());
  }
  El::AllReduce(*l.m_workspace, l.m_workspace->RedundantComm());

  // Compute output
  // Note: y = exp(x-max_val) / sum(exp(x-max_val))
  if (!local_output.IsEmpty()) {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (local_height + block_size - 1) / block_size;
    grid_dims.y = local_width;
    hydrogen::gpu::LaunchKernel(fp_output_kernel<TensorDataType>,
                                grid_dims,
                                block_dims,
                                0,
                                multisync,
                                local_height,
                                local_width,
                                local_output.Buffer(),
                                local_output.LDim(),
                                local_workspace.LockedBuffer());
  }
}
template <typename TensorDataType>
void bp_compute_impl(
softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>&
l)
{
if (l.m_mode != softmax_mode::INSTANCE) {
LBANN_ERROR("Unsupported softmax mode");
}
// Local matrices
const auto& local_output =
dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(
l.get_local_activations());
const auto& local_gradient_wrt_output =
dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(
l.get_local_prev_error_signals());
auto& local_gradient_wrt_input =
dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(
l.get_local_error_signals());
auto& local_workspace =
dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(
l.m_workspace->Matrix());
const auto& local_height = local_output.Height();
const auto& local_width = local_output.Width();
// GPU objects
auto multisync =
El::MakeMultiSync(gpu::get_sync_info(local_output),
gpu::get_sync_info(local_gradient_wrt_output),
gpu::get_sync_info(local_gradient_wrt_input),
gpu::get_sync_info(local_workspace));
// Compute dot(y,dy)
El::Zero(local_workspace);
if (!local_output.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hydrogen::gpu::LaunchKernel(
bp_dot_product_kernel<block_size, TensorDataType>,
grid_dims,
block_dims,
0,
multisync,
local_height,
local_width,
local_output.LockedBuffer(),
local_output.LDim(),
local_gradient_wrt_output.LockedBuffer(),
local_gradient_wrt_output.LDim(),
local_workspace.Buffer());
}
El::AllReduce(*l.m_workspace, l.m_workspace->RedundantComm());
// Compute gradient w.r.t. input
if (!local_output.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hydrogen::gpu::LaunchKernel(bp_kernel<block_size, TensorDataType>,
grid_dims,
block_dims,
0,
multisync,
local_height,
local_width,
local_output.LockedBuffer(),
local_output.LDim(),
local_gradient_wrt_output.LockedBuffer(),
local_gradient_wrt_output.LDim(),
local_workspace.Buffer(),
local_gradient_wrt_input.Buffer(),
local_gradient_wrt_input.LDim());
}
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void softmax_layer<TensorDataType, Layout, Device>::fp_compute()
{
fp_compute_impl(*this);
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void softmax_layer<TensorDataType, Layout, Device>::bp_compute()
{
bp_compute_impl(*this);
}
// Template instantiation
#define PROTO(T) \
template class softmax_layer<T, \
data_layout::DATA_PARALLEL, \
El::Device::GPU>; \
template class softmax_layer<T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
| bd4a282bc769149588a8db98280b7ac4b6157a78.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2023, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <lbann-dev@llnl.gov>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_SOFTMAX_LAYER_INSTANTIATE
#include "lbann/layers/activations/softmax_impl.hpp"
#include "lbann/utils/gpu/helpers.hpp"
#ifdef LBANN_HAS_DNN_LIB
#include "lbann/utils/dnn_lib/softmax.hpp"
#endif // LBANN_HAS_DNN_LIB
#ifdef LBANN_HAS_DISTCONV
#include "lbann/layers/data_type_distconv_adapter.hpp"
#endif // LBANN_HAS_DISTCONV
namespace lbann {
namespace {
#ifdef LBANN_ENABLE_SOFTMAX_THRESHOLD
/** Functor to ensure values are above threshold value */
template <typename TensorDataType>
struct threshold_op
{
__forceinline__ __device__ TensorDataType
operator()(const TensorDataType& y) const
{
return gpu_lib::max(y, gpu_lib::sqrt(gpu_lib::min<TensorDataType>()));
}
};
#endif // LBANN_ENABLE_SOFTMAX_THRESHOLD
/** @brief Max functor */
template <class T>
struct max_op
{
__device__ __forceinline__ DataType operator()(const T& x1, const T& x2) const
{
return gpu_lib::max(x1, x2);
}
};
/** @brief Kernel for max reduction on matrix columns
*
* Each CUDA block computes the max over a subset of matrix entries
* and outputs the result. This is repeated multiple times for
* column-wise max reduction.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param values (height x width) matrix
* @param max_values (nblocksx x width) matrix
*/
template <size_t bsize, typename TensorDataType>
__global__ void reduce_max_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ values,
size_t values_ldim,
TensorDataType* __restrict__ max_values)
{
// Indices
const size_t tid = threadIdx.x;
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t bidx = blockIdx.x;
const size_t bidy = blockIdx.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nblocksx = gridDim.x;
const size_t nblocksy = gridDim.y;
for (size_t col = bidy; col < width; col += nblocksy) {
// Find largest value for each thread
TensorDataType thread_max_val{-gpu_lib::infinity<TensorDataType>()};
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& val = values[row + col * values_ldim];
thread_max_val = gpu_lib::max(thread_max_val, val);
}
// Find largest value for each block
const TensorDataType block_max_val =
gpu_lib::block_reduce<bsize, 1, 1, DataType, max_op<DataType>>(
thread_max_val);
if (tid == 0) {
max_values[bidx + col * nblocksx] = block_max_val;
}
}
}
/** @brief Compute exp(x-shift)
*
* Also compute sum(exp(x-shift)) for each matrix column.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*/
template <size_t bsize, typename TensorDataType>
__global__ void fp_exp_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ input,
size_t input_ldim,
TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ shifts,
TensorDataType* __restrict__ sums)
{
// Indices
const size_t tid = threadIdx.x;
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t bidy = blockIdx.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nblocksy = gridDim.y;
for (size_t col = bidy; col < width; col += nblocksy) {
const auto& shift = shifts[col];
// Exponentiate inputs and compute sum for each thread
TensorDataType thread_sum{0};
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& x = input[row + col * input_ldim];
auto& y = output[row + col * output_ldim];
y = gpu_lib::exp(x - shift);
thread_sum += y;
}
// Compute sum for each block
const TensorDataType block_sum =
gpu_lib::block_reduce<bsize, 1, 1>(thread_sum);
if (tid == 0) {
gpu_lib::atomic_add(&sums[col], block_sum);
}
}
}
/** @brief Compute layer output
*
* y = exp(x-shift) / sum(exp(x-shift))
*
* If @c LBANN_ENABLE_SOFTMAX_THRESHOLD is set, small values are
* thresholded to a minimum value to avoid denormalized floats.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param output On input, constains exp(x-shift). On output,
* contains the layer output.
* @param sums sum(exp(x-shift)) for each column
*/
template <typename TensorDataType>
__global__ void fp_output_kernel(size_t height,
size_t width,
TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ sums)
{
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t col = gidy; col < width; col += nthreadsy) {
const auto& denom = sums[col];
for (size_t row = gidx; row < height; row += nthreadsx) {
auto& y = output[row + col * output_ldim];
y /= denom;
#ifdef LBANN_ENABLE_SOFTMAX_THRESHOLD
y = gpu_lib::max(y, gpu_lib::sqrt(gpu_lib::min<TensorDataType>()));
#endif // LBANN_ENABLE_SOFTMAX_THRESHOLD
}
}
}
/** @brief Compute dot(y,dy) for each matrix column
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*/
template <size_t bsize, typename TensorDataType>
__global__ void
bp_dot_product_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ gradient_wrt_output,
size_t gradient_wrt_output_ldim,
TensorDataType* __restrict__ dot_products)
{
// Indices
const size_t tid = threadIdx.x;
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t bidy = blockIdx.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nblocksy = gridDim.y;
for (size_t col = bidy; col < width; col += nblocksy) {
// Compute dot product contribution for each thread
TensorDataType thread_dot_product{0};
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& y = output[row + col * output_ldim];
const auto& dy =
gradient_wrt_output[row + col * gradient_wrt_output_ldim];
thread_dot_product += y * dy;
}
// Compute dot product contribution for each block
const TensorDataType block_dot_product =
gpu_lib::block_reduce<bsize, 1, 1>(thread_dot_product);
if (tid == 0) {
gpu_lib::atomic_add(&dot_products[col], block_dot_product);
}
}
}
/** @brief Compute gradient w.r.t. input
*
* dx = y * (dy - dot(y,dy))
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param dot_products dot(y,dy) for each matrix column
*/
template <size_t bsize, typename TensorDataType>
__global__ void
bp_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ gradient_wrt_output,
size_t gradient_wrt_output_ldim,
const TensorDataType* __restrict__ dot_products,
TensorDataType* __restrict__ gradient_wrt_input,
size_t gradient_wrt_input_ldim)
{
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t col = gidy; col < width; col += nthreadsy) {
const auto& y_dot_dy = dot_products[col];
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& y = output[row + col * output_ldim];
const auto& dy =
gradient_wrt_output[row + col * gradient_wrt_output_ldim];
auto& dx = gradient_wrt_input[row + col * gradient_wrt_input_ldim];
dx = y * (dy - y_dot_dy);
}
}
}
#ifdef LBANN_HAS_DISTCONV
template <typename TensorDataType, data_layout Layout, El::Device Device>
void fp_compute_distconv(
softmax_distconv_adapter<TensorDataType, Layout, Device>& dc)
{
dc.m_softmax->forward(dc.get_prev_activations(), dc.get_activations());
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void bp_compute_distconv(
softmax_distconv_adapter<TensorDataType, Layout, Device>& dc)
{
dc.m_softmax->backward(dc.get_activations(),
dc.get_prev_error_signals(),
dc.get_error_signals());
}
#endif // LBANN_HAS_DISTCONV
} // namespace
template <typename TensorDataType>
void fp_compute_impl(
softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l)
{
#ifdef LBANN_HAS_DISTCONV
if (l.distconv_enabled()) {
fp_compute_distconv(l.get_distconv_adapter());
return;
}
#endif // LBANN_HAS_DISTCONV
const dnn_lib::ScalingParamType<TensorDataType> zero = 0.;
const dnn_lib::ScalingParamType<TensorDataType> one = 1.;
const auto& local_input =
dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(
l.get_local_prev_activations());
auto& local_output =
dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(
l.get_local_activations());
if (!local_input.IsEmpty()) {
dnn_lib::softmax_forward(one,
l.m_tensors_dnn_desc.get_prev_activations(),
local_input,
zero,
l.m_tensors_dnn_desc.get_activations(),
local_output,
l.m_mode);
#ifdef LBANN_ENABLE_SOFTMAX_THRESHOLD
gpu_lib::apply_entrywise_unary_operator<threshold_op>(local_output,
local_output);
#endif // LBANN_ENABLE_SOFTMAX_THRESHOLD
}
}
template <typename TensorDataType>
void bp_compute_impl(
softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l)
{
#ifdef LBANN_HAS_DISTCONV
if (l.distconv_enabled()) {
bp_compute_distconv(l.get_distconv_adapter());
return;
}
#endif // LBANN_HAS_DISTCONV
const dnn_lib::ScalingParamType<TensorDataType> zero = 0.;
const dnn_lib::ScalingParamType<TensorDataType> one = 1.;
const auto& local_output =
dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(
l.get_local_activations());
const auto& local_gradient_wrt_output =
dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(
l.get_local_prev_error_signals());
auto& local_gradient_wrt_input =
dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(
l.get_local_error_signals());
dnn_lib::softmax_backward(one,
l.m_tensors_dnn_desc.get_activations(),
local_output,
l.m_tensors_dnn_desc.get_prev_error_signals(),
local_gradient_wrt_output,
zero,
l.m_tensors_dnn_desc.get_error_signals(),
local_gradient_wrt_input,
l.m_mode);
}
template <typename TensorDataType>
void fp_compute_impl(
softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>&
l)
{
if (l.m_mode != softmax_mode::INSTANCE) {
LBANN_ERROR("Unsupported softmax mode");
}
// Setup workspace
l.m_workspace->Empty(false);
l.m_workspace->AlignWith(l.get_activations());
l.m_workspace->Resize(1, l.get_activations().Width());
// Local matrices
const auto& local_input =
dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(
l.get_local_prev_activations());
auto& local_output =
dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(
l.get_local_activations());
auto& local_workspace =
dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(
l.m_workspace->Matrix());
const size_t local_height = local_input.Height();
const size_t local_width = local_input.Width();
// GPU objects
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_input),
gpu::get_sync_info(local_output),
gpu::get_sync_info(local_workspace));
// The comm templates will not convert the multisync, so cast the multisync
// and use sync_info for comms.
El::SyncInfo<El::Device::GPU> const& sync_info = multisync;
// Find max value in each column
gpu_lib::thrust::vector<TensorDataType> max_vals;
if (local_output.IsEmpty()) {
max_vals.resize(local_width,
-std::numeric_limits<TensorDataType>::infinity());
}
else {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
max_vals.resize(grid_dims.x * local_width);
hydrogen::gpu::LaunchKernel(reduce_max_kernel<block_size, TensorDataType>,
grid_dims,
block_dims,
0,
multisync,
local_height,
local_width,
local_input.LockedBuffer(),
local_input.LDim(),
max_vals.data().get());
while (grid_dims.x > 1) {
const size_t prev_height = grid_dims.x;
grid_dims.x = (prev_height + block_size - 1) / block_size;
gpu_lib::thrust::vector<TensorDataType> prev_vals(std::move(max_vals));
max_vals.resize(grid_dims.x * local_width);
hydrogen::gpu::LaunchKernel(reduce_max_kernel<block_size, TensorDataType>,
grid_dims,
block_dims,
0,
multisync,
prev_height,
local_width,
prev_vals.data().get(),
prev_height,
max_vals.data().get());
}
}
El::mpi::AllReduce(max_vals.data().get(),
max_vals.size(),
El::mpi::MAX,
l.m_workspace->RedundantComm(),
sync_info);
// Compute exp(x-max_val) and sum(exp(x-max_val))
El::Zero(*l.m_workspace);
if (!local_output.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hydrogen::gpu::LaunchKernel(fp_exp_kernel<block_size, TensorDataType>,
grid_dims,
block_dims,
0,
multisync,
local_height,
local_width,
local_input.LockedBuffer(),
local_input.LDim(),
local_output.Buffer(),
local_output.LDim(),
max_vals.data().get(),
local_workspace.Buffer());
}
El::AllReduce(*l.m_workspace, l.m_workspace->RedundantComm());
// Compute output
// Note: y = exp(x-max_val) / sum(exp(x-max_val))
if (!local_output.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hydrogen::gpu::LaunchKernel(fp_output_kernel<TensorDataType>,
grid_dims,
block_dims,
0,
multisync,
local_height,
local_width,
local_output.Buffer(),
local_output.LDim(),
local_workspace.LockedBuffer());
}
}
template <typename TensorDataType>
void bp_compute_impl(
softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>&
l)
{
if (l.m_mode != softmax_mode::INSTANCE) {
LBANN_ERROR("Unsupported softmax mode");
}
// Local matrices
const auto& local_output =
dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(
l.get_local_activations());
const auto& local_gradient_wrt_output =
dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(
l.get_local_prev_error_signals());
auto& local_gradient_wrt_input =
dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(
l.get_local_error_signals());
auto& local_workspace =
dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(
l.m_workspace->Matrix());
const auto& local_height = local_output.Height();
const auto& local_width = local_output.Width();
// GPU objects
auto multisync =
El::MakeMultiSync(gpu::get_sync_info(local_output),
gpu::get_sync_info(local_gradient_wrt_output),
gpu::get_sync_info(local_gradient_wrt_input),
gpu::get_sync_info(local_workspace));
// Compute dot(y,dy)
El::Zero(local_workspace);
if (!local_output.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hydrogen::gpu::LaunchKernel(
bp_dot_product_kernel<block_size, TensorDataType>,
grid_dims,
block_dims,
0,
multisync,
local_height,
local_width,
local_output.LockedBuffer(),
local_output.LDim(),
local_gradient_wrt_output.LockedBuffer(),
local_gradient_wrt_output.LDim(),
local_workspace.Buffer());
}
El::AllReduce(*l.m_workspace, l.m_workspace->RedundantComm());
// Compute gradient w.r.t. input
if (!local_output.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hydrogen::gpu::LaunchKernel(bp_kernel<block_size, TensorDataType>,
grid_dims,
block_dims,
0,
multisync,
local_height,
local_width,
local_output.LockedBuffer(),
local_output.LDim(),
local_gradient_wrt_output.LockedBuffer(),
local_gradient_wrt_output.LDim(),
local_workspace.Buffer(),
local_gradient_wrt_input.Buffer(),
local_gradient_wrt_input.LDim());
}
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void softmax_layer<TensorDataType, Layout, Device>::fp_compute()
{
fp_compute_impl(*this);
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void softmax_layer<TensorDataType, Layout, Device>::bp_compute()
{
bp_compute_impl(*this);
}
// Template instantiation
#define PROTO(T) \
template class softmax_layer<T, \
data_layout::DATA_PARALLEL, \
El::Device::GPU>; \
template class softmax_layer<T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
c2f7ee6ed2bb84374f75f25fdd4af9c286dcf32b.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <cassert>
#include <hipcub/hipcub.hpp> // NOLINT
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/inference/tensorrt/plugin/emb_eltwise_layernorm_plugin.h"
#include "paddle/fluid/operators/math/bert_encoder_functor.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
// Dynamic shape plugin requires TRT version greater than 6.0.
#if IS_TRT_VERSION_GE(6000)
template <typename T>
EmbEltwiseLayernormPluginDynamicImpl<
T>::~EmbEltwiseLayernormPluginDynamicImpl() {}
inline half fp32tofp16(float x) { return static_cast<half>(x); }
template <typename T>
void EmbEltwiseLayernormPluginDynamicImpl<T>::shareGPUData(
const EmbEltwiseLayernormPluginDynamicImplBase *anthor) {
auto *ptr =
dynamic_cast<const EmbEltwiseLayernormPluginDynamicImpl<T> *>(anthor);
if (!ptr->is_initialized_) {
return;
}
embs_gpu_ = ptr->embs_gpu_;
scale_gpu_ = ptr->scale_gpu_;
bias_gpu_ = ptr->bias_gpu_;
int input_num = embs_.size();
in_ptr_tensor_.Resize({input_num});
emb_ptr_tensor_.ShareDataWith(ptr->emb_ptr_tensor_);
}
template <typename T>
int EmbEltwiseLayernormPluginDynamicImpl<T>::initialize() {
if (is_initialized_) {
return 0;
}
embs_gpu_.resize(embs_.size());
for (int i = 0; i < embs_.size(); i++) {
if (embs_[i]) {
T *host_ptr;
auto size = emb_sizes_[i];
if (std::is_same<T, half>::value) {
host_ptr = new T[size];
std::transform(embs_[i], (embs_[i] + size), host_ptr, fp32tofp16);
} else {
host_ptr = reinterpret_cast<T *>(embs_[i]);
}
hipMalloc(&embs_gpu_[i], sizeof(T) * size);
hipMemcpy(embs_gpu_[i], host_ptr, size * sizeof(T),
hipMemcpyHostToDevice);
if (std::is_same<T, half>::value) {
delete[] host_ptr;
}
}
}
if (bias_) {
hipMalloc(&bias_gpu_, sizeof(float) * bias_size_);
hipMemcpy(bias_gpu_, bias_, bias_size_ * sizeof(float),
hipMemcpyHostToDevice);
}
if (scale_) {
hipMalloc(&scale_gpu_, sizeof(float) * scale_size_);
hipMemcpy(scale_gpu_, scale_, scale_size_ * sizeof(float),
hipMemcpyHostToDevice);
}
int input_num = embs_.size();
in_ptr_tensor_.Resize({input_num});
emb_ptr_tensor_.Resize({input_num});
hipGetDevice(&device_id_);
auto emb_ptr_gpu_d =
emb_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_));
hipMemcpy(emb_ptr_gpu_d, embs_gpu_.data(), sizeof(uintptr_t) * input_num,
hipMemcpyHostToDevice);
is_initialized_ = true;
return 0;
}
template <typename T>
void EmbEltwiseLayernormPluginDynamicImpl<T>::terminate() {
for (int i = 0; i < embs_gpu_.size(); ++i) {
if (embs_gpu_[i]) {
hipFree(embs_gpu_[i]);
embs_gpu_[i] = nullptr;
}
}
if (bias_gpu_) {
hipFree(bias_gpu_);
bias_gpu_ = nullptr;
}
if (scale_gpu_) {
hipFree(scale_gpu_);
scale_gpu_ = nullptr;
}
}
template <typename T>
int EmbEltwiseLayernormPluginDynamicImpl<T>::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs,
void *const *outputs, void *workspace, hipStream_t stream) TRT_NOEXCEPT {
auto id_dims = input_desc[0].dims;
int batch = id_dims.d[0];
int seq_len = id_dims.d[1];
int input_num = embs_.size();
hipGetDevice(&device_id_);
auto in_ptr_gpu_d =
in_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_));
auto emb_ptr_gpu_d =
emb_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_));
hipMemcpyAsync(in_ptr_gpu_d, reinterpret_cast<const void *>(inputs),
sizeof(uintptr_t) * input_num, hipMemcpyHostToDevice,
stream);
auto out_type = output_desc[0].type;
if (std::is_same<T, float>::value) {
PADDLE_ENFORCE_EQ(
out_type == nvinfer1::DataType::kFLOAT, true,
platform::errors::InvalidArgument(
"The EmbEltwiseLayernorm Plugin only support fp32 input."));
} else if (std::is_same<T, half>::value) {
PADDLE_ENFORCE_EQ(
out_type == nvinfer1::DataType::kHALF, true,
platform::errors::InvalidArgument(
"The EmbEltwiseLayernorm Plugin only support fp16 input."));
} else {
PADDLE_THROW(platform::errors::Fatal(
"Unsupport data type, the out type of EmbEltwiseLayernorm should be "
"float or half."));
}
auto *output_d = reinterpret_cast<T *>(outputs[0]);
operators::math::EmbEltwiseLayerNormFunctor<T> emb_eltwise_layernorm_func;
emb_eltwise_layernorm_func(batch, seq_len, hidden_size_, in_ptr_gpu_d,
scale_gpu_, bias_gpu_, emb_ptr_gpu_d, output_d,
eps_, input_num, stream);
return hipGetLastError() != hipSuccess;
}
template class EmbEltwiseLayernormPluginDynamicImpl<float>;
#ifdef TRT_PLUGIN_FP16_AVALIABLE
template class EmbEltwiseLayernormPluginDynamicImpl<half>;
#endif
int EmbEltwiseLayernormPluginDynamic::initialize() TRT_NOEXCEPT {
impl_->initialize();
return 0;
}
void EmbEltwiseLayernormPluginDynamic::terminate() TRT_NOEXCEPT {
impl_->terminate();
}
nvinfer1::DimsExprs EmbEltwiseLayernormPluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT { // NOLINT
PADDLE_ENFORCE_EQ(output_index, 0,
platform::errors::InvalidArgument(
"There is only one output of the EmbEltwiseLayernorm, "
"so the index should be zero,"
"but it's (%d)",
output_index));
nvinfer1::DimsExprs ret;
ret.nbDims = 3;
ret.d[0] = inputs[0].d[0];
ret.d[1] = inputs[0].d[1];
ret.d[2] = expr_builder.constant(hidden_size_);
return ret;
}
bool EmbEltwiseLayernormPluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out, platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_EQ(nb_outputs, 1,
platform::errors::InvalidArgument(
"The EmbEltwiseLayerNorm's output should be one"
"but it's (%d) outputs.",
nb_outputs));
PADDLE_ENFORCE_EQ(nb_outputs, 1,
platform::errors::InvalidArgument(
"The EmbEltwiseLayerNorm's output should be one"
"but it's (%d) outputs.",
nb_outputs));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
int all_nums = nb_inputs + nb_outputs;
const nvinfer1::PluginTensorDesc &desc = in_out[pos];
if (desc.format != nvinfer1::TensorFormat::kLINEAR) {
return false;
}
if (pos == 0) {
return desc.type == nvinfer1::DataType::kINT32;
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
if (pos < all_nums - 1) {
return desc.type == nvinfer1::DataType::kINT32 &&
desc.dims.d[0] == prev.dims.d[0] && desc.dims.d[1] == prev.dims.d[1];
}
if (pos == all_nums - 1) {
if (with_fp16_ == false) {
return desc.type == nvinfer1::DataType::kFLOAT;
} else {
return desc.type == nvinfer1::DataType::kHALF;
}
}
return false;
}
nvinfer1::DataType EmbEltwiseLayernormPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(
index, 0, platform::errors::InvalidArgument(
"The EmbEltwiseLayernorm Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
if (with_fp16_)
return nvinfer1::DataType::kHALF;
else
return nvinfer1::DataType::kFLOAT;
}
int EmbEltwiseLayernormPluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs,
void *const *outputs, void *workspace, hipStream_t stream) TRT_NOEXCEPT {
impl_->enqueue(input_desc, output_desc, inputs, outputs, workspace, stream);
return hipGetLastError() != hipSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| c2f7ee6ed2bb84374f75f25fdd4af9c286dcf32b.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <cassert>
#include <cub/cub.cuh> // NOLINT
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/inference/tensorrt/plugin/emb_eltwise_layernorm_plugin.h"
#include "paddle/fluid/operators/math/bert_encoder_functor.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
// Dynamic shape plugin requires TRT version greater than 6.0.
#if IS_TRT_VERSION_GE(6000)
template <typename T>
EmbEltwiseLayernormPluginDynamicImpl<
T>::~EmbEltwiseLayernormPluginDynamicImpl() {}
inline half fp32tofp16(float x) { return static_cast<half>(x); }
template <typename T>
void EmbEltwiseLayernormPluginDynamicImpl<T>::shareGPUData(
const EmbEltwiseLayernormPluginDynamicImplBase *anthor) {
auto *ptr =
dynamic_cast<const EmbEltwiseLayernormPluginDynamicImpl<T> *>(anthor);
if (!ptr->is_initialized_) {
return;
}
embs_gpu_ = ptr->embs_gpu_;
scale_gpu_ = ptr->scale_gpu_;
bias_gpu_ = ptr->bias_gpu_;
int input_num = embs_.size();
in_ptr_tensor_.Resize({input_num});
emb_ptr_tensor_.ShareDataWith(ptr->emb_ptr_tensor_);
}
template <typename T>
int EmbEltwiseLayernormPluginDynamicImpl<T>::initialize() {
if (is_initialized_) {
return 0;
}
embs_gpu_.resize(embs_.size());
for (int i = 0; i < embs_.size(); i++) {
if (embs_[i]) {
T *host_ptr;
auto size = emb_sizes_[i];
if (std::is_same<T, half>::value) {
host_ptr = new T[size];
std::transform(embs_[i], (embs_[i] + size), host_ptr, fp32tofp16);
} else {
host_ptr = reinterpret_cast<T *>(embs_[i]);
}
cudaMalloc(&embs_gpu_[i], sizeof(T) * size);
cudaMemcpy(embs_gpu_[i], host_ptr, size * sizeof(T),
cudaMemcpyHostToDevice);
if (std::is_same<T, half>::value) {
delete[] host_ptr;
}
}
}
if (bias_) {
cudaMalloc(&bias_gpu_, sizeof(float) * bias_size_);
cudaMemcpy(bias_gpu_, bias_, bias_size_ * sizeof(float),
cudaMemcpyHostToDevice);
}
if (scale_) {
cudaMalloc(&scale_gpu_, sizeof(float) * scale_size_);
cudaMemcpy(scale_gpu_, scale_, scale_size_ * sizeof(float),
cudaMemcpyHostToDevice);
}
int input_num = embs_.size();
in_ptr_tensor_.Resize({input_num});
emb_ptr_tensor_.Resize({input_num});
cudaGetDevice(&device_id_);
auto emb_ptr_gpu_d =
emb_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_));
cudaMemcpy(emb_ptr_gpu_d, embs_gpu_.data(), sizeof(uintptr_t) * input_num,
cudaMemcpyHostToDevice);
is_initialized_ = true;
return 0;
}
template <typename T>
void EmbEltwiseLayernormPluginDynamicImpl<T>::terminate() {
for (int i = 0; i < embs_gpu_.size(); ++i) {
if (embs_gpu_[i]) {
cudaFree(embs_gpu_[i]);
embs_gpu_[i] = nullptr;
}
}
if (bias_gpu_) {
cudaFree(bias_gpu_);
bias_gpu_ = nullptr;
}
if (scale_gpu_) {
cudaFree(scale_gpu_);
scale_gpu_ = nullptr;
}
}
template <typename T>
int EmbEltwiseLayernormPluginDynamicImpl<T>::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs,
void *const *outputs, void *workspace, cudaStream_t stream) TRT_NOEXCEPT {
auto id_dims = input_desc[0].dims;
int batch = id_dims.d[0];
int seq_len = id_dims.d[1];
int input_num = embs_.size();
cudaGetDevice(&device_id_);
auto in_ptr_gpu_d =
in_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_));
auto emb_ptr_gpu_d =
emb_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_));
cudaMemcpyAsync(in_ptr_gpu_d, reinterpret_cast<const void *>(inputs),
sizeof(uintptr_t) * input_num, cudaMemcpyHostToDevice,
stream);
auto out_type = output_desc[0].type;
if (std::is_same<T, float>::value) {
PADDLE_ENFORCE_EQ(
out_type == nvinfer1::DataType::kFLOAT, true,
platform::errors::InvalidArgument(
"The EmbEltwiseLayernorm Plugin only support fp32 input."));
} else if (std::is_same<T, half>::value) {
PADDLE_ENFORCE_EQ(
out_type == nvinfer1::DataType::kHALF, true,
platform::errors::InvalidArgument(
"The EmbEltwiseLayernorm Plugin only support fp16 input."));
} else {
PADDLE_THROW(platform::errors::Fatal(
"Unsupport data type, the out type of EmbEltwiseLayernorm should be "
"float or half."));
}
auto *output_d = reinterpret_cast<T *>(outputs[0]);
operators::math::EmbEltwiseLayerNormFunctor<T> emb_eltwise_layernorm_func;
emb_eltwise_layernorm_func(batch, seq_len, hidden_size_, in_ptr_gpu_d,
scale_gpu_, bias_gpu_, emb_ptr_gpu_d, output_d,
eps_, input_num, stream);
return cudaGetLastError() != cudaSuccess;
}
template class EmbEltwiseLayernormPluginDynamicImpl<float>;
#ifdef TRT_PLUGIN_FP16_AVALIABLE
template class EmbEltwiseLayernormPluginDynamicImpl<half>;
#endif
int EmbEltwiseLayernormPluginDynamic::initialize() TRT_NOEXCEPT {
impl_->initialize();
return 0;
}
void EmbEltwiseLayernormPluginDynamic::terminate() TRT_NOEXCEPT {
impl_->terminate();
}
nvinfer1::DimsExprs EmbEltwiseLayernormPluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT { // NOLINT
PADDLE_ENFORCE_EQ(output_index, 0,
platform::errors::InvalidArgument(
"There is only one output of the EmbEltwiseLayernorm, "
"so the index should be zero,"
"but it's (%d)",
output_index));
nvinfer1::DimsExprs ret;
ret.nbDims = 3;
ret.d[0] = inputs[0].d[0];
ret.d[1] = inputs[0].d[1];
ret.d[2] = expr_builder.constant(hidden_size_);
return ret;
}
bool EmbEltwiseLayernormPluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out, platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_EQ(nb_outputs, 1,
platform::errors::InvalidArgument(
"The EmbEltwiseLayerNorm's output should be one"
"but it's (%d) outputs.",
nb_outputs));
PADDLE_ENFORCE_EQ(nb_outputs, 1,
platform::errors::InvalidArgument(
"The EmbEltwiseLayerNorm's output should be one"
"but it's (%d) outputs.",
nb_outputs));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
int all_nums = nb_inputs + nb_outputs;
const nvinfer1::PluginTensorDesc &desc = in_out[pos];
if (desc.format != nvinfer1::TensorFormat::kLINEAR) {
return false;
}
if (pos == 0) {
return desc.type == nvinfer1::DataType::kINT32;
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
if (pos < all_nums - 1) {
return desc.type == nvinfer1::DataType::kINT32 &&
desc.dims.d[0] == prev.dims.d[0] && desc.dims.d[1] == prev.dims.d[1];
}
if (pos == all_nums - 1) {
if (with_fp16_ == false) {
return desc.type == nvinfer1::DataType::kFLOAT;
} else {
return desc.type == nvinfer1::DataType::kHALF;
}
}
return false;
}
nvinfer1::DataType EmbEltwiseLayernormPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(
index, 0, platform::errors::InvalidArgument(
"The EmbEltwiseLayernorm Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
if (with_fp16_)
return nvinfer1::DataType::kHALF;
else
return nvinfer1::DataType::kFLOAT;
}
int EmbEltwiseLayernormPluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs,
void *const *outputs, void *workspace, cudaStream_t stream) TRT_NOEXCEPT {
impl_->enqueue(input_desc, output_desc, inputs, outputs, workspace, stream);
return cudaGetLastError() != cudaSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
f1a0b90451e3ffa14ee342f1ba2d2caf050fdff0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
#include "common.h"
#include "cuda_common.cuh"
#define BLOCK_SIZE 64
__global__ void scan_for_compact(int * input, int * output_index_array,int* auxiliry_array, int input_size)
{
int idx = threadIdx.x;
int gid = blockDim.x*blockIdx.x + threadIdx.x;
__shared__ int local_input[BLOCK_SIZE];
if (input[gid] >0)
{
local_input[idx] = 1;
}
else
{
local_input[idx] = 0;
}
__syncthreads();
// reduction phase
// this can be optimized check wether global memory access for "d" or calculation here is better
int d = ceilf(log2f(BLOCK_SIZE));
int denominator = 0;
int offset = 0;
//reduction should happen per block
for (int i = 1; i <= d; i++)
{
denominator = 1 << i;
offset = 1 << (i - 1);
if (((idx + 1) % denominator) == 0)
{
local_input[idx] += local_input[idx - offset];
}
__syncthreads();
}
////end of reduction phase
//// start of down-sweep phase
if (idx == (BLOCK_SIZE - 1))
{
local_input[idx] = 0;
}
int temp = 0;
int sawp_aux = 0;
for (int i = d; i > 0; i--)
{
temp = 1 << i;
if ((idx != 0) && (idx + 1) % temp == 0)
{
sawp_aux = local_input[idx];
local_input[idx] += local_input[idx - (temp / 2)];
local_input[idx - (temp / 2)] = sawp_aux;
}
__syncthreads();
}
//can this be add to if condition at the begining of the down sweep phase
if (idx == (BLOCK_SIZE - 1))
{
auxiliry_array[blockIdx.x] = local_input[idx];
//printf("%d \n", auxiliry_array[blockIdx.x]);
}
output_index_array[gid] = local_input[idx];
}
__global__ void scan_summation_for_compact(int * output_index_array, int * auxiliry_array, int input_size)
{
int idx = threadIdx.x;
int gid = blockDim.x*blockIdx.x + threadIdx.x;
__shared__ int local_input[BLOCK_SIZE];
local_input[idx] = output_index_array[gid];
__syncthreads();
for (int i = 0; i < blockIdx.x; i++)
{
local_input[idx] += auxiliry_array[i];
}
output_index_array[gid] = local_input[idx];
//printf("gid : %d, value : %d \n", gid, output_index_array[gid]);
}
__global__ void compact_1D_array( int * input, int * output, int * output_index_array, int array_size)
{
int gid = blockDim.x*blockIdx.x + threadIdx.x;
//TO DO handle when gid ==0
//this is very unefficient in memory management
if (gid > 0 && gid < array_size)
{
if (output_index_array[gid] != output_index_array[gid - 1])
{
//printf("gid : %d , index :%d , value : %d, prev_value : %d \n",gid, output_index_array[gid], input[gid], input[gid-1]);
output[output_index_array[gid]] = input[gid-1];
}
}
}
void run_compact()
{
int input_size = 1 << 7;
int input_byte_size = input_size * sizeof(int);
dim3 block(BLOCK_SIZE);
dim3 grid(input_size / block.x);
int aux_byte_size = sizeof(int)*grid.x;
int* h_input, *h_ref, *h_aux_ref, *h_output;
h_input = (int*)malloc(input_byte_size);
h_ref = (int*)malloc(input_byte_size);
h_aux_ref = (int*)malloc(aux_byte_size);
for (int i = 0; i < input_size; i++)
{
if (i % 5 == 0)
{
h_input[i] = i;
}
else
{
h_input[i] = 0;
}
}
int * d_input, *d_output_index_array, *d_aux, *d_sum_input, *d_sum_aux, *d_output;
gpuErrchk(hipMalloc((int**)&d_input, input_byte_size));
gpuErrchk(hipMalloc((int**)&d_output_index_array, input_byte_size));
gpuErrchk(hipMalloc((int**)&d_aux, aux_byte_size));
gpuErrchk(hipMalloc((int**)&d_sum_input, input_byte_size));
gpuErrchk(hipMalloc((int**)&d_sum_aux, aux_byte_size));
gpuErrchk(hipMemcpy(d_input, h_input, input_byte_size, hipMemcpyHostToDevice));
scan_for_compact << <grid, block >> > (d_input, d_output_index_array,d_aux, input_size);
gpuErrchk(hipDeviceSynchronize());
//gpuErrchk(hipMemcpy(d_sum_input, d_output_index_array, input_byte_size, hipMemcpyDeviceToDevice));
gpuErrchk(hipMemcpy(d_sum_aux, d_aux, aux_byte_size, hipMemcpyDeviceToDevice));
scan_summation_for_compact << <grid, block >> > (d_output_index_array, d_sum_aux, input_size);
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipMemcpy(h_ref, d_output_index_array, input_byte_size, hipMemcpyDeviceToHost));
int compact_output_size = h_ref[input_size - 1];
int compact_output_byte_size = sizeof(float)*compact_output_size;
h_output = (int*)malloc(compact_output_byte_size);
gpuErrchk(hipMalloc((int**)&d_output, compact_output_byte_size));
compact_1D_array << <grid, block >> > (d_input, d_output, d_output_index_array, input_size);
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipMemcpy(h_output, d_output, compact_output_byte_size, hipMemcpyDeviceToHost));
for (int i=0;i<compact_output_size;i++)
{
printf("%d \n",h_output[i]);
}
hipFree(d_sum_input);
hipFree(d_sum_aux);
hipFree(d_input);
hipFree(d_aux);
free(h_input);
free(h_aux_ref);
free(h_ref);
}
//int main()
//{
// run_compact();
// system("pause");
// return 0;
//} | f1a0b90451e3ffa14ee342f1ba2d2caf050fdff0.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
#include <iostream>
#include "common.h"
#include "cuda_common.cuh"
#define BLOCK_SIZE 64
__global__ void scan_for_compact(int * input, int * output_index_array,int* auxiliry_array, int input_size)
{
int idx = threadIdx.x;
int gid = blockDim.x*blockIdx.x + threadIdx.x;
__shared__ int local_input[BLOCK_SIZE];
if (input[gid] >0)
{
local_input[idx] = 1;
}
else
{
local_input[idx] = 0;
}
__syncthreads();
// reduction phase
// this can be optimized check wether global memory access for "d" or calculation here is better
int d = ceilf(log2f(BLOCK_SIZE));
int denominator = 0;
int offset = 0;
//reduction should happen per block
for (int i = 1; i <= d; i++)
{
denominator = 1 << i;
offset = 1 << (i - 1);
if (((idx + 1) % denominator) == 0)
{
local_input[idx] += local_input[idx - offset];
}
__syncthreads();
}
////end of reduction phase
//// start of down-sweep phase
if (idx == (BLOCK_SIZE - 1))
{
local_input[idx] = 0;
}
int temp = 0;
int sawp_aux = 0;
for (int i = d; i > 0; i--)
{
temp = 1 << i;
if ((idx != 0) && (idx + 1) % temp == 0)
{
sawp_aux = local_input[idx];
local_input[idx] += local_input[idx - (temp / 2)];
local_input[idx - (temp / 2)] = sawp_aux;
}
__syncthreads();
}
//can this be add to if condition at the begining of the down sweep phase
if (idx == (BLOCK_SIZE - 1))
{
auxiliry_array[blockIdx.x] = local_input[idx];
//printf("%d \n", auxiliry_array[blockIdx.x]);
}
output_index_array[gid] = local_input[idx];
}
__global__ void scan_summation_for_compact(int * output_index_array, int * auxiliry_array, int input_size)
{
int idx = threadIdx.x;
int gid = blockDim.x*blockIdx.x + threadIdx.x;
__shared__ int local_input[BLOCK_SIZE];
local_input[idx] = output_index_array[gid];
__syncthreads();
for (int i = 0; i < blockIdx.x; i++)
{
local_input[idx] += auxiliry_array[i];
}
output_index_array[gid] = local_input[idx];
//printf("gid : %d, value : %d \n", gid, output_index_array[gid]);
}
__global__ void compact_1D_array( int * input, int * output, int * output_index_array, int array_size)
{
int gid = blockDim.x*blockIdx.x + threadIdx.x;
//TO DO handle when gid ==0
//this is very unefficient in memory management
if (gid > 0 && gid < array_size)
{
if (output_index_array[gid] != output_index_array[gid - 1])
{
//printf("gid : %d , index :%d , value : %d, prev_value : %d \n",gid, output_index_array[gid], input[gid], input[gid-1]);
output[output_index_array[gid]] = input[gid-1];
}
}
}
void run_compact()
{
int input_size = 1 << 7;
int input_byte_size = input_size * sizeof(int);
dim3 block(BLOCK_SIZE);
dim3 grid(input_size / block.x);
int aux_byte_size = sizeof(int)*grid.x;
int* h_input, *h_ref, *h_aux_ref, *h_output;
h_input = (int*)malloc(input_byte_size);
h_ref = (int*)malloc(input_byte_size);
h_aux_ref = (int*)malloc(aux_byte_size);
for (int i = 0; i < input_size; i++)
{
if (i % 5 == 0)
{
h_input[i] = i;
}
else
{
h_input[i] = 0;
}
}
int * d_input, *d_output_index_array, *d_aux, *d_sum_input, *d_sum_aux, *d_output;
gpuErrchk(cudaMalloc((int**)&d_input, input_byte_size));
gpuErrchk(cudaMalloc((int**)&d_output_index_array, input_byte_size));
gpuErrchk(cudaMalloc((int**)&d_aux, aux_byte_size));
gpuErrchk(cudaMalloc((int**)&d_sum_input, input_byte_size));
gpuErrchk(cudaMalloc((int**)&d_sum_aux, aux_byte_size));
gpuErrchk(cudaMemcpy(d_input, h_input, input_byte_size, cudaMemcpyHostToDevice));
scan_for_compact << <grid, block >> > (d_input, d_output_index_array,d_aux, input_size);
gpuErrchk(cudaDeviceSynchronize());
//gpuErrchk(cudaMemcpy(d_sum_input, d_output_index_array, input_byte_size, cudaMemcpyDeviceToDevice));
gpuErrchk(cudaMemcpy(d_sum_aux, d_aux, aux_byte_size, cudaMemcpyDeviceToDevice));
scan_summation_for_compact << <grid, block >> > (d_output_index_array, d_sum_aux, input_size);
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpy(h_ref, d_output_index_array, input_byte_size, cudaMemcpyDeviceToHost));
int compact_output_size = h_ref[input_size - 1];
int compact_output_byte_size = sizeof(float)*compact_output_size;
h_output = (int*)malloc(compact_output_byte_size);
gpuErrchk(cudaMalloc((int**)&d_output, compact_output_byte_size));
compact_1D_array << <grid, block >> > (d_input, d_output, d_output_index_array, input_size);
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpy(h_output, d_output, compact_output_byte_size, cudaMemcpyDeviceToHost));
for (int i=0;i<compact_output_size;i++)
{
printf("%d \n",h_output[i]);
}
cudaFree(d_sum_input);
cudaFree(d_sum_aux);
cudaFree(d_input);
cudaFree(d_aux);
free(h_input);
free(h_aux_ref);
free(h_ref);
}
//int main()
//{
// run_compact();
// system("pause");
// return 0;
//} |
e08b2f001a3eca57fb7051f827cb3a4ee4dfc299.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
using namespace std;
__global__ void Add(int *a, int* b, int* c)
{
printf("a=%d\n", *a);
printf("b=%d\n", *b);
*c = (*a) + (*b);
}
void FirstTask()
{
int a, b, c; // on host
cin >> a >> b;
//cout << "b = " << b << endl;
//cout << "a = " << a << endl;
int *devA, *devB, *devC;
//memory on dev
hipMalloc((void**)&devA, sizeof(int));
hipMalloc((void**)&devB, sizeof(int));
hipMalloc((void**)&devC, sizeof(int));
//copy host to device
hipMemcpy(devA, &a, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(devB, &b, sizeof(int), hipMemcpyHostToDevice);
Add << <2, 2 >> > (devA, devB, devC);
//copy of the result from device to host
hipMemcpy(&c, devC, sizeof(int), hipMemcpyDeviceToHost);
printf("%d + %d = %d\n", a, b, c);
}
void SecondTask()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
printf("Device name : %s\n", deviceProp.name);
printf("Total global memory : %d MB\n", deviceProp.totalGlobalMem / 1024 / 1024);
printf("Memory Bus Width: %d\n", deviceProp.memoryBusWidth);
printf("Shared memory per block : %d\n", deviceProp.sharedMemPerBlock);
printf("Registers per block : %d\n", deviceProp.regsPerBlock);
printf("Warp size : %d\n", deviceProp.warpSize);
printf("Memory pitch : %d\n", deviceProp.memPitch);
printf("Max threads per block : %d\n", deviceProp.maxThreadsPerBlock);
printf("Max threads dimensions : x = %d, y = %d, z =%d\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
printf("Max grid size: x = %d, y = %d, z = %d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
printf("Clock rate: %d\n", deviceProp.clockRate);
printf("Total constant memory: %d\n", deviceProp.totalConstMem);
printf("Compute capability: %d.%d\n", deviceProp.major, deviceProp.minor);
printf("Texture alignment: %d\n", deviceProp.textureAlignment);
printf("Device overlap: %d\n", deviceProp.deviceOverlap);
printf("Multiprocessor count: %d\n", deviceProp.multiProcessorCount);
printf("Kernel execution timeout enabled: %s\n", deviceProp.kernelExecTimeoutEnabled ? "true" : "false");
}
int main()
{
FirstTask();
SecondTask();
hipDeviceSynchronize();
system("Pause");
return 0;
} | e08b2f001a3eca57fb7051f827cb3a4ee4dfc299.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
using namespace std;
__global__ void Add(int *a, int* b, int* c)
{
printf("a=%d\n", *a);
printf("b=%d\n", *b);
*c = (*a) + (*b);
}
void FirstTask()
{
int a, b, c; // on host
cin >> a >> b;
//cout << "b = " << b << endl;
//cout << "a = " << a << endl;
int *devA, *devB, *devC;
//memory on dev
cudaMalloc((void**)&devA, sizeof(int));
cudaMalloc((void**)&devB, sizeof(int));
cudaMalloc((void**)&devC, sizeof(int));
//copy host to device
cudaMemcpy(devA, &a, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(devB, &b, sizeof(int), cudaMemcpyHostToDevice);
Add << <2, 2 >> > (devA, devB, devC);
//copy of the result from device to host
cudaMemcpy(&c, devC, sizeof(int), cudaMemcpyDeviceToHost);
printf("%d + %d = %d\n", a, b, c);
}
void SecondTask()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
printf("Device name : %s\n", deviceProp.name);
printf("Total global memory : %d MB\n", deviceProp.totalGlobalMem / 1024 / 1024);
printf("Memory Bus Width: %d\n", deviceProp.memoryBusWidth);
printf("Shared memory per block : %d\n", deviceProp.sharedMemPerBlock);
printf("Registers per block : %d\n", deviceProp.regsPerBlock);
printf("Warp size : %d\n", deviceProp.warpSize);
printf("Memory pitch : %d\n", deviceProp.memPitch);
printf("Max threads per block : %d\n", deviceProp.maxThreadsPerBlock);
printf("Max threads dimensions : x = %d, y = %d, z =%d\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
printf("Max grid size: x = %d, y = %d, z = %d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
printf("Clock rate: %d\n", deviceProp.clockRate);
printf("Total constant memory: %d\n", deviceProp.totalConstMem);
printf("Compute capability: %d.%d\n", deviceProp.major, deviceProp.minor);
printf("Texture alignment: %d\n", deviceProp.textureAlignment);
printf("Device overlap: %d\n", deviceProp.deviceOverlap);
printf("Multiprocessor count: %d\n", deviceProp.multiProcessorCount);
printf("Kernel execution timeout enabled: %s\n", deviceProp.kernelExecTimeoutEnabled ? "true" : "false");
}
int main()
{
FirstTask();
SecondTask();
cudaDeviceSynchronize();
system("Pause");
return 0;
} |
8fdbcd7ddd8ba81383a61216d2336a6f2221a2a0.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <array>
#include "paddle/fluid/framework/conv_search_cache.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/conv_cudnn_op_cache.h"
#include "paddle/fluid/operators/conv_op.h"
#include "paddle/fluid/operators/math/padding.h"
#include "paddle/fluid/platform/cudnn_helper.h"
DECLARE_int64(cudnn_exhaustive_search_times);
namespace paddle {
namespace operators {
#if CUDNN_VERSION >= 7100
using Tensor = framework::Tensor;
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using ScopedFilterDescriptor = platform::ScopedFilterDescriptor;
using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor;
using ScopedActivationDescriptor = platform::ScopedActivationDescriptor;
using DataLayout = platform::DataLayout;
using framework::AlgorithmsCache;
using framework::ConvSearchCache;
template <typename T>
using ScalingParamType = typename platform::CudnnDataType<T>::ScalingParamType;
template <typename T>
class CUDNNConvFusionOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto* input = ctx.Input<Tensor>("Input");
auto* filter = ctx.Input<Tensor>("Filter");
auto* bias = ctx.Input<Tensor>("Bias");
auto* residual = ctx.Input<Tensor>("ResidualData");
auto* output = ctx.Output<Tensor>("Output");
output->mutable_data<T>(ctx.GetPlace());
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
const std::string activation = ctx.Attr<std::string>("activation");
int groups = ctx.Attr<int>("groups");
int64_t user_workspace_size =
static_cast<size_t>(ctx.Attr<int>("workspace_size_MB"));
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
const T* filter_data = filter->data<T>();
const T* bias_data = bias->data<T>();
const std::string padding_algorithm =
ctx.Attr<std::string>("padding_algorithm");
Tensor transformed_input_channel(input->type());
Tensor transformed_output(output->type());
transformed_input_channel = *input;
transformed_output = *output;
T* output_data = transformed_output.data<T>();
const T* residual_data = residual ? residual->data<T>() : output_data;
// update padding and dilation
auto in_dims = transformed_input_channel.dims();
auto filter_dims = filter->dims();
framework::DDim in_data_dims =
framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_input;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_input_channel.dims()[0];
new_input_shape_vec[1] = transformed_input_channel.dims()[1];
std::vector<int> input_pad(transformed_input_channel.dims().size() * 2,
0);
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_input_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
const int rank = transformed_input_channel.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
default:
PADDLE_THROW(platform::errors::PermissionDenied(
"Operator Conv2DFusion expects Input to be a 4-D or 5-D Tensor. "
"But recieved the actual dimension = %d, shape = [%s].",
rank, transformed_input_channel.dims()));
}
} else {
transformed_input = transformed_input_channel;
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = transformed_input.data<T>();
// ------------------- cudnn descriptors ---------------------
ScopedTensorDescriptor input_desc;
ScopedTensorDescriptor output_desc;
ScopedFilterDescriptor filter_desc;
ScopedTensorDescriptor bias_desc;
ScopedConvolutionDescriptor conv_desc;
ScopedActivationDescriptor act_desc;
DataLayout layout = DataLayout::kNCHW;
if (input->dims().size() == 5) {
layout = DataLayout::kNCDHW;
}
cudnnConvolutionDescriptor_t cudnn_conv_desc =
conv_desc.descriptor<T>(padding_common, strides, dilations);
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnSetConvolutionGroupCount(cudnn_conv_desc,
groups));
cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
layout, framework::vectorize<int>(transformed_input.dims()));
cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
layout, framework::vectorize<int>(transformed_output.dims()));
cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor<T>(
layout, framework::vectorize<int>(filter->dims()));
// Now only support NCHW
std::vector<int> bias_dim = {
1, static_cast<int>(transformed_output.dims()[1]), 1, 1};
cudnnTensorDescriptor_t cudnn_bias_desc =
bias_desc.descriptor<T>(layout, bias_dim);
cudnnActivationDescriptor_t cudnn_act_desc =
act_desc.descriptor<T>(activation);
// ------------------- cudnn conv workspace ---------------------
size_t workspace_size_in_bytes; // final workspace to allocate.
size_t workspace_size_limit = 0;
if (FLAGS_conv_workspace_size_limit > 0 || user_workspace_size > 0) {
int64_t max_user_size =
::min(static_cast<int64_t>(FLAGS_conv_workspace_size_limit),
user_workspace_size);
workspace_size_limit = max_user_size * 1024 * 1024;
}
// ------------------- cudnn conv algorithm ---------------------
cudnnConvolutionFwdAlgo_t algo;
auto handle = dev_ctx.cudnn_handle();
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetConvolutionMathType(
cudnn_conv_desc, CUDNN_DEFAULT_MATH));
auto x_dims = framework::vectorize(transformed_input.dims());
auto f_dims = framework::vectorize(filter->dims());
if (!exhaustive_search) {
#if CUDNN_VERSION >= 8000
int perf_count;
int best_algo_idx = 0;
size_t tmp_size = 0;
std::unique_ptr<cudnnConvolutionFwdAlgoPerf_t[]> perf_results(
new cudnnConvolutionFwdAlgoPerf_t[kNUM_CUDNN_FWD_ALGS]);
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetConvolutionForwardAlgorithm_v7(
handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc,
cudnn_output_desc, kNUM_CUDNN_FWD_ALGS, &perf_count,
perf_results.get()));
algo = (perf_results.get())[best_algo_idx].algo;
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetConvolutionForwardWorkspaceSize(
handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc,
cudnn_output_desc, algo, &workspace_size_in_bytes));
if (workspace_size_in_bytes > workspace_size_limit)
workspace_size_limit = workspace_size_in_bytes;
#else
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetConvolutionForwardAlgorithm(
handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc,
cudnn_output_desc, CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
workspace_size_limit, &algo));
VLOG(3) << "cuDNN forward algo " << algo;
#endif
} else {
std::function<cudnnConvolutionFwdAlgo_t()> search_func =
[&]() -> cudnnConvolutionFwdAlgo_t {
int returned_algo_count;
std::array<cudnnConvolutionFwdAlgoPerf_t, kNUM_CUDNN_FWD_ALGS>
fwd_perf_stat;
auto cudnn_find_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnFindConvolutionForwardAlgorithmEx(
handle, cudnn_input_desc, input_data, cudnn_filter_desc,
filter_data, cudnn_conv_desc, cudnn_output_desc, output_data,
kNUM_CUDNN_FWD_ALGS, &returned_algo_count,
fwd_perf_stat.data(), cudnn_workspace, workspace_size_limit));
};
workspace_handle.RunFuncSync(cudnn_find_func, workspace_size_limit);
VLOG(3) << "Perf result: (algo: stat, time, memory)";
for (int i = 0; i < returned_algo_count; ++i) {
const auto& stat = fwd_perf_stat[i];
VLOG(3) << stat.algo << ": " << stat.status << " " << stat.time << " "
<< stat.memory;
}
return fwd_perf_stat[0].algo;
};
AlgorithmsCache<cudnnConvolutionFwdAlgo_t>& algo_cache =
*(framework::ConvSearchCache::Instance().GetConvFusion());
int search_times = ctx.Attr<int>("search_times");
search_times = ::max(
static_cast<int>(FLAGS_cudnn_exhaustive_search_times), search_times);
// TODO(dangqingqing): Unify this if-else.
if (search_times > 0) {
// The searched algo will be cached by `search_times` times for
// different input dimension. For other dimensions, select the algo
// of closest area.
algo = algo_cache.GetAlgorithm(x_dims[2] * x_dims[3], search_times, 0,
search_func);
} else {
auto dtype = platform::CudnnDataType<T>::type;
algo = algo_cache.GetAlgorithm(x_dims, f_dims, strides, paddings,
dilations, 0, dtype, search_func);
}
VLOG(3) << "choose algo " << algo;
}
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetConvolutionForwardWorkspaceSize(
handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc,
cudnn_output_desc, algo, &workspace_size_in_bytes));
PADDLE_ENFORCE_LE(
workspace_size_in_bytes, workspace_size_limit,
platform::errors::InvalidArgument(
"The actual workspace size to be allocated for cuDNN is expected "
"to be less than the limit. But recieved: the actual workspace "
"size = %d, limit = %d.",
workspace_size_in_bytes, workspace_size_limit));
if ((activation == "identity") && (!residual)) {
// Only the CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM algo is
// enabled with CUDNN_ACTIVATION_IDENTITY in cuDNN lib.
// But test in some case, the speed is slower, change to use
// cudnnConvolutionForward and cudnnAddTensor
// ------------- cudnn conv forward and bias add ---------------------
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnConvolutionForward(
handle, &alpha, cudnn_input_desc, input_data, cudnn_filter_desc,
filter_data, cudnn_conv_desc, algo, cudnn_workspace,
workspace_size_in_bytes, &beta, cudnn_output_desc, output_data));
};
workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes);
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnAddTensor(
handle, &alpha, cudnn_bias_desc, bias_data, &alpha, cudnn_output_desc,
output_data));
} else {
if (activation == "identity") {
algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
}
// ------------------- cudnn conv+bias+act forward --------------------
ScalingParamType<T> alpha1 = 1.0f;
ScalingParamType<T> alpha2 = residual ? 1.0f : 0.0f;
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBiasActivationForward(
handle, &alpha1, cudnn_input_desc, input_data,
cudnn_filter_desc, filter_data, cudnn_conv_desc, algo,
cudnn_workspace, workspace_size_in_bytes, &alpha2,
cudnn_output_desc, residual_data, cudnn_bias_desc, bias_data,
cudnn_act_desc, cudnn_output_desc, output_data));
};
workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes);
}
std::vector<int> channels = ctx.Attr<std::vector<int>>("split_channels");
if (channels.size()) {
auto outs = ctx.MultiOutput<framework::Tensor>("Outputs");
if (x_dims[0] == 1) {
// share data with Output
framework::Tensor t;
t.ShareDataWith(*output);
auto y_dims = output->dims();
t.Resize({y_dims[1], y_dims[2], y_dims[3]});
int s = 0;
for (size_t i = 0; i < channels.size(); ++i) {
int e = s + channels[i];
outs[i]->ShareDataWith(t.Slice(s, e));
outs[i]->Resize({x_dims[0], channels[i], y_dims[2], y_dims[3]});
s = e;
}
} else {
// TODO(qingiqng): do copy when batch size large than 1
PADDLE_THROW(platform::errors::Unimplemented(
"Input with batch size greater than 1 is unsupported. The recieved "
"batch size is %d, Input's shape is [%s].",
x_dims[0], framework::make_ddim(x_dims)));
}
}
}
};
#endif
} // namespace operators
} // namespace paddle
#if CUDNN_VERSION >= 7100
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(conv2d_fusion, ops::CUDNNConvFusionOpKernel<float>,
ops::CUDNNConvFusionOpKernel<double>);
#endif
| 8fdbcd7ddd8ba81383a61216d2336a6f2221a2a0.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <array>
#include "paddle/fluid/framework/conv_search_cache.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/conv_cudnn_op_cache.h"
#include "paddle/fluid/operators/conv_op.h"
#include "paddle/fluid/operators/math/padding.h"
#include "paddle/fluid/platform/cudnn_helper.h"
DECLARE_int64(cudnn_exhaustive_search_times);
namespace paddle {
namespace operators {
#if CUDNN_VERSION >= 7100
using Tensor = framework::Tensor;
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using ScopedFilterDescriptor = platform::ScopedFilterDescriptor;
using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor;
using ScopedActivationDescriptor = platform::ScopedActivationDescriptor;
using DataLayout = platform::DataLayout;
using framework::AlgorithmsCache;
using framework::ConvSearchCache;
template <typename T>
using ScalingParamType = typename platform::CudnnDataType<T>::ScalingParamType;
template <typename T>
class CUDNNConvFusionOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto* input = ctx.Input<Tensor>("Input");
auto* filter = ctx.Input<Tensor>("Filter");
auto* bias = ctx.Input<Tensor>("Bias");
auto* residual = ctx.Input<Tensor>("ResidualData");
auto* output = ctx.Output<Tensor>("Output");
output->mutable_data<T>(ctx.GetPlace());
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
const std::string activation = ctx.Attr<std::string>("activation");
int groups = ctx.Attr<int>("groups");
int64_t user_workspace_size =
static_cast<size_t>(ctx.Attr<int>("workspace_size_MB"));
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
const T* filter_data = filter->data<T>();
const T* bias_data = bias->data<T>();
const std::string padding_algorithm =
ctx.Attr<std::string>("padding_algorithm");
Tensor transformed_input_channel(input->type());
Tensor transformed_output(output->type());
transformed_input_channel = *input;
transformed_output = *output;
T* output_data = transformed_output.data<T>();
const T* residual_data = residual ? residual->data<T>() : output_data;
// update padding and dilation
auto in_dims = transformed_input_channel.dims();
auto filter_dims = filter->dims();
framework::DDim in_data_dims =
framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_input;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_input_channel.dims()[0];
new_input_shape_vec[1] = transformed_input_channel.dims()[1];
std::vector<int> input_pad(transformed_input_channel.dims().size() * 2,
0);
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_input_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
const int rank = transformed_input_channel.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
default:
PADDLE_THROW(platform::errors::PermissionDenied(
"Operator Conv2DFusion expects Input to be a 4-D or 5-D Tensor. "
"But recieved the actual dimension = %d, shape = [%s].",
rank, transformed_input_channel.dims()));
}
} else {
transformed_input = transformed_input_channel;
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = transformed_input.data<T>();
// ------------------- cudnn descriptors ---------------------
ScopedTensorDescriptor input_desc;
ScopedTensorDescriptor output_desc;
ScopedFilterDescriptor filter_desc;
ScopedTensorDescriptor bias_desc;
ScopedConvolutionDescriptor conv_desc;
ScopedActivationDescriptor act_desc;
DataLayout layout = DataLayout::kNCHW;
if (input->dims().size() == 5) {
layout = DataLayout::kNCDHW;
}
cudnnConvolutionDescriptor_t cudnn_conv_desc =
conv_desc.descriptor<T>(padding_common, strides, dilations);
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnSetConvolutionGroupCount(cudnn_conv_desc,
groups));
cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
layout, framework::vectorize<int>(transformed_input.dims()));
cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
layout, framework::vectorize<int>(transformed_output.dims()));
cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor<T>(
layout, framework::vectorize<int>(filter->dims()));
// Now only support NCHW
std::vector<int> bias_dim = {
1, static_cast<int>(transformed_output.dims()[1]), 1, 1};
cudnnTensorDescriptor_t cudnn_bias_desc =
bias_desc.descriptor<T>(layout, bias_dim);
cudnnActivationDescriptor_t cudnn_act_desc =
act_desc.descriptor<T>(activation);
// ------------------- cudnn conv workspace ---------------------
size_t workspace_size_in_bytes; // final workspace to allocate.
size_t workspace_size_limit = 0;
if (FLAGS_conv_workspace_size_limit > 0 || user_workspace_size > 0) {
int64_t max_user_size =
std::min(static_cast<int64_t>(FLAGS_conv_workspace_size_limit),
user_workspace_size);
workspace_size_limit = max_user_size * 1024 * 1024;
}
// ------------------- cudnn conv algorithm ---------------------
cudnnConvolutionFwdAlgo_t algo;
auto handle = dev_ctx.cudnn_handle();
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetConvolutionMathType(
cudnn_conv_desc, CUDNN_DEFAULT_MATH));
auto x_dims = framework::vectorize(transformed_input.dims());
auto f_dims = framework::vectorize(filter->dims());
if (!exhaustive_search) {
#if CUDNN_VERSION >= 8000
int perf_count;
int best_algo_idx = 0;
size_t tmp_size = 0;
std::unique_ptr<cudnnConvolutionFwdAlgoPerf_t[]> perf_results(
new cudnnConvolutionFwdAlgoPerf_t[kNUM_CUDNN_FWD_ALGS]);
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetConvolutionForwardAlgorithm_v7(
handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc,
cudnn_output_desc, kNUM_CUDNN_FWD_ALGS, &perf_count,
perf_results.get()));
algo = (perf_results.get())[best_algo_idx].algo;
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetConvolutionForwardWorkspaceSize(
handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc,
cudnn_output_desc, algo, &workspace_size_in_bytes));
if (workspace_size_in_bytes > workspace_size_limit)
workspace_size_limit = workspace_size_in_bytes;
#else
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetConvolutionForwardAlgorithm(
handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc,
cudnn_output_desc, CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
workspace_size_limit, &algo));
VLOG(3) << "cuDNN forward algo " << algo;
#endif
} else {
std::function<cudnnConvolutionFwdAlgo_t()> search_func =
[&]() -> cudnnConvolutionFwdAlgo_t {
int returned_algo_count;
std::array<cudnnConvolutionFwdAlgoPerf_t, kNUM_CUDNN_FWD_ALGS>
fwd_perf_stat;
auto cudnn_find_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnFindConvolutionForwardAlgorithmEx(
handle, cudnn_input_desc, input_data, cudnn_filter_desc,
filter_data, cudnn_conv_desc, cudnn_output_desc, output_data,
kNUM_CUDNN_FWD_ALGS, &returned_algo_count,
fwd_perf_stat.data(), cudnn_workspace, workspace_size_limit));
};
workspace_handle.RunFuncSync(cudnn_find_func, workspace_size_limit);
VLOG(3) << "Perf result: (algo: stat, time, memory)";
for (int i = 0; i < returned_algo_count; ++i) {
const auto& stat = fwd_perf_stat[i];
VLOG(3) << stat.algo << ": " << stat.status << " " << stat.time << " "
<< stat.memory;
}
return fwd_perf_stat[0].algo;
};
AlgorithmsCache<cudnnConvolutionFwdAlgo_t>& algo_cache =
*(framework::ConvSearchCache::Instance().GetConvFusion());
int search_times = ctx.Attr<int>("search_times");
search_times = std::max(
static_cast<int>(FLAGS_cudnn_exhaustive_search_times), search_times);
// TODO(dangqingqing): Unify this if-else.
if (search_times > 0) {
// The searched algo will be cached by `search_times` times for
// different input dimension. For other dimensions, select the algo
// of closest area.
algo = algo_cache.GetAlgorithm(x_dims[2] * x_dims[3], search_times, 0,
search_func);
} else {
auto dtype = platform::CudnnDataType<T>::type;
algo = algo_cache.GetAlgorithm(x_dims, f_dims, strides, paddings,
dilations, 0, dtype, search_func);
}
VLOG(3) << "choose algo " << algo;
}
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetConvolutionForwardWorkspaceSize(
handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc,
cudnn_output_desc, algo, &workspace_size_in_bytes));
PADDLE_ENFORCE_LE(
workspace_size_in_bytes, workspace_size_limit,
platform::errors::InvalidArgument(
"The actual workspace size to be allocated for cuDNN is expected "
"to be less than the limit. But recieved: the actual workspace "
"size = %d, limit = %d.",
workspace_size_in_bytes, workspace_size_limit));
if ((activation == "identity") && (!residual)) {
// Only the CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM algo is
// enabled with CUDNN_ACTIVATION_IDENTITY in cuDNN lib.
// But test in some case, the speed is slower, change to use
// cudnnConvolutionForward and cudnnAddTensor
// ------------- cudnn conv forward and bias add ---------------------
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnConvolutionForward(
handle, &alpha, cudnn_input_desc, input_data, cudnn_filter_desc,
filter_data, cudnn_conv_desc, algo, cudnn_workspace,
workspace_size_in_bytes, &beta, cudnn_output_desc, output_data));
};
workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes);
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnAddTensor(
handle, &alpha, cudnn_bias_desc, bias_data, &alpha, cudnn_output_desc,
output_data));
} else {
if (activation == "identity") {
algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
}
// ------------------- cudnn conv+bias+act forward --------------------
ScalingParamType<T> alpha1 = 1.0f;
ScalingParamType<T> alpha2 = residual ? 1.0f : 0.0f;
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBiasActivationForward(
handle, &alpha1, cudnn_input_desc, input_data,
cudnn_filter_desc, filter_data, cudnn_conv_desc, algo,
cudnn_workspace, workspace_size_in_bytes, &alpha2,
cudnn_output_desc, residual_data, cudnn_bias_desc, bias_data,
cudnn_act_desc, cudnn_output_desc, output_data));
};
workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes);
}
std::vector<int> channels = ctx.Attr<std::vector<int>>("split_channels");
if (channels.size()) {
auto outs = ctx.MultiOutput<framework::Tensor>("Outputs");
if (x_dims[0] == 1) {
// share data with Output
framework::Tensor t;
t.ShareDataWith(*output);
auto y_dims = output->dims();
t.Resize({y_dims[1], y_dims[2], y_dims[3]});
int s = 0;
for (size_t i = 0; i < channels.size(); ++i) {
int e = s + channels[i];
outs[i]->ShareDataWith(t.Slice(s, e));
outs[i]->Resize({x_dims[0], channels[i], y_dims[2], y_dims[3]});
s = e;
}
} else {
// TODO(qingiqng): do copy when batch size large than 1
PADDLE_THROW(platform::errors::Unimplemented(
"Input with batch size greater than 1 is unsupported. The recieved "
"batch size is %d, Input's shape is [%s].",
x_dims[0], framework::make_ddim(x_dims)));
}
}
}
};
#endif
} // namespace operators
} // namespace paddle
#if CUDNN_VERSION >= 7100
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(conv2d_fusion, ops::CUDNNConvFusionOpKernel<float>,
ops::CUDNNConvFusionOpKernel<double>);
#endif
|
60260ab5bdf5835e6b797897e4a99252cfd122fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <sys/time.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
extern "C" {
#include "sha256.h"
}
#include "sha256_gpu.h"
/** MACROSES **/
/* Macros sig0 is doing:
res = (x >> 7) | (x << 25);
res ^= (x >>18) | (x << 14);
res ^= (x >> 3);
assembler commands:
1. temp reg t1,
2. temp reg t2,
3. t1 = x >> 7
4. t2 = x >> 18
5. t1 = t1 ^ t2
6. t2 = x >> 3
7. res = t1 ^ t2
*/
#define sig0(x,res) \
asm("{\n\t" \
" .reg .u32 t1;\n\t" \
" .reg .u32 t2;\n\t" \
" shf.r.clamp.b32 t1, %1, %1, 7;\n\t" \
" shf.r.clamp.b32 t2, %1, %1, 18;\n\t" \
" xor.b32 t1, t1, t2;\n\t" \
" shr.u32 t2, %1, 3;\n\t" \
" xor.b32 %0, t1, t2;\n\t" \
"}" \
: "=r"(res) : "r" (x));
/* Macros sig1 is doing:
res = (x >> 17) | (x << 15);
res ^= (x >> 19) | (x << 13);
res ^= (x >> 10);
assembler commands:
1. temp reg t1,
2. temp reg t2,
3. t1 = x >> 17
4. t2 = x >> 19
5. t1 = t1 ^ t2
6. t2 = x >> 10
7. res = t1 ^ t2
*/
#define sig1(x,res) \
asm("{\n\t" \
" .reg .u32 t1;\n\t" \
" .reg .u32 t2;\n\t" \
" shf.r.clamp.b32 t1, %1, %1, 17;\n\t" \
" shf.r.clamp.b32 t2, %1, %1, 19;\n\t" \
" xor.b32 t1, t1, t2;\n\t" \
" shr.u32 t2, %1, 10;\n\t" \
" xor.b32 %0, t1, t2;\n\t" \
"}" \
: "=r"(res) : "r" (x));
/* Macros sig0_1 is doing:
res0 = (x >> 7) | (x << 25);
res0 ^= (x >>18) | (x << 14);
res0 ^= (x >> 3);
res1 = (x >> 17) | (x << 15);
res1 ^= (x >> 19) | (x << 13);
res1 ^= (x >> 10);
res = res0 + res1
assembler commands:
1. temp reg t1,
2. temp reg t2,
3. t1 = x >> 7
4. t2 = x >> 18
5. t1 = t1 ^ t2
6. t2 = x >> 3
7. res = t1 ^ t2
8. t1 = x >> 17
9. t2 = x >> 19
10. t1 = t1 ^ t2
11. t2 = x >> 10
12. t1 = t1 ^ t2
13. res = res + t1
*/
#define sig0_1(x,y,res) \
asm("{\n\t" \
" .reg .u32 t1;\n\t" \
" .reg .u32 t2;\n\t" \
" shf.r.clamp.b32 t1, %1, %1, 7;\n\t" \
" shf.r.clamp.b32 t2, %1, %1, 18;\n\t" \
" xor.b32 t1, t1, t2;\n\t" \
" shr.u32 t2, %1, 3;\n\t" \
" xor.b32 %0, t1, t2;\n\t" \
" shf.r.clamp.b32 t1, %2, %2, 17;\n\t" \
" shf.r.clamp.b32 t2, %2, %2, 19;\n\t" \
" xor.b32 t1, t1, t2;\n\t" \
" shr.u32 t2, %2, 10;\n\t" \
" xor.b32 t1, t1, t2;\n\t" \
" add.s32 %0, %0, t1;\n\t" \
"}" \
: "+r"(res) : "r"(x), "r"(y));
/* Macros ep0 is doing:
res = (x >> 2) | (x << 30);
res ^= (x >> 13) | (x << 19);
res ^= (x >> 22) | (x << 10);
assembler commands:
1. temp reg t1,
2. temp reg t2,
3. t1 = x >> 2
4. t2 = x >> 13
5. t1 = t1 ^ t2
6. t2 = x >> 22
7. res = t1 ^ t2
*/
#define ep0(x,res) \
asm("{\n\t" \
" .reg .u32 t1;\n\t" \
" .reg .u32 t2;\n\t" \
" shf.r.clamp.b32 t1, %1, %1, 2;\n\t" \
" shf.r.clamp.b32 t2, %1, %1, 13;\n\t" \
" xor.b32 t1, t1, t2;\n\t" \
" shf.r.clamp.b32 t2, %1, %1, 22;\n\t" \
" xor.b32 %0, t1, t2;\n\t" \
"}" \
: "=r"(res) : "r" (x));
/* Macros ep1 is doing:
res = (x >> 6) | (x << 26);
res ^= (x >> 11) | (x << 21);
res ^= (x >> 25) | (x << 7);
assembler commands:
1. temp reg t1,
2. temp reg t2,
3. t1 = x >> 6
4. t2 = x >> 11
5. t1 = t1 ^ t2
6. t2 = x >> 25
7. res = t1 ^ t2
*/
#define ep1(x,res) \
asm("{\n\t" \
" .reg .u32 t1;\n\t" \
" .reg .u32 t2;\n\t" \
" shf.r.clamp.b32 t1, %1, %1, 6;\n\t" \
" shf.r.clamp.b32 t2, %1, %1, 11;\n\t" \
" xor.b32 t1, t1, t2;\n\t" \
" shf.r.clamp.b32 t2, %1, %1, 25;\n\t" \
" xor.b32 %0, t1, t2;\n\t" \
"}" \
: "=r"(res) : "r" (x));
#define step(h0,h1) \
hi = h0; hii = h1; \
for (int j = 0; j < 32; j++) { \
if ( (hi & mask) == 0 ) { \
our_diff++; \
hi <<= 1; \
} else { \
our_diff *= 256; \
if (j == 31) { \
our_diff += hii >> 24; \
} else { \
our_diff += (hi >> 23) & 0xff; \
if (j > 23) { our_diff += (hii >> (56 - j)); } \
} \
goto end; \
}}
//Constants for SHA-256
__device__ __constant__ static const WORD k[64] = {
0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
};
__device__ __constant__ static const WORD a0 = 0x6a09e667;
__device__ __constant__ static const WORD b0 = 0xbb67ae85;
__device__ __constant__ static const WORD c0 = 0x3c6ef372;
__device__ __constant__ static const WORD d0 = 0xa54ff53a;
__device__ __constant__ static const WORD e0 = 0x510e527f;
__device__ __constant__ static const WORD f0 = 0x9b05688c;
__device__ __constant__ static const WORD g0 = 0x1f83d9ab;
__device__ __constant__ static const WORD h0 = 0x5be0cd19;
//extern __shared__ WORD shared_memory[];
__global__ void kernel_sha256(BYTE *data, WORD difficulty, BYTE *nonce, volatile bool *success, volatile bool *stop, volatile long int *cycles, WORD device_id, long int * cycles_total) {
WORD i, j, our_diff;
long int r;
long int idx = blockIdx.x * blockDim.x + threadIdx.x;
// AMO_SHA256_CTX ctx;
WORD ctx_data[27];
// WORD ctx_state[8];
WORD res0, res1;
WORD m[64];
WORD a, b, c, d, e, f, g, h, t1;
// WORD *m = &shared_memory[64 * threadIdx.x];
#pragma unroll 1
for (i = 0, j = 0; i < 8; ++i, j += 4) {
ctx_data[i] = (data[j] << 24) | (data[j + 1] << 16) | (data[j + 2] << 8) | (data[j + 3]);
}
#pragma unroll 1
for (i = 8, j = 0; i < 13; ++i, j += 4) {
ctx_data[i] = (nonce[j] << 24) | (nonce[j + 1] << 16) | (nonce[j + 2] << 8) | (nonce[j + 3]);
}
ctx_data[13] = (nonce[20] << 24) | (nonce[21] << 16) | (nonce[22] << 8) | (0x80);
unsigned long long int bl = 55 * 8;
ctx_data[14] = (WORD)(bl >> 32);
ctx_data[15] = (WORD)bl;
ctx_data[10] = ctx_data[10] ^ idx;
ctx_data[11] = ctx_data[11] ^ device_id;
// extended data - constants:
#pragma unroll 1
for (int i = 16 ; i < 21; ++i) {
// sig0(ctx_data[i - 15], res0)
// sig1(ctx_data[i - 2], res1)
sig0_1(ctx_data[i - 15], ctx_data[i - 2], res0)
ctx_data[i] = ctx_data[i - 7] + res0 + ctx_data[i - 16];
}
#pragma unroll 1
for (int i = 21 ; i < 27; ++i) {
sig0(ctx_data[i - 15], res0)
// sig1(ctx_data[i - 2], res1)
ctx_data[i] = ctx_data[i - 7] + res0 + ctx_data[i - 16];
}
r = 0;
int index = 9; //(int)(idx % 100);
while (true) {
if ((r % 100) == index) {
if (*stop) {
// *cycles = r;
// __threadfence();
// asm("trap;");
break;
}
}
if ((++ctx_data[12]) == 0) {
ctx_data[13] += 0x0010;
ctx_data[20] += 0x0010; // depends on [13]
}
ctx_data[19]++; // depends on [12]
ctx_data[26]++; // depends on [19] -> [12]
r++;
// sha256_init(ctx_state);
// sha256_transform(ctx_data, ctx_state);
// __device__ void sha256_transform(WORD *ctx_data, WORD *ctx_state) {
{
//#pragma unroll 1
for (i = 0; i < 27; ++i)
m[i] = ctx_data[i];
for (i = 21 ; i < 27; ++i) {
// sig0(ctx_data[i - 15], res0)
sig1(m[i - 2], res1)
m[i] += res1;
}
//#pragma unroll 1
for (i = 27 ; i < 64; ++i) {
// sig0(m[i - 15], res0)
// sig1(m[i - 2], res1)
sig0_1(m[i - 15], m[i - 2], res0)
m[i] = m[i - 7] + res0 + m[i - 16];
}
a = a0;
b = b0;
c = c0;
d = d0;
e = e0;
f = f0;
g = g0;
h = h0;
//#pragma unroll 1
for (i = 0; i < 64; i++) {
ep0(a,res0)
ep1(e,res1)
t1 = h + res1 + ((e & f) ^ (~e & g)) + k[i] + m[i];
h = g;
g = f;
f = e;
e = d + t1;
d = c;
// t1 += res0 + ((a & b) ^ (a & c) ^ (b & c));
t1 += res0 + ((a & (b ^ c)) ^ (b & c));
c = b;
b = a;
a = t1;
}
a += a0;
b += b0;
c += c0;
d += d0;
e += e0;
f += f0;
g += g0;
h += h0;
}
// if ((difficulty > 8192) && (ctx.state[0] != 0)) { //difficulty > 8192
if (a != 0) { //difficulty > 8192
continue;
}
{
WORD hi, hii, mask = 0x80000000;
our_diff = 0;
step(a, b)
step(b, c)
step(c, d)
// step(d, e)
// step(e, f)
// step(f, g)
// step(g, h)
}
end:
// work = hash2int_w();
if( our_diff > difficulty) {
*success = true;
*stop = true;
BYTE * ptrn = (BYTE*)(&(ctx_data[8]));
#pragma unroll 1
for (i = 0; i < 20; i += 4) {
nonce[i] = *(ptrn + 3 + i);
nonce[i + 1] = *(ptrn + 2 + i);
nonce[i + 2] = *(ptrn + 1 + i);
nonce[i + 3] = *(ptrn + i);
}
nonce[i] = *(ptrn + 3 + i);
nonce[i + 1] = *(ptrn + 2 + i);
nonce[i + 2] = *(ptrn + 1 + i);
WORD ctx_state[8];
ctx_state[0] = a;
ctx_state[1] = b;
ctx_state[2] = c;
ctx_state[3] = d;
ctx_state[4] = e;
ctx_state[5] = f;
ctx_state[6] = g;
ctx_state[7] = h;
BYTE * ptr = (BYTE*)ctx_state;
#pragma unroll 1
for (i = 0; i < 32; i += 4) {
data[i] = *(ptr + 3 + i);
data[i + 1] = *(ptr + 2 + i);
data[i + 2] = *(ptr + 1 + i);
data[i + 3] = *(ptr + i);
}
}
}
cycles_total[idx] = r;
if (idx == 1) {
*cycles = r;
}
}
| 60260ab5bdf5835e6b797897e4a99252cfd122fa.cu | #include <cstdio>
#include <cstdlib>
#include <sys/time.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
extern "C" {
#include "sha256.h"
}
#include "sha256_gpu.h"
/** MACROSES **/
/* Macros sig0 is doing:
res = (x >> 7) | (x << 25);
res ^= (x >>18) | (x << 14);
res ^= (x >> 3);
assembler commands:
1. temp reg t1,
2. temp reg t2,
3. t1 = x >> 7
4. t2 = x >> 18
5. t1 = t1 ^ t2
6. t2 = x >> 3
7. res = t1 ^ t2
*/
#define sig0(x,res) \
asm("{\n\t" \
" .reg .u32 t1;\n\t" \
" .reg .u32 t2;\n\t" \
" shf.r.clamp.b32 t1, %1, %1, 7;\n\t" \
" shf.r.clamp.b32 t2, %1, %1, 18;\n\t" \
" xor.b32 t1, t1, t2;\n\t" \
" shr.u32 t2, %1, 3;\n\t" \
" xor.b32 %0, t1, t2;\n\t" \
"}" \
: "=r"(res) : "r" (x));
/* Macros sig1 is doing:
res = (x >> 17) | (x << 15);
res ^= (x >> 19) | (x << 13);
res ^= (x >> 10);
assembler commands:
1. temp reg t1,
2. temp reg t2,
3. t1 = x >> 17
4. t2 = x >> 19
5. t1 = t1 ^ t2
6. t2 = x >> 10
7. res = t1 ^ t2
*/
#define sig1(x,res) \
asm("{\n\t" \
" .reg .u32 t1;\n\t" \
" .reg .u32 t2;\n\t" \
" shf.r.clamp.b32 t1, %1, %1, 17;\n\t" \
" shf.r.clamp.b32 t2, %1, %1, 19;\n\t" \
" xor.b32 t1, t1, t2;\n\t" \
" shr.u32 t2, %1, 10;\n\t" \
" xor.b32 %0, t1, t2;\n\t" \
"}" \
: "=r"(res) : "r" (x));
/* Macros sig0_1 is doing:
res0 = (x >> 7) | (x << 25);
res0 ^= (x >>18) | (x << 14);
res0 ^= (x >> 3);
res1 = (x >> 17) | (x << 15);
res1 ^= (x >> 19) | (x << 13);
res1 ^= (x >> 10);
res = res0 + res1
assembler commands:
1. temp reg t1,
2. temp reg t2,
3. t1 = x >> 7
4. t2 = x >> 18
5. t1 = t1 ^ t2
6. t2 = x >> 3
7. res = t1 ^ t2
8. t1 = x >> 17
9. t2 = x >> 19
10. t1 = t1 ^ t2
11. t2 = x >> 10
12. t1 = t1 ^ t2
13. res = res + t1
*/
#define sig0_1(x,y,res) \
asm("{\n\t" \
" .reg .u32 t1;\n\t" \
" .reg .u32 t2;\n\t" \
" shf.r.clamp.b32 t1, %1, %1, 7;\n\t" \
" shf.r.clamp.b32 t2, %1, %1, 18;\n\t" \
" xor.b32 t1, t1, t2;\n\t" \
" shr.u32 t2, %1, 3;\n\t" \
" xor.b32 %0, t1, t2;\n\t" \
" shf.r.clamp.b32 t1, %2, %2, 17;\n\t" \
" shf.r.clamp.b32 t2, %2, %2, 19;\n\t" \
" xor.b32 t1, t1, t2;\n\t" \
" shr.u32 t2, %2, 10;\n\t" \
" xor.b32 t1, t1, t2;\n\t" \
" add.s32 %0, %0, t1;\n\t" \
"}" \
: "+r"(res) : "r"(x), "r"(y));
/* Macros ep0 is doing:
res = (x >> 2) | (x << 30);
res ^= (x >> 13) | (x << 19);
res ^= (x >> 22) | (x << 10);
assembler commands:
1. temp reg t1,
2. temp reg t2,
3. t1 = x >> 2
4. t2 = x >> 13
5. t1 = t1 ^ t2
6. t2 = x >> 22
7. res = t1 ^ t2
*/
#define ep0(x,res) \
asm("{\n\t" \
" .reg .u32 t1;\n\t" \
" .reg .u32 t2;\n\t" \
" shf.r.clamp.b32 t1, %1, %1, 2;\n\t" \
" shf.r.clamp.b32 t2, %1, %1, 13;\n\t" \
" xor.b32 t1, t1, t2;\n\t" \
" shf.r.clamp.b32 t2, %1, %1, 22;\n\t" \
" xor.b32 %0, t1, t2;\n\t" \
"}" \
: "=r"(res) : "r" (x));
/* Macros ep1 is doing:
res = (x >> 6) | (x << 26);
res ^= (x >> 11) | (x << 21);
res ^= (x >> 25) | (x << 7);
assembler commands:
1. temp reg t1,
2. temp reg t2,
3. t1 = x >> 6
4. t2 = x >> 11
5. t1 = t1 ^ t2
6. t2 = x >> 25
7. res = t1 ^ t2
*/
#define ep1(x,res) \
asm("{\n\t" \
" .reg .u32 t1;\n\t" \
" .reg .u32 t2;\n\t" \
" shf.r.clamp.b32 t1, %1, %1, 6;\n\t" \
" shf.r.clamp.b32 t2, %1, %1, 11;\n\t" \
" xor.b32 t1, t1, t2;\n\t" \
" shf.r.clamp.b32 t2, %1, %1, 25;\n\t" \
" xor.b32 %0, t1, t2;\n\t" \
"}" \
: "=r"(res) : "r" (x));
#define step(h0,h1) \
hi = h0; hii = h1; \
for (int j = 0; j < 32; j++) { \
if ( (hi & mask) == 0 ) { \
our_diff++; \
hi <<= 1; \
} else { \
our_diff *= 256; \
if (j == 31) { \
our_diff += hii >> 24; \
} else { \
our_diff += (hi >> 23) & 0xff; \
if (j > 23) { our_diff += (hii >> (56 - j)); } \
} \
goto end; \
}}
//Constants for SHA-256
__device__ __constant__ static const WORD k[64] = {
0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
};
__device__ __constant__ static const WORD a0 = 0x6a09e667;
__device__ __constant__ static const WORD b0 = 0xbb67ae85;
__device__ __constant__ static const WORD c0 = 0x3c6ef372;
__device__ __constant__ static const WORD d0 = 0xa54ff53a;
__device__ __constant__ static const WORD e0 = 0x510e527f;
__device__ __constant__ static const WORD f0 = 0x9b05688c;
__device__ __constant__ static const WORD g0 = 0x1f83d9ab;
__device__ __constant__ static const WORD h0 = 0x5be0cd19;
//extern __shared__ WORD shared_memory[];
__global__ void kernel_sha256(BYTE *data, WORD difficulty, BYTE *nonce, volatile bool *success, volatile bool *stop, volatile long int *cycles, WORD device_id, long int * cycles_total) {
WORD i, j, our_diff;
long int r;
long int idx = blockIdx.x * blockDim.x + threadIdx.x;
// AMO_SHA256_CTX ctx;
WORD ctx_data[27];
// WORD ctx_state[8];
WORD res0, res1;
WORD m[64];
WORD a, b, c, d, e, f, g, h, t1;
// WORD *m = &shared_memory[64 * threadIdx.x];
#pragma unroll 1
for (i = 0, j = 0; i < 8; ++i, j += 4) {
ctx_data[i] = (data[j] << 24) | (data[j + 1] << 16) | (data[j + 2] << 8) | (data[j + 3]);
}
#pragma unroll 1
for (i = 8, j = 0; i < 13; ++i, j += 4) {
ctx_data[i] = (nonce[j] << 24) | (nonce[j + 1] << 16) | (nonce[j + 2] << 8) | (nonce[j + 3]);
}
ctx_data[13] = (nonce[20] << 24) | (nonce[21] << 16) | (nonce[22] << 8) | (0x80);
unsigned long long int bl = 55 * 8;
ctx_data[14] = (WORD)(bl >> 32);
ctx_data[15] = (WORD)bl;
ctx_data[10] = ctx_data[10] ^ idx;
ctx_data[11] = ctx_data[11] ^ device_id;
// extended data - constants:
#pragma unroll 1
for (int i = 16 ; i < 21; ++i) {
// sig0(ctx_data[i - 15], res0)
// sig1(ctx_data[i - 2], res1)
sig0_1(ctx_data[i - 15], ctx_data[i - 2], res0)
ctx_data[i] = ctx_data[i - 7] + res0 + ctx_data[i - 16];
}
#pragma unroll 1
for (int i = 21 ; i < 27; ++i) {
sig0(ctx_data[i - 15], res0)
// sig1(ctx_data[i - 2], res1)
ctx_data[i] = ctx_data[i - 7] + res0 + ctx_data[i - 16];
}
r = 0;
int index = 9; //(int)(idx % 100);
while (true) {
if ((r % 100) == index) {
if (*stop) {
// *cycles = r;
// __threadfence();
// asm("trap;");
break;
}
}
if ((++ctx_data[12]) == 0) {
ctx_data[13] += 0x0010;
ctx_data[20] += 0x0010; // depends on [13]
}
ctx_data[19]++; // depends on [12]
ctx_data[26]++; // depends on [19] -> [12]
r++;
// sha256_init(ctx_state);
// sha256_transform(ctx_data, ctx_state);
// __device__ void sha256_transform(WORD *ctx_data, WORD *ctx_state) {
{
//#pragma unroll 1
for (i = 0; i < 27; ++i)
m[i] = ctx_data[i];
for (i = 21 ; i < 27; ++i) {
// sig0(ctx_data[i - 15], res0)
sig1(m[i - 2], res1)
m[i] += res1;
}
//#pragma unroll 1
for (i = 27 ; i < 64; ++i) {
// sig0(m[i - 15], res0)
// sig1(m[i - 2], res1)
sig0_1(m[i - 15], m[i - 2], res0)
m[i] = m[i - 7] + res0 + m[i - 16];
}
a = a0;
b = b0;
c = c0;
d = d0;
e = e0;
f = f0;
g = g0;
h = h0;
//#pragma unroll 1
for (i = 0; i < 64; i++) {
ep0(a,res0)
ep1(e,res1)
t1 = h + res1 + ((e & f) ^ (~e & g)) + k[i] + m[i];
h = g;
g = f;
f = e;
e = d + t1;
d = c;
// t1 += res0 + ((a & b) ^ (a & c) ^ (b & c));
t1 += res0 + ((a & (b ^ c)) ^ (b & c));
c = b;
b = a;
a = t1;
}
a += a0;
b += b0;
c += c0;
d += d0;
e += e0;
f += f0;
g += g0;
h += h0;
}
// if ((difficulty > 8192) && (ctx.state[0] != 0)) { //difficulty > 8192
if (a != 0) { //difficulty > 8192
continue;
}
{
WORD hi, hii, mask = 0x80000000;
our_diff = 0;
step(a, b)
step(b, c)
step(c, d)
// step(d, e)
// step(e, f)
// step(f, g)
// step(g, h)
}
end:
// work = hash2int_w();
if( our_diff > difficulty) {
*success = true;
*stop = true;
BYTE * ptrn = (BYTE*)(&(ctx_data[8]));
#pragma unroll 1
for (i = 0; i < 20; i += 4) {
nonce[i] = *(ptrn + 3 + i);
nonce[i + 1] = *(ptrn + 2 + i);
nonce[i + 2] = *(ptrn + 1 + i);
nonce[i + 3] = *(ptrn + i);
}
nonce[i] = *(ptrn + 3 + i);
nonce[i + 1] = *(ptrn + 2 + i);
nonce[i + 2] = *(ptrn + 1 + i);
WORD ctx_state[8];
ctx_state[0] = a;
ctx_state[1] = b;
ctx_state[2] = c;
ctx_state[3] = d;
ctx_state[4] = e;
ctx_state[5] = f;
ctx_state[6] = g;
ctx_state[7] = h;
BYTE * ptr = (BYTE*)ctx_state;
#pragma unroll 1
for (i = 0; i < 32; i += 4) {
data[i] = *(ptr + 3 + i);
data[i + 1] = *(ptr + 2 + i);
data[i + 2] = *(ptr + 1 + i);
data[i + 3] = *(ptr + i);
}
}
}
cycles_total[idx] = r;
if (idx == 1) {
*cycles = r;
}
}
|
cb8498f0d02150c645b009a8c39a079f769834cb.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Hartwig Anzt
@author Goran Flegar
@generated from sparse/blas/zgeisai_batched32.cu, normal z -> s, Wed Jan 2 14:18:54 2019
*/
#include "magmasparse_internal.h"
#include "shuffle.cuh"
#include <hip/hip_runtime_api.h>
#define PRECISION_s
#define REAL
#define BLOCKSIZE 32
#define WARP_SIZE 32
#define WRP 32
#define WRQ 4
#include <hip/hip_runtime.h> // for TORCH_HIP_VERSION
#if (TORCH_HIP_VERSION >= 7000) // only for cuda>6000
const int MaxBlockSize = 32;
// Computes one row of the lower-triangular sparse approximate inverse (ISAI):
// gathers the row's block_size x block_size triangular system into registers
// and solves it by forward substitution, broadcasting each pivot solution
// with magmablas_sshfl (presumably a warp shuffle — the host launcher uses
// blockDim.x = 32 so one row maps onto one warp; TODO confirm).
// Thread mapping: threadIdx.x = entry within the row's sparsity pattern,
// threadIdx.y/blockIdx select the row. Real arithmetic, sm_30+ only.
template <int block_size>
__device__ void
magma_slowerisai_regs_inv_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const float * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
float *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
// row index folded from a 3-D launch grid (see the _switch kernel)
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];   // first pattern entry of this row in M
int mlim = Mrow[ row ]-1;   // backward-walk sentinel (one before mstart)
float rB; // registers for trsv
float dA[ block_size ]; // registers for trisystem
float rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_S_ZERO;
}
// generate the triangular systems: each thread owns column t of the
// subsystem and merges A's column list with M's pattern, walking both
// backwards so missing entries stay zero
int t = Mcol[ mstart + tid ];
int k = Arow[ t+1 ] - 1;
int alim = Arow[ t ]-1;
int l = Mrow[ row+1 ]-1;
int idx = block_size-1;
while( k > alim && l > mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k--;
l--;
idx--;
} else if( acol > mcol ){// need to check next element
k--;
} else { // element does not exist, i.e. l < LC.col[k]
l--; // check next element in the sparsity pattern
idx--; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like: unit vector e_0 for the lower factor
rB = ( tid == 0 ) ? MAGMA_S_ONE : MAGMA_S_ZERO;
// Triangular solve in regs (forward substitution, k = pivot index).
#pragma unroll
for (int k = 0; k < block_size; k++)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;                 // pivot thread finishes its unknown
float top = magmablas_sshfl(rB, k%block_size);   // broadcast solved value
if ( tid > k)
rB -= (top*rA);           // eliminate from the remaining rows
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
// Compile-time dispatch: recurses block_size down from MaxBlockSize until it
// equals the runtime pattern size N, then runs the solver instantiated for
// exactly that register-array size.
template <int block_size>
__device__ __forceinline__ void
magma_slowerisai_regs_inv_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const float * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
float *Mval )
{
if (N == block_size) {
magma_slowerisai_regs_inv_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_slowerisai_regs_inv_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
// Recursion terminator: N was outside (0, MaxBlockSize]; such rows are
// silently skipped (the diagnostic printf is kept disabled).
template <>
__device__ __forceinline__ void
magma_slowerisai_regs_inv_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const float * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
float *Mval )
{
;
// out of range - do nothing.
// printf("%% error: size out of range: %d\n", N);
}
// Entry kernel for the lower-triangular ISAI: one y-thread per matrix row.
// The row index is folded from a 3-D grid so that more rows than a single
// grid dimension allows (65535 blocks) can be addressed. N is the number of
// nonzeros in the row's ISAI pattern; it selects the register-sized solver.
__global__ void
magma_slowerisai_regs_inv_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const float * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
float *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_slowerisai_regs_inv_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
// Upper-triangular counterpart of magma_slowerisai_regs_inv_kernel: same
// register gather, but the RHS is the last unit vector and the solve runs
// as backward substitution (pivots walked from block_size-1 down to 0).
// See the lower kernel above for the thread-mapping details.
template <int block_size>
__device__ void
magma_supperisai_regs_inv_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const float * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
float *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row ]-1;
float rB; // registers for trsv
float dA[ block_size ]; // registers for trisystem
float rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_S_ZERO;
}
// generate the triangular systems (backward merge of A's column list
// with the ISAI sparsity pattern; absent entries remain zero)
int t = Mcol[ mstart + tid ];
int k = Arow[ t+1 ] - 1;
int alim = Arow[ t ]-1;
int l = Mrow[ row+1 ]-1;
int idx = block_size-1;
while( k > alim && l > mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k--;
l--;
idx--;
} else if( acol > mcol ){// need to check next element
k--;
} else { // element does not exist, i.e. l < LC.col[k]
l--; // check next element in the sparsity pattern
idx--; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like: last unit vector for the upper factor
rB = ( tid == block_size-1 ) ? MAGMA_S_ONE : MAGMA_S_ZERO;
// Triangular solve in regs (backward substitution).
#pragma unroll
for (int k = block_size-1; k >-1; k--)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;                 // pivot thread finishes its unknown
float bottom = magmablas_sshfl(rB, k%block_size);  // broadcast solved value
if ( tid < k)
rB -= (bottom*rA);        // eliminate from the rows above
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
// Compile-time dispatch for the upper-triangular solver: recurses block_size
// down until it matches the runtime pattern size N.
template <int block_size>
__device__ __forceinline__ void
magma_supperisai_regs_inv_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const float * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
float *Mval )
{
if (N == block_size) {
magma_supperisai_regs_inv_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_supperisai_regs_inv_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
// Recursion terminator: N out of range, row is silently skipped.
template <>
__device__ __forceinline__ void
magma_supperisai_regs_inv_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const float * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
float *Mval )
{
;
// out of range - do nothing.
// printf("%% error: size out of range: %d\n", N);
}
// Entry kernel for the upper-triangular ISAI: one y-thread per matrix row,
// row index folded from a 3-D grid (same scheme as the lower switch).
__global__ void
magma_supperisai_regs_inv_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const float * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
float *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_supperisai_regs_inv_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
#endif
/**
Purpose
-------
This routine is designed to combine all kernels into one.
Arguments
---------
@param[in]
uplotype magma_uplo_t
lower or upper triangular
@param[in]
transtype magma_trans_t
possibility for transposed matrix
@param[in]
diagtype magma_diag_t
unit diagonal or not
@param[in]
L magma_s_matrix
triangular factor for which the ISAI matrix is computed.
Col-Major CSR storage.
@param[in,out]
M magma_s_matrix*
SPAI preconditioner CSR col-major
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
// Host driver: launches the register-based ISAI generator for the lower or
// upper triangular factor L into the preconditioner M.
// Block shape is 32x4: 32 x-threads (one warp) per row for the shuffle
// solve, 4 rows per block. The block count is folded into a 3-D grid so no
// grid dimension exceeds the 65535 limit. transtype/diagtype are currently
// unused here. Returns MAGMA_ERR_NOT_SUPPORTED below sm_30 / CUDA 7.0.
extern "C" magma_int_t
magma_sisai_generator_regs(
magma_uplo_t uplotype,
magma_trans_t transtype,
magma_diag_t diagtype,
magma_s_matrix L,
magma_s_matrix *M,
magma_queue_t queue )
{
magma_int_t info = 0;
#if (TORCH_HIP_VERSION >= 7000)
magma_int_t arch = magma_getdevice_arch();
hipDeviceSetCacheConfig( hipFuncCachePreferL1 );  // kernels use registers, not shared mem
int r2bs1 = 32;   // x: warp per row
int r2bs2 = 4;    // y: rows per block
int necessary_blocks = magma_ceildiv(L.num_rows, r2bs2);
int r2dg1 = min( int( sqrt( float( necessary_blocks ))), 65535 );
int r2dg2 = min(magma_ceildiv( necessary_blocks, r2dg1 ), 65535);
int r2dg3 = magma_ceildiv( necessary_blocks, r2dg1*r2dg2 );
dim3 r2block( r2bs1, r2bs2, 1 );
dim3 r2grid( r2dg1, r2dg2, r2dg3 );
if (arch >= 300) {
if (uplotype == MagmaLower) { //printf("in here lower new kernel\n");
hipLaunchKernelGGL(( magma_slowerisai_regs_inv_switch), dim3(r2grid), dim3(r2block), 0, queue->cuda_stream() ,
L.num_rows,
L.row,
L.col,
L.val,
M->row,
M->col,
M->val );
}
else { // printf("in here upper new kernel\n");
hipLaunchKernelGGL(( magma_supperisai_regs_inv_switch), dim3(r2grid), dim3(r2block), 0, queue->cuda_stream() ,
L.num_rows,
L.row,
L.col,
L.val,
M->row,
M->col,
M->val );
}
}
else {
printf( "%% error: ISAI preconditioner requires CUDA ARCHITECTURE >= 300.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
}
#else
// CUDA < 7000
printf( "%% error: ISAI preconditioner requires CUDA >= 7.0.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
#endif
return info;
}
| cb8498f0d02150c645b009a8c39a079f769834cb.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Hartwig Anzt
@author Goran Flegar
@generated from sparse/blas/zgeisai_batched32.cu, normal z -> s, Wed Jan 2 14:18:54 2019
*/
#include "magmasparse_internal.h"
#include "shuffle.cuh"
#include <cuda_profiler_api.h>
#define PRECISION_s
#define REAL
#define BLOCKSIZE 32
#define WARP_SIZE 32
#define WRP 32
#define WRQ 4
#include <cuda.h> // for CUDA_VERSION
#if (CUDA_VERSION >= 7000) // only for cuda>6000
const int MaxBlockSize = 32;
// Computes one row of the lower-triangular sparse approximate inverse (ISAI):
// gathers the row's block_size x block_size triangular system into registers
// and solves it by forward substitution, broadcasting each pivot solution
// with magmablas_sshfl (presumably a warp shuffle — the host launcher uses
// blockDim.x = 32 so one row maps onto one warp; TODO confirm).
// Thread mapping: threadIdx.x = entry within the row's sparsity pattern,
// threadIdx.y/blockIdx select the row. Real arithmetic, sm_30+ only.
template <int block_size>
__device__ void
magma_slowerisai_regs_inv_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const float * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
float *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
// row index folded from a 3-D launch grid (see the _switch kernel)
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];   // first pattern entry of this row in M
int mlim = Mrow[ row ]-1;   // backward-walk sentinel (one before mstart)
float rB; // registers for trsv
float dA[ block_size ]; // registers for trisystem
float rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_S_ZERO;
}
// generate the triangular systems: each thread owns column t of the
// subsystem and merges A's column list with M's pattern, walking both
// backwards so missing entries stay zero
int t = Mcol[ mstart + tid ];
int k = Arow[ t+1 ] - 1;
int alim = Arow[ t ]-1;
int l = Mrow[ row+1 ]-1;
int idx = block_size-1;
while( k > alim && l > mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k--;
l--;
idx--;
} else if( acol > mcol ){// need to check next element
k--;
} else { // element does not exist, i.e. l < LC.col[k]
l--; // check next element in the sparsity pattern
idx--; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like: unit vector e_0 for the lower factor
rB = ( tid == 0 ) ? MAGMA_S_ONE : MAGMA_S_ZERO;
// Triangular solve in regs (forward substitution, k = pivot index).
#pragma unroll
for (int k = 0; k < block_size; k++)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;                 // pivot thread finishes its unknown
float top = magmablas_sshfl(rB, k%block_size);   // broadcast solved value
if ( tid > k)
rB -= (top*rA);           // eliminate from the remaining rows
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
// Compile-time dispatch: recurses block_size down from MaxBlockSize until it
// equals the runtime pattern size N, then runs the solver instantiated for
// exactly that register-array size.
template <int block_size>
__device__ __forceinline__ void
magma_slowerisai_regs_inv_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const float * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
float *Mval )
{
if (N == block_size) {
magma_slowerisai_regs_inv_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_slowerisai_regs_inv_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
// Recursion terminator: N was outside (0, MaxBlockSize]; such rows are
// silently skipped (the diagnostic printf is kept disabled).
template <>
__device__ __forceinline__ void
magma_slowerisai_regs_inv_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const float * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
float *Mval )
{
;
// out of range - do nothing.
// printf("%% error: size out of range: %d\n", N);
}
// Entry kernel for the lower-triangular ISAI: one y-thread per matrix row.
// The row index is folded from a 3-D grid so that more rows than a single
// grid dimension allows (65535 blocks) can be addressed. N is the number of
// nonzeros in the row's ISAI pattern; it selects the register-sized solver.
__global__ void
magma_slowerisai_regs_inv_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const float * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
float *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_slowerisai_regs_inv_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
// Upper-triangular counterpart of magma_slowerisai_regs_inv_kernel: same
// register gather, but the RHS is the last unit vector and the solve runs
// as backward substitution (pivots walked from block_size-1 down to 0).
// See the lower kernel above for the thread-mapping details.
template <int block_size>
__device__ void
magma_supperisai_regs_inv_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const float * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
float *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row ]-1;
float rB; // registers for trsv
float dA[ block_size ]; // registers for trisystem
float rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_S_ZERO;
}
// generate the triangular systems (backward merge of A's column list
// with the ISAI sparsity pattern; absent entries remain zero)
int t = Mcol[ mstart + tid ];
int k = Arow[ t+1 ] - 1;
int alim = Arow[ t ]-1;
int l = Mrow[ row+1 ]-1;
int idx = block_size-1;
while( k > alim && l > mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k--;
l--;
idx--;
} else if( acol > mcol ){// need to check next element
k--;
} else { // element does not exist, i.e. l < LC.col[k]
l--; // check next element in the sparsity pattern
idx--; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like: last unit vector for the upper factor
rB = ( tid == block_size-1 ) ? MAGMA_S_ONE : MAGMA_S_ZERO;
// Triangular solve in regs (backward substitution).
#pragma unroll
for (int k = block_size-1; k >-1; k--)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;                 // pivot thread finishes its unknown
float bottom = magmablas_sshfl(rB, k%block_size);  // broadcast solved value
if ( tid < k)
rB -= (bottom*rA);        // eliminate from the rows above
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
// Compile-time dispatch for the upper-triangular solver: recurses block_size
// down until it matches the runtime pattern size N.
template <int block_size>
__device__ __forceinline__ void
magma_supperisai_regs_inv_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const float * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
float *Mval )
{
if (N == block_size) {
magma_supperisai_regs_inv_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_supperisai_regs_inv_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
// Recursion terminator: N out of range, row is silently skipped.
template <>
__device__ __forceinline__ void
magma_supperisai_regs_inv_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const float * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
float *Mval )
{
;
// out of range - do nothing.
// printf("%% error: size out of range: %d\n", N);
}
// Entry kernel for the upper-triangular ISAI: one y-thread per matrix row,
// row index folded from a 3-D grid (same scheme as the lower switch).
__global__ void
magma_supperisai_regs_inv_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const float * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
float *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_supperisai_regs_inv_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
#endif
/**
Purpose
-------
This routine is designed to combine all kernels into one.
Arguments
---------
@param[in]
uplotype magma_uplo_t
lower or upper triangular
@param[in]
transtype magma_trans_t
possibility for transposed matrix
@param[in]
diagtype magma_diag_t
unit diagonal or not
@param[in]
L magma_s_matrix
triangular factor for which the ISAI matrix is computed.
Col-Major CSR storage.
@param[in,out]
M magma_s_matrix*
SPAI preconditioner CSR col-major
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
// Host driver: launches the register-based ISAI generator for the lower or
// upper triangular factor L into the preconditioner M.
// Block shape is 32x4: 32 x-threads (one warp) per row for the shuffle
// solve, 4 rows per block. The block count is folded into a 3-D grid so no
// grid dimension exceeds the 65535 limit. transtype/diagtype are currently
// unused here. Returns MAGMA_ERR_NOT_SUPPORTED below sm_30 / CUDA 7.0.
extern "C" magma_int_t
magma_sisai_generator_regs(
magma_uplo_t uplotype,
magma_trans_t transtype,
magma_diag_t diagtype,
magma_s_matrix L,
magma_s_matrix *M,
magma_queue_t queue )
{
magma_int_t info = 0;
#if (CUDA_VERSION >= 7000)
magma_int_t arch = magma_getdevice_arch();
cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );  // kernels use registers, not shared mem
int r2bs1 = 32;   // x: warp per row
int r2bs2 = 4;    // y: rows per block
int necessary_blocks = magma_ceildiv(L.num_rows, r2bs2);
int r2dg1 = min( int( sqrt( float( necessary_blocks ))), 65535 );
int r2dg2 = min(magma_ceildiv( necessary_blocks, r2dg1 ), 65535);
int r2dg3 = magma_ceildiv( necessary_blocks, r2dg1*r2dg2 );
dim3 r2block( r2bs1, r2bs2, 1 );
dim3 r2grid( r2dg1, r2dg2, r2dg3 );
if (arch >= 300) {
if (uplotype == MagmaLower) { //printf("in here lower new kernel\n");
magma_slowerisai_regs_inv_switch<<< r2grid, r2block, 0, queue->cuda_stream() >>>(
L.num_rows,
L.row,
L.col,
L.val,
M->row,
M->col,
M->val );
}
else { // printf("in here upper new kernel\n");
magma_supperisai_regs_inv_switch<<< r2grid, r2block, 0, queue->cuda_stream() >>>(
L.num_rows,
L.row,
L.col,
L.val,
M->row,
M->col,
M->val );
}
}
else {
printf( "%% error: ISAI preconditioner requires CUDA ARCHITECTURE >= 300.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
}
#else
// CUDA < 7000
printf( "%% error: ISAI preconditioner requires CUDA >= 7.0.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
#endif
return info;
}
|
b983afc9e546bd43d2cd3be5062fd231824a1c5c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <accelerate_cuda.h>
static TexFloat arrIn0_1;
static TexFloat arrIn0_0;
// Auto-generated by Accelerate (CUDA backend). For each cell of the
// shOut_1 x shOut_0 output, this looks like a semi-Lagrangian step: it
// traces the position back along the vector field stored in
// (arrIn0_1, arrIn0_0) and bilinearly interpolates both components at the
// back-traced point, with zero contribution outside the input domain.
// NOTE(review): the advection interpretation is inferred from the math —
// confirm against the generating Accelerate program.
// Grid-stride loop over the flattened output; ix is the linear index.
extern "C" __global__ void generate(const Int64 shIn0_1, const Int64 shIn0_0, const Int64 shOut_1, const Int64 shOut_0, float* __restrict__ arrOut_1, float* __restrict__ arrOut_0)
{
const int shapeSize = shOut_1 * shOut_0;
const int gridSize = __umul24(blockDim.x, gridDim.x);
int ix;
for (ix = __umul24(blockDim.x, blockIdx.x) + threadIdx.x; ix < shapeSize; ix += gridSize) {
// unflatten ix into (sh1, sh0) = (row, col) of the output
const Int64 tmp_0 = ix;
const Int64 tmp_1 = tmp_0 / shOut_0;
const Int64 sh1 = tmp_1 % shOut_1;
const Int64 sh0 = tmp_0 % shOut_0;
const Int64 v0 = (Int64) 0;
const Int64 v1 = (Int64) 1;
// field components at the current cell
const Int64 v2 = sh1 * shIn0_0 + sh0;
const float v3 = indexArray(arrIn0_1, v2);
const float v4 = indexArray(arrIn0_0, v2);
// back-traced sample position, clamped to [-0.5, extent + 0.5]
const float v5 = (float) shIn0_0;
const float v6 = fmaxf(-0.5f, fminf(0.5f + v5, (float) sh0 - 0.1f * v5 * v3));
const float v7 = (float) shIn0_1;
const float v8 = fmaxf(-0.5f, fminf(0.5f + v7, (float) sh1 - 0.1f * v7 * v4));
// lower-left corner (v9, v10) and fractional offsets (v11, v12)
const Int64 v9 = (Int64) -1 + (Int64) truncf(1.0f + v6);
const Int64 v10 = (Int64) -1 + (Int64) truncf(1.0f + v8);
const float v11 = v6 - (float) v9;
const float v12 = v8 - (float) v10;
const Int64 v13 = v1 + v10;
const float v14 = 1.0f - v12;
const float v15 = 1.0f - v11;
// four corner samples; out-of-range corners contribute zero
const Word8 v16 = v10 < v0 || (v9 < v0 || (v10 >= shIn0_1 || v9 >= shIn0_0));
const Int64 v17 = v10 * shIn0_0 + v9;
const float v18 = v16 ? 0.0f : indexArray(arrIn0_1, v17);
const float v19 = v16 ? 0.0f : indexArray(arrIn0_0, v17);
const float v20 = v14 * v18;
const float v21 = v14 * v19;
const Word8 v22 = v13 < v0 || (v9 < v0 || (v13 >= shIn0_1 || v9 >= shIn0_0));
const Int64 v23 = v13 * shIn0_0 + v9;
const float v24 = v22 ? 0.0f : indexArray(arrIn0_1, v23);
const float v25 = v22 ? 0.0f : indexArray(arrIn0_0, v23);
const float v26 = v12 * v24;
const float v27 = v12 * v25;
const float v28 = v20 + v26;
const float v29 = v21 + v27;
const float v30 = v15 * v28;
const float v31 = v15 * v29;
const Int64 v32 = v1 + v9;
const Word8 v33 = v10 < v0 || (v32 < v0 || (v10 >= shIn0_1 || v32 >= shIn0_0));
const Int64 v34 = v10 * shIn0_0 + v32;
const float v35 = v33 ? 0.0f : indexArray(arrIn0_1, v34);
const float v36 = v33 ? 0.0f : indexArray(arrIn0_0, v34);
const float v37 = v14 * v35;
const float v38 = v14 * v36;
const Word8 v39 = v13 < v0 || (v32 < v0 || (v13 >= shIn0_1 || v32 >= shIn0_0));
const Int64 v40 = v13 * shIn0_0 + v32;
const float v41 = v39 ? 0.0f : indexArray(arrIn0_1, v40);
const float v42 = v39 ? 0.0f : indexArray(arrIn0_0, v40);
const float v43 = v12 * v41;
const float v44 = v12 * v42;
const float v45 = v37 + v43;
const float v46 = v38 + v44;
const float v47 = v11 * v45;
const float v48 = v11 * v46;
// bilinear blend of the four corners, per component
arrOut_1[ix] = v30 + v47;
arrOut_0[ix] = v31 + v48;
}
}
| b983afc9e546bd43d2cd3be5062fd231824a1c5c.cu | #include <accelerate_cuda.h>
static TexFloat arrIn0_1;
static TexFloat arrIn0_0;
// Auto-generated by Accelerate (CUDA backend). For each cell of the
// shOut_1 x shOut_0 output, this looks like a semi-Lagrangian step: it
// traces the position back along the vector field stored in
// (arrIn0_1, arrIn0_0) and bilinearly interpolates both components at the
// back-traced point, with zero contribution outside the input domain.
// NOTE(review): the advection interpretation is inferred from the math —
// confirm against the generating Accelerate program.
// Grid-stride loop over the flattened output; ix is the linear index.
extern "C" __global__ void generate(const Int64 shIn0_1, const Int64 shIn0_0, const Int64 shOut_1, const Int64 shOut_0, float* __restrict__ arrOut_1, float* __restrict__ arrOut_0)
{
const int shapeSize = shOut_1 * shOut_0;
const int gridSize = __umul24(blockDim.x, gridDim.x);
int ix;
for (ix = __umul24(blockDim.x, blockIdx.x) + threadIdx.x; ix < shapeSize; ix += gridSize) {
// unflatten ix into (sh1, sh0) = (row, col) of the output
const Int64 tmp_0 = ix;
const Int64 tmp_1 = tmp_0 / shOut_0;
const Int64 sh1 = tmp_1 % shOut_1;
const Int64 sh0 = tmp_0 % shOut_0;
const Int64 v0 = (Int64) 0;
const Int64 v1 = (Int64) 1;
// field components at the current cell
const Int64 v2 = sh1 * shIn0_0 + sh0;
const float v3 = indexArray(arrIn0_1, v2);
const float v4 = indexArray(arrIn0_0, v2);
// back-traced sample position, clamped to [-0.5, extent + 0.5]
const float v5 = (float) shIn0_0;
const float v6 = fmaxf(-0.5f, fminf(0.5f + v5, (float) sh0 - 0.1f * v5 * v3));
const float v7 = (float) shIn0_1;
const float v8 = fmaxf(-0.5f, fminf(0.5f + v7, (float) sh1 - 0.1f * v7 * v4));
// lower-left corner (v9, v10) and fractional offsets (v11, v12)
const Int64 v9 = (Int64) -1 + (Int64) truncf(1.0f + v6);
const Int64 v10 = (Int64) -1 + (Int64) truncf(1.0f + v8);
const float v11 = v6 - (float) v9;
const float v12 = v8 - (float) v10;
const Int64 v13 = v1 + v10;
const float v14 = 1.0f - v12;
const float v15 = 1.0f - v11;
// four corner samples; out-of-range corners contribute zero
const Word8 v16 = v10 < v0 || (v9 < v0 || (v10 >= shIn0_1 || v9 >= shIn0_0));
const Int64 v17 = v10 * shIn0_0 + v9;
const float v18 = v16 ? 0.0f : indexArray(arrIn0_1, v17);
const float v19 = v16 ? 0.0f : indexArray(arrIn0_0, v17);
const float v20 = v14 * v18;
const float v21 = v14 * v19;
const Word8 v22 = v13 < v0 || (v9 < v0 || (v13 >= shIn0_1 || v9 >= shIn0_0));
const Int64 v23 = v13 * shIn0_0 + v9;
const float v24 = v22 ? 0.0f : indexArray(arrIn0_1, v23);
const float v25 = v22 ? 0.0f : indexArray(arrIn0_0, v23);
const float v26 = v12 * v24;
const float v27 = v12 * v25;
const float v28 = v20 + v26;
const float v29 = v21 + v27;
const float v30 = v15 * v28;
const float v31 = v15 * v29;
const Int64 v32 = v1 + v9;
const Word8 v33 = v10 < v0 || (v32 < v0 || (v10 >= shIn0_1 || v32 >= shIn0_0));
const Int64 v34 = v10 * shIn0_0 + v32;
const float v35 = v33 ? 0.0f : indexArray(arrIn0_1, v34);
const float v36 = v33 ? 0.0f : indexArray(arrIn0_0, v34);
const float v37 = v14 * v35;
const float v38 = v14 * v36;
const Word8 v39 = v13 < v0 || (v32 < v0 || (v13 >= shIn0_1 || v32 >= shIn0_0));
const Int64 v40 = v13 * shIn0_0 + v32;
const float v41 = v39 ? 0.0f : indexArray(arrIn0_1, v40);
const float v42 = v39 ? 0.0f : indexArray(arrIn0_0, v40);
const float v43 = v12 * v41;
const float v44 = v12 * v42;
const float v45 = v37 + v43;
const float v46 = v38 + v44;
const float v47 = v11 * v45;
const float v48 = v11 * v46;
// bilinear blend of the four corners, per component
arrOut_1[ix] = v30 + v47;
arrOut_0[ix] = v31 + v48;
}
}
|
ade804694843c339206ff1158898832780c4ed77.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void DoCrop( const int nthreads,
const int src_height, const int src_width, const Dtype* src_data,
const int dest_height, const int dest_width, Dtype* dest_data)
{
    // Forward center crop: copies the centered dest_height x dest_width
    // window out of each src_height x src_width plane, one output element
    // per thread (nthreads = total output count across all planes).
    CUDA_KERNEL_LOOP(index, nthreads)
    {
        const int col   = index % dest_width;                  // w-pos in dest
        const int row   = (index / dest_width) % dest_height;  // h-pos in dest
        const int plane = index / dest_width / dest_height;    // channel*num
        // centered offsets; kept as float to match the original arithmetic
        const float src_col = col + (src_width - dest_width) / 2;
        const float src_row = row + (src_height - dest_height) / 2;
        const int src_index = src_width * (src_height * plane + src_row) + src_col;
        dest_data[index] = src_data[src_index];
    }
}
template <typename Dtype>
__global__ void CropBackward( const int nthreads,
const int src_height, const int src_width, const Dtype* src_data,
const int dest_height, const int dest_width, Dtype* dest_data)
{
    // Backward of the center crop: scatters each top-gradient element back
    // into the centered window of the (larger) bottom-gradient plane, one
    // source element per thread (nthreads = total source count).
    CUDA_KERNEL_LOOP(index, nthreads)
    {
        const int col   = index % src_width;                 // w-pos in src
        const int row   = (index / src_width) % src_height;  // h-pos in src
        const int plane = index / src_width / src_height;    // channel*num
        // centered offsets; kept as float to match the original arithmetic
        const float dest_col = col + (dest_width - src_width) / 2;
        const float dest_row = row + (dest_height - src_height) / 2;
        const int dest_index = dest_width * (dest_height * plane + dest_row) + dest_col;
        dest_data[dest_index] = src_data[index];
    }
}
// Forward pass: center-crops bottom[0] into top[0] via the DoCrop kernel,
// one thread per top element. Assumes top's spatial size does not exceed
// bottom's (no bounds check in the kernel).
template <typename Dtype>
void CropSimpleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
Dtype* top_data = top[0]->mutable_gpu_data(); // dest
int topwidth = top[0]->width();
int topheight = top[0]->height();
int topchannels = top[0]->channels();  // unused; kept for symmetry
int topcount = top[0]->count();
const Dtype* bottom_data = bottom[0]->gpu_data(); // source
int bottomchannels = (bottom)[0]->channels();  // unused
int bottomwidth = (bottom)[0]->width();
int bottomheight = (bottom)[0]->height();
int bottomcount = (bottom)[0]->count();  // unused
int num = (bottom)[0]->num(); CHECK_EQ((bottom)[0]->num(), top[0]->num());
hipLaunchKernelGGL(( DoCrop<Dtype>), dim3(CAFFE_GET_BLOCKS(topcount)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
topcount, bottomheight, bottomwidth, bottom_data,
topheight, topwidth, top_data);
CUDA_POST_KERNEL_CHECK;
}
// Backward pass: zero-fills the bottom gradient, then scatters the top
// gradient into its centered window via CropBackward (pixels outside the
// crop correctly receive zero gradient).
template <typename Dtype>
void CropSimpleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
int count = (top)[0]->count();
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom_diff);  // uncovered border stays 0
hipLaunchKernelGGL(( CropBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count,
(top)[0]->height(), (top)[0]->width(), top_diff,
bottom[0]->height(), bottom[0]->width(), bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CropSimpleLayer);
} // namespace caffe
| ade804694843c339206ff1158898832780c4ed77.cu | #include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
// Forward center crop: copies the centered dest_height x dest_width window
// out of each src_height x src_width plane, one output element per thread
// (nthreads = total output count across all channel*num planes).
template <typename Dtype>
__global__ void DoCrop( const int nthreads,
const int src_height, const int src_width, const Dtype* src_data,
const int dest_height, const int dest_width, Dtype* dest_data)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
int x = index % dest_width; //w-pos
int y = (index / dest_width) % dest_height; //h-pos
int cn = index / dest_width / dest_height; // channel*num
// centered offsets (float intermediates truncate back to int below)
float x_src = x + (src_width-dest_width)/2;
float y_src = y + (src_height-dest_height)/2;
int index_src = src_width*(src_height*cn + y_src) + x_src;
// write sample to destination
dest_data[index] = src_data[index_src];
}
}
// Backward of the center crop: scatters each top-gradient element back into
// the centered window of the (larger) bottom-gradient plane, one source
// element per thread (nthreads = total source count).
template <typename Dtype>
__global__ void CropBackward( const int nthreads,
const int src_height, const int src_width, const Dtype* src_data,
const int dest_height, const int dest_width, Dtype* dest_data)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
int x = index % src_width; //w-pos
int y = (index / src_width) % src_height; //h-pos
int cn = index / src_width / src_height; // channel*num
// centered offsets (float intermediates truncate back to int below)
float x_dest = x + (dest_width-src_width)/2;
float y_dest = y + (dest_height-src_height)/2;
int index_dest = dest_width*(dest_height*cn + y_dest) + x_dest;
// write sample to destination
dest_data[index_dest] = src_data[index];
}
}
// Forward pass: center-crops bottom[0] into top[0] via the DoCrop kernel,
// one thread per top element. Assumes top's spatial size does not exceed
// bottom's (no bounds check in the kernel).
template <typename Dtype>
void CropSimpleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
Dtype* top_data = top[0]->mutable_gpu_data(); // dest
int topwidth = top[0]->width();
int topheight = top[0]->height();
int topchannels = top[0]->channels();  // unused; kept for symmetry
int topcount = top[0]->count();
const Dtype* bottom_data = bottom[0]->gpu_data(); // source
int bottomchannels = (bottom)[0]->channels();  // unused
int bottomwidth = (bottom)[0]->width();
int bottomheight = (bottom)[0]->height();
int bottomcount = (bottom)[0]->count();  // unused
int num = (bottom)[0]->num(); CHECK_EQ((bottom)[0]->num(), top[0]->num());
DoCrop<Dtype><<<CAFFE_GET_BLOCKS(topcount), CAFFE_CUDA_NUM_THREADS>>>(
topcount, bottomheight, bottomwidth, bottom_data,
topheight, topwidth, top_data);
CUDA_POST_KERNEL_CHECK;
}
// Backward pass: zero-fills the bottom gradient, then scatters the top
// gradient into its centered window via CropBackward (pixels outside the
// crop correctly receive zero gradient).
template <typename Dtype>
void CropSimpleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
int count = (top)[0]->count();
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom_diff);  // uncovered border stays 0
CropBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count,
(top)[0]->height(), (top)[0]->width(), top_diff,
bottom[0]->height(), bottom[0]->width(), bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CropSimpleLayer);
} // namespace caffe
|
09bd7ede7c53d19bbf2a63b64dcded42195f239a.hip | // !!! This is a file automatically generated by hipify!!!
#include "PyC_types.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "stdio.h"
#include <sys/time.h>
#define DEBUG 0
#define T 6
#define chunksize 8 // how many kernel calls' worth of data to copy at once. n_elem should be divisible by this
extern "C" {
// cleaner error handling; just wrap cuda library calls with gpuErrchk(foo());
#define gpuErr(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Block-wise tree sum: each block reduces its blockDim.x-sized slice of H
// into one partial sum, written to R[blockIdx.x].
// Requires: blockDim.x a power of two (halving loop), dynamic shared memory
// of blockDim.x * sizeof(double), and H holding at least
// gridDim.x * blockDim.x elements (no bounds check on the load).
__global__
void reduction(double *H,double *R)
{
extern __shared__ double sdata[];
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = H[i];
__syncthreads();
// binary tree reduction within the block
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid<s)
{
sdata[tid]+=sdata[tid+s];
}
__syncthreads();
}
// thread 0 publishes this block's partial sum
if (tid==0){R[blockIdx.x]=sdata[0];}
}
// Computes one partially-contracted Hamiltonian element per thread:
//   H[p1,q1,r1] = 2 * sum_s2( V1112[p1,q1,r1,s2]*Rcca1[q1,p1,r1]*Ra2[s2]
//                           + V1222[p1,q1,r1,s2]*Rc1[p1]*Rcaa2[q1,s2,r1] )
//                 + (r1==0 ? h[p1,q1]*Rc1[p1]*Ra2[q1] : 0)
// 3-D launch, one thread per (p1,q1,r1); out-of-range threads write nothing.
// The one-electron h term is added only on the r1==0 plane so the external
// reduction over H counts it exactly once per (p1,q1).
// NOTE(review): q1 and r1 (bounded by n_orb1) also index n_orb2-sized
// dimensions of V1222/Rcaa2/h — the host code below states this assumes
// n_orb1 == n_orb2; TODO confirm.
__global__
void outerloop(double *V1112,double *Rcca1,double *Ra2,double *H,int n_orb1,int n_orb2,double *V1222,double *Rcaa2, double *Rc1,double *h)
{
    int p1 = threadIdx.x + blockIdx.x*blockDim.x;
    int q1 = threadIdx.y + blockIdx.y*blockDim.y;
    int r1 = threadIdx.z + blockIdx.z*blockDim.z;
    double Hlocal = 0;
    if (p1 < n_orb1 && q1 < n_orb1 && r1 < n_orb1)
    {
        for (int s2=0; s2<n_orb2; s2++)
        {
            // two-electron terms, summed over the second fragment's orbital s2
            Hlocal += V1112[((p1*n_orb1 + q1)*n_orb1 + r1)*n_orb2 + s2] * Rcca1[(q1*n_orb1 + p1)*n_orb1 + r1] * Ra2[s2] + V1222[((p1*n_orb2 + q1)*n_orb2 + r1)*n_orb2 + s2] * Rc1[p1] * Rcaa2[(q1*n_orb2 + s2)*n_orb2 + r1];
        }
        // BUG FIX: '?:' binds looser than '+', so the original
        //   2*Hlocal + (r1==0) ? (h_term) : 0
        // parsed as (2*Hlocal + (r1==0)) ? (h_term) : 0, which discarded the
        // accumulated two-electron sum entirely. Parenthesize the conditional
        // so the one-electron term is ADDED on the r1==0 plane.
        H[(p1*n_orb1+q1)*n_orb1+r1] = 2*Hlocal + ((r1==0) ? (h[p1*n_orb2 + q1] * Rc1[p1] * Ra2[q1]) : 0.0);
    }
    // reduction over H is still performed externally
}
// Driver for the dimer (-1,+1) contraction: for each of the n_elem requested
// matrix elements, stage the per-element tensors on the device (chunksize
// elements per batch of async copies), contract them with `outerloop`, reduce
// the resulting n_orb1^3 tensor on the device, and finish the sum on the
// host, storing the scalar at H[n][i[n]*dim[n] + j[n]].
//
// Preconditions inherited from the original code (unchecked):
//  * n_elem is divisible by chunksize, and the host buffers behind Rc1[n],
//    Rcca1[n], Ra2[n], Rcaa2[n], h[n], V1[n], V2[n] are contiguous across
//    chunksize consecutive n (the bulk memcpys below rely on this);
//  * n_orb1 is divisible by T, and n_orb1^3 by T*T (grid/reduction sizing);
//  * n_orb1 == n_orb2 (sizing of d_H and the mixed indexing in outerloop);
//  * `sign` is part of the external interface but unused here.
void dimer_1min1pls_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyFloat* sign, PyInt* n_orb1_in, PyInt* n_orb2_in, Double** Rc1, Double** Rcca1, Double** Ra2, Double** Rcaa2, Double** h, Double** V1, Double** V2)
{
    struct timeval start, stop;
    gettimeofday(&start, 0);
    double *d_V1112, *d_Rcca1, *d_Ra2, *d_V1222, *d_Rcaa2, *d_Rc1, *d_H, *d_h, *h_Hr, *d_Hr;
    // All of these assume that the values of n_orb1 and n_orb2 don't change.
    const int n_orb1 = n_orb1_in[0];
    const int n_orb2 = n_orb2_in[0];
    const int blocks = (n_orb1 * n_orb1 * n_orb1) / (T * T);
    const int nbpgrid = n_orb1 / T;
    const int N_V1112 = n_orb1 * n_orb1 * n_orb1 * n_orb2;
    const int N_Rcca1 = n_orb1 * n_orb1 * n_orb1;
    const int N_Ra2 = n_orb2;
    const int N_V1222 = n_orb1 * n_orb2 * n_orb2 * n_orb2;
    const int N_Rcaa2 = n_orb2 * n_orb2 * n_orb2;
    const int N_Rc1 = n_orb1;
    const int N_H = n_orb1 * n_orb1 * n_orb1;  // assumes n_orb1 == n_orb2
    const int N_h = n_orb1 * n_orb2;
    const int N_Hr = blocks;
    const dim3 dimblock(T, T, T);
    const dim3 dimgrid(nbpgrid, nbpgrid, nbpgrid);
    const dim3 dimblockR(T * T);
    const dim3 dimgridR(blocks);
    const int numStreams = 7;  // one stream per staged input array
    // Allocations: chunksize elements' worth of every input; d_H is per-call
    // scratch (only its first N_H doubles are used).
    hipStream_t streams[numStreams];
    for (int k = 0; k < numStreams; k++) {
        gpuErr(hipStreamCreate(&streams[k]));
    }
    gpuErr(hipMalloc((void **) &d_V1112, sizeof(double) * N_V1112 * chunksize));
    gpuErr(hipMalloc((void **) &d_Ra2, sizeof(double) * N_Ra2 * chunksize));
    gpuErr(hipMalloc((void **) &d_V1222, sizeof(double) * N_V1222 * chunksize));
    gpuErr(hipMalloc((void **) &d_Rcca1, sizeof(double) * N_Rcca1 * chunksize));
    gpuErr(hipMalloc((void **) &d_Rcaa2, sizeof(double) * N_Rcaa2 * chunksize));
    gpuErr(hipMalloc((void **) &d_Rc1, sizeof(double) * N_Rc1 * chunksize));
    gpuErr(hipMalloc((void **) &d_H, sizeof(double) * N_H * chunksize));
    gpuErr(hipMalloc((void **) &d_h, sizeof(double) * N_h * chunksize));
    gpuErr(hipMalloc((void **) &d_Hr, sizeof(double) * N_Hr));
    h_Hr = (double *) malloc(sizeof(double) * N_Hr);
    // (the never-used h_H scratch buffer of the original has been dropped)
    for (int n = 0; n < n_elem; n++) {
        if (DEBUG) printf("n = %d\n", n);
        int index = i[n] * dim[n] + j[n];
        double tmp = 0.0;
        const int slot = n % chunksize;  // element n's position inside the staged chunk
        if (slot == 0) {
            // Stage the next chunksize elements' inputs, one async copy per array.
            gpuErr(hipMemcpyAsync(d_Rcca1, Rcca1[n], sizeof(double) * N_Rcca1 * chunksize, hipMemcpyHostToDevice, streams[0]));
            gpuErr(hipMemcpyAsync(d_Rcaa2, Rcaa2[n], sizeof(double) * N_Rcaa2 * chunksize, hipMemcpyHostToDevice, streams[1]));
            gpuErr(hipMemcpyAsync(d_Rc1, Rc1[n], sizeof(double) * N_Rc1 * chunksize, hipMemcpyHostToDevice, streams[2]));
            gpuErr(hipMemcpyAsync(d_Ra2, Ra2[n], sizeof(double) * N_Ra2 * chunksize, hipMemcpyHostToDevice, streams[3]));
            gpuErr(hipMemcpyAsync(d_h, h[n], sizeof(double) * N_h * chunksize, hipMemcpyHostToDevice, streams[4]));
            gpuErr(hipMemcpyAsync(d_V1112, V1[n], sizeof(double) * N_V1112 * chunksize, hipMemcpyHostToDevice, streams[5]));
            gpuErr(hipMemcpyAsync(d_V1222, V2[n], sizeof(double) * N_V1222 * chunksize, hipMemcpyHostToDevice, streams[6]));
            for (int k = 0; k < numStreams; k++) {
                gpuErr(hipStreamSynchronize(streams[k]));
            }
        }
        if (DEBUG) printf("Launching outerloop\n");
        // BUG FIX: the kernel must read element n's slice of the staged chunk.
        // The original passed the chunk base pointers on every iteration, so
        // all chunksize calls in a chunk silently reused the first element's
        // data. Offset every input by this element's slot.
        hipLaunchKernelGGL(( outerloop), dim3(dimgrid), dim3(dimblock), 0, 0,
            d_V1112 + slot * N_V1112, d_Rcca1 + slot * N_Rcca1,
            d_Ra2 + slot * N_Ra2, d_H, n_orb1, n_orb2,
            d_V1222 + slot * N_V1222, d_Rcaa2 + slot * N_Rcaa2,
            d_Rc1 + slot * N_Rc1, d_h + slot * N_h);
        gpuErr(hipPeekAtLastError());
        gpuErr(hipDeviceSynchronize());
        if (DEBUG) printf("Launching reduction\n");
        hipLaunchKernelGGL(( reduction), dim3(dimgridR), dim3(dimblockR), sizeof(double) * T * T, 0, d_H, d_Hr);
        gpuErr(hipPeekAtLastError());
        // Blocking copy of the per-block partial sums; finish the sum on the host.
        gpuErr(hipMemcpy(h_Hr, d_Hr, sizeof(double) * N_Hr, hipMemcpyDeviceToHost));
        for (int k = 0; k < blocks; k++) {
            tmp += h_Hr[k];
        }
        H[n][index] = tmp;
    }
    // Cleanup
    for (int k = 0; k < numStreams; k++) {
        gpuErr(hipStreamDestroy(streams[k]));
    }
    gpuErr(hipFree(d_V1112));
    gpuErr(hipFree(d_Rcca1));
    gpuErr(hipFree(d_Ra2));
    gpuErr(hipFree(d_V1222));
    gpuErr(hipFree(d_Rcaa2));
    gpuErr(hipFree(d_Rc1));
    gpuErr(hipFree(d_H));
    gpuErr(hipFree(d_h));
    gpuErr(hipFree(d_Hr));
    free(h_Hr);
    gettimeofday(&stop, 0);
    if (DEBUG) {
        double t = (double)(stop.tv_sec - start.tv_sec) * 1000 + (double)(stop.tv_usec - start.tv_usec) / 1000;
        printf("dimer_1min1pls_loop gpu version finished in %f ms\n", t);
    }
}
} | 09bd7ede7c53d19bbf2a63b64dcded42195f239a.cu | #include "PyC_types.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "stdio.h"
#include <sys/time.h>
#define DEBUG 0
#define T 6
#define chunksize 8 // how many kernel calls' worth of data to copy at once. n_elem should be divisible by this
extern "C" {
// cleaner error handling; just wrap cuda library calls with gpuErrchk(foo());
#define gpuErr(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__
void reduction(double *H, double *R)
{
    // Block-wise sum: each block sums its blockDim.x-sized slice of H into
    // R[blockIdx.x]. The dynamic shared-memory size passed at launch must be
    // blockDim.x doubles, and gridDim.x * blockDim.x must cover H exactly.
    extern __shared__ double sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Stage this thread's element in shared memory.
    sdata[tid] = H[i];
    __syncthreads();
    // BUG FIX: ceil-halving tree reduction. The classic `s >>= 1` loop used
    // before is only correct for power-of-two block sizes; this kernel is
    // launched with blockDim.x = T*T = 36, for which that loop silently
    // dropped sdata[8] (18 -> 9 -> 4 never folds index 8) and produced a
    // wrong sum. The ceil-halving form is correct for any block size.
    for (unsigned int s = blockDim.x; s > 1; )
    {
        unsigned int half = (s + 1) / 2;       // live-element count after folding
        if (tid < s - half)
        {
            sdata[tid] += sdata[tid + half];   // fold the upper part onto the lower
        }
        __syncthreads();
        s = half;
    }
    if (tid == 0) { R[blockIdx.x] = sdata[0]; }
}
__global__
void outerloop(double *V1112, double *Rcca1, double *Ra2, double *H, int n_orb1, int n_orb2, double *V1222, double *Rcaa2, double *Rc1, double *h)
{
    // One thread per (p1, q1, r1) triple of the n_orb1^3 output tensor H;
    // launched as a 3-D grid of T^3-thread blocks.
    // NOTE(review): the V1222/Rcaa2 term indexes q1 and r1 as monomer-2
    // orbitals, which is only in range because the caller assumes
    // n_orb1 == n_orb2 -- confirm before generalizing.
    int p1 = threadIdx.x + blockIdx.x * blockDim.x;
    int q1 = threadIdx.y + blockIdx.y * blockDim.y;
    int r1 = threadIdx.z + blockIdx.z * blockDim.z;
    double Hlocal = 0;
    if (p1 < n_orb1 && q1 < n_orb1 && r1 < n_orb1)
    {
        // upper loop: contract the two-electron integrals over s2
        for (int s2 = 0; s2 < n_orb2; s2++)
        {
            Hlocal += V1112[((p1*n_orb1 + q1)*n_orb1 + r1)*n_orb2 + s2] * Rcca1[(q1*n_orb1 + p1)*n_orb1 + r1] * Ra2[s2]
                    + V1222[((p1*n_orb2 + q1)*n_orb2 + r1)*n_orb2 + s2] * Rc1[p1] * Rcaa2[(q1*n_orb2 + s2)*n_orb2 + r1];
        }
        // bottom loop: add the one-electron term once per (p1, q1), on the
        // r1 == 0 slice only.
        // BUG FIX: `?:` binds looser than `+`, so the original
        //   2*Hlocal + (r1==0) ? (...) : 0
        // parsed as (2*Hlocal + (r1==0)) ? (...) : 0, which discarded the
        // whole two-electron sum. The conditional must be parenthesized.
        H[(p1*n_orb1 + q1)*n_orb1 + r1] = 2*Hlocal + ((r1 == 0) ? (h[p1*n_orb2 + q1] * Rc1[p1] * Ra2[q1]) : 0);
    }
    // reduction still performed externally
}
// Driver for the dimer (-1,+1) contraction: for each of the n_elem requested
// matrix elements, stage the per-element tensors on the device (chunksize
// elements per batch of async copies), contract them with `outerloop`, reduce
// the resulting n_orb1^3 tensor on the device, and finish the sum on the
// host, storing the scalar at H[n][i[n]*dim[n] + j[n]].
//
// Preconditions inherited from the original code (unchecked):
//  * n_elem is divisible by chunksize, and the host buffers behind Rc1[n],
//    Rcca1[n], Ra2[n], Rcaa2[n], h[n], V1[n], V2[n] are contiguous across
//    chunksize consecutive n (the bulk memcpys below rely on this);
//  * n_orb1 is divisible by T, and n_orb1^3 by T*T (grid/reduction sizing);
//  * n_orb1 == n_orb2 (sizing of d_H and the mixed indexing in outerloop);
//  * `sign` is part of the external interface but unused here.
void dimer_1min1pls_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyFloat* sign, PyInt* n_orb1_in, PyInt* n_orb2_in, Double** Rc1, Double** Rcca1, Double** Ra2, Double** Rcaa2, Double** h, Double** V1, Double** V2)
{
    struct timeval start, stop;
    gettimeofday(&start, 0);
    double *d_V1112, *d_Rcca1, *d_Ra2, *d_V1222, *d_Rcaa2, *d_Rc1, *d_H, *d_h, *h_Hr, *d_Hr;
    // All of these assume that the values of n_orb1 and n_orb2 don't change.
    const int n_orb1 = n_orb1_in[0];
    const int n_orb2 = n_orb2_in[0];
    const int blocks = (n_orb1 * n_orb1 * n_orb1) / (T * T);
    const int nbpgrid = n_orb1 / T;
    const int N_V1112 = n_orb1 * n_orb1 * n_orb1 * n_orb2;
    const int N_Rcca1 = n_orb1 * n_orb1 * n_orb1;
    const int N_Ra2 = n_orb2;
    const int N_V1222 = n_orb1 * n_orb2 * n_orb2 * n_orb2;
    const int N_Rcaa2 = n_orb2 * n_orb2 * n_orb2;
    const int N_Rc1 = n_orb1;
    const int N_H = n_orb1 * n_orb1 * n_orb1;  // assumes n_orb1 == n_orb2
    const int N_h = n_orb1 * n_orb2;
    const int N_Hr = blocks;
    const dim3 dimblock(T, T, T);
    const dim3 dimgrid(nbpgrid, nbpgrid, nbpgrid);
    const dim3 dimblockR(T * T);
    const dim3 dimgridR(blocks);
    const int numStreams = 7;  // one stream per staged input array
    // Allocations: chunksize elements' worth of every input; d_H is per-call
    // scratch (only its first N_H doubles are used).
    cudaStream_t streams[numStreams];
    for (int k = 0; k < numStreams; k++) {
        gpuErr(cudaStreamCreate(&streams[k]));
    }
    gpuErr(cudaMalloc((void **) &d_V1112, sizeof(double) * N_V1112 * chunksize));
    gpuErr(cudaMalloc((void **) &d_Ra2, sizeof(double) * N_Ra2 * chunksize));
    gpuErr(cudaMalloc((void **) &d_V1222, sizeof(double) * N_V1222 * chunksize));
    gpuErr(cudaMalloc((void **) &d_Rcca1, sizeof(double) * N_Rcca1 * chunksize));
    gpuErr(cudaMalloc((void **) &d_Rcaa2, sizeof(double) * N_Rcaa2 * chunksize));
    gpuErr(cudaMalloc((void **) &d_Rc1, sizeof(double) * N_Rc1 * chunksize));
    gpuErr(cudaMalloc((void **) &d_H, sizeof(double) * N_H * chunksize));
    gpuErr(cudaMalloc((void **) &d_h, sizeof(double) * N_h * chunksize));
    gpuErr(cudaMalloc((void **) &d_Hr, sizeof(double) * N_Hr));
    h_Hr = (double *) malloc(sizeof(double) * N_Hr);
    // (the never-used h_H scratch buffer of the original has been dropped)
    for (int n = 0; n < n_elem; n++) {
        if (DEBUG) printf("n = %d\n", n);
        int index = i[n] * dim[n] + j[n];
        double tmp = 0.0;
        const int slot = n % chunksize;  // element n's position inside the staged chunk
        if (slot == 0) {
            // Stage the next chunksize elements' inputs, one async copy per array.
            gpuErr(cudaMemcpyAsync(d_Rcca1, Rcca1[n], sizeof(double) * N_Rcca1 * chunksize, cudaMemcpyHostToDevice, streams[0]));
            gpuErr(cudaMemcpyAsync(d_Rcaa2, Rcaa2[n], sizeof(double) * N_Rcaa2 * chunksize, cudaMemcpyHostToDevice, streams[1]));
            gpuErr(cudaMemcpyAsync(d_Rc1, Rc1[n], sizeof(double) * N_Rc1 * chunksize, cudaMemcpyHostToDevice, streams[2]));
            gpuErr(cudaMemcpyAsync(d_Ra2, Ra2[n], sizeof(double) * N_Ra2 * chunksize, cudaMemcpyHostToDevice, streams[3]));
            gpuErr(cudaMemcpyAsync(d_h, h[n], sizeof(double) * N_h * chunksize, cudaMemcpyHostToDevice, streams[4]));
            gpuErr(cudaMemcpyAsync(d_V1112, V1[n], sizeof(double) * N_V1112 * chunksize, cudaMemcpyHostToDevice, streams[5]));
            gpuErr(cudaMemcpyAsync(d_V1222, V2[n], sizeof(double) * N_V1222 * chunksize, cudaMemcpyHostToDevice, streams[6]));
            for (int k = 0; k < numStreams; k++) {
                gpuErr(cudaStreamSynchronize(streams[k]));
            }
        }
        if (DEBUG) printf("Launching outerloop\n");
        // BUG FIX: the kernel must read element n's slice of the staged chunk.
        // The original passed the chunk base pointers on every iteration, so
        // all chunksize calls in a chunk silently reused the first element's
        // data. Offset every input by this element's slot.
        outerloop<<<dimgrid, dimblock>>>(d_V1112 + slot * N_V1112,
                                         d_Rcca1 + slot * N_Rcca1,
                                         d_Ra2 + slot * N_Ra2,
                                         d_H, n_orb1, n_orb2,
                                         d_V1222 + slot * N_V1222,
                                         d_Rcaa2 + slot * N_Rcaa2,
                                         d_Rc1 + slot * N_Rc1,
                                         d_h + slot * N_h);
        gpuErr(cudaPeekAtLastError());
        gpuErr(cudaDeviceSynchronize());
        if (DEBUG) printf("Launching reduction\n");
        reduction<<<dimgridR, dimblockR, sizeof(double) * T * T>>>(d_H, d_Hr);
        gpuErr(cudaPeekAtLastError());
        // Blocking copy of the per-block partial sums; finish the sum on the host.
        gpuErr(cudaMemcpy(h_Hr, d_Hr, sizeof(double) * N_Hr, cudaMemcpyDeviceToHost));
        for (int k = 0; k < blocks; k++) {
            tmp += h_Hr[k];
        }
        H[n][index] = tmp;
    }
    // Cleanup
    for (int k = 0; k < numStreams; k++) {
        gpuErr(cudaStreamDestroy(streams[k]));
    }
    gpuErr(cudaFree(d_V1112));
    gpuErr(cudaFree(d_Rcca1));
    gpuErr(cudaFree(d_Ra2));
    gpuErr(cudaFree(d_V1222));
    gpuErr(cudaFree(d_Rcaa2));
    gpuErr(cudaFree(d_Rc1));
    gpuErr(cudaFree(d_H));
    gpuErr(cudaFree(d_h));
    gpuErr(cudaFree(d_Hr));
    free(h_Hr);
    gettimeofday(&stop, 0);
    if (DEBUG) {
        double t = (double)(stop.tv_sec - start.tv_sec) * 1000 + (double)(stop.tv_usec - start.tv_usec) / 1000;
        printf("dimer_1min1pls_loop gpu version finished in %f ms\n", t);
    }
}
} |
b1cace9ec8272ece81cc8a34f1fb392aa2e46c60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "FilterBgConf.hpp"
#include "common.hpp"
// Overwrite ODM class scores with a hard background one-hot wherever the ARM
// branch scores the box as background (negative-anchor filtering; presumably
// RefineDet-style -- confirm against the network definition).
//
// Launch: 1-D grid, one thread per prior box over the whole batch.
// arm_conf has 2 scores per box; index 1 is compared against
// objectness_score, so it holds the objectness/foreground score.
// odm_conf and conf hold numClasses scores per box.
__global__ void KernelFilterBgConf(int batchSize, int numPriorboxes, int numClasses, float objectness_score, const float *arm_conf, const float *odm_conf, float *conf)
{
int priorboxesId = threadIdx.x + blockIdx.x * blockDim.x;
if (priorboxesId < batchSize * numPriorboxes)
{
// ARM objectness below threshold -> force pure background (class 0 = 1).
if (arm_conf[2 * priorboxesId + 1] < objectness_score)
{
for (int c = 0; c < numClasses; ++c)
{
if (c != 0)
conf[priorboxesId * numClasses + c] = 0.0;
else
conf[priorboxesId * numClasses + c] = 1.0;
}
}
else
{
// Otherwise pass the ODM scores through unchanged.
for (int c = 0; c < numClasses; c++)
conf[priorboxesId * numClasses + c] = odm_conf[priorboxesId * numClasses + c];
}
}
}
// Host launcher: one thread per prior box across the batch.
// NOTE(review): despite its name, GetBlocks() is used here as the thread-block
// size and the grid size is derived from it -- confirm against common.hpp.
void FilterBgConf(int batchSize, int numPriorboxes, int numClasses, float objectness_score, const float *arm_conf, const float *odm_conf, float *conf)
{
int block = GetBlocks(batchSize * numPriorboxes);
// ceil-div so a partial final block covers the remainder.
int grid = (batchSize * numPriorboxes + block - 1) / block;
hipLaunchKernelGGL(( KernelFilterBgConf), dim3(grid), dim3(block), 0, 0, batchSize, numPriorboxes, numClasses, objectness_score, arm_conf, odm_conf, conf);
}
| b1cace9ec8272ece81cc8a34f1fb392aa2e46c60.cu | #include "FilterBgConf.hpp"
#include "common.hpp"
// Overwrite ODM class scores with a hard background one-hot wherever the ARM
// branch scores the box as background (negative-anchor filtering).
// Launch: 1-D grid, one thread per prior box over the whole batch.
// arm_conf holds 2 scores per box (index 1 is compared against the
// objectness threshold); odm_conf/conf hold numClasses scores per box.
__global__ void KernelFilterBgConf(int batchSize, int numPriorboxes, int numClasses, float objectness_score, const float *arm_conf, const float *odm_conf, float *conf)
{
    const int box = threadIdx.x + blockIdx.x * blockDim.x;
    if (box >= batchSize * numPriorboxes)
        return;

    const int base = box * numClasses;
    const bool isBackground = arm_conf[2 * box + 1] < objectness_score;
    for (int c = 0; c < numClasses; ++c)
    {
        // Background boxes get a one-hot on class 0; everything else keeps
        // the ODM score untouched.
        conf[base + c] = isBackground ? ((c == 0) ? 1.0f : 0.0f)
                                      : odm_conf[base + c];
    }
}
// Host launcher for KernelFilterBgConf: one thread per prior box in the batch.
void FilterBgConf(int batchSize, int numPriorboxes, int numClasses, float objectness_score, const float *arm_conf, const float *odm_conf, float *conf)
{
    const int totalBoxes = batchSize * numPriorboxes;
    const int threadsPerBlock = GetBlocks(totalBoxes);
    // ceil-div so a partial final block covers the remainder
    const int numBlocks = (totalBoxes + threadsPerBlock - 1) / threadsPerBlock;
    KernelFilterBgConf<<<numBlocks, threadsPerBlock>>>(batchSize, numPriorboxes, numClasses, objectness_score, arm_conf, odm_conf, conf);
}
|
55c352a1850cda7f073795a1a2b9f0002bdb2ac8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/legacy/cuda/include/hl_base.h"
#include "paddle/legacy/cuda/include/hl_sparse.ph"
#include "paddle/legacy/cuda/include/hl_top_k.h"
#include "paddle/legacy/utils/Logging.h"
// using namespace hppl;
// (value, index) element used throughout the top-k kernels.
// Ordering is by value; on equal values the entry with the *smaller* index
// wins (compares greater), so ties are broken toward earlier positions.
struct Pair {
__device__ __forceinline__ Pair() {}
__device__ __forceinline__ Pair(real value, int id) : v_(value), id_(id) {}
__device__ __forceinline__ void set(real value, int id) {
v_ = value;
id_ = id;
}
__device__ __forceinline__ void operator=(const Pair& in) {
v_ = in.v_;
id_ = in.id_;
}
// Cheap comparison against a raw value (used to pre-screen candidates).
__device__ __forceinline__ bool operator<(const real value) const {
return (v_ < value);
}
// value ascending; equal values order by id_ descending.
__device__ __forceinline__ bool operator<(const Pair& in) const {
return (v_ < in.v_) || ((v_ == in.v_) && (id_ > in.id_));
}
__device__ __forceinline__ bool operator>(const Pair& in) const {
return (v_ > in.v_) || ((v_ == in.v_) && (id_ < in.id_));
}
real v_;
int id_;
};
// Insert p into the descending, length-beamSize array topK (one insertion-sort
// step): entries smaller than p shift down one slot and the last entry falls
// off. Caller has already checked that p beats topK[beamSize - 1].
__device__ __forceinline__ void addTo(Pair topK[],
const Pair& p,
int beamSize) {
for (int k = beamSize - 2; k >= 0; k--) {
if (topK[k] < p) {
topK[k + 1] = topK[k];
} else {
topK[k + 1] = p;
return;
}
}
// p beats every existing entry: it becomes the new head.
topK[0] = p;
}
// Compile-time-beamSize variant of the insertion above (not referenced in the
// code visible in this file; kept for parity with the runtime version).
template <int beamSize>
__device__ __forceinline__ void addTo(Pair topK[], const Pair& p) {
for (int k = beamSize - 2; k >= 0; k--) {
if (topK[k] < p) {
topK[k + 1] = topK[k];
} else {
topK[k + 1] = p;
return;
}
}
topK[0] = p;
}
// Dense variant: scan src[idx], src[idx + blockSize], ... over [0, dim) and
// merge any element beating the current worst into the thread-local top list.
template <int blockSize>
__device__ __forceinline__ void getTopK(
Pair topK[], real* src, int idx, int dim, int beamSize) {
while (idx < dim) {
if (topK[beamSize - 1] < src[idx]) {
Pair tmp(src[idx], idx);
addTo(topK, tmp, beamSize);
}
idx += blockSize;
}
}
// Dense variant with an exclusive upper bound: only candidates strictly below
// `max` are accepted, so values already emitted on earlier passes are skipped.
template <int blockSize>
__device__ __forceinline__ void getTopK(
Pair topK[], real* src, int idx, int dim, const Pair& max, int beamSize) {
while (idx < dim) {
if (topK[beamSize - 1] < src[idx]) {
Pair tmp(src[idx], idx);
if (tmp < max) {
addTo(topK, tmp, beamSize);
}
}
idx += blockSize;
}
}
// Sparse (CSR) variant: values come from val[], the reported index from col[].
template <int blockSize>
__device__ __forceinline__ void getTopK(
Pair topK[], real* val, int* col, int idx, int dim, int beamSize) {
while (idx < dim) {
if (topK[beamSize - 1] < val[idx]) {
Pair tmp(val[idx], col[idx]);
addTo(topK, tmp, beamSize);
}
idx += blockSize;
}
}
// Sparse variant with the same exclusive upper bound as the dense one above.
template <int blockSize>
__device__ __forceinline__ void getTopK(Pair topK[],
real* val,
int* col,
int idx,
int dim,
const Pair& max,
int beamSize) {
while (idx < dim) {
if (topK[beamSize - 1] < val[idx]) {
Pair tmp(val[idx], col[idx]);
if (tmp < max) {
addTo(topK, tmp, beamSize);
}
}
idx += blockSize;
}
}
// Refill this thread's private candidate list after the block reduction has
// consumed `beam` entries from its head.
//
// First call: just fill the list from the thread's strided slice of src.
// Later calls: shift the surviving entries to the front, reset the freed tail
// slots to the (-HL_FLOAT_MAX, -1) sentinel, then gather new candidates --
// restricted to values strictly below `max` so nothing is emitted twice.
// On exit, `max` holds this thread's worst retained candidate (the upper
// bound for the next refill), isEmpty is set once the sentinel reaches the
// tail (no candidates left), and `beam` is reset to 0.
template <int maxLength, int blockSize>
__device__ __forceinline__ void threadGetTopK(Pair topK[],
int& beam,
int beamSize,
real* src,
bool& firstStep,
bool& isEmpty,
Pair& max,
int dim,
const int tid) {
if (beam > 0) {
int length = beam < beamSize ? beam : beamSize;
if (firstStep) {
firstStep = false;
getTopK<blockSize>(topK, src, tid, dim, length);
} else {
// Shift unconsumed entries up; clear the consumed slots at the tail.
for (int k = 0; k < maxLength; k++) {
if (k < maxLength - beam) {
topK[k] = topK[k + beam];
} else {
topK[k].set(-HL_FLOAT_MAX, -1);
}
}
if (!isEmpty) {
getTopK<blockSize>(topK + maxLength - beam, src, tid, dim, max, length);
}
}
max = topK[maxLength - 1];
if (max.id_ == -1) isEmpty = true;
beam = 0;
}
}
// Sparse (CSR) counterpart of the refill above: candidates come from
// val[]/col[] instead of a dense row; logic is otherwise identical.
template <int maxLength, int blockSize>
__device__ __forceinline__ void threadGetTopK(Pair topK[],
int& beam,
int beamSize,
real* val,
int* col,
bool& firstStep,
bool& isEmpty,
Pair& max,
int dim,
const int tid) {
if (beam > 0) {
int length = beam < beamSize ? beam : beamSize;
if (firstStep) {
firstStep = false;
getTopK<blockSize>(topK, val, col, tid, dim, length);
} else {
// Shift unconsumed entries up; clear the consumed slots at the tail.
for (int k = 0; k < maxLength; k++) {
if (k < maxLength - beam) {
topK[k] = topK[k + beam];
} else {
topK[k].set(-HL_FLOAT_MAX, -1);
}
}
if (!isEmpty) {
getTopK<blockSize>(
topK + maxLength - beam, val, col, tid, dim, max, length);
}
}
max = topK[maxLength - 1];
if (max.id_ == -1) isEmpty = true;
beam = 0;
}
}
// Emission step shared by all top-k kernels. shTopK[t] holds thread t's
// current best candidate. Each loop iteration:
//   1. tournament-reduces shTopK (through the maxId index tree) to find the
//      block-wide best candidate,
//   2. thread 0 appends it to **topVal / **topIds and advances both cursors,
//   3. the owning thread consumes one private entry (beam++) and republishes
//      its next candidate into shTopK.
// The loop ends when beamSize outputs have been produced, or when the owning
// thread's private list is exhausted (its `beam` count, broadcast with
// __shfl_sync, hits maxLength) -- the caller then refills the private lists
// via threadGetTopK and calls back in.
// beam and beamSize are in-out; topVal/topIds end up advanced past every
// entry written so far.
template <int maxLength, int blockSize>
__device__ __forceinline__ void blockReduce(Pair* shTopK,
int* maxId,
Pair topK[],
real** topVal,
int** topIds,
int& beam,
int& beamSize,
const int tid,
const int warp) {
while (true) {
__syncthreads();
// First tournament round: each of the lower blockSize/2 threads picks the
// larger of its own slot and the mirrored upper slot.
if (tid < blockSize / 2) {
if (shTopK[tid] < shTopK[tid + blockSize / 2]) {
maxId[tid] = tid + blockSize / 2;
} else {
maxId[tid] = tid;
}
}
__syncthreads();
// Remaining rounds reduce the index tree; maxId[0] ends up pointing at
// the block-wide best candidate.
for (int stride = blockSize / 4; stride > 0; stride = stride / 2) {
if (tid < stride) {
if (shTopK[maxId[tid]] < shTopK[maxId[tid + stride]]) {
maxId[tid] = maxId[tid + stride];
}
}
__syncthreads();
}
__syncthreads();
// Thread 0 writes the winner out and advances the output cursors.
if (tid == 0) {
**topVal = shTopK[maxId[0]].v_;
**topIds = shTopK[maxId[0]].id_;
(*topVal)++;
(*topIds)++;
}
// Winning thread marks one more private entry as consumed.
if (tid == maxId[0]) beam++;
if (--beamSize == 0) break;
__syncthreads();
// NOTE(zcd): temporary solution
unsigned mask = 0u;
CREATE_SHFL_MASK(mask, true);
// Winning thread republishes its next private candidate (if any).
if (tid == maxId[0]) {
if (beam < maxLength) {
shTopK[tid] = topK[beam];
}
}
// If the winner's private list is used up, the whole block bails out so
// the caller can refill; its beam count is broadcast within the warp.
if (maxId[0] / 32 == warp) {
if (__shfl_sync(mask, beam, (maxId[0]) % 32, 32) == maxLength) break;
}
}
}
/**
* Each block compute one sample.
* In a block:
* 1. every thread get top maxLength value;
* 2. merge to shTopK, block reduce and get max value;
* 3. go to the second setp, until one thread's topK value is null;
* 4. go to the first setp, until get the topK value.
*/
template <int maxLength, int blockSize>
__global__ void KeMatrixTopK(real* topVal,
int ldv,
int* topIds,
real* src,
int lds,
int dim,
int beamSize) {
// One block per row (sample): blockIdx.x selects the row; blockSize threads
// cooperate on it. ldv/lds are the row strides of topVal/src.
__shared__ Pair shTopK[blockSize];
__shared__ int maxId[blockSize / 2];
const int tid = threadIdx.x;
const int warp = threadIdx.x / 32;
// Advance all pointers to this block's row.
src += blockIdx.x * lds;
topVal += blockIdx.x * ldv;
topIds += blockIdx.x * beamSize;
Pair topK[maxLength]; // NOLINT  -- per-thread candidate list, best first
int beam = maxLength;  // number of topK slots needing a refill
Pair max;              // per-thread bound: worst candidate still retained
bool isEmpty = false;  // this thread has run out of candidates
bool firstStep = true;
// Initialize the private list with sentinels.
for (int k = 0; k < maxLength; k++) {
topK[k].set(-HL_FLOAT_MAX, -1);
}
// Alternate refill (per-thread) and emit (block-wide) until beamSize values
// have been written for this row.
while (beamSize) {
threadGetTopK<maxLength, blockSize>(
topK, beam, beamSize, src, firstStep, isEmpty, max, dim, tid);
shTopK[tid] = topK[0];
blockReduce<maxLength, blockSize>(
shTopK, maxId, topK, &topVal, &topIds, beam, beamSize, tid, warp);
}
}
// CSR top-k: one block per sparse row. Candidates are the row's stored values
// (val) with their original column indices (col) as the reported ids.
template <int maxLength, int blockSize>
__global__ void KeSMatrixTopK(real* topVal,
int ldv,
int* topIds,
real* val,
int* row,
int* col,
int beamSize) {
__shared__ Pair shTopK[blockSize];
__shared__ int maxId[blockSize / 2];
const int tid = threadIdx.x;
const int warp = threadIdx.x / 32;
topVal += blockIdx.x * ldv;
topIds += blockIdx.x * beamSize;
Pair topK[maxLength]; // NOLINT  -- per-thread candidate list, best first
int beam = maxLength;
Pair max;
bool isEmpty = false;
bool firstStep = true;
// Slice out this row's nonzeros from the CSR arrays.
int start = row[blockIdx.x];
int end = row[blockIdx.x + 1];
int dim = end - start;
val += start;
col += start;
if (beamSize > dim) {
// if the number of values to sort are less than the output size,
// use -1 to indicate the end of valid sorted values.
if (tid == 0) {
topIds[dim] = -1;
}
beamSize = dim;
}
for (int k = 0; k < maxLength; k++) {
topK[k].set(-HL_FLOAT_MAX, -1);
}
// Same refill/emit loop as the dense kernel, over the sparse row slice.
while (beamSize) {
threadGetTopK<maxLength, blockSize>(
topK, beam, beamSize, val, col, firstStep, isEmpty, max, dim, tid);
shTopK[tid] = topK[0];
blockReduce<maxLength, blockSize>(
shTopK, maxId, topK, &topVal, &topIds, beam, beamSize, tid, warp);
}
}
// Row-wise top-k of a dense [numSamples x dim] matrix src (row stride lds):
// writes the beamSize largest values per row into topVal (row stride ldv)
// and their column indices into topIds ([numSamples x beamSize]).
void hl_matrix_top_k(real* topVal,
int ldv,
int* topIds,
real* src,
int lds,
int dim,
int beamSize,
int numSamples) {
CHECK_NOTNULL(topVal);
CHECK_NOTNULL(topIds);
CHECK_NOTNULL(src);
// A row cannot yield more values than it holds.
if (beamSize > dim) beamSize = dim;
// One 256-thread block per row; 5 private candidates per thread per pass.
dim3 threads(256, 1);
dim3 grid(numSamples, 1);
hipLaunchKernelGGL(( KeMatrixTopK<5, 256>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
topVal, ldv, topIds, src, lds, dim, beamSize);
CHECK_SYNC("hl_matrix_top_k failed");
}
// Row-wise top-k of a CSR sparse matrix: per row, the beamSize largest stored
// values go into topVal and their column indices into topIds. Rows with fewer
// than beamSize nonzeros are terminated with a -1 id (see KeSMatrixTopK).
void hl_sparse_matrix_top_k(real* topVal,
int ldv,
int* topIds,
hl_sparse_matrix_s src,
int beamSize,
int numSamples) {
CHECK_NOTNULL(topVal);
CHECK_NOTNULL(topIds);
CHECK_NOTNULL(src);
// Only the CSR layout is supported.
CHECK_EQ(src->format, HL_SPARSE_CSR) << "sparse matrix format error!";
hl_csr_matrix csr = (hl_csr_matrix)src->matrix;
if (csr->csr_val == NULL || csr->csr_row == NULL || csr->csr_col == NULL) {
LOG(FATAL) << "parameter src is null!";
}
// One 256-thread block per row; 5 private candidates per thread per pass.
dim3 threads(256, 1);
dim3 grid(numSamples, 1);
hipLaunchKernelGGL(( KeSMatrixTopK<5, 256>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
topVal, ldv, topIds, csr->csr_val, csr->csr_row, csr->csr_col, beamSize);
CHECK_SYNC("hl_sparse_matrix_top_k failed");
}
/**
* Each block compute one sample.
* In a block:
* 1. every thread get top maxLength value;
* 2. merge to shTopK, block reduce and get max value;
* 3. go to the second setp, until one thread's topK value is null;
* 4. go to the first setp, until get the topK value.
*/
// Top-k classification error: identical top-k pass to KeMatrixTopK, then
// thread 0 sets recResult[row] = 0 if label[row] appears among the row's
// top beamSize ids, else 1.
template <int maxLength, int blockSize>
__global__ void KeMatrixTopKClassificationError(real* topVal,
int ldv,
int* topIds,
real* src,
int lds,
int dim,
int beamSize,
int* label,
real* recResult) {
__shared__ Pair shTopK[blockSize];
__shared__ int maxId[blockSize / 2];
const int tid = threadIdx.x;
const int warp = threadIdx.x / 32;
src += blockIdx.x * lds;
topVal += blockIdx.x * ldv;
topIds += blockIdx.x * beamSize;
Pair topK[maxLength]; // NOLINT  -- per-thread candidate list, best first
int beam = maxLength;
Pair max;
bool isEmpty = false;
bool firstStep = true;
int topkSize = beamSize;  // remember k; beamSize is consumed by the loop
for (int k = 0; k < maxLength; k++) {
topK[k].set(-HL_FLOAT_MAX, -1);
}
while (beamSize) {
threadGetTopK<maxLength, blockSize>(
topK, beam, beamSize, src, firstStep, isEmpty, max, dim, tid);
shTopK[tid] = topK[0];
blockReduce<maxLength, blockSize>(
shTopK, maxId, topK, &topVal, &topIds, beam, beamSize, tid, warp);
}
__syncthreads();
if (tid == 0) {
// blockReduce left topIds pointing one past the last written entry, so
// *--topIds walks the emitted ids from last to first.
for (int i = 0; i < topkSize; i++) {
if (*--topIds == label[blockIdx.x]) {
recResult[blockIdx.x] = 0;
break;
}
recResult[blockIdx.x] = 1.0f;
}
}
}
// Per-row top-k classification error of a dense [numSamples x dim] matrix:
// besides filling topVal/topIds like hl_matrix_top_k, writes recResult[row]
// = 0 when label[row] is among the row's top topkSize ids, else 1.
void hl_matrix_classification_error(real* topVal,
int ldv,
int* topIds,
real* src,
int lds,
int dim,
int topkSize,
int numSamples,
int* label,
real* recResult) {
CHECK_NOTNULL(topVal);
CHECK_NOTNULL(topIds);
CHECK_NOTNULL(src);
// A row cannot yield more values than it holds.
if (topkSize > dim) topkSize = dim;
// One 256-thread block per row; 5 private candidates per thread per pass.
dim3 threads(256, 1);
dim3 grid(numSamples, 1);
hipLaunchKernelGGL(( KeMatrixTopKClassificationError<5, 256>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
topVal, ldv, topIds, src, lds, dim, topkSize, label, recResult);
CHECK_SYNC("hl_matrix_top_k classification error failed");
}
| 55c352a1850cda7f073795a1a2b9f0002bdb2ac8.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/legacy/cuda/include/hl_base.h"
#include "paddle/legacy/cuda/include/hl_sparse.ph"
#include "paddle/legacy/cuda/include/hl_top_k.h"
#include "paddle/legacy/utils/Logging.h"
// using namespace hppl;
struct Pair {
__device__ __forceinline__ Pair() {}
__device__ __forceinline__ Pair(real value, int id) : v_(value), id_(id) {}
__device__ __forceinline__ void set(real value, int id) {
v_ = value;
id_ = id;
}
__device__ __forceinline__ void operator=(const Pair& in) {
v_ = in.v_;
id_ = in.id_;
}
__device__ __forceinline__ bool operator<(const real value) const {
return (v_ < value);
}
__device__ __forceinline__ bool operator<(const Pair& in) const {
return (v_ < in.v_) || ((v_ == in.v_) && (id_ > in.id_));
}
__device__ __forceinline__ bool operator>(const Pair& in) const {
return (v_ > in.v_) || ((v_ == in.v_) && (id_ < in.id_));
}
real v_;
int id_;
};
__device__ __forceinline__ void addTo(Pair topK[],
const Pair& p,
int beamSize) {
for (int k = beamSize - 2; k >= 0; k--) {
if (topK[k] < p) {
topK[k + 1] = topK[k];
} else {
topK[k + 1] = p;
return;
}
}
topK[0] = p;
}
template <int beamSize>
__device__ __forceinline__ void addTo(Pair topK[], const Pair& p) {
for (int k = beamSize - 2; k >= 0; k--) {
if (topK[k] < p) {
topK[k + 1] = topK[k];
} else {
topK[k + 1] = p;
return;
}
}
topK[0] = p;
}
template <int blockSize>
__device__ __forceinline__ void getTopK(
Pair topK[], real* src, int idx, int dim, int beamSize) {
while (idx < dim) {
if (topK[beamSize - 1] < src[idx]) {
Pair tmp(src[idx], idx);
addTo(topK, tmp, beamSize);
}
idx += blockSize;
}
}
template <int blockSize>
__device__ __forceinline__ void getTopK(
Pair topK[], real* src, int idx, int dim, const Pair& max, int beamSize) {
while (idx < dim) {
if (topK[beamSize - 1] < src[idx]) {
Pair tmp(src[idx], idx);
if (tmp < max) {
addTo(topK, tmp, beamSize);
}
}
idx += blockSize;
}
}
template <int blockSize>
__device__ __forceinline__ void getTopK(
Pair topK[], real* val, int* col, int idx, int dim, int beamSize) {
while (idx < dim) {
if (topK[beamSize - 1] < val[idx]) {
Pair tmp(val[idx], col[idx]);
addTo(topK, tmp, beamSize);
}
idx += blockSize;
}
}
template <int blockSize>
__device__ __forceinline__ void getTopK(Pair topK[],
real* val,
int* col,
int idx,
int dim,
const Pair& max,
int beamSize) {
while (idx < dim) {
if (topK[beamSize - 1] < val[idx]) {
Pair tmp(val[idx], col[idx]);
if (tmp < max) {
addTo(topK, tmp, beamSize);
}
}
idx += blockSize;
}
}
template <int maxLength, int blockSize>
__device__ __forceinline__ void threadGetTopK(Pair topK[],
int& beam,
int beamSize,
real* src,
bool& firstStep,
bool& isEmpty,
Pair& max,
int dim,
const int tid) {
if (beam > 0) {
int length = beam < beamSize ? beam : beamSize;
if (firstStep) {
firstStep = false;
getTopK<blockSize>(topK, src, tid, dim, length);
} else {
for (int k = 0; k < maxLength; k++) {
if (k < maxLength - beam) {
topK[k] = topK[k + beam];
} else {
topK[k].set(-HL_FLOAT_MAX, -1);
}
}
if (!isEmpty) {
getTopK<blockSize>(topK + maxLength - beam, src, tid, dim, max, length);
}
}
max = topK[maxLength - 1];
if (max.id_ == -1) isEmpty = true;
beam = 0;
}
}
template <int maxLength, int blockSize>
__device__ __forceinline__ void threadGetTopK(Pair topK[],
int& beam,
int beamSize,
real* val,
int* col,
bool& firstStep,
bool& isEmpty,
Pair& max,
int dim,
const int tid) {
if (beam > 0) {
int length = beam < beamSize ? beam : beamSize;
if (firstStep) {
firstStep = false;
getTopK<blockSize>(topK, val, col, tid, dim, length);
} else {
for (int k = 0; k < maxLength; k++) {
if (k < maxLength - beam) {
topK[k] = topK[k + beam];
} else {
topK[k].set(-HL_FLOAT_MAX, -1);
}
}
if (!isEmpty) {
getTopK<blockSize>(
topK + maxLength - beam, val, col, tid, dim, max, length);
}
}
max = topK[maxLength - 1];
if (max.id_ == -1) isEmpty = true;
beam = 0;
}
}
template <int maxLength, int blockSize>
__device__ __forceinline__ void blockReduce(Pair* shTopK,
int* maxId,
Pair topK[],
real** topVal,
int** topIds,
int& beam,
int& beamSize,
const int tid,
const int warp) {
while (true) {
__syncthreads();
if (tid < blockSize / 2) {
if (shTopK[tid] < shTopK[tid + blockSize / 2]) {
maxId[tid] = tid + blockSize / 2;
} else {
maxId[tid] = tid;
}
}
__syncthreads();
for (int stride = blockSize / 4; stride > 0; stride = stride / 2) {
if (tid < stride) {
if (shTopK[maxId[tid]] < shTopK[maxId[tid + stride]]) {
maxId[tid] = maxId[tid + stride];
}
}
__syncthreads();
}
__syncthreads();
if (tid == 0) {
**topVal = shTopK[maxId[0]].v_;
**topIds = shTopK[maxId[0]].id_;
(*topVal)++;
(*topIds)++;
}
if (tid == maxId[0]) beam++;
if (--beamSize == 0) break;
__syncthreads();
// NOTE(zcd): temporary solution
unsigned mask = 0u;
CREATE_SHFL_MASK(mask, true);
if (tid == maxId[0]) {
if (beam < maxLength) {
shTopK[tid] = topK[beam];
}
}
if (maxId[0] / 32 == warp) {
if (__shfl_sync(mask, beam, (maxId[0]) % 32, 32) == maxLength) break;
}
}
}
/**
* Each block compute one sample.
* In a block:
* 1. every thread get top maxLength value;
* 2. merge to shTopK, block reduce and get max value;
* 3. go to the second setp, until one thread's topK value is null;
* 4. go to the first setp, until get the topK value.
*/
template <int maxLength, int blockSize>
__global__ void KeMatrixTopK(real* topVal,
int ldv,
int* topIds,
real* src,
int lds,
int dim,
int beamSize) {
__shared__ Pair shTopK[blockSize];
__shared__ int maxId[blockSize / 2];
const int tid = threadIdx.x;
const int warp = threadIdx.x / 32;
src += blockIdx.x * lds;
topVal += blockIdx.x * ldv;
topIds += blockIdx.x * beamSize;
Pair topK[maxLength]; // NOLINT
int beam = maxLength;
Pair max;
bool isEmpty = false;
bool firstStep = true;
for (int k = 0; k < maxLength; k++) {
topK[k].set(-HL_FLOAT_MAX, -1);
}
while (beamSize) {
threadGetTopK<maxLength, blockSize>(
topK, beam, beamSize, src, firstStep, isEmpty, max, dim, tid);
shTopK[tid] = topK[0];
blockReduce<maxLength, blockSize>(
shTopK, maxId, topK, &topVal, &topIds, beam, beamSize, tid, warp);
}
}
template <int maxLength, int blockSize>
__global__ void KeSMatrixTopK(real* topVal,
int ldv,
int* topIds,
real* val,
int* row,
int* col,
int beamSize) {
__shared__ Pair shTopK[blockSize];
__shared__ int maxId[blockSize / 2];
const int tid = threadIdx.x;
const int warp = threadIdx.x / 32;
topVal += blockIdx.x * ldv;
topIds += blockIdx.x * beamSize;
Pair topK[maxLength]; // NOLINT
int beam = maxLength;
Pair max;
bool isEmpty = false;
bool firstStep = true;
int start = row[blockIdx.x];
int end = row[blockIdx.x + 1];
int dim = end - start;
val += start;
col += start;
if (beamSize > dim) {
// if the number of values to sort are less than the output size,
// use -1 to indicate the end of valid sorted values.
if (tid == 0) {
topIds[dim] = -1;
}
beamSize = dim;
}
for (int k = 0; k < maxLength; k++) {
topK[k].set(-HL_FLOAT_MAX, -1);
}
while (beamSize) {
threadGetTopK<maxLength, blockSize>(
topK, beam, beamSize, val, col, firstStep, isEmpty, max, dim, tid);
shTopK[tid] = topK[0];
blockReduce<maxLength, blockSize>(
shTopK, maxId, topK, &topVal, &topIds, beam, beamSize, tid, warp);
}
}
void hl_matrix_top_k(real* topVal,
int ldv,
int* topIds,
real* src,
int lds,
int dim,
int beamSize,
int numSamples) {
CHECK_NOTNULL(topVal);
CHECK_NOTNULL(topIds);
CHECK_NOTNULL(src);
if (beamSize > dim) beamSize = dim;
dim3 threads(256, 1);
dim3 grid(numSamples, 1);
KeMatrixTopK<5, 256><<<grid, threads, 0, STREAM_DEFAULT>>>(
topVal, ldv, topIds, src, lds, dim, beamSize);
CHECK_SYNC("hl_matrix_top_k failed");
}
// Host wrapper: launch top-k over a CSR sparse matrix, one block per row.
// Requires src to be in HL_SPARSE_CSR format with all three CSR arrays set.
void hl_sparse_matrix_top_k(real* topVal,
                            int ldv,
                            int* topIds,
                            hl_sparse_matrix_s src,
                            int beamSize,
                            int numSamples) {
  CHECK_NOTNULL(topVal);
  CHECK_NOTNULL(topIds);
  CHECK_NOTNULL(src);
  CHECK_EQ(src->format, HL_SPARSE_CSR) << "sparse matrix format error!";

  hl_csr_matrix csr_mat = (hl_csr_matrix)src->matrix;
  if (csr_mat->csr_val == NULL || csr_mat->csr_row == NULL ||
      csr_mat->csr_col == NULL) {
    LOG(FATAL) << "parameter src is null!";
  }

  // One block of 256 threads per sample; each thread tracks up to 5 candidates.
  dim3 threads(256, 1);
  dim3 grid(numSamples, 1);
  KeSMatrixTopK<5, 256><<<grid, threads, 0, STREAM_DEFAULT>>>(
      topVal, ldv, topIds, csr_mat->csr_val, csr_mat->csr_row,
      csr_mat->csr_col, beamSize);

  CHECK_SYNC("hl_sparse_matrix_top_k failed");
}
/**
 * Each block computes one sample.
 * In a block:
 * 1. every thread gets its top maxLength values;
 * 2. merge to shTopK, block reduce and get the max value;
 * 3. go to the second step, until one thread's topK values are exhausted;
 * 4. go to the first step, until all topK values are produced.
 */
// Top-k classification error: one block per sample. Computes the top
// beamSize values of a row of src, then sets recResult[sample] to 0 if the
// sample's label is among the top-k ids, or 1.0f otherwise.
template <int maxLength, int blockSize>
__global__ void KeMatrixTopKClassificationError(real* topVal,
                                                int ldv,
                                                int* topIds,
                                                real* src,
                                                int lds,
                                                int dim,
                                                int beamSize,
                                                int* label,
                                                real* recResult) {
  __shared__ Pair shTopK[blockSize];
  __shared__ int maxId[blockSize / 2];
  const int tid = threadIdx.x;
  const int warp = threadIdx.x / 32;
  // One block per sample: offset the pointers to this sample's row/outputs.
  src += blockIdx.x * lds;
  topVal += blockIdx.x * ldv;
  topIds += blockIdx.x * beamSize;
  // Per-thread candidate list, initialised below to sentinel minimums.
  Pair topK[maxLength];  // NOLINT
  int beam = maxLength;
  Pair max;
  bool isEmpty = false;
  bool firstStep = true;
  // Remember the requested k: beamSize is consumed by the loop below.
  int topkSize = beamSize;
  for (int k = 0; k < maxLength; k++) {
    topK[k].set(-HL_FLOAT_MAX, -1);
  }
  // Emit the top-k values/ids via repeated thread-local fill + block reduce.
  while (beamSize) {
    threadGetTopK<maxLength, blockSize>(
        topK, beam, beamSize, src, firstStep, isEmpty, max, dim, tid);
    shTopK[tid] = topK[0];
    blockReduce<maxLength, blockSize>(
        shTopK, maxId, topK, &topVal, &topIds, beam, beamSize, tid, warp);
  }
  __syncthreads();
  if (tid == 0) {
    // blockReduce receives &topIds, so topIds is presumably advanced past the
    // last id written — walk it backwards over the topkSize emitted ids.
    // NOTE(review): verify against blockReduce's pointer semantics.
    for (int i = 0; i < topkSize; i++) {
      if (*--topIds == label[blockIdx.x]) {
        recResult[blockIdx.x] = 0;  // label found in top-k: no error
        break;
      }
      recResult[blockIdx.x] = 1.0f;  // not found (so far): error
    }
  }
}
// Host wrapper: launch top-k classification error, one block per sample.
// recResult[i] becomes 0 when label[i] is among the top topkSize entries of
// row i of src, and 1.0f otherwise.
void hl_matrix_classification_error(real* topVal,
                                    int ldv,
                                    int* topIds,
                                    real* src,
                                    int lds,
                                    int dim,
                                    int topkSize,
                                    int numSamples,
                                    int* label,
                                    real* recResult) {
  CHECK_NOTNULL(topVal);
  CHECK_NOTNULL(topIds);
  CHECK_NOTNULL(src);

  // Clamp the requested top-k to the row width.
  if (topkSize > dim) {
    topkSize = dim;
  }

  // One block of 256 threads per sample; each thread tracks up to 5 candidates.
  dim3 threads(256, 1);
  dim3 grid(numSamples, 1);
  KeMatrixTopKClassificationError<5, 256><<<grid, threads, 0, STREAM_DEFAULT>>>(
      topVal, ldv, topIds, src, lds, dim, topkSize, label, recResult);

  CHECK_SYNC("hl_matrix_top_k classification error failed");
}
|
1bdff5127683b20eed5fce7af36029f9667bb4e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
The implementation of this file is based on qkvToContext plugin in TensorRT demo:
https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Modifications:
// (1) support GPT-2 past state, unidirectional mask and 4D attention mask from Megatron
// (2) support 2D attention mask
// (3) allow persistent softmax from PyTorch for debugging purpose.
// (4) support different input hidden size and model hidden size for pruned model
// (5) support different hidden sizes of Q/K and V
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <cassert>
#include <hip/hip_fp16.h>
#include <hipcub/hipcub.hpp>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/shared_inc/fpgeneric.h"
#include "contrib_ops/cuda/bert/attention_impl.h"
#include "contrib_ops/cuda/bert/attention_softmax.h"
#include "contrib_ops/cuda/bert/transformer_common.h"
#include "contrib_ops/cuda/bert/add_bias_transpose.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/mha_runner.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/cross_attention/fmha_cross_attention.h"
#include "contrib_ops/cpu/bert/attention_base.h"
#include "contrib_ops/cuda/bert/bert_padding.h"
#include "contrib_ops/cuda/transformers/dump_cuda_tensor.h"
#include "contrib_ops/cuda/bert/cutlass_fmha/memory_efficient_attention.h"
using namespace onnxruntime::cuda;
using namespace cub;
namespace onnxruntime {
namespace contrib {
namespace cuda {
// Alignment (in bytes) applied to all workspace partitions below.
constexpr size_t kMemoryAlignment = 256;

// Round `a` up to the nearest multiple of `b`.
static size_t AlignTo(size_t a, size_t b) {
  const size_t num_blocks = CeilDiv(a, b);
  return num_blocks * b;
}
// Round a byte count up to the global kMemoryAlignment boundary.
size_t AlignSize(size_t bytes) {
  return AlignTo(bytes, kMemoryAlignment);
}
// (Re)build the cached cumulated-sequence-length buffer when the sequence
// length changes. The buffer must have been pre-allocated for max_batch_size
// entries before this is called (enforced below).
void CumulatedSequenceLengthCache::Initialize(int32_t sequence_length, hipStream_t stream) {
  if (this->sequence_length != sequence_length) {
    ORT_ENFORCE(buffer.get() != nullptr && this->max_batch_size > 0);
    // nullptr mask: offsets assume fully-dense sequences of `sequence_length`.
    LaunchTrtSequenceOffset(reinterpret_cast<int32_t*>(buffer.get()), nullptr, this->max_batch_size, sequence_length, stream);
    this->sequence_length = sequence_length;
  }
}
// Return a device pointer to TRT-style cumulated sequence lengths.
// Reuses the cache when possible (no mask, cache present, batch fits);
// otherwise the offsets are computed into scratch_buffer on `stream`.
int* GetCumulatedSequenceLength(CumulatedSequenceLengthCache* cache,
                                const int* mask_index,
                                int batch_size,
                                int sequence_length,
                                hipStream_t stream,
                                void* scratch_buffer) {
  const bool can_use_cache = (mask_index == nullptr) && (cache != nullptr) &&
                             (batch_size <= cache->max_batch_size);
  if (can_use_cache) {
    cache->Initialize(sequence_length, stream);
    return reinterpret_cast<int*>(cache->buffer.get());
  }

  int* sequence_offset = reinterpret_cast<int*>(scratch_buffer);
  LaunchTrtSequenceOffset(sequence_offset, mask_index, batch_size, sequence_length, stream);
  return sequence_offset;
}
// Aligned byte count for one attention score matrix of shape B x N x S x T.
size_t GetAttentionScratchSize(
    size_t element_size,
    size_t batch_size,
    size_t num_heads,
    size_t sequence_length,
    size_t total_sequence_length) {
  size_t bytes = element_size * batch_size * num_heads;
  bytes *= sequence_length;
  bytes *= total_sequence_length;
  return AlignSize(bytes);
}
// Aligned byte count for a sequence-offset array.
// There are batch_size + 1 offsets without padding (or with padding removed),
// and 2 * batch_size + 1 when padding is present.
size_t GetSequenceOffsetSize(int batch_size, bool has_padding) {
  size_t bytes = sizeof(int) * ((has_padding ? 2 * batch_size : batch_size) + 1);
  // Removed a stray empty statement (`;`) that followed the return.
  return AlignSize(bytes);
}
// Total workspace bytes needed by QkvToContext for one attention call.
// Layout: transposed Q/K/V buffers (qkv_bytes) followed by kernel-specific
// extra space depending on which (at most one) fused path is active:
//   - memory efficient attention: optional float accumulation buffer
//   - TRT fused runner:           one sequence-offset array
//   - fused cross attention:      two sequence-offset arrays
//   - unfused:                    two score matrices (Q*K' and softmax output)
size_t GetAttentionWorkspaceSize(
    size_t element_size,
    size_t batch_size,
    size_t num_heads,
    size_t qk_head_size,
    size_t v_head_size,
    size_t sequence_length,
    size_t kv_sequence_length,
    size_t total_sequence_length,
    void* fused_runner,
    bool use_fused_cross_attention,
    bool use_memory_efficient_attention) {
  // Note that q, k and v might need alignment for fused attention kernels.
  const size_t qkv_bytes = element_size * batch_size * num_heads *
                           ((sequence_length + kv_sequence_length) * qk_head_size + kv_sequence_length * v_head_size);
#if USE_FLASH_ATTENTION
  if (use_memory_efficient_attention) {
    size_t fmha_buffer_bytes = 0;
    // A float accumulation buffer is only needed for some head sizes/dtypes.
    if (MemoryEfficientAttentionParams::need_workspace(v_head_size, element_size == sizeof(float))) {
      fmha_buffer_bytes = batch_size * sequence_length * num_heads * v_head_size * sizeof(float);
    }
    return qkv_bytes + fmha_buffer_bytes;
  }
#else
  ORT_UNUSED_PARAMETER(use_memory_efficient_attention);
#endif
  if (fused_runner != nullptr) {
    return qkv_bytes + GetSequenceOffsetSize(static_cast<int>(batch_size), true);
  }
  if (use_fused_cross_attention) {
    // Separate offset arrays for Q and KV sequences.
    return qkv_bytes + 2 * GetSequenceOffsetSize(static_cast<int>(batch_size), true);
  }
  return qkv_bytes + 2 * GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length,
                                                 total_sequence_length);
}
// Append the K and V parts of a fused QKV tensor (optionally adding bias)
// into the present KV cache. "Small" variant: launched (see the host launcher
// below) with grid (S, B, 2) and block (max_threads/N, N), i.e. it requires
// N * head_size <= max threads per block.
template <typename T>
__global__ void AddBiasTransAppendKvToPresentSmall(
    const T* qkv, const T* biases, T* present,
    const int head_size, const int past_sequence_length, const int max_sequence_length) {
  // Input: BxSxMxNxH  (Format 1)
  // Output: (2, B, N, [P..P+S) of MaxS, H),
  // B is batch_size, S is sequence_length, M is number of matrices, N is num_heads, H is head_size
  const int n = threadIdx.y;  // head index
  const int s = blockIdx.x;   // sequence position
  const int b = blockIdx.y;   // batch index
  const int N = blockDim.y;
  const int S = gridDim.x;
  const int B = gridDim.y;
  constexpr int M = 3;           // Matrix count in qkv
  const int m = blockIdx.z + 1;  // k = 1, v = 2
  const int NH = N * head_size;
  const int NHS = NH * S;
  // Advance qkv to element (b, s, m, n, 0) of the BxSxMxNxH input.
  qkv += (n * head_size + (s * M + m) * NH + b * M * NHS);
  if (biases) {
    // Bias is laid out as (M, N, H); select matrix m, head n.
    biases += (m * NH + n * head_size);
  }
  const int MsH = max_sequence_length * head_size;
  const int NMsH = N * MsH;
  const int BNMsH = B * NMsH;
  // Advance present to element (m-1, b, n, past+s, 0) of the 2xBxNxMaxSxH cache.
  present += ((past_sequence_length + s) * head_size + n * MsH + b * NMsH + (m - 1) * BNMsH);
  // Copy one head vector, adding bias when provided; x-threads stride over H.
  for (int h = threadIdx.x; h < head_size; h += blockDim.x) {
    T bias = (biases ? biases[h] : (T)0.0f);
    present[h] = qkv[h] + bias;
  }
}
// Append the K and V parts of a fused QKV tensor (optionally adding bias)
// into the present KV cache. General variant: launched (see the host launcher
// below) with grid (N, S, 2*B) and a 1-D block looping over head_size, used
// when N * head_size exceeds the threads-per-block limit.
template <typename T>
__global__ void AddBiasTransAppendKvToPresent(
    const T* qkv, const T* biases, T* present,
    const int head_size, const int past_sequence_length, const int max_sequence_length) {
  // Input: BxSxMxNxH  (Format 1)
  // Output: (2, B, N, [P..P+S) of MaxS, H),
  // B is batch_size, S is sequence_length, M is number of matrices, N is num_heads, H is head_size
  const int n = blockIdx.x;         // head index
  const int s = blockIdx.y;         // sequence position
  const int b = (blockIdx.z >> 1);  // batch index (z packs batch and k/v)
  const int N = gridDim.x;
  const int S = gridDim.y;
  const int B = (gridDim.z >> 1);
  constexpr int M = 3;                  // Matrix count in qkv
  const int m = (blockIdx.z & 0x1) + 1;  // k = 1, v = 2
  const int NH = N * head_size;
  const int NHS = NH * S;
  // Advance qkv to element (b, s, m, n, 0) of the BxSxMxNxH input.
  qkv += (n * head_size + (s * M + m) * NH + b * M * NHS);
  if (biases) {
    // Bias is laid out as (M, N, H); select matrix m, head n.
    biases += (m * NH + n * head_size);
  }
  const int MsH = max_sequence_length * head_size;
  const int NMsH = N * MsH;
  const int BNMsH = B * NMsH;
  // Advance present to element (m-1, b, n, past+s, 0) of the 2xBxNxMaxSxH cache.
  present += ((past_sequence_length + s) * head_size + n * MsH + b * NMsH + (m - 1) * BNMsH);
  // Copy one head vector, adding bias when provided; threads stride over H.
  for (int h = threadIdx.x; h < head_size; h += blockDim.x) {
    T bias = (biases ? biases[h] : (T)0.0f);
    present[h] = qkv[h] + bias;
  }
}
// qkv buffer is merged tensor of shape (B,S,3,N,H), k v is the second/third of the 3.
// bias is of shape (3, NxH) or nullptr
// append to present of (2, B, N, (P..T) of M, H),
// Host launcher: pick the kernel variant by whether all heads fit in one
// thread block, then append K/V (plus optional bias) from qkv_buffer into
// the present cache on `stream`. Returns the status of the launch.
template <typename T>
Status LaunchAddBiasTransAppendKvToPresent(hipStream_t stream,
                                           const int max_sequence_length,
                                           const int past_sequence_length,
                                           const int sequence_length,
                                           const int batch_size,
                                           const int head_size,
                                           const int num_heads,
                                           const int max_threads_per_block,
                                           const T* biases,
                                           const T* qkv_buffer,
                                           T* present) {
  assert(head_size <= (1 << 30));
  int64_t nh = (int64_t)head_size * num_heads;
  if (nh <= max_threads_per_block) {
    // All heads fit in one block: one y-lane per head, x-threads span the head.
    const dim3 grid(sequence_length, batch_size, 2);  // 2 for k and v
    const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
    hipLaunchKernelGGL(( AddBiasTransAppendKvToPresentSmall<T>), dim3(grid), dim3(block), 0, stream,
        qkv_buffer, biases, present, head_size, past_sequence_length, max_sequence_length);
  } else {
    // One block per (head, sequence position, batch*2); kernel loops over H.
    const dim3 grid(num_heads, sequence_length, batch_size * 2);  // 2 for k and v
    const dim3 block(::min(head_size, max_threads_per_block), 1, 1);
    hipLaunchKernelGGL(( AddBiasTransAppendKvToPresent<T>), dim3(grid), dim3(block), 0, stream,
        qkv_buffer, biases, present, head_size, past_sequence_length, max_sequence_length);
  }
  return CUDA_CALL(hipGetLastError());
}
// Explicit instantiations for the element types used by the attention op.
// NOTE(review): the second argument is named total_sequence_length here but
// past_sequence_length in the primary template; parameter names in explicit
// instantiations carry no meaning — TODO align for readability.
template Status LaunchAddBiasTransAppendKvToPresent(hipStream_t stream,
                                                    const int max_sequence_length,
                                                    const int total_sequence_length,
                                                    const int sequence_length,
                                                    const int batch_size,
                                                    const int head_size,
                                                    const int num_heads,
                                                    const int max_threads_per_block,
                                                    const float* bias,
                                                    const float* qkv_buffer,
                                                    float* present);
template Status LaunchAddBiasTransAppendKvToPresent(hipStream_t stream,
                                                    const int max_sequence_length,
                                                    const int total_sequence_length,
                                                    const int sequence_length,
                                                    const int batch_size,
                                                    const int head_size,
                                                    const int num_heads,
                                                    const int max_threads_per_block,
                                                    const half* bias,
                                                    const half* qkv_buffer,
                                                    half* present);
// Prepare Q, K and V buffers in the layout (qkv_format) expected by the
// selected attention kernel (fused TRT, fused cross attention, memory
// efficient, fused causal, or unfused). q/k/v point into data.workspace;
// for packed inputs without bias the original input tensors are used as-is.
// Fix: the debug dump of q in the packed-qkv branch was mislabelled "k(BSNH)".
template <typename T>
Status PrepareQkv(contrib::AttentionParameters& parameters,
                  AttentionData<T>& data,
                  hipStream_t stream,
                  int max_threads_per_block,
                  T* q, T* k, T* v, AttentionQkvFormat& qkv_format) {
  const int batch_size = parameters.batch_size;
  const int sequence_length = parameters.sequence_length;
  const int kv_sequence_length = parameters.kv_sequence_length;
  const int num_heads = parameters.num_heads;
  const int qk_head_size = parameters.head_size;
  const int v_head_size = parameters.v_head_size;
  const bool past_present_share_buffer = parameters.past_present_share_buffer;
  void* fused_runner = data.fused_runner;
  bool use_memory_efficient_attention = data.use_memory_efficient_attention;
  T* qkv = data.workspace;
  bool use_fused_kernel = (nullptr != fused_runner && !parameters.is_unidirectional);
  bool use_fused_causal = (nullptr != fused_runner && parameters.is_unidirectional);
  // Default format for memory efficient attention.
  // When there is past state, the format shall be BxNxSxH, so we disable memory efficient attention when there is past.
  DUMP_TENSOR_INIT();
  if (nullptr != data.gemm_buffer) {
    if (data.bias == nullptr) {
      assert(nullptr == fused_runner);
      // For quantized attention, bias has been added so only need transpose here.
      // gemm_buffer should be BxSx3xNxH => qkv: 3xBxNxSxH
      assert(qk_head_size == v_head_size);
      int matrix_to_trans = (past_present_share_buffer ? 1 : 3);
      ORT_RETURN_IF_ERROR(LaunchTransQkv(stream, matrix_to_trans, sequence_length, batch_size, qk_head_size, num_heads,
                                         max_threads_per_block, false, data.gemm_buffer, qkv, 3));
      qkv_format = AttentionQkvFormat::Q_K_V_BNSH;
    } else {
      // For fused TRT attention, transpose qkv to BxSxNx3xH (format 2)
      // For memory efficient attention, transpose to 3xBxSxNxH (format 3)
      // For unfused kernel, transpose to 3xBxNxSxH (format 1)
      // For fused causal kernel, use format 1 since we need to have K and V to update present state,
      //   at the same time, we update gemm_buffer BxSx3xNxH with bias which is used as input for fused causal kernel.
      const int format = (use_fused_kernel ? 2 : (use_memory_efficient_attention ? 3 : 1));
      qkv_format = use_fused_kernel
                       ? AttentionQkvFormat::QKV_BSN3H
                       : (use_memory_efficient_attention
                              ? AttentionQkvFormat::Q_K_V_BSNH
                              : (use_fused_causal ? AttentionQkvFormat::Q_K_V_BNSH_QKV_BS3NH : AttentionQkvFormat::Q_K_V_BNSH));
      // For fused causal, we will update gemm_buffer with bias directly.
      T* qkv_add_bias = use_fused_causal ? data.gemm_buffer : nullptr;
      int matrix_to_transpose = ((format == AttentionQkvFormat::Q_K_V_BNSH && past_present_share_buffer) ? 1 : 3);
      // format 1: BxSx(NH + NH + NH_v) => BxNxSxH + BxNxSxH + BxNxSxH_v
      // format 2: BxSx(NH + NH + NH) => BxSxNx(H + H + H)
      LaunchAddBiasTranspose(stream, matrix_to_transpose, format, max_threads_per_block,
                             batch_size, sequence_length, num_heads, qk_head_size,
                             data.gemm_buffer, data.bias, qkv, true, v_head_size, qkv_add_bias,
                             3, parameters.do_rotary, parameters.original_past_sequence_length);
    }
  } else if (data.key == nullptr) {  // gemm_buffer == nullptr and packed qkv
    assert(data.bias == nullptr);
    assert(qk_head_size == v_head_size);
    DUMP_TENSOR_D("packed_qkv", data.query, batch_size * sequence_length, num_heads, 3, qk_head_size);
    if (use_memory_efficient_attention) {
      // unpack qkv to BSNH. Note that there is no bias so we need not output query to q.
      constexpr int format = 4;
      T* qkv_add_bias = nullptr;
      LaunchAddBiasTranspose(stream, 3, format, max_threads_per_block,
                             batch_size, sequence_length, num_heads, qk_head_size,
                             data.query, data.bias, qkv,
                             true, v_head_size, qkv_add_bias, 3);
      // Fixed label: this dump previously reused "k(BSNH)" for the q tensor.
      DUMP_TENSOR_D("q(BSNH)", q, batch_size * sequence_length, num_heads, qk_head_size);
      DUMP_TENSOR_D("k(BSNH)", k, batch_size * kv_sequence_length, num_heads, qk_head_size);
      DUMP_TENSOR_D("v(BSNH)", v, batch_size * kv_sequence_length, num_heads, v_head_size);
      qkv_format = AttentionQkvFormat::Q_K_V_BSNH;
    } else {
      if (!use_fused_kernel) {
        return ORT_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED, "packed QKV format is not implemented for current GPU. Please disable it in fusion options.");
      }
      qkv_format = AttentionQkvFormat::QKV_BSN3H;
    }
  } else if (data.value == nullptr) {  // gemm_buffer == nullptr and packed kv
    // TODO: unpack kv to BNSH for unfused kernel so that we can remove the following constraint.
    // CheckInputs verified this constraint.
    assert(data.bias == nullptr);
    assert(qk_head_size == v_head_size);
    DUMP_TENSOR_D("packed_kv", data.key, batch_size * kv_sequence_length, num_heads, 2, qk_head_size);
    if (use_memory_efficient_attention) {
      // unpack kv to BSNH. Note that there is no bias so we need not output query to q.
      constexpr int format = 4;
      T* qkv_add_bias = nullptr;
      const T* kv_bias = (data.bias == nullptr ? data.bias : data.bias + parameters.hidden_size);
      LaunchAddBiasTranspose(stream, 2, format, max_threads_per_block,
                             batch_size, kv_sequence_length, num_heads, qk_head_size,
                             data.key, kv_bias, k,
                             true, v_head_size, qkv_add_bias, 2);
      DUMP_TENSOR_D("k(BSNH)", k, batch_size * kv_sequence_length, num_heads, qk_head_size);
      DUMP_TENSOR_D("v(BSNH)", v, batch_size * kv_sequence_length, num_heads, v_head_size);
      qkv_format = AttentionQkvFormat::Q_K_V_BSNH;
    } else {
      if (data.fused_cross_attention_kernel == nullptr) {
        return ORT_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED, "packed KV format is not implemented for current GPU. Please disable packed kv in fusion options.");
      }
      qkv_format = AttentionQkvFormat::Q_KV_BSNH_BSN2H;
    }
  } else {  // gemm_buffer == nullptr and not packed
    assert(data.query != nullptr && data.key != nullptr && data.value != nullptr && data.bias != nullptr);
    DUMP_TENSOR_D("query", data.query, batch_size * sequence_length, num_heads, qk_head_size);
    DUMP_TENSOR_D("query_bias", data.bias, num_heads, qk_head_size);
    DUMP_TENSOR_D("key", data.key, batch_size * kv_sequence_length, num_heads, qk_head_size);
    DUMP_TENSOR_D("key_bias", data.bias + num_heads * qk_head_size, num_heads, qk_head_size);
    DUMP_TENSOR_D("value", data.value, batch_size * kv_sequence_length, num_heads, v_head_size);
    DUMP_TENSOR_D("value_bias", data.bias + 2 * num_heads * qk_head_size, num_heads, v_head_size);
    if (data.fused_cross_attention_kernel != nullptr) {
      assert(qk_head_size == v_head_size);
      // For fused cross attention, besides adding bias, K and V needed to be packed:
      //   K (BxSxNxH), V (BxSxNxH) => BxSxNx2xH
      LaunchAddBiasTransposeTrt(
          stream, max_threads_per_block,
          batch_size, sequence_length,
          num_heads, qk_head_size,
          data.bias, data.query, data.key, data.value, qkv, true, kv_sequence_length);
      qkv_format = AttentionQkvFormat::Q_KV_BSNH_BSN2H;
    }
#if USE_FLASH_ATTENTION
    else if (use_memory_efficient_attention) {
      LaunchAddBias(stream, max_threads_per_block,
                    batch_size, sequence_length, kv_sequence_length,
                    num_heads, qk_head_size, v_head_size,
                    data.bias, data.query, data.key, data.value, q, k, v);
      DUMP_TENSOR_D("q(BSNH)", q, batch_size * sequence_length, num_heads, qk_head_size);
      DUMP_TENSOR_D("k(BSNH)", k, batch_size * kv_sequence_length, num_heads, qk_head_size);
      DUMP_TENSOR_D("v(BSNH)", v, batch_size * kv_sequence_length, num_heads, v_head_size);
      qkv_format = AttentionQkvFormat::Q_K_V_BSNH;
    }
#endif
    else if (use_fused_kernel) {
      assert(qk_head_size == v_head_size);
      // Q (BxSxNxH), K (BxSxNxH), V (BxSxNxH) => BxSxNx(H + H + H)
      LaunchAddBiasTransposeTrt(
          stream, max_threads_per_block,
          batch_size, sequence_length,
          num_heads, qk_head_size,
          data.bias, data.query, data.key, data.value, qkv, false, kv_sequence_length);
      DUMP_TENSOR_D("qkv(BSN3H)", qkv, batch_size, sequence_length, num_heads, 2 * qk_head_size + v_head_size);
      qkv_format = AttentionQkvFormat::QKV_BSN3H;
    } else {  // unfused kernel
      ORT_ENFORCE(!use_fused_causal, "MultiHeadAttention has not enabled fused causal");
      // Query (BxSxNxH) => Q (BxNxSxH)
      constexpr int format = 0;
      LaunchAddBiasTranspose<T>(stream, 1, format, max_threads_per_block,
                                batch_size, sequence_length, num_heads, qk_head_size,
                                data.query, data.bias, q,
                                true, -1);
      // Key (BxLxNxH) => K (BxNxLxH)
      LaunchAddBiasTranspose<T>(stream, 1, format, max_threads_per_block,
                                batch_size, kv_sequence_length, num_heads, qk_head_size,
                                data.key, data.bias + num_heads * qk_head_size, k,
                                true, -1);
      // Value (BxLxNxH_v) => V (BxNxLxH_v)
      LaunchAddBiasTranspose<T>(stream, 1, format, max_threads_per_block,
                                batch_size, kv_sequence_length, num_heads, v_head_size,
                                data.value, data.bias + 2 * num_heads * qk_head_size, v,
                                true, -1);
      DUMP_TENSOR_D("q(BNSH)", q, batch_size * num_heads, sequence_length, qk_head_size);
      DUMP_TENSOR_D("k(BNSH)", k, batch_size * num_heads, kv_sequence_length, qk_head_size);
      DUMP_TENSOR_D("v(BNSH)", v, batch_size * num_heads, kv_sequence_length, v_head_size);
      qkv_format = AttentionQkvFormat::Q_K_V_BNSH;
    }
  }
  CUDA_RETURN_IF_ERROR(hipGetLastError());
  return Status::OK();
}
template <typename T>
Status QkvToContext(
const hipDeviceProp_t& device_prop,
hipblasHandle_t& cublas,
hipStream_t stream,
contrib::AttentionParameters& parameters,
AttentionData<T>& data) {
constexpr size_t element_size = sizeof(T);
const int max_threads_per_block = device_prop.maxThreadsPerBlock;
const int batch_size = parameters.batch_size;
const int sequence_length = parameters.sequence_length;
const int kv_sequence_length = parameters.kv_sequence_length;
const int total_sequence_length = parameters.total_sequence_length;
const int num_heads = parameters.num_heads;
const int qk_head_size = parameters.head_size;
const int v_head_size = parameters.v_head_size;
const bool past_present_share_buffer = parameters.past_present_share_buffer;
const float mask_filter_value = parameters.mask_filter_value;
void* fused_runner = data.fused_runner;
// At most one fused kernel is enabled.
assert(int(data.use_memory_efficient_attention) + int(fused_runner != nullptr) + int(data.fused_cross_attention_kernel != nullptr) <= 1);
const int batches = batch_size * num_heads;
T* qkv = nullptr;
T* q = nullptr;
T* k = nullptr;
T* v = nullptr;
T* scratch1 = data.workspace;
if (data.has_qkv_workspace) {
const int size_per_batch_q = sequence_length * qk_head_size;
const int size_per_batch_k = kv_sequence_length * qk_head_size;
const int size_per_batch_v = kv_sequence_length * v_head_size;
const size_t elements_q = static_cast<size_t>(batches) * static_cast<size_t>(size_per_batch_q);
const size_t elements_k = static_cast<size_t>(batches) * static_cast<size_t>(size_per_batch_k);
const size_t elements_v = static_cast<size_t>(batches) * static_cast<size_t>(size_per_batch_v);
qkv = data.workspace;
q = qkv;
k = q + elements_q;
v = k + elements_k;
scratch1 = v + elements_v;
}
bool use_fused_kernel = (nullptr != fused_runner && !parameters.is_unidirectional);
bool use_fused_causal = (nullptr != fused_runner && parameters.is_unidirectional);
AttentionQkvFormat qkv_format = AttentionQkvFormat::Q_K_V_BSNH;
ORT_RETURN_IF_ERROR(PrepareQkv<T>(parameters, data, stream, max_threads_per_block, q, k, v, qkv_format));
int present_size_per_batch_k = 0;
int present_size_per_batch_v = 0;
if (!past_present_share_buffer) {
// Concat past key value to present (2xBxNxLxH), where L is kv_sequence_length and T is total_sequence_length.
// past_k (BxNxPxH) + k (BxNxLxH) => present_k (BxNxTxH)
// past_v (BxNxPxH) + v (BxNxLxH) => present_v (BxNxTxH)
// When there is past state, the head size for Q/K/V shall be same: H == H_v.
present_size_per_batch_k = total_sequence_length * qk_head_size;
present_size_per_batch_v = total_sequence_length * v_head_size;
if (nullptr != data.present) {
assert(qkv_format == AttentionQkvFormat::Q_K_V_BNSH || qkv_format == AttentionQkvFormat::Q_K_V_BNSH_QKV_BS3NH);
ORT_RETURN_IF_ERROR(
LaunchConcatPastToPresent(stream, total_sequence_length, sequence_length, batch_size, qk_head_size, num_heads,
max_threads_per_block, data.past, k, data.present));
// Update pointers to present_k and present_v.
k = data.present;
v = data.present + batches * present_size_per_batch_k;
}
} else {
assert(qk_head_size == v_head_size);
assert(data.fused_cross_attention_kernel == nullptr);
assert(!use_fused_kernel);
assert(data.gemm_buffer != nullptr);
assert(!data.use_memory_efficient_attention);
assert(data.has_qkv_workspace);
if (data.present != data.past) {
// For easy testing. Production should better avoid this path.
int64_t kv_size = 2LL * (int64_t)batch_size * num_heads * parameters.max_sequence_length * qk_head_size;
hipMemcpyAsync(data.present, data.past, kv_size * sizeof(T), hipMemcpyDeviceToDevice, stream);
}
// append last k v to present
ORT_RETURN_IF_ERROR(LaunchAddBiasTransAppendKvToPresent(
stream, parameters.max_sequence_length, parameters.past_sequence_length, sequence_length,
batch_size, qk_head_size, num_heads, max_threads_per_block,
use_fused_causal ? nullptr : data.bias, // For fused causal, bias has been added to gemm_buffer
data.gemm_buffer, data.present));
present_size_per_batch_k = parameters.max_sequence_length * qk_head_size;
present_size_per_batch_v = present_size_per_batch_k;
k = data.present;
v = data.present + batches * present_size_per_batch_k;
}
// Q, K and V are ready now
DUMP_TENSOR_INIT();
if (data.fused_cross_attention_kernel != nullptr) {
assert(qkv_format == AttentionQkvFormat::Q_KV_BSNH_BSN2H);
// We only enable fused cross attention when there is no key padding mask.
// Otherwise, key have effective batch size 2 * batch_size, which is different from batch_size of query.
assert(data.mask_index == nullptr);
int* q_sequence_offset = GetCumulatedSequenceLength(data.cumulated_sequence_length_q_cache,
data.mask_index, batch_size, sequence_length, stream,
scratch1);
DUMP_TENSOR_D("q_sequence_offset", q_sequence_offset, 1, batch_size + 1);
int* kv_sequence_offset = q_sequence_offset + (GetSequenceOffsetSize(batch_size, false) / sizeof(int));
kv_sequence_offset = GetCumulatedSequenceLength(data.cumulated_sequence_length_kv_cache,
data.mask_index, batch_size, kv_sequence_length, stream,
kv_sequence_offset);
CUDA_RETURN_IF_ERROR(hipGetLastError());
DUMP_TENSOR_D("kv_sequence_offset", kv_sequence_offset, 1, batch_size + 1);
FusedMultiHeadCrossAttentionKernel const* cross_attention_kernel =
reinterpret_cast<FusedMultiHeadCrossAttentionKernel const*>(data.fused_cross_attention_kernel);
// When there is no bias, we can directly use q and packed kv from inputs.
void const* query = q;
void const* packed_kv = k;
if (data.value == nullptr && data.bias == nullptr) {
query = data.query;
packed_kv = data.key;
}
run_fused_cross_attention(
query, // Q
packed_kv, // packed KV
q_sequence_offset, // cumulated sequence length of Q
kv_sequence_offset, // cumulated sequence length of KV
data.output, // output
cross_attention_kernel, // kernels
batch_size, // batch size
num_heads, // number of heads
qk_head_size, // head size of Q/K/V
sequence_length, // sequence length of Q
kv_sequence_length, // sequence length of KV
stream);
DUMP_TENSOR("trt cross output", data.output, batch_size * sequence_length, num_heads, v_head_size);
return Status::OK();
}
// Run TRT fused attention.
if (use_fused_kernel || use_fused_causal) {
int* sequence_offset = reinterpret_cast<int*>(scratch1);
if (parameters.mask_type == AttentionMaskType::MASK_2D_KEY_PADDING) {
DUMP_TENSOR_D("mask", reinterpret_cast<const int*>(data.mask_index), batch_size, sequence_length);
LaunchTrtSequenceOffset2d(sequence_offset, data.mask_index, batch_size, sequence_length, stream);
} else {
sequence_offset = GetCumulatedSequenceLength(data.cumulated_sequence_length_q_cache,
data.mask_index, batch_size, sequence_length, stream,
sequence_offset);
}
DUMP_TENSOR_D("sequence_offset", sequence_offset, 1, (data.mask_index != nullptr ? 2 : 1) * batch_size + 1);
CUDA_RETURN_IF_ERROR(hipGetLastError());
FusedMHARunnerFP16v2* fused_fp16_runner = reinterpret_cast<FusedMHARunnerFP16v2*>(fused_runner);
const int S = use_fused_causal ? sequence_length : fused_fp16_runner->getSFromMaxSeqLen(sequence_length);
// B = 2 * batch_size when there is padding in input, and B = batch_size when padding is removed.
const int B = (nullptr == data.mask_index ? batch_size : 2 * batch_size);
fused_fp16_runner->setup(S, B);
if (use_fused_kernel) {
assert(qkv_format == AttentionQkvFormat::QKV_BSN3H);
// When there is no bias, we can directly use packed qkv from inputs.
void const* packed_qkv = qkv;
if (data.query != nullptr && data.key == nullptr && data.bias == nullptr) {
packed_qkv = data.query;
}
fused_fp16_runner->run(packed_qkv, sequence_offset, data.output, stream);
DUMP_TENSOR("fused output", data.output, batch_size * sequence_length, num_heads, v_head_size);
} else {
assert(qkv_format == AttentionQkvFormat::Q_K_V_BNSH_QKV_BS3NH);
fused_fp16_runner->run(data.gemm_buffer, sequence_offset, data.output, stream);
DUMP_TENSOR("fused causal output", data.output, batch_size * sequence_length, num_heads, v_head_size);
}
return Status::OK();
}
#if USE_FLASH_ATTENTION
if (data.use_memory_efficient_attention) {
// We only enable fused cross attention when there is no key padding mask.
// Otherwise, key have effective batch size 2 * batch_size, which is different from batch_size of query.
assert(data.mask_index == nullptr);
assert(qkv_format == AttentionQkvFormat::Q_K_V_BSNH);
const void* query = q;
const void* key = k;
const void* value = v;
// For packed KV, we can use query input directly.
if (data.gemm_buffer == nullptr && data.key != nullptr && data.value == nullptr) {
assert(data.bias == nullptr);
query = data.query;
}
MemoryEfficientAttentionParams p;
p.sm = device_prop.major * 10 + device_prop.minor;
p.is_half = sizeof(T) == 2;
p.batch_size = data.mask_index == nullptr ? parameters.batch_size : 2 * parameters.batch_size;
p.num_heads = parameters.num_heads;
p.sequence_length = parameters.sequence_length;
p.kv_sequence_length = parameters.total_sequence_length;
p.qk_head_size = parameters.head_size;
p.v_head_size = parameters.v_head_size;
p.causal = parameters.is_unidirectional;
p.cu_seqlens_q = nullptr;
p.cu_seqlens_k = nullptr;
p.query = query;
p.key = key;
p.value = value;
p.output = data.output;
p.workspace = MemoryEfficientAttentionParams::need_workspace(v_head_size, sizeof(T) == sizeof(float)) ? scratch1 : nullptr;
p.stream = stream;
run_memory_efficient_attention(p);
DUMP_TENSOR("cutlass output", data.output, batch_size * sequence_length, num_heads, v_head_size);
return Status::OK();
}
#endif
// The following are unfused attention.
assert(qkv_format == AttentionQkvFormat::Q_K_V_BNSH);
const int* mask_index = data.mask_index;
gsl::span<const int64_t>& mask_index_dims = data.mask_index_dims;
// Raw attention mask could be 2D (BxT) or 3D (BxSxT) or 4D(Bx1xMxM), where M is the max sequence length.
bool use_raw_attention_mask = (nullptr != mask_index && mask_index_dims.size() >= 2);
// Compute Q*K' (as K'*Q), scaled by 1/sqrt(H) and store in scratch1: BxNxSxT
// Q: BxNxSxH, K (present_k): BxNxTxH, Q*K': BxNxSxT
float one = 1.0f;
float zero = 0.f;
// For raw attention mask, the scalar 1/sqrt(H) is moved to combine with softmax computation.
const float scale = parameters.scale == 0.0f ? 1.f / sqrt(static_cast<float>(qk_head_size))
: parameters.scale;
float alpha = use_raw_attention_mask ? one : scale;
hipblasSetStream(cublas, stream);
CUBLAS_RETURN_IF_ERROR(cublasGemmStridedBatchedHelper(
cublas, HIPBLAS_OP_T, HIPBLAS_OP_N,
total_sequence_length, sequence_length, qk_head_size,
&alpha, k, qk_head_size, present_size_per_batch_k,
q, qk_head_size, sequence_length * qk_head_size,
&zero, scratch1, total_sequence_length, sequence_length * total_sequence_length, batches, device_prop));
DUMP_TENSOR_D("QK", scratch1, batch_size * num_heads, sequence_length, total_sequence_length);
const size_t bytes = GetAttentionScratchSize(element_size, batch_size, num_heads,
sequence_length, total_sequence_length);
T* scratch2 = scratch1 + (bytes / element_size);
// Apply softmax and store result R to scratch2: BxNxSxT
if (use_raw_attention_mask) { // 2d, 3d or 4d attention mask
const int mask_dimension = static_cast<int>(mask_index_dims.size());
// For testing, environment variable ORT_TRANSFORMER_OPTIONS=1 could enable persistent softmax used in Torch.
const TransformerOptions* options = TransformerOptions::GetInstance();
bool use_persistent_softmax = options->IsPrecisionMode() && !options->DisablePersistentSoftmax();
T* persistent_softmax_workspace = scratch1; // replace Q*K' in place with masked score for persistent softmax.
ORT_RETURN_IF_ERROR(
ComputeSoftmaxWithRawMask<T>(stream, total_sequence_length, sequence_length, batch_size, num_heads,
mask_index, nullptr, data.relative_position_bias, scratch1, scratch2,
parameters.is_unidirectional, scale, mask_dimension,
parameters.max_sequence_length, use_persistent_softmax,
persistent_softmax_workspace, mask_filter_value));
} else if (nullptr != mask_index) { // 1d mask index
assert(mask_index_dims.size() == 1);
// mask_index has 1D shape: either (batch_size) or (2*batch_size). Only the later one has start postions.
const int* mask_start = (mask_index_dims[0] > batch_size) ? mask_index + batch_size : nullptr;
ORT_RETURN_IF_ERROR(ComputeSoftmaxWithMask1D<T>(
stream, total_sequence_length, sequence_length, batch_size, num_heads,
mask_index, mask_start, data.relative_position_bias, scratch1, scratch2, parameters.is_unidirectional));
} else { // no mask
ORT_RETURN_IF_ERROR(
ComputeSoftmax<T>(stream, total_sequence_length, sequence_length, batch_size, num_heads, data.relative_position_bias,
scratch1, scratch2, parameters.is_unidirectional));
}
DUMP_TENSOR_D("Softmax", scratch2, batch_size * num_heads, sequence_length, total_sequence_length);
// compute R*V (as V*R), and store in temp_output (space used by Q): BxNxSxH_v
T* temp_output = qkv;
CUBLAS_RETURN_IF_ERROR(cublasGemmStridedBatchedHelper(
cublas, HIPBLAS_OP_N, HIPBLAS_OP_N,
v_head_size, sequence_length, total_sequence_length,
&one, v, v_head_size, present_size_per_batch_v,
scratch2, total_sequence_length, sequence_length * total_sequence_length,
&zero, temp_output, v_head_size, sequence_length * v_head_size, batches, device_prop));
// Temp_output is BxNxSxH_v, transpose to output BxSxNxH_v
Status result = LaunchTransCtx(stream, sequence_length, batch_size, v_head_size, num_heads,
max_threads_per_block, false, temp_output, data.output);
DUMP_TENSOR("unfused output", data.output, batch_size * sequence_length, num_heads, v_head_size);
return result;
}
// Computes decoder attention (self or cross) for the DecoderAttention op.
//
// Shape legend: B = batch_size, S = sequence_length (query), L = kv_sequence_length,
// N = num_heads, H = head_size.
//
// Buffer layout: qkv_buffer holds transposed Q at offset 0, K at
// k_buffer_offset and V at v_buffer_offset. workspace_buffer starts with
// 3*B*N*H*S of transpose scratch, followed by scratch1/scratch2 (BxNxSxL
// attention scores / softmax) and scratch3 (BxNxSxH context).
//
// static_kv:       K/V come from encoder output (cross attention).
// use_past:        cached key/value from previous decoding steps are consumed.
// has_layer_state: new_key_cache / new_value_cache must be produced.
//
// Returns the status of the final output transpose; earlier HIP/hipBLAS
// failures are propagated via the *_RETURN_IF_ERROR macros.
template <typename T>
Status DecoderQkvToContext(
    const hipDeviceProp_t& device_prop,
    hipStream_t stream,
    hipblasHandle_t& cublas,
    const size_t element_size,
    const int batch_size,
    const int sequence_length,
    const int kv_sequence_length,
    const int num_heads,
    const int head_size,
    const bool static_kv,
    const bool use_past,
    const bool has_layer_state,
    const bool has_key_padding_mask,
    const float mask_filter_value,
    const T* gemm_query_buffer,
    const T* gemm_kv_buffer,
    const bool* key_padding_mask,
    const T* key_cache,
    const T* value_cache,
    T* qkv_buffer,
    T* workspace_buffer,
    T* output,
    T* new_key_cache,
    T* new_value_cache) {
  const int max_threads_per_block = device_prop.maxThreadsPerBlock;
  const int BN = batch_size * num_heads;
  const int BHN = BN * head_size;
  const int BNS = BN * sequence_length;
  const int k_buffer_offset = sequence_length * BHN;
  const int v_buffer_offset = (sequence_length + kv_sequence_length) * BHN;

  T* temp_qkv_buffer = workspace_buffer;
  const T* q = qkv_buffer;
  // transpose q and copy them to qkv_buffer
  ORT_RETURN_IF_ERROR(LaunchTransQkv(stream, 1, sequence_length, batch_size, head_size, num_heads,
                                     max_threads_per_block, true, gemm_query_buffer, qkv_buffer));

  const T* k = qkv_buffer + k_buffer_offset;
  const T* v = qkv_buffer + v_buffer_offset;
  if (!has_layer_state || !use_past) {
    // No usable cache: K/V come straight from the KV GEMM output.
    if (!static_kv) {
      // transpose kv and copy them to qkv_buffer
      ORT_RETURN_IF_ERROR(LaunchTransQkv(stream, 2, sequence_length, batch_size, head_size, num_heads,
                                         max_threads_per_block, true, gemm_kv_buffer, qkv_buffer + k_buffer_offset));
    } else {
      // transpose kv and copy them to qkv_buffer
      ORT_RETURN_IF_ERROR(LaunchTransQkv(stream, 2, kv_sequence_length, batch_size, head_size, num_heads,
                                         max_threads_per_block, true, gemm_kv_buffer, qkv_buffer + k_buffer_offset));
    }
  } else {
    // Self attention with past: transpose new K/V into temp space, then
    // concatenate cache + new onto qkv_buffer. (Cross attention with past
    // needs no work here — K/V are read from the caches directly below.)
    if (!static_kv) {
      // transpose kv and copy them to temp_buffer
      ORT_RETURN_IF_ERROR(LaunchTransQkv(stream, 2, sequence_length, batch_size, head_size, num_heads,
                                         max_threads_per_block, true, gemm_kv_buffer, temp_qkv_buffer));
      // concat cache-k with k and copy to qkv_buffer
      if (nullptr != key_cache) {
        ORT_RETURN_IF_ERROR(LaunchConcatTensorToTensor(stream, kv_sequence_length,
                                                       sequence_length, batch_size, head_size, num_heads,
                                                       max_threads_per_block, 1,
                                                       key_cache,
                                                       temp_qkv_buffer,
                                                       qkv_buffer + k_buffer_offset));
      }
      // concat cache-v with v and copy to qkv_buffer
      if (nullptr != value_cache) {
        ORT_RETURN_IF_ERROR(LaunchConcatTensorToTensor(stream, kv_sequence_length,
                                                       sequence_length, batch_size, head_size, num_heads,
                                                       max_threads_per_block, 1,
                                                       value_cache,
                                                       temp_qkv_buffer + k_buffer_offset,
                                                       qkv_buffer + v_buffer_offset));
      }
    }
  }

  if (has_layer_state) {
    if (use_past && static_kv) {
      // Cross attention: caches are immutable, pass them through unchanged.
      CUDA_RETURN_IF_ERROR(hipMemcpyAsync(new_key_cache, key_cache, kv_sequence_length * BHN * sizeof(T),
                                          hipMemcpyDeviceToDevice, stream));
      CUDA_RETURN_IF_ERROR(hipMemcpyAsync(new_value_cache, value_cache, kv_sequence_length * BHN * sizeof(T),
                                          hipMemcpyDeviceToDevice, stream));
    } else {
      // Publish the (possibly concatenated) K/V currently in qkv_buffer.
      CUDA_RETURN_IF_ERROR(hipMemcpyAsync(new_key_cache, k, kv_sequence_length * BHN * sizeof(T),
                                          hipMemcpyDeviceToDevice, stream));
      CUDA_RETURN_IF_ERROR(hipMemcpyAsync(new_value_cache, v, kv_sequence_length * BHN * sizeof(T),
                                          hipMemcpyDeviceToDevice, stream));
    }
  }

  // scratch1: BxNxSxL buffer
  // scratch2: BxNxSxL buffer
  // scratch3: BxNxSxH buffer
  T* scratch1 = temp_qkv_buffer + 3 * BHN * sequence_length;
  T* scratch2 = scratch1 + BNS * kv_sequence_length;
  T* scratch3 = scratch2 + BNS * kv_sequence_length;

  // compute Q*K' (as K'*Q), scaled by 1/sqrt(H) and store in scratch1: BxNxSxL
  // Q: BxNxSxH, K (present_k): BxNxLxH, Q*K': BxNxSxL
  const float rsqrt_head_size = 1.f / sqrt(static_cast<float>(head_size));
  const int temp_matrix_size = sequence_length * kv_sequence_length;
  float one = 1.0f;
  float zero = 0.f;

  float alpha = rsqrt_head_size;
  const int strideA = kv_sequence_length * head_size;
  const int strideB = sequence_length * head_size;
  if (use_past && static_kv) {
    // Cross attention with past: K is read straight from the cache.
    CUBLAS_RETURN_IF_ERROR(cublasGemmStridedBatchedHelper(
        cublas, HIPBLAS_OP_T, HIPBLAS_OP_N,
        kv_sequence_length, sequence_length, head_size,
        &alpha, key_cache, head_size, strideA,
        q, head_size, strideB,
        &zero, scratch1, kv_sequence_length, temp_matrix_size, BN, device_prop));
  } else {
    CUBLAS_RETURN_IF_ERROR(cublasGemmStridedBatchedHelper(
        cublas, HIPBLAS_OP_T, HIPBLAS_OP_N,
        kv_sequence_length, sequence_length, head_size,
        &alpha, k, head_size, strideA,
        q, head_size, strideB,
        &zero, scratch1, kv_sequence_length, temp_matrix_size, BN, device_prop));
  }

  constexpr bool is_unidirectional = false;
  const T* add_before_softmax = nullptr;
  if (has_key_padding_mask) {
    // 2D boolean key padding mask; scale 1.0f because scores were already
    // scaled by rsqrt_head_size in the GEMM above.
    constexpr int mask_dimension = 2;
    constexpr int max_sequence_length = 0;
    ORT_RETURN_IF_ERROR(ComputeSoftmaxWithRawMask<T>(stream, kv_sequence_length, sequence_length, batch_size, num_heads,
                                                     nullptr, key_padding_mask, add_before_softmax, scratch1, scratch2,
                                                     is_unidirectional, 1.0f, mask_dimension, max_sequence_length,
                                                     false, nullptr, mask_filter_value));
  } else {
    ORT_RETURN_IF_ERROR(ComputeSoftmax<T>(stream, kv_sequence_length, sequence_length, batch_size, num_heads,
                                          add_before_softmax, scratch1, scratch2, is_unidirectional));
  }

  // compute P*V (as V*P), and store in scratch3: BxNxSxH
  if (use_past && static_kv) {
    CUBLAS_RETURN_IF_ERROR(cublasGemmStridedBatchedHelper(
        cublas, HIPBLAS_OP_N, HIPBLAS_OP_N,
        head_size, sequence_length, kv_sequence_length,
        &one, value_cache, head_size, strideA,
        scratch2, kv_sequence_length, temp_matrix_size,
        &zero, scratch3, head_size, strideB, BN, device_prop));
  } else {
    CUBLAS_RETURN_IF_ERROR(cublasGemmStridedBatchedHelper(
        cublas, HIPBLAS_OP_N, HIPBLAS_OP_N,
        head_size, sequence_length, kv_sequence_length,
        &one, v, head_size, strideA,
        scratch2, kv_sequence_length, temp_matrix_size,
        &zero, scratch3, head_size, strideB, BN, device_prop));
  }

  // scratch3 is BxNxSxH, transpose to output SxBxNxH
  return LaunchTransCtx(stream, sequence_length, batch_size, head_size, num_heads,
                        max_threads_per_block, true, scratch3, output);
}
// Entry point for the decoder attention kernel: dispatches to the templated
// DecoderQkvToContext implementation based on element_size — 2 bytes selects
// half precision, anything else selects fp32. All pointer arguments are
// forwarded with the matching reinterpret_cast; see DecoderQkvToContext for
// the meaning of each parameter.
Status LaunchDecoderAttentionKernel(
    const hipDeviceProp_t& device_prop,
    hipStream_t stream,
    hipblasHandle_t& cublas,
    const size_t element_size,
    const int batch_size,
    const int sequence_length,
    const int kv_sequence_length,
    const int num_heads,
    const int head_size,
    const bool static_kv,
    const bool use_past,
    const bool has_layer_state,
    const bool has_key_padding_mask,
    const float mask_filter_value,
    const void* gemm_query_buffer,
    const void* gemm_kv_buffer,
    const bool* key_padding_mask,
    const void* key_cache,
    const void* value_cache,
    void* qkv_buffer,
    void* workspace_buffer,
    void* output,
    void* new_key_cache,
    void* new_value_cache) {
  const bool is_half = (element_size == 2);
  if (is_half) {
    return DecoderQkvToContext(
        device_prop, stream, cublas, element_size,
        batch_size, sequence_length, kv_sequence_length, num_heads, head_size,
        static_kv, use_past, has_layer_state, has_key_padding_mask, mask_filter_value,
        reinterpret_cast<const half*>(gemm_query_buffer),
        reinterpret_cast<const half*>(gemm_kv_buffer),
        key_padding_mask,
        reinterpret_cast<const half*>(key_cache),
        reinterpret_cast<const half*>(value_cache),
        reinterpret_cast<half*>(qkv_buffer),
        reinterpret_cast<half*>(workspace_buffer),
        reinterpret_cast<half*>(output),
        reinterpret_cast<half*>(new_key_cache),
        reinterpret_cast<half*>(new_value_cache));
  }
  return DecoderQkvToContext(
      device_prop, stream, cublas, element_size,
      batch_size, sequence_length, kv_sequence_length, num_heads, head_size,
      static_kv, use_past, has_layer_state, has_key_padding_mask, mask_filter_value,
      reinterpret_cast<const float*>(gemm_query_buffer),
      reinterpret_cast<const float*>(gemm_kv_buffer),
      key_padding_mask,
      reinterpret_cast<const float*>(key_cache),
      reinterpret_cast<const float*>(value_cache),
      reinterpret_cast<float*>(qkv_buffer),
      reinterpret_cast<float*>(workspace_buffer),
      reinterpret_cast<float*>(output),
      reinterpret_cast<float*>(new_key_cache),
      reinterpret_cast<float*>(new_value_cache));
}
// Template Instantiation
// Explicit instantiations for the two element types exposed by the op
// (fp32 and fp16); keeps the template definitions out of the header.
template struct AttentionData<float>;
template struct AttentionData<half>;

template Status QkvToContext<float>(
    const hipDeviceProp_t& device_prop,
    hipblasHandle_t& cublas,
    hipStream_t stream,
    contrib::AttentionParameters& parameters,
    AttentionData<float>& data);

template Status QkvToContext<half>(
    const hipDeviceProp_t& device_prop,
    hipblasHandle_t& cublas,
    hipStream_t stream,
    contrib::AttentionParameters& parameters,
    AttentionData<half>& data);

}  // namespace cuda
}  // namespace contrib
}  // namespace onnxruntime
| 1bdff5127683b20eed5fce7af36029f9667bb4e5.cu | /*
The implementation of this file is based on qkvToContext plugin in TensorRT demo:
https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Modifications:
// (1) support GPT-2 past state, unidirectional mask and 4D attention mask from Megatron
// (2) support 2D attention mask
// (3) allow persistent softmax from PyTorch for debugging purpose.
// (4) support different input hidden size and model hidden size for pruned model
// (5) support different hidden sizes of Q/K and V
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <cassert>
#include <cuda_fp16.h>
#include <cub/cub.cuh>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/shared_inc/fpgeneric.h"
#include "contrib_ops/cuda/bert/attention_impl.h"
#include "contrib_ops/cuda/bert/attention_softmax.h"
#include "contrib_ops/cuda/bert/transformer_common.h"
#include "contrib_ops/cuda/bert/add_bias_transpose.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/mha_runner.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/cross_attention/fmha_cross_attention.h"
#include "contrib_ops/cpu/bert/attention_base.h"
#include "contrib_ops/cuda/bert/bert_padding.h"
#include "contrib_ops/cuda/transformers/dump_cuda_tensor.h"
#include "contrib_ops/cuda/bert/cutlass_fmha/memory_efficient_attention.h"
using namespace onnxruntime::cuda;
using namespace cub;
namespace onnxruntime {
namespace contrib {
namespace cuda {
// Every scratch region handed out by the attention kernels starts on a
// 256-byte boundary.
constexpr size_t kMemoryAlignment = 256;

// Rounds `a` up to the nearest multiple of `b` (b must be non-zero).
static size_t AlignTo(size_t a, size_t b) {
  return ((a + b - 1) / b) * b;
}

// Rounds a byte count up to the global kMemoryAlignment boundary.
size_t AlignSize(size_t bytes) {
  return AlignTo(bytes, kMemoryAlignment);
}
// Lazily (re)builds the cached cumulated sequence-length (offset) buffer on
// the given stream when the requested sequence length differs from the cached
// one. A nullptr mask is passed to LaunchTrtSequenceOffset, so the offsets are
// derived from sequence_length alone (no per-batch padding).
void CumulatedSequenceLengthCache::Initialize(int32_t sequence_length, cudaStream_t stream) {
  if (this->sequence_length != sequence_length) {
    // The cache buffer must have been pre-allocated for max_batch_size.
    ORT_ENFORCE(buffer.get() != nullptr && this->max_batch_size > 0);
    LaunchTrtSequenceOffset(reinterpret_cast<int32_t*>(buffer.get()), nullptr, this->max_batch_size, sequence_length, stream);
    this->sequence_length = sequence_length;
  }
}
// Returns a device buffer of cumulated sequence lengths (offsets) for the
// fused attention kernels. When no mask is given and a sufficiently large
// cache exists, the cached buffer is refreshed and reused; otherwise the
// offsets are computed into scratch_buffer on the given stream.
// The returned pointer aliases either the cache buffer or scratch_buffer.
int* GetCumulatedSequenceLength(CumulatedSequenceLengthCache* cache,
                                const int* mask_index,
                                int batch_size,
                                int sequence_length,
                                cudaStream_t stream,
                                void* scratch_buffer) {
  if (mask_index == nullptr && cache != nullptr) {
    if (batch_size <= cache->max_batch_size) {
      cache->Initialize(sequence_length, stream);
      return reinterpret_cast<int*>(cache->buffer.get());
    }
  }

  // No cache hit: compute offsets (honoring mask_index if present) into scratch.
  int* sequence_offset = reinterpret_cast<int*>(scratch_buffer);
  LaunchTrtSequenceOffset(sequence_offset, mask_index, batch_size, sequence_length, stream);
  return sequence_offset;
}
// Byte size of one BxNxSxT attention-score scratch buffer, rounded up to the
// scratch-buffer alignment boundary.
size_t GetAttentionScratchSize(
    size_t element_size,
    size_t batch_size,
    size_t num_heads,
    size_t sequence_length,
    size_t total_sequence_length) {
  return AlignSize(element_size * batch_size * num_heads *
                   sequence_length * total_sequence_length);
}
// Returns the aligned byte size of the sequence-offset buffer used by the
// fused attention kernels.
// There are batch_size + 1 offsets without padding (or padding removed), and
// 2 * batch_size + 1 with padding.
size_t GetSequenceOffsetSize(int batch_size, bool has_padding) {
  // Fix: removed a stray empty statement (`;`) that followed the return.
  size_t bytes = sizeof(int) * ((has_padding ? 2 * batch_size : batch_size) + 1);
  return AlignSize(bytes);
}
// Computes the total workspace bytes needed by QkvToContext for the given
// shapes and the attention kernel that will be selected:
//   - base: transposed/bias-added Q, K and V buffers (qkv_bytes);
//   - memory efficient attention: + optional fp32 accumulation buffer;
//   - fused TRT runner: + one sequence-offset buffer;
//   - fused cross attention: + two sequence-offset buffers (q and kv);
//   - unfused fallback: + two BxNxSxT score scratch buffers.
size_t GetAttentionWorkspaceSize(
    size_t element_size,
    size_t batch_size,
    size_t num_heads,
    size_t qk_head_size,
    size_t v_head_size,
    size_t sequence_length,
    size_t kv_sequence_length,
    size_t total_sequence_length,
    void* fused_runner,
    bool use_fused_cross_attention,
    bool use_memory_efficient_attention) {
  // Note that q, k and v might need alignment for fused attention kernels.
  const size_t qkv_bytes = element_size * batch_size * num_heads *
                           ((sequence_length + kv_sequence_length) * qk_head_size + kv_sequence_length * v_head_size);

#if USE_FLASH_ATTENTION
  if (use_memory_efficient_attention) {
    size_t fmha_buffer_bytes = 0;
    // Extra fp32 buffer only when the kernel needs a separate accumulator
    // (decided by head size and element type).
    if (MemoryEfficientAttentionParams::need_workspace(v_head_size, element_size == sizeof(float))) {
      fmha_buffer_bytes = batch_size * sequence_length * num_heads * v_head_size * sizeof(float);
    }

    return qkv_bytes + fmha_buffer_bytes;
  }
#else
  ORT_UNUSED_PARAMETER(use_memory_efficient_attention);
#endif

  if (fused_runner != nullptr) {
    return qkv_bytes + GetSequenceOffsetSize(static_cast<int>(batch_size), true);
  }

  if (use_fused_cross_attention) {
    return qkv_bytes + 2 * GetSequenceOffsetSize(static_cast<int>(batch_size), true);
  }

  return qkv_bytes + 2 * GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length,
                                                 total_sequence_length);
}
// Adds bias to the K and V slices of a packed QKV tensor and appends them into
// the present key/value cache. "Small" variant: one block covers all heads of
// a given (s, b); launched (see LaunchAddBiasTransAppendKvToPresent) with
// grid(S, B, 2) and block(x, N, 1), where threadIdx.y selects the head and
// blockIdx.z selects K (0) vs V (1).
template <typename T>
__global__ void AddBiasTransAppendKvToPresentSmall(
    const T* qkv, const T* biases, T* present,
    const int head_size, const int past_sequence_length, const int max_sequence_length) {
  // Input:  BxSxMxNxH  (Format 1)
  // Output: (2, B, N, [P..P+S) of MaxS, H),
  // B is batch_size, S is sequence_length, M is number of matrices, N is num_heads, H is head_size
  const int n = threadIdx.y;  // head index
  const int s = blockIdx.x;   // position within the new chunk
  const int b = blockIdx.y;   // batch index
  const int N = blockDim.y;   // num_heads
  const int S = gridDim.x;    // sequence_length
  const int B = gridDim.y;    // batch_size
  constexpr int M = 3;           // Matrix count in qkv
  const int m = blockIdx.z + 1;  // k = 1, v = 2 (Q at m = 0 is skipped)

  const int NH = N * head_size;
  const int NHS = NH * S;
  // Advance qkv to element (b, s, m, n, 0) of the BxSxMxNxH input.
  qkv += (n * head_size + (s * M + m) * NH + b * M * NHS);
  if (biases) {
    // Bias layout is (3, N*H); select the slice for this matrix and head.
    biases += (m * NH + n * head_size);
  }

  const int MsH = max_sequence_length * head_size;
  const int NMsH = N * MsH;
  const int BNMsH = B * NMsH;
  // Advance present to (m - 1, b, n, past_sequence_length + s, 0) of the cache.
  present += ((past_sequence_length + s) * head_size + n * MsH + b * NMsH + (m - 1) * BNMsH);

  // threadIdx.x strides over the head dimension; adjacent threads touch
  // adjacent elements.
  for (int h = threadIdx.x; h < head_size; h += blockDim.x) {
    T bias = (biases ? biases[h] : (T)0.0f);
    present[h] = qkv[h] + bias;
  }
}
// Same operation as AddBiasTransAppendKvToPresentSmall, but for the case where
// num_heads * head_size exceeds one block: launched with
// grid(N, S, B * 2) and block(min(H, maxThreads), 1, 1); the head, sequence
// position and batch/matrix are all decoded from block indices.
template <typename T>
__global__ void AddBiasTransAppendKvToPresent(
    const T* qkv, const T* biases, T* present,
    const int head_size, const int past_sequence_length, const int max_sequence_length) {
  // Input:  BxSxMxNxH  (Format 1)
  // Output: (2, B, N, [P..P+S) of MaxS, H),
  // B is batch_size, S is sequence_length, M is number of matrices, N is num_heads, H is head_size
  const int n = blockIdx.x;           // head index
  const int s = blockIdx.y;           // position within the new chunk
  const int b = (blockIdx.z >> 1);    // batch index (z packs batch and k/v)
  const int N = gridDim.x;            // num_heads
  const int S = gridDim.y;            // sequence_length
  const int B = (gridDim.z >> 1);     // batch_size
  constexpr int M = 3;                   // Matrix count in qkv
  const int m = (blockIdx.z & 0x1) + 1;  // k = 1, v = 2 (Q at m = 0 is skipped)

  const int NH = N * head_size;
  const int NHS = NH * S;
  // Advance qkv to element (b, s, m, n, 0) of the BxSxMxNxH input.
  qkv += (n * head_size + (s * M + m) * NH + b * M * NHS);
  if (biases) {
    // Bias layout is (3, N*H); select the slice for this matrix and head.
    biases += (m * NH + n * head_size);
  }

  const int MsH = max_sequence_length * head_size;
  const int NMsH = N * MsH;
  const int BNMsH = B * NMsH;
  // Advance present to (m - 1, b, n, past_sequence_length + s, 0) of the cache.
  present += ((past_sequence_length + s) * head_size + n * MsH + b * NMsH + (m - 1) * BNMsH);

  for (int h = threadIdx.x; h < head_size; h += blockDim.x) {
    T bias = (biases ? biases[h] : (T)0.0f);
    present[h] = qkv[h] + bias;
  }
}
// qkv buffer is merged tensor of shape (B,S,3,N,H); k and v are the second and
// third of the 3 matrices. bias is of shape (3, NxH) or nullptr.
// Appends bias-added K/V to present of shape (2, B, N, (P..T) of MaxS, H).
// Picks the kernel variant by whether all heads of one (s, b) fit in a block.
template <typename T>
Status LaunchAddBiasTransAppendKvToPresent(cudaStream_t stream,
                                           const int max_sequence_length,
                                           const int past_sequence_length,
                                           const int sequence_length,
                                           const int batch_size,
                                           const int head_size,
                                           const int num_heads,
                                           const int max_threads_per_block,
                                           const T* biases,
                                           const T* qkv_buffer,
                                           T* present) {
  assert(head_size <= (1 << 30));

  // 64-bit product to avoid overflow before the comparison.
  int64_t nh = (int64_t)head_size * num_heads;
  if (nh <= max_threads_per_block) {
    // All heads fit in one block: threadIdx.y indexes the head.
    const dim3 grid(sequence_length, batch_size, 2);  // 2 for k and v
    const dim3 block(max_threads_per_block / num_heads, num_heads, 1);

    AddBiasTransAppendKvToPresentSmall<T><<<grid, block, 0, stream>>>(
        qkv_buffer, biases, present, head_size, past_sequence_length, max_sequence_length);
  } else {
    // Too many elements per (s, b): one block per (head, s, batch, k/v).
    const dim3 grid(num_heads, sequence_length, batch_size * 2);  // 2 for k and v
    const dim3 block(std::min(head_size, max_threads_per_block), 1, 1);
    AddBiasTransAppendKvToPresent<T><<<grid, block, 0, stream>>>(
        qkv_buffer, biases, present, head_size, past_sequence_length, max_sequence_length);
  }

  // Surface launch-configuration errors from the asynchronous launch above.
  return CUDA_CALL(cudaGetLastError());
}
// Explicit instantiations of the K/V cache append helper for fp32 and fp16.
// NOTE(review): the second value parameter is named total_sequence_length here
// but past_sequence_length in the definition above; parameter names in an
// explicit instantiation carry no meaning, so this is only a readability nit.
template Status LaunchAddBiasTransAppendKvToPresent(cudaStream_t stream,
                                                    const int max_sequence_length,
                                                    const int total_sequence_length,
                                                    const int sequence_length,
                                                    const int batch_size,
                                                    const int head_size,
                                                    const int num_heads,
                                                    const int max_threads_per_block,
                                                    const float* bias,
                                                    const float* qkv_buffer,
                                                    float* present);

template Status LaunchAddBiasTransAppendKvToPresent(cudaStream_t stream,
                                                    const int max_sequence_length,
                                                    const int total_sequence_length,
                                                    const int sequence_length,
                                                    const int batch_size,
                                                    const int head_size,
                                                    const int num_heads,
                                                    const int max_threads_per_block,
                                                    const half* bias,
                                                    const half* qkv_buffer,
                                                    half* present);
// Prepares Q, K and V in the layout expected by the attention kernel that has
// been selected (fused TRT runner, fused cross attention, memory efficient
// attention, or the unfused fallback), adding bias and transposing as needed.
// On return qkv_format reports the layout that was produced; q/k/v point into
// data.workspace. Inputs arrive in one of four forms, handled in order below:
//   1. data.gemm_buffer != nullptr : packed QKV GEMM output, BxSx3xNxH
//   2. data.key == nullptr         : packed QKV in data.query
//   3. data.value == nullptr       : separate query, packed KV in data.key
//   4. otherwise                   : separate query / key / value + bias
template <typename T>
Status PrepareQkv(contrib::AttentionParameters& parameters,
                  AttentionData<T>& data,
                  cudaStream_t stream,
                  int max_threads_per_block,
                  T* q, T* k, T* v, AttentionQkvFormat& qkv_format) {
  const int batch_size = parameters.batch_size;
  const int sequence_length = parameters.sequence_length;
  const int kv_sequence_length = parameters.kv_sequence_length;
  const int num_heads = parameters.num_heads;
  const int qk_head_size = parameters.head_size;
  const int v_head_size = parameters.v_head_size;
  const bool past_present_share_buffer = parameters.past_present_share_buffer;
  void* fused_runner = data.fused_runner;
  bool use_memory_efficient_attention = data.use_memory_efficient_attention;

  T* qkv = data.workspace;

  bool use_fused_kernel = (nullptr != fused_runner && !parameters.is_unidirectional);
  bool use_fused_causal = (nullptr != fused_runner && parameters.is_unidirectional);

  // Default format for memory efficient attention.
  // When there is past state, the format shall be BxNxSxH, so we disable memory
  // efficient attention when there is past.
  DUMP_TENSOR_INIT();

  if (nullptr != data.gemm_buffer) {
    if (data.bias == nullptr) {
      assert(nullptr == fused_runner);
      // For quantized attention, bias has been added so only need transpose here.
      // gemm_buffer should be BxSx3xNxH => qkv: 3xBxNxSxH
      assert(qk_head_size == v_head_size);
      // When past/present share the buffer, only Q needs the transpose here.
      int matrix_to_trans = (past_present_share_buffer ? 1 : 3);
      ORT_RETURN_IF_ERROR(LaunchTransQkv(stream, matrix_to_trans, sequence_length, batch_size, qk_head_size, num_heads,
                                         max_threads_per_block, false, data.gemm_buffer, qkv, 3));
      qkv_format = AttentionQkvFormat::Q_K_V_BNSH;
    } else {
      // For fused TRT attention, transpose qkv to BxSxNx3xH (format 2)
      // For memory efficient attention, transpose to 3xBxSxNxH (format 3)
      // For unfused kernel, transpose to 3xBxNxSxH (format 1)
      // For fused causal kernel, use format 1 since we need to have K and V to
      // update present state; at the same time, we update gemm_buffer
      // BxSx3xNxH with bias which is used as input for fused causal kernel.
      const int format = (use_fused_kernel ? 2 : (use_memory_efficient_attention ? 3 : 1));
      qkv_format = use_fused_kernel
                       ? AttentionQkvFormat::QKV_BSN3H
                       : (use_memory_efficient_attention
                              ? AttentionQkvFormat::Q_K_V_BSNH
                              : (use_fused_causal ? AttentionQkvFormat::Q_K_V_BNSH_QKV_BS3NH : AttentionQkvFormat::Q_K_V_BNSH));

      // For fused causal, we will update gemm_buffer with bias directly.
      T* qkv_add_bias = use_fused_causal ? data.gemm_buffer : nullptr;

      int matrix_to_transpose = ((format == AttentionQkvFormat::Q_K_V_BNSH && past_present_share_buffer) ? 1 : 3);
      // format 1: BxSx(NH + NH + NH_v) => BxNxSxH + BxNxSxH + BxNxSxH_v
      // format 2: BxSx(NH + NH + NH) => BxSxNx(H + H + H)
      LaunchAddBiasTranspose(stream, matrix_to_transpose, format, max_threads_per_block,
                             batch_size, sequence_length, num_heads, qk_head_size,
                             data.gemm_buffer, data.bias, qkv, true, v_head_size, qkv_add_bias,
                             3, parameters.do_rotary, parameters.original_past_sequence_length);
    }
  } else if (data.key == nullptr) {  // gemm_buffer == nullptr and packed qkv
    assert(data.bias == nullptr);
    assert(qk_head_size == v_head_size);

    DUMP_TENSOR_D("packed_qkv", data.query, batch_size * sequence_length, num_heads, 3, qk_head_size);

    if (use_memory_efficient_attention) {
      // unpack qkv to BSNH. Note that there is no bias so we need not output query to q.
      constexpr int format = 4;
      T* qkv_add_bias = nullptr;
      LaunchAddBiasTranspose(stream, 3, format, max_threads_per_block,
                             batch_size, sequence_length, num_heads, qk_head_size,
                             data.query, data.bias, qkv,
                             true, v_head_size, qkv_add_bias, 3);
      // NOTE(review): the first dump's label says "k(BSNH)" but it dumps q;
      // the label was presumably meant to be "q(BSNH)".
      DUMP_TENSOR_D("k(BSNH)", q, batch_size * sequence_length, num_heads, qk_head_size);
      DUMP_TENSOR_D("k(BSNH)", k, batch_size * kv_sequence_length, num_heads, qk_head_size);
      DUMP_TENSOR_D("v(BSNH)", v, batch_size * kv_sequence_length, num_heads, v_head_size);
      qkv_format = AttentionQkvFormat::Q_K_V_BSNH;
    } else {
      if (!use_fused_kernel) {
        return ORT_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED, "packed QKV format is not implemented for current GPU. Please disable it in fusion options.");
      }

      // Fused TRT kernel consumes the packed input as-is.
      qkv_format = AttentionQkvFormat::QKV_BSN3H;
    }
  } else if (data.value == nullptr) {  // gemm_buffer == nullptr and packed kv
    // TODO: unpack kv to BNSH for unfused kernel so that we can remove the following constraint.
    // CheckInputs verified this constraint.
    assert(data.bias == nullptr);
    assert(qk_head_size == v_head_size);

    DUMP_TENSOR_D("packed_kv", data.key, batch_size * kv_sequence_length, num_heads, 2, qk_head_size);

    if (use_memory_efficient_attention) {
      // unpack kv to BSNH. Note that there is no bias so we need not output query to q.
      constexpr int format = 4;
      T* qkv_add_bias = nullptr;
      // data.bias is asserted nullptr above, so kv_bias is always nullptr here;
      // the conditional is kept for safety should that assert be relaxed.
      const T* kv_bias = (data.bias == nullptr ? data.bias : data.bias + parameters.hidden_size);
      LaunchAddBiasTranspose(stream, 2, format, max_threads_per_block,
                             batch_size, kv_sequence_length, num_heads, qk_head_size,
                             data.key, kv_bias, k,
                             true, v_head_size, qkv_add_bias, 2);
      DUMP_TENSOR_D("k(BSNH)", k, batch_size * kv_sequence_length, num_heads, qk_head_size);
      DUMP_TENSOR_D("v(BSNH)", v, batch_size * kv_sequence_length, num_heads, v_head_size);
      qkv_format = AttentionQkvFormat::Q_K_V_BSNH;
    } else {
      if (data.fused_cross_attention_kernel == nullptr) {
        return ORT_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED, "packed KV format is not implemented for current GPU. Please disable packed kv in fusion options.");
      }

      qkv_format = AttentionQkvFormat::Q_KV_BSNH_BSN2H;
    }
  } else {  // gemm_buffer == nullptr and not packed
    assert(data.query != nullptr && data.key != nullptr && data.value != nullptr && data.bias != nullptr);

    DUMP_TENSOR_D("query", data.query, batch_size * sequence_length, num_heads, qk_head_size);
    DUMP_TENSOR_D("query_bias", data.bias, num_heads, qk_head_size);
    DUMP_TENSOR_D("key", data.key, batch_size * kv_sequence_length, num_heads, qk_head_size);
    DUMP_TENSOR_D("key_bias", data.bias + num_heads * qk_head_size, num_heads, qk_head_size);
    DUMP_TENSOR_D("value", data.value, batch_size * kv_sequence_length, num_heads, v_head_size);
    DUMP_TENSOR_D("value_bias", data.bias + 2 * num_heads * qk_head_size, num_heads, v_head_size);

    if (data.fused_cross_attention_kernel != nullptr) {
      assert(qk_head_size == v_head_size);

      // For fused cross attention, besides adding bias, K and V needed to be packed:
      //   K (BxSxNxH), V (BxSxNxH) => BxSxNx2xH
      LaunchAddBiasTransposeTrt(
          stream, max_threads_per_block,
          batch_size, sequence_length,
          num_heads, qk_head_size,
          data.bias, data.query, data.key, data.value, qkv, true, kv_sequence_length);

      qkv_format = AttentionQkvFormat::Q_KV_BSNH_BSN2H;
    }
#if USE_FLASH_ATTENTION
    else if (use_memory_efficient_attention) {
      // Add bias only; memory efficient attention consumes BSNH directly.
      LaunchAddBias(stream, max_threads_per_block,
                    batch_size, sequence_length, kv_sequence_length,
                    num_heads, qk_head_size, v_head_size,
                    data.bias, data.query, data.key, data.value, q, k, v);

      DUMP_TENSOR_D("q(BSNH)", q, batch_size * sequence_length, num_heads, qk_head_size);
      DUMP_TENSOR_D("k(BSNH)", k, batch_size * kv_sequence_length, num_heads, qk_head_size);
      DUMP_TENSOR_D("v(BSNH)", v, batch_size * kv_sequence_length, num_heads, v_head_size);
      qkv_format = AttentionQkvFormat::Q_K_V_BSNH;
    }
#endif
    else if (use_fused_kernel) {
      assert(qk_head_size == v_head_size);

      // Q (BxSxNxH), K (BxSxNxH), V (BxSxNxH) => BxSxNx(H + H + H)
      LaunchAddBiasTransposeTrt(
          stream, max_threads_per_block,
          batch_size, sequence_length,
          num_heads, qk_head_size,
          data.bias, data.query, data.key, data.value, qkv, false, kv_sequence_length);
      DUMP_TENSOR_D("qkv(BSN3H)", qkv, batch_size, sequence_length, num_heads, 2 * qk_head_size + v_head_size);

      qkv_format = AttentionQkvFormat::QKV_BSN3H;
    } else {  // unfused kernel
      ORT_ENFORCE(!use_fused_causal, "MultiHeadAttention has not enabled fused causal");

      // Query (BxSxNxH) => Q (BxNxSxH)
      constexpr int format = 0;
      LaunchAddBiasTranspose<T>(stream, 1, format, max_threads_per_block,
                                batch_size, sequence_length, num_heads, qk_head_size,
                                data.query, data.bias, q,
                                true, -1);

      // Key (BxLxNxH) => K (BxNxLxH)
      LaunchAddBiasTranspose<T>(stream, 1, format, max_threads_per_block,
                                batch_size, kv_sequence_length, num_heads, qk_head_size,
                                data.key, data.bias + num_heads * qk_head_size, k,
                                true, -1);

      // Value (BxLxNxH_v) => V (BxNxLxH_v)
      LaunchAddBiasTranspose<T>(stream, 1, format, max_threads_per_block,
                                batch_size, kv_sequence_length, num_heads, v_head_size,
                                data.value, data.bias + 2 * num_heads * qk_head_size, v,
                                true, -1);

      DUMP_TENSOR_D("q(BNSH)", q, batch_size * num_heads, sequence_length, qk_head_size);
      DUMP_TENSOR_D("k(BNSH)", k, batch_size * num_heads, kv_sequence_length, qk_head_size);
      DUMP_TENSOR_D("v(BNSH)", v, batch_size * num_heads, kv_sequence_length, v_head_size);
      qkv_format = AttentionQkvFormat::Q_K_V_BNSH;
    }
  }
  // Surface any launch-configuration error from the kernels above.
  CUDA_RETURN_IF_ERROR(cudaGetLastError());
  return Status::OK();
}
template <typename T>
Status QkvToContext(
const cudaDeviceProp& device_prop,
cublasHandle_t& cublas,
cudaStream_t stream,
contrib::AttentionParameters& parameters,
AttentionData<T>& data) {
constexpr size_t element_size = sizeof(T);
const int max_threads_per_block = device_prop.maxThreadsPerBlock;
const int batch_size = parameters.batch_size;
const int sequence_length = parameters.sequence_length;
const int kv_sequence_length = parameters.kv_sequence_length;
const int total_sequence_length = parameters.total_sequence_length;
const int num_heads = parameters.num_heads;
const int qk_head_size = parameters.head_size;
const int v_head_size = parameters.v_head_size;
const bool past_present_share_buffer = parameters.past_present_share_buffer;
const float mask_filter_value = parameters.mask_filter_value;
void* fused_runner = data.fused_runner;
// At most one fused kernel is enabled.
assert(int(data.use_memory_efficient_attention) + int(fused_runner != nullptr) + int(data.fused_cross_attention_kernel != nullptr) <= 1);
const int batches = batch_size * num_heads;
T* qkv = nullptr;
T* q = nullptr;
T* k = nullptr;
T* v = nullptr;
T* scratch1 = data.workspace;
if (data.has_qkv_workspace) {
const int size_per_batch_q = sequence_length * qk_head_size;
const int size_per_batch_k = kv_sequence_length * qk_head_size;
const int size_per_batch_v = kv_sequence_length * v_head_size;
const size_t elements_q = static_cast<size_t>(batches) * static_cast<size_t>(size_per_batch_q);
const size_t elements_k = static_cast<size_t>(batches) * static_cast<size_t>(size_per_batch_k);
const size_t elements_v = static_cast<size_t>(batches) * static_cast<size_t>(size_per_batch_v);
qkv = data.workspace;
q = qkv;
k = q + elements_q;
v = k + elements_k;
scratch1 = v + elements_v;
}
bool use_fused_kernel = (nullptr != fused_runner && !parameters.is_unidirectional);
bool use_fused_causal = (nullptr != fused_runner && parameters.is_unidirectional);
AttentionQkvFormat qkv_format = AttentionQkvFormat::Q_K_V_BSNH;
ORT_RETURN_IF_ERROR(PrepareQkv<T>(parameters, data, stream, max_threads_per_block, q, k, v, qkv_format));
int present_size_per_batch_k = 0;
int present_size_per_batch_v = 0;
if (!past_present_share_buffer) {
// Concat past key value to present (2xBxNxLxH), where L is kv_sequence_length and T is total_sequence_length.
// past_k (BxNxPxH) + k (BxNxLxH) => present_k (BxNxTxH)
// past_v (BxNxPxH) + v (BxNxLxH) => present_v (BxNxTxH)
// When there is past state, the head size for Q/K/V shall be same: H == H_v.
present_size_per_batch_k = total_sequence_length * qk_head_size;
present_size_per_batch_v = total_sequence_length * v_head_size;
if (nullptr != data.present) {
assert(qkv_format == AttentionQkvFormat::Q_K_V_BNSH || qkv_format == AttentionQkvFormat::Q_K_V_BNSH_QKV_BS3NH);
ORT_RETURN_IF_ERROR(
LaunchConcatPastToPresent(stream, total_sequence_length, sequence_length, batch_size, qk_head_size, num_heads,
max_threads_per_block, data.past, k, data.present));
// Update pointers to present_k and present_v.
k = data.present;
v = data.present + batches * present_size_per_batch_k;
}
} else {
assert(qk_head_size == v_head_size);
assert(data.fused_cross_attention_kernel == nullptr);
assert(!use_fused_kernel);
assert(data.gemm_buffer != nullptr);
assert(!data.use_memory_efficient_attention);
assert(data.has_qkv_workspace);
if (data.present != data.past) {
// For easy testing. Production should better avoid this path.
int64_t kv_size = 2LL * (int64_t)batch_size * num_heads * parameters.max_sequence_length * qk_head_size;
cudaMemcpyAsync(data.present, data.past, kv_size * sizeof(T), cudaMemcpyDeviceToDevice, stream);
}
// append last k v to present
ORT_RETURN_IF_ERROR(LaunchAddBiasTransAppendKvToPresent(
stream, parameters.max_sequence_length, parameters.past_sequence_length, sequence_length,
batch_size, qk_head_size, num_heads, max_threads_per_block,
use_fused_causal ? nullptr : data.bias, // For fused causal, bias has been added to gemm_buffer
data.gemm_buffer, data.present));
present_size_per_batch_k = parameters.max_sequence_length * qk_head_size;
present_size_per_batch_v = present_size_per_batch_k;
k = data.present;
v = data.present + batches * present_size_per_batch_k;
}
// Q, K and V are ready now
DUMP_TENSOR_INIT();
if (data.fused_cross_attention_kernel != nullptr) {
assert(qkv_format == AttentionQkvFormat::Q_KV_BSNH_BSN2H);
// We only enable fused cross attention when there is no key padding mask.
// Otherwise, key have effective batch size 2 * batch_size, which is different from batch_size of query.
assert(data.mask_index == nullptr);
int* q_sequence_offset = GetCumulatedSequenceLength(data.cumulated_sequence_length_q_cache,
data.mask_index, batch_size, sequence_length, stream,
scratch1);
DUMP_TENSOR_D("q_sequence_offset", q_sequence_offset, 1, batch_size + 1);
int* kv_sequence_offset = q_sequence_offset + (GetSequenceOffsetSize(batch_size, false) / sizeof(int));
kv_sequence_offset = GetCumulatedSequenceLength(data.cumulated_sequence_length_kv_cache,
data.mask_index, batch_size, kv_sequence_length, stream,
kv_sequence_offset);
CUDA_RETURN_IF_ERROR(cudaGetLastError());
DUMP_TENSOR_D("kv_sequence_offset", kv_sequence_offset, 1, batch_size + 1);
FusedMultiHeadCrossAttentionKernel const* cross_attention_kernel =
reinterpret_cast<FusedMultiHeadCrossAttentionKernel const*>(data.fused_cross_attention_kernel);
// When there is no bias, we can directly use q and packed kv from inputs.
void const* query = q;
void const* packed_kv = k;
if (data.value == nullptr && data.bias == nullptr) {
query = data.query;
packed_kv = data.key;
}
run_fused_cross_attention(
query, // Q
packed_kv, // packed KV
q_sequence_offset, // cumulated sequence length of Q
kv_sequence_offset, // cumulated sequence length of KV
data.output, // output
cross_attention_kernel, // kernels
batch_size, // batch size
num_heads, // number of heads
qk_head_size, // head size of Q/K/V
sequence_length, // sequence length of Q
kv_sequence_length, // sequence length of KV
stream);
DUMP_TENSOR("trt cross output", data.output, batch_size * sequence_length, num_heads, v_head_size);
return Status::OK();
}
// Run TRT fused attention.
if (use_fused_kernel || use_fused_causal) {
int* sequence_offset = reinterpret_cast<int*>(scratch1);
if (parameters.mask_type == AttentionMaskType::MASK_2D_KEY_PADDING) {
DUMP_TENSOR_D("mask", reinterpret_cast<const int*>(data.mask_index), batch_size, sequence_length);
LaunchTrtSequenceOffset2d(sequence_offset, data.mask_index, batch_size, sequence_length, stream);
} else {
sequence_offset = GetCumulatedSequenceLength(data.cumulated_sequence_length_q_cache,
data.mask_index, batch_size, sequence_length, stream,
sequence_offset);
}
DUMP_TENSOR_D("sequence_offset", sequence_offset, 1, (data.mask_index != nullptr ? 2 : 1) * batch_size + 1);
CUDA_RETURN_IF_ERROR(cudaGetLastError());
FusedMHARunnerFP16v2* fused_fp16_runner = reinterpret_cast<FusedMHARunnerFP16v2*>(fused_runner);
const int S = use_fused_causal ? sequence_length : fused_fp16_runner->getSFromMaxSeqLen(sequence_length);
// B = 2 * batch_size when there is padding in input, and B = batch_size when padding is removed.
const int B = (nullptr == data.mask_index ? batch_size : 2 * batch_size);
fused_fp16_runner->setup(S, B);
if (use_fused_kernel) {
assert(qkv_format == AttentionQkvFormat::QKV_BSN3H);
// When there is no bias, we can directly use packed qkv from inputs.
void const* packed_qkv = qkv;
if (data.query != nullptr && data.key == nullptr && data.bias == nullptr) {
packed_qkv = data.query;
}
fused_fp16_runner->run(packed_qkv, sequence_offset, data.output, stream);
DUMP_TENSOR("fused output", data.output, batch_size * sequence_length, num_heads, v_head_size);
} else {
assert(qkv_format == AttentionQkvFormat::Q_K_V_BNSH_QKV_BS3NH);
fused_fp16_runner->run(data.gemm_buffer, sequence_offset, data.output, stream);
DUMP_TENSOR("fused causal output", data.output, batch_size * sequence_length, num_heads, v_head_size);
}
return Status::OK();
}
#if USE_FLASH_ATTENTION
if (data.use_memory_efficient_attention) {
// We only enable fused cross attention when there is no key padding mask.
// Otherwise, key have effective batch size 2 * batch_size, which is different from batch_size of query.
assert(data.mask_index == nullptr);
assert(qkv_format == AttentionQkvFormat::Q_K_V_BSNH);
const void* query = q;
const void* key = k;
const void* value = v;
// For packed KV, we can use query input directly.
if (data.gemm_buffer == nullptr && data.key != nullptr && data.value == nullptr) {
assert(data.bias == nullptr);
query = data.query;
}
MemoryEfficientAttentionParams p;
p.sm = device_prop.major * 10 + device_prop.minor;
p.is_half = sizeof(T) == 2;
p.batch_size = data.mask_index == nullptr ? parameters.batch_size : 2 * parameters.batch_size;
p.num_heads = parameters.num_heads;
p.sequence_length = parameters.sequence_length;
p.kv_sequence_length = parameters.total_sequence_length;
p.qk_head_size = parameters.head_size;
p.v_head_size = parameters.v_head_size;
p.causal = parameters.is_unidirectional;
p.cu_seqlens_q = nullptr;
p.cu_seqlens_k = nullptr;
p.query = query;
p.key = key;
p.value = value;
p.output = data.output;
p.workspace = MemoryEfficientAttentionParams::need_workspace(v_head_size, sizeof(T) == sizeof(float)) ? scratch1 : nullptr;
p.stream = stream;
run_memory_efficient_attention(p);
DUMP_TENSOR("cutlass output", data.output, batch_size * sequence_length, num_heads, v_head_size);
return Status::OK();
}
#endif
// The following are unfused attention.
assert(qkv_format == AttentionQkvFormat::Q_K_V_BNSH);
const int* mask_index = data.mask_index;
gsl::span<const int64_t>& mask_index_dims = data.mask_index_dims;
// Raw attention mask could be 2D (BxT) or 3D (BxSxT) or 4D(Bx1xMxM), where M is the max sequence length.
bool use_raw_attention_mask = (nullptr != mask_index && mask_index_dims.size() >= 2);
// Compute Q*K' (as K'*Q), scaled by 1/sqrt(H) and store in scratch1: BxNxSxT
// Q: BxNxSxH, K (present_k): BxNxTxH, Q*K': BxNxSxT
float one = 1.0f;
float zero = 0.f;
// For raw attention mask, the scalar 1/sqrt(H) is moved to combine with softmax computation.
const float scale = parameters.scale == 0.0f ? 1.f / sqrt(static_cast<float>(qk_head_size))
: parameters.scale;
float alpha = use_raw_attention_mask ? one : scale;
cublasSetStream(cublas, stream);
CUBLAS_RETURN_IF_ERROR(cublasGemmStridedBatchedHelper(
cublas, CUBLAS_OP_T, CUBLAS_OP_N,
total_sequence_length, sequence_length, qk_head_size,
&alpha, k, qk_head_size, present_size_per_batch_k,
q, qk_head_size, sequence_length * qk_head_size,
&zero, scratch1, total_sequence_length, sequence_length * total_sequence_length, batches, device_prop));
DUMP_TENSOR_D("QK", scratch1, batch_size * num_heads, sequence_length, total_sequence_length);
const size_t bytes = GetAttentionScratchSize(element_size, batch_size, num_heads,
sequence_length, total_sequence_length);
T* scratch2 = scratch1 + (bytes / element_size);
// Apply softmax and store result R to scratch2: BxNxSxT
if (use_raw_attention_mask) { // 2d, 3d or 4d attention mask
const int mask_dimension = static_cast<int>(mask_index_dims.size());
// For testing, environment variable ORT_TRANSFORMER_OPTIONS=1 could enable persistent softmax used in Torch.
const TransformerOptions* options = TransformerOptions::GetInstance();
bool use_persistent_softmax = options->IsPrecisionMode() && !options->DisablePersistentSoftmax();
T* persistent_softmax_workspace = scratch1; // replace Q*K' in place with masked score for persistent softmax.
ORT_RETURN_IF_ERROR(
ComputeSoftmaxWithRawMask<T>(stream, total_sequence_length, sequence_length, batch_size, num_heads,
mask_index, nullptr, data.relative_position_bias, scratch1, scratch2,
parameters.is_unidirectional, scale, mask_dimension,
parameters.max_sequence_length, use_persistent_softmax,
persistent_softmax_workspace, mask_filter_value));
} else if (nullptr != mask_index) { // 1d mask index
assert(mask_index_dims.size() == 1);
// mask_index has 1D shape: either (batch_size) or (2*batch_size). Only the later one has start postions.
const int* mask_start = (mask_index_dims[0] > batch_size) ? mask_index + batch_size : nullptr;
ORT_RETURN_IF_ERROR(ComputeSoftmaxWithMask1D<T>(
stream, total_sequence_length, sequence_length, batch_size, num_heads,
mask_index, mask_start, data.relative_position_bias, scratch1, scratch2, parameters.is_unidirectional));
} else { // no mask
ORT_RETURN_IF_ERROR(
ComputeSoftmax<T>(stream, total_sequence_length, sequence_length, batch_size, num_heads, data.relative_position_bias,
scratch1, scratch2, parameters.is_unidirectional));
}
DUMP_TENSOR_D("Softmax", scratch2, batch_size * num_heads, sequence_length, total_sequence_length);
// compute R*V (as V*R), and store in temp_output (space used by Q): BxNxSxH_v
T* temp_output = qkv;
CUBLAS_RETURN_IF_ERROR(cublasGemmStridedBatchedHelper(
cublas, CUBLAS_OP_N, CUBLAS_OP_N,
v_head_size, sequence_length, total_sequence_length,
&one, v, v_head_size, present_size_per_batch_v,
scratch2, total_sequence_length, sequence_length * total_sequence_length,
&zero, temp_output, v_head_size, sequence_length * v_head_size, batches, device_prop));
// Temp_output is BxNxSxH_v, transpose to output BxSxNxH_v
Status result = LaunchTransCtx(stream, sequence_length, batch_size, v_head_size, num_heads,
max_threads_per_block, false, temp_output, data.output);
DUMP_TENSOR("unfused output", data.output, batch_size * sequence_length, num_heads, v_head_size);
return result;
}
// Core decoder attention computation for one layer.
//
// Pipeline: build transposed Q (and K/V) in qkv_buffer, optionally concat
// the past KV cache, compute softmax(Q*K' / sqrt(H)) with cuBLAS
// strided-batched GEMMs, multiply by V, and transpose the result into
// `output` (SxBxNxH).
//
// Parameters (all device pointers unless noted):
//   element_size      : byte size of T; unused here (dispatch happens in the caller).
//   static_kv         : K/V come from a fixed (encoder) source rather than growing per step.
//   use_past          : reuse key_cache/value_cache from a previous decoding step.
//   has_layer_state   : caller wants new_key_cache/new_value_cache filled.
//   gemm_query_buffer : projected query, transposed below into qkv_buffer.
//   gemm_kv_buffer    : projected key/value (when K/V are recomputed this step).
//   key_cache/value_cache : past K/V from a previous step.
//   qkv_buffer        : scratch holding transposed Q, then K and V at fixed offsets.
//   workspace_buffer  : temp transpose target plus softmax/GEMM scratch.
//   output            : final attention result.
template <typename T>
Status DecoderQkvToContext(
    const cudaDeviceProp& device_prop,
    cudaStream_t stream,
    cublasHandle_t& cublas,
    const size_t element_size,
    const int batch_size,
    const int sequence_length,
    const int kv_sequence_length,
    const int num_heads,
    const int head_size,
    const bool static_kv,
    const bool use_past,
    const bool has_layer_state,
    const bool has_key_padding_mask,
    const float mask_filter_value,
    const T* gemm_query_buffer,
    const T* gemm_kv_buffer,
    const bool* key_padding_mask,
    const T* key_cache,
    const T* value_cache,
    T* qkv_buffer,
    T* workspace_buffer,
    T* output,
    T* new_key_cache,
    T* new_value_cache) {
  const int max_threads_per_block = device_prop.maxThreadsPerBlock;
  const int BN = batch_size * num_heads;
  const int BHN = BN * head_size;
  const int BNS = BN * sequence_length;
  // Element offsets of K and V inside qkv_buffer (Q occupies the first
  // sequence_length*BHN elements).
  const int k_buffer_offset = sequence_length * BHN;
  const int v_buffer_offset = (sequence_length + kv_sequence_length) * BHN;
  T* temp_qkv_buffer = workspace_buffer;
  const T* q = qkv_buffer;
  // transpose q and copy them to qkv_buffer
  ORT_RETURN_IF_ERROR(LaunchTransQkv(stream, 1, sequence_length, batch_size, head_size, num_heads,
                                     max_threads_per_block, true, gemm_query_buffer, qkv_buffer));
  const T* k = qkv_buffer + k_buffer_offset;
  const T* v = qkv_buffer + v_buffer_offset;
  if (!has_layer_state || !use_past) {
    // K/V are freshly computed this step: transpose them directly into place.
    if (!static_kv) {
      // transpose kv and copy them to qkv_buffer
      ORT_RETURN_IF_ERROR(LaunchTransQkv(stream, 2, sequence_length, batch_size, head_size, num_heads,
                                         max_threads_per_block, true, gemm_kv_buffer, qkv_buffer + k_buffer_offset));
    } else {
      // transpose kv and copy them to qkv_buffer
      ORT_RETURN_IF_ERROR(LaunchTransQkv(stream, 2, kv_sequence_length, batch_size, head_size, num_heads,
                                         max_threads_per_block, true, gemm_kv_buffer, qkv_buffer + k_buffer_offset));
    }
  } else {
    if (!static_kv) {
      // transpose kv and copy them to temp_buffer
      ORT_RETURN_IF_ERROR(LaunchTransQkv(stream, 2, sequence_length, batch_size, head_size, num_heads,
                                         max_threads_per_block, true, gemm_kv_buffer, temp_qkv_buffer));
      // concat cache-k with k and copy to qkv_buffer
      if (nullptr != key_cache) {
        ORT_RETURN_IF_ERROR(LaunchConcatTensorToTensor(stream, kv_sequence_length,
                                                       sequence_length, batch_size, head_size, num_heads,
                                                       max_threads_per_block, 1,
                                                       key_cache,
                                                       temp_qkv_buffer,
                                                       qkv_buffer + k_buffer_offset));
      }
      // concat cache-v with v and copy to qkv_buffer
      if (nullptr != value_cache) {
        ORT_RETURN_IF_ERROR(LaunchConcatTensorToTensor(stream, kv_sequence_length,
                                                       sequence_length, batch_size, head_size, num_heads,
                                                       max_threads_per_block, 1,
                                                       value_cache,
                                                       temp_qkv_buffer + k_buffer_offset,
                                                       qkv_buffer + v_buffer_offset));
      }
    }
    // NOTE(review): when use_past && static_kv, K/V are read straight from
    // key_cache/value_cache in the GEMMs below, so nothing is staged here.
  }
  if (has_layer_state) {
    // Publish the KV state for the next decoding step.
    if (use_past && static_kv) {
      CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(new_key_cache, key_cache, kv_sequence_length * BHN * sizeof(T),
                                           cudaMemcpyDeviceToDevice, stream));
      CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(new_value_cache, value_cache, kv_sequence_length * BHN * sizeof(T),
                                           cudaMemcpyDeviceToDevice, stream));
    } else {
      CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(new_key_cache, k, kv_sequence_length * BHN * sizeof(T),
                                           cudaMemcpyDeviceToDevice, stream));
      CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(new_value_cache, v, kv_sequence_length * BHN * sizeof(T),
                                           cudaMemcpyDeviceToDevice, stream));
    }
  }
  // scratch1: BxNxSxL buffer
  // scratch2: BxNxSxL buffer
  // scratch3: BxNxSxH buffer
  // Carved out of workspace_buffer after the 3*S*B*N*H transpose staging area.
  T* scratch1 = temp_qkv_buffer + 3 * BHN * sequence_length;
  T* scratch2 = scratch1 + BNS * kv_sequence_length;
  T* scratch3 = scratch2 + BNS * kv_sequence_length;
  // compute Q*K' (as K'*Q), scaled by 1/sqrt(H) and store in scratch1: BxNxSxL
  // Q: BxNxSxH, K (present_k): BxNxLxH, Q*K': BxNxSxL
  const float rsqrt_head_size = 1.f / sqrt(static_cast<float>(head_size));
  const int temp_matrix_size = sequence_length * kv_sequence_length;
  float one = 1.0f;
  float zero = 0.f;
  float alpha = rsqrt_head_size;
  const int strideA = kv_sequence_length * head_size;
  const int strideB = sequence_length * head_size;
  if (use_past && static_kv) {
    // Static cross-attention with cache: K is read directly from key_cache.
    CUBLAS_RETURN_IF_ERROR(cublasGemmStridedBatchedHelper(
        cublas, CUBLAS_OP_T, CUBLAS_OP_N,
        kv_sequence_length, sequence_length, head_size,
        &alpha, key_cache, head_size, strideA,
        q, head_size, strideB,
        &zero, scratch1, kv_sequence_length, temp_matrix_size, BN, device_prop));
  } else {
    CUBLAS_RETURN_IF_ERROR(cublasGemmStridedBatchedHelper(
        cublas, CUBLAS_OP_T, CUBLAS_OP_N,
        kv_sequence_length, sequence_length, head_size,
        &alpha, k, head_size, strideA,
        q, head_size, strideB,
        &zero, scratch1, kv_sequence_length, temp_matrix_size, BN, device_prop));
  }
  // Decoder attention here is not causal-masked at this stage.
  constexpr bool is_unidirectional = false;
  const T* add_before_softmax = nullptr;
  if (has_key_padding_mask) {
    // Raw 2D boolean key padding mask; scale already applied in the GEMM above.
    constexpr int mask_dimension = 2;
    constexpr int max_sequence_length = 0;
    ORT_RETURN_IF_ERROR(ComputeSoftmaxWithRawMask<T>(stream, kv_sequence_length, sequence_length, batch_size, num_heads,
                                                     nullptr, key_padding_mask, add_before_softmax, scratch1, scratch2,
                                                     is_unidirectional, 1.0f, mask_dimension, max_sequence_length,
                                                     false, nullptr, mask_filter_value));
  } else {
    ORT_RETURN_IF_ERROR(ComputeSoftmax<T>(stream, kv_sequence_length, sequence_length, batch_size, num_heads,
                                          add_before_softmax, scratch1, scratch2, is_unidirectional));
  }
  // compute P*V (as V*P), and store in scratch3: BxNxSxH
  if (use_past && static_kv) {
    CUBLAS_RETURN_IF_ERROR(cublasGemmStridedBatchedHelper(
        cublas, CUBLAS_OP_N, CUBLAS_OP_N,
        head_size, sequence_length, kv_sequence_length,
        &one, value_cache, head_size, strideA,
        scratch2, kv_sequence_length, temp_matrix_size,
        &zero, scratch3, head_size, strideB, BN, device_prop));
  } else {
    CUBLAS_RETURN_IF_ERROR(cublasGemmStridedBatchedHelper(
        cublas, CUBLAS_OP_N, CUBLAS_OP_N,
        head_size, sequence_length, kv_sequence_length,
        &one, v, head_size, strideA,
        scratch2, kv_sequence_length, temp_matrix_size,
        &zero, scratch3, head_size, strideB, BN, device_prop));
  }
  // scratch3 is BxNxSxH, transpose to output SxBxNxH
  return LaunchTransCtx(stream, sequence_length, batch_size, head_size, num_heads,
                        max_threads_per_block, true, scratch3, output);
}
// Type-dispatching entry point for decoder attention.
// element_size selects the arithmetic type (2 bytes -> half, otherwise
// float); all buffer pointers are reinterpreted accordingly and forwarded
// unchanged to DecoderQkvToContext<T>.
Status LaunchDecoderAttentionKernel(
    const cudaDeviceProp& device_prop,
    cudaStream_t stream,
    cublasHandle_t& cublas,
    const size_t element_size,
    const int batch_size,
    const int sequence_length,
    const int kv_sequence_length,
    const int num_heads,
    const int head_size,
    const bool static_kv,
    const bool use_past,
    const bool has_layer_state,
    const bool has_key_padding_mask,
    const float mask_filter_value,
    const void* gemm_query_buffer,
    const void* gemm_kv_buffer,
    const bool* key_padding_mask,
    const void* key_cache,
    const void* value_cache,
    void* qkv_buffer,
    void* workspace_buffer,
    void* output,
    void* new_key_cache,
    void* new_value_cache) {
  const bool use_half = (element_size == 2);
  if (use_half) {
    return DecoderQkvToContext(
        device_prop, stream, cublas, element_size, batch_size,
        sequence_length, kv_sequence_length, num_heads, head_size,
        static_kv, use_past, has_layer_state, has_key_padding_mask,
        mask_filter_value,
        reinterpret_cast<const half*>(gemm_query_buffer),
        reinterpret_cast<const half*>(gemm_kv_buffer),
        key_padding_mask,
        reinterpret_cast<const half*>(key_cache),
        reinterpret_cast<const half*>(value_cache),
        reinterpret_cast<half*>(qkv_buffer),
        reinterpret_cast<half*>(workspace_buffer),
        reinterpret_cast<half*>(output),
        reinterpret_cast<half*>(new_key_cache),
        reinterpret_cast<half*>(new_value_cache));
  }
  return DecoderQkvToContext(
      device_prop, stream, cublas, element_size, batch_size,
      sequence_length, kv_sequence_length, num_heads, head_size,
      static_kv, use_past, has_layer_state, has_key_padding_mask,
      mask_filter_value,
      reinterpret_cast<const float*>(gemm_query_buffer),
      reinterpret_cast<const float*>(gemm_kv_buffer),
      key_padding_mask,
      reinterpret_cast<const float*>(key_cache),
      reinterpret_cast<const float*>(value_cache),
      reinterpret_cast<float*>(qkv_buffer),
      reinterpret_cast<float*>(workspace_buffer),
      reinterpret_cast<float*>(output),
      reinterpret_cast<float*>(new_key_cache),
      reinterpret_cast<float*>(new_value_cache));
}
// Template Instantiation
// Explicit instantiations so the attention code paths are emitted for the
// two element types this translation unit supports (fp32 and fp16).
template struct AttentionData<float>;
template struct AttentionData<half>;
template Status QkvToContext<float>(
    const cudaDeviceProp& device_prop,
    cublasHandle_t& cublas,
    cudaStream_t stream,
    contrib::AttentionParameters& parameters,
    AttentionData<float>& data);
template Status QkvToContext<half>(
    const cudaDeviceProp& device_prop,
    cublasHandle_t& cublas,
    cudaStream_t stream,
    contrib::AttentionParameters& parameters,
    AttentionData<half>& data);
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
eed73ce9670e5e4c352744353e4096efb053bf85.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define EPSILON 0.000000001
// Simple 2D point / 2D vector in double precision; the basic geometric
// type used by all intersection routines in this translation unit.
typedef struct
{
  double x, y;  // Cartesian coordinates
} point;
// Twice the signed area of triangle [P,Q,R]: positive when the three
// points are in counter-clockwise order, zero when collinear.
__device__ double A(const point& P, const point& Q, const point& R) {
  const double ux = Q.x - P.x;
  const double uy = Q.y - P.y;
  const double vx = R.x - P.x;
  const double vy = R.y - P.y;
  return ux * vy - uy * vx;
}
// Component-wise difference a - b of two 2D points.
__device__ point sub(const point& a, const point& b) {
  point d = {a.x - b.x, a.y - b.y};
  return d;
}
// Component-wise sum a + b of two 2D points.
__device__ point add(const point& a, const point& b) {
  point s = {a.x + b.x, a.y + b.y};
  return s;
}
// Dot product of two 2D vectors: a.x*b.x + a.y*b.y.
__device__ double mul(const point& a, const point& b) {
  return a.x * b.x + a.y * b.y;
}
// Scale a 2D vector by the scalar c.
__device__ point mulScalar(const double c, const point& b) {
  point scaled = {c * b.x, c * b.y};
  return scaled;
}
/*
-----------------------------------------------------------------
Function to return intersection type
Runs in GPU
Called from Device
-------------------------------------------------------------------
*/
// __device__ int getIntersectType(const edge& edgeP, const edge& edgeQ, double& alpha, double& beta) {
// Classifies how segment [P1,P2] (edge of polygon P) meets segment
// [Q1,Q2] (edge of polygon Q), following the Greiner-Hormann style
// intersection taxonomy. On a hit, alpha is the intersection parameter
// along [P1,P2] and beta the parameter along [Q1,Q2], both in [0,1].
//
// Return codes:
//   0 NO_INTERSECTION     5 X_OVERLAP
//   1 X_INTERSECTION      6 T_OVERLAP_Q
//   2 T_INTERSECTION_Q    7 T_OVERLAP_P
//   3 T_INTERSECTION_P    8 V_OVERLAP
//   4 V_INTERSECTION
__device__ int getIntersectType(const point& P1, const point& P2, const point& Q1, const point& Q2, double& alpha, double& beta) {
  // Signed areas of P's endpoints against line Q1-Q2.
  double AP1 = A(P1, Q1, Q2);
  double AP2 = A(P2, Q1, Q2);
  if (fabs(AP1 - AP2) > EPSILON) {
    // from here: [P1,P2] and [Q1,Q2] are not parallel
    //
    // analyse potential intersection
    //
    double AQ1 = A(Q1, P1, P2);
    double AQ2 = A(Q2, P1, P2);
    // compute alpha and beta
    alpha = AP1 / (AP1 - AP2);
    beta = AQ1 / (AQ1 - AQ2);
    // classify alpha
    bool alpha_is_0 = false;
    bool alpha_in_0_1 = false;
    if ((alpha > EPSILON) && (alpha < 1.0 - EPSILON))
      alpha_in_0_1 = true;
    else if (fabs(alpha) <= EPSILON)
      alpha_is_0 = true;
    // classify beta
    bool beta_is_0 = false;
    bool beta_in_0_1 = false;
    if ((beta > EPSILON) && (beta < 1.0 - EPSILON))
      beta_in_0_1 = true;
    else if (fabs(beta) <= EPSILON)
      beta_is_0 = true;
    //
    // distinguish intersection types
    //
    if (alpha_in_0_1 && beta_in_0_1) return (1);  // X_INTERSECTION
    if (alpha_is_0 && beta_in_0_1) return (2);    // T_INTERSECTION_Q
    if (beta_is_0 && alpha_in_0_1) return (3);    // T_INTERSECTION_P
    if (alpha_is_0 && beta_is_0) return (4);      // V_INTERSECTION
  } else if (fabs(AP1) < EPSILON) {
    // from here: [P1,P2] and [Q1,Q2] are collinear
    //
    // analyse potential overlap
    //
    point dP = sub(P2, P1);
    point dQ = sub(Q2, Q1);
    point PQ = sub(Q1, P1);
    // compute alpha and beta by projecting PQ onto each edge direction
    alpha = mul(PQ, dP) / mul(dP, dP);
    beta = -mul(PQ, dQ) / mul(dQ, dQ);
    // classify alpha
    bool alpha_is_0 = false;
    bool alpha_in_0_1 = false;
    bool alpha_not_in_0_1 = false;
    if ((alpha > EPSILON) && (alpha < 1.0 - EPSILON))
      alpha_in_0_1 = true;
    else if (fabs(alpha) <= EPSILON)
      alpha_is_0 = true;
    else
      alpha_not_in_0_1 = true;
    // classify beta
    bool beta_is_0 = false;
    bool beta_in_0_1 = false;
    bool beta_not_in_0_1 = false;
    if ((beta > EPSILON) && (beta < 1.0 - EPSILON))
      beta_in_0_1 = true;
    else if (fabs(beta) <= EPSILON)  // BUG FIX: previously tested fabs(alpha) (copy/paste error)
      beta_is_0 = true;
    else
      beta_not_in_0_1 = true;
    //
    // distinguish intersection types
    //
    if (alpha_in_0_1 && beta_in_0_1) return (5);      // X_OVERLAP
    if (alpha_not_in_0_1 && beta_in_0_1) return (6);  // T_OVERLAP_Q
    if (beta_not_in_0_1 && alpha_in_0_1) return (7);  // T_OVERLAP_P
    if (alpha_is_0 && beta_is_0) return (8);          // V_OVERLAP
  }
  return (0);  // NO_INTERSECTION
}
/*
-----------------------------------------------------------------
Function to calculate all intersections
Runs in GPU
Called from Host
-------------------------------------------------------------------
*/
// One thread per (edge of P, edge of Q) pair. Each thread classifies its
// edge pair with getIntersectType and records the intersection point and
// edge parameter as a (x, y, alpha|beta) triplet at slot `id` of
// dev_intersectionsP / dev_intersectionsQ (each sized 3*sizeP*sizeQ).
//
// NOTE(review): the neighbor bookkeeping below is written into per-thread
// device-heap scratch and discarded on return; to be useful these arrays
// should be kernel parameters filled for the host — confirm intended design.
__global__ void intersect(double *polyPX, double *polyPY, double *polyQX, double *polyQY, double *dev_intersectionsP, double *dev_intersectionsQ, int sizeP, int sizeQ){
  // Support multi-block launches (original assumed a single block).
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  // FIX: guard must be >=, id == sizeP*sizeQ is already out of range.
  if (id >= sizeP * sizeQ) return;
  double alpha;
  double beta;
  point I;
  point P1, P2, Q1, Q2;
  int pid = id / sizeQ;
  // NOTE(review): id % sizeQ looks like the intended thread -> Q-edge
  // mapping; (id+1) % sizeQ skews the pairing — confirm before changing.
  int qid = (id + 1) % sizeQ;
  // FIX: compute wrap-around successor indices BEFORE indexing, so the
  // last edge no longer reads one element past the end of the arrays.
  int pnext = (pid == sizeP - 1) ? 0 : pid + 1;
  int qnext = (qid == sizeQ - 1) ? 0 : qid + 1;
  // Initialize this thread's output slots so non-intersecting pairs do not
  // leave uninitialized device memory for the host to read.
  dev_intersectionsP[id*3] = 0.0; dev_intersectionsP[id*3+1] = 0.0; dev_intersectionsP[id*3+2] = 0.0;
  dev_intersectionsQ[id*3] = 0.0; dev_intersectionsQ[id*3+1] = 0.0; dev_intersectionsQ[id*3+2] = 0.0;
  // neighbor types = {P1->0, Q1->1, I_P->2, I_Q->3}
  // per-thread scratch for neighbor information (see NOTE above)
  int *neighborP = (int *) malloc(sizeP * sizeQ * sizeof(int));
  int *neighborQ = (int *) malloc(sizeP * sizeQ * sizeof(int));
  if (neighborP == NULL || neighborQ == NULL) {
    // Device-heap exhausted; free whichever succeeded and bail out.
    free(neighborP);
    free(neighborQ);
    return;
  }
  P1.x = polyPX[pid];
  P1.y = polyPY[pid];
  P2.x = polyPX[pnext];
  P2.y = polyPY[pnext];
  Q1.x = polyQX[qid];
  Q1.y = polyQY[qid];
  Q2.x = polyQX[qnext];
  Q2.y = polyQY[qnext];
  //
  // determine intersection or overlap type
  //
  int i = getIntersectType(P1, P2, Q1, Q2, alpha, beta);
  printf("type %d\n", i);
  switch (i) {
    //
    // X-intersection: proper crossing in the interior of both edges
    //
    case 1:
      // I = (1-alpha)*P1 + alpha*P2
      I = add(mulScalar((1.0 - alpha), P1), mulScalar(alpha, P2));
      dev_intersectionsP[id*3] = I.x;
      dev_intersectionsP[id*3+1] = I.y;
      dev_intersectionsP[id*3+2] = alpha;
      neighborP[id] = 3;
      dev_intersectionsQ[id*3] = I.x;
      dev_intersectionsQ[id*3+1] = I.y;
      dev_intersectionsQ[id*3+2] = beta;
      neighborQ[id] = 2;
      break;
    //
    // X-overlap
    //
    case 5:
      dev_intersectionsQ[id*3] = P1.x;
      dev_intersectionsQ[id*3+1] = P1.y;
      dev_intersectionsQ[id*3+2] = beta;
      neighborQ[id] = 0;
      dev_intersectionsP[id*3] = Q1.x;
      dev_intersectionsP[id*3+1] = Q1.y;
      dev_intersectionsP[id*3+2] = alpha;
      neighborP[id] = 1;
      break;
    //
    // T-intersection or T-overlap on Q
    //
    case 2:
    case 6:
      dev_intersectionsQ[id*3] = P1.x;
      dev_intersectionsQ[id*3+1] = P1.y;
      dev_intersectionsQ[id*3+2] = beta;
      neighborQ[id] = 0;
      break;
    //
    // T-intersection or T-overlap on P
    //
    case 3:
    case 7:
      dev_intersectionsP[id*3] = Q1.x;
      dev_intersectionsP[id*3+1] = Q1.y;
      dev_intersectionsP[id*3+2] = alpha;
      neighborP[id] = 1;
      break;
    //
    // V-intersection or V-overlap
    //
    case 4:
    case 8:
      neighborP[id] = 1;
      neighborQ[id] = 0;
      break;
  }
  // FIX: the device-heap scratch was previously leaked on every thread.
  free(neighborP);
  free(neighborQ);
}
/*
-----------------------------------------------------------------
Function to label all instersection points
Runs in GPU
Called from Device
-------------------------------------------------------------------
*/
// Determines the local configuration (predecessor/successor on P and on Q)
// around the intersection point handled by this thread, as a first step
// toward entry/exit labeling. Incomplete: the computed points are not yet
// classified or stored anywhere.
//
// FIXES: added the missing semicolon after the point declarations and
// replaced the undeclared identifiers intersectionsP/intersectionsQ with
// the actual parameters dev_intersectionsP/dev_intersectionsQ (the previous
// version did not compile).
__device__ void labelIntersectionPoints(double *polyPX, double *polyPY, double *polyQX, double *polyQY, double *dev_intersectionsP, double *dev_intersectionsQ, int *neighborP, int *neighborQ, int sizeP, int sizeQ){
  int id = threadIdx.x;
  // intersection point related to current thread (stride-3 layout: x, y, alpha)
  point I;
  I.x = dev_intersectionsP[id*3];
  I.y = dev_intersectionsP[id*3+1];
  // determine local configuration at this intersection vertex
  point P_m, P_p, Q_m, Q_p;
  P_m.x = polyPX[id];    // P-, predecessor of I on P
  P_m.y = polyPY[id];
  P_p.x = polyPX[id+1];  // P+, successor of I on P
  P_p.y = polyPY[id+1];
  // Select Q-/Q+ according to the neighbor type recorded for this vertex:
  // {P1->0, Q1->1, I_P->2, I_Q->3}.
  // NOTE(review): in the intersection-array branches below, .x and .y read
  // the SAME flat slot (id-1 / id+1) instead of stride-3 (x, y) pairs, and
  // no bounds check protects id-1/id+1 — looks wrong; confirm the intended
  // indexing before enabling this path.
  if (neighborP[id] == 0) {
    Q_m.x = polyPX[id-1];  // Q-, predecessor of I on Q
    Q_m.y = polyPY[id-1];
    Q_p.x = polyPX[id+1];  // Q+, successor of I on Q
    Q_p.y = polyPY[id+1];
  } else if (neighborP[id] == 1) {
    Q_m.x = polyQX[id-1];
    Q_m.y = polyQY[id-1];
    Q_p.x = polyQX[id+1];
    Q_p.y = polyQY[id+1];
  } else if (neighborP[id] == 2) {
    Q_m.x = dev_intersectionsP[id-1];
    Q_m.y = dev_intersectionsP[id-1];
    Q_p.x = dev_intersectionsP[id+1];
    Q_p.y = dev_intersectionsP[id+1];
  } else if (neighborP[id] == 3) {
    Q_m.x = dev_intersectionsQ[id-1];
    Q_m.y = dev_intersectionsQ[id-1];
    Q_p.x = dev_intersectionsQ[id+1];
    Q_p.y = dev_intersectionsQ[id+1];
  }
}
// Smoke-test kernel: prints a marker from each thread, then stores
// a + 10 into *x (all launched threads write the same value).
__global__ void hellworld(int a, int *x) {
  printf("HIIIII \n");
  const int bumped = a + 10;
  *x = bumped;
}
// Host driver: copies the two polygons to the device, launches the
// pairwise edge-intersection kernel, copies the per-pair results back,
// and prints them.
//
// polyPX/polyPY : x/y coordinates of polygon P (sizeP vertices)
// polyQX/polyQY : x/y coordinates of polygon Q (sizeQ vertices)
void calculateIntersections(double *polyPX, double *polyPY, double *polyQX, double *polyQY, int sizeP, int sizeQ){
  double *dev_polyPX, *dev_polyPY, *dev_polyQX, *dev_polyQY;
  double *dev_intersectionsP, *dev_intersectionsQ;
  // Host-side result buffers: one (x, y, alpha|beta) triplet per edge pair.
  double intersectionsP[sizeP*sizeQ*3], intersectionsQ[sizeP*sizeQ*3];
  // Allocate memory in device
  hipMalloc((void **) &dev_polyPX, sizeP*sizeof(double));
  hipMalloc((void **) &dev_polyPY, sizeP*sizeof(double));
  hipMalloc((void **) &dev_polyQX, sizeQ*sizeof(double));
  hipMalloc((void **) &dev_polyQY, sizeQ*sizeof(double));
  hipMalloc((void **) &dev_intersectionsP, 3*sizeP*sizeQ*sizeof(double));
  hipMalloc((void **) &dev_intersectionsQ, 3*sizeP*sizeQ*sizeof(double));
  // Copy input vectors from host memory to GPU buffers.
  hipMemcpy(dev_polyPX, polyPX, sizeP*sizeof(double), hipMemcpyHostToDevice);
  hipMemcpy(dev_polyPY, polyPY, sizeP*sizeof(double), hipMemcpyHostToDevice);
  hipMemcpy(dev_polyQX, polyQX, sizeQ*sizeof(double), hipMemcpyHostToDevice);
  hipMemcpy(dev_polyQY, polyQY, sizeQ*sizeof(double), hipMemcpyHostToDevice);
  printf("in cpu before\n");
  // NOTE: single-block launch caps sizeP*sizeQ at the device's max threads
  // per block; a multi-block grid shape is needed for larger polygons.
  hipLaunchKernelGGL(( intersect) , dim3(1), dim3(sizeP*sizeQ), 0, 0, dev_polyPX, dev_polyPY, dev_polyQX, dev_polyQY, dev_intersectionsP, dev_intersectionsQ, sizeP, sizeQ);
  // hipMemcpy on the default stream blocks until the kernel has finished.
  hipMemcpy(&intersectionsP, dev_intersectionsP, 3*sizeP*sizeQ*sizeof(double), hipMemcpyDeviceToHost);
  hipMemcpy(&intersectionsQ, dev_intersectionsQ, 3*sizeP*sizeQ*sizeof(double), hipMemcpyDeviceToHost);
  hipDeviceSynchronize();
  printf("in cpu after\n");
  int j;
  for (j = 0; sizeP*sizeQ > j; ++j) {
    printf(">> %f %f %f\n", intersectionsP[j*3], intersectionsP[j*3+1], intersectionsP[j*3+2]);
  }
  hipFree(dev_polyPX);
  hipFree(dev_polyPY);
  hipFree(dev_polyQX);
  hipFree(dev_polyQY);
  // FIX: the intersection result buffers were previously leaked.
  hipFree(dev_intersectionsP);
  hipFree(dev_intersectionsQ);
}
// Minimal end-to-end sanity check: launches the hellworld kernel with two
// threads and prints the value it wrote back.
void testhello() {
  printf("in cpu before\n");
  int x;
  int *dev_x;
  hipMalloc((void**)&dev_x, sizeof(int));
  hipLaunchKernelGGL(( hellworld) , dim3(1),dim3(2), 0, 0, 11, dev_x);
  // Blocking copy: also synchronizes with the kernel on the default stream.
  hipMemcpy(&x, dev_x, sizeof(int), hipMemcpyDeviceToHost);
  printf("***== %d\n", x);
  hipDeviceSynchronize();
  // FIX: dev_x was previously leaked.
  hipFree(dev_x);
  printf("in cpu after\n");
}
// int main(){
// testhello();
// // hipDeviceSynchronize();
// return 0;
// } | eed73ce9670e5e4c352744353e4096efb053bf85.cu | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define EPSILON 0.000000001
// Simple 2D point / 2D vector in double precision; the basic geometric
// type used by all intersection routines in this translation unit.
typedef struct
{
  double x, y;  // Cartesian coordinates
} point;
// Twice the signed area of triangle [P,Q,R]: positive when the three
// points are in counter-clockwise order, zero when collinear.
__device__ double A(const point& P, const point& Q, const point& R) {
  const double ux = Q.x - P.x;
  const double uy = Q.y - P.y;
  const double vx = R.x - P.x;
  const double vy = R.y - P.y;
  return ux * vy - uy * vx;
}
// Component-wise difference a - b of two 2D points.
__device__ point sub(const point& a, const point& b) {
  point d = {a.x - b.x, a.y - b.y};
  return d;
}
// Component-wise sum a + b of two 2D points.
__device__ point add(const point& a, const point& b) {
  point s = {a.x + b.x, a.y + b.y};
  return s;
}
// Dot product of two 2D vectors: a.x*b.x + a.y*b.y.
__device__ double mul(const point& a, const point& b) {
  return a.x * b.x + a.y * b.y;
}
// Scale a 2D vector by the scalar c.
__device__ point mulScalar(const double c, const point& b) {
  point scaled = {c * b.x, c * b.y};
  return scaled;
}
/*
-----------------------------------------------------------------
Function to return intersection type
Runs in GPU
Called from Device
-------------------------------------------------------------------
*/
// __device__ int getIntersectType(const edge& edgeP, const edge& edgeQ, double& alpha, double& beta) {
// Classify how edge [P1,P2] of polygon P intersects edge [Q1,Q2] of
// polygon Q (Greiner-Hormann style clipping with degenerate cases).
//
// Outputs (meaningful only for a non-zero return value):
//   alpha - parametric position of the intersection along [P1,P2]
//   beta  - parametric position of the intersection along [Q1,Q2]
//
// Return codes (see the commented symbolic names inline):
//   0 NO_INTERSECTION    3 T_INTERSECTION_P   6 T_OVERLAP_Q
//   1 X_INTERSECTION     4 V_INTERSECTION     7 T_OVERLAP_P
//   2 T_INTERSECTION_Q   5 X_OVERLAP          8 V_OVERLAP
__device__ int getIntersectType(const point& P1, const point& P2, const point& Q1, const point& Q2, double& alpha, double& beta) {
    // Signed areas locate P1 and P2 relative to the line through Q1,Q2.
    double AP1 = A(P1, Q1, Q2);
    double AP2 = A(P2, Q1, Q2);
    if (fabs(AP1 - AP2) > EPSILON) {
        // [P1,P2] and [Q1,Q2] are not parallel -> unique line intersection.
        double AQ1 = A(Q1, P1, P2);
        double AQ2 = A(Q2, P1, P2);
        // Parametric position of the intersection on each segment.
        alpha = AP1 / (AP1 - AP2);
        beta  = AQ1 / (AQ1 - AQ2);
        // Classify alpha against {0} and the open interval (0,1).
        bool alpha_is_0   = false;
        bool alpha_in_0_1 = false;
        if ((alpha > EPSILON) && (alpha < 1.0 - EPSILON))
            alpha_in_0_1 = true;
        else if (fabs(alpha) <= EPSILON)
            alpha_is_0 = true;
        // Classify beta the same way.
        bool beta_is_0   = false;
        bool beta_in_0_1 = false;
        if ((beta > EPSILON) && (beta < 1.0 - EPSILON))
            beta_in_0_1 = true;
        else if (fabs(beta) <= EPSILON)
            beta_is_0 = true;
        // Distinguish intersection types.
        if (alpha_in_0_1 && beta_in_0_1)  return (1);  // X_INTERSECTION
        if (alpha_is_0   && beta_in_0_1)  return (2);  // T_INTERSECTION_Q
        if (beta_is_0    && alpha_in_0_1) return (3);  // T_INTERSECTION_P
        if (alpha_is_0   && beta_is_0)    return (4);  // V_INTERSECTION
    }
    else if (fabs(AP1) < EPSILON) {
        // [P1,P2] and [Q1,Q2] are collinear -> analyse potential overlap.
        point dP = sub(P2, P1);
        point dQ = sub(Q2, Q1);
        point PQ = sub(Q1, P1);
        // Project Q1 onto P's edge (alpha) and P1 onto Q's edge (beta).
        alpha = mul(PQ, dP) / mul(dP, dP);
        beta  = -mul(PQ, dQ) / mul(dQ, dQ);
        // Classify alpha: 0, inside (0,1), or outside [0,1).
        bool alpha_is_0       = false;
        bool alpha_in_0_1     = false;
        bool alpha_not_in_0_1 = false;
        if ((alpha > EPSILON) && (alpha < 1.0 - EPSILON))
            alpha_in_0_1 = true;
        else if (fabs(alpha) <= EPSILON)
            alpha_is_0 = true;
        else
            alpha_not_in_0_1 = true;
        // Classify beta the same way.
        bool beta_is_0       = false;
        bool beta_in_0_1     = false;
        bool beta_not_in_0_1 = false;
        if ((beta > EPSILON) && (beta < 1.0 - EPSILON))
            beta_in_0_1 = true;
        else if (fabs(beta) <= EPSILON)  // BUG FIX: original tested fabs(alpha) here,
            beta_is_0 = true;            // misclassifying every collinear overlap case.
        else
            beta_not_in_0_1 = true;
        // Distinguish overlap types.
        if (alpha_in_0_1     && beta_in_0_1)  return (5);  // X_OVERLAP
        if (alpha_not_in_0_1 && beta_in_0_1)  return (6);  // T_OVERLAP_Q
        if (beta_not_in_0_1  && alpha_in_0_1) return (7);  // T_OVERLAP_P
        if (alpha_is_0       && beta_is_0)    return (8);  // V_OVERLAP
    }
    return (0);  // NO_INTERSECTION
}
/*
-----------------------------------------------------------------
Function to calculate all intersections
Runs in GPU
Called from Host
-------------------------------------------------------------------
*/
/*
-----------------------------------------------------------------
 Kernel: one thread per (P-edge, Q-edge) pair. Classifies the pair's
 intersection via getIntersectType and records the intersection point
 plus its parametric position in dev_intersectionsP / dev_intersectionsQ
 (layout: 3 doubles per pair -> x, y, alpha-or-beta).
 Expected launch: at least sizeP*sizeQ threads in one block
 (indexing uses threadIdx.x only).
-------------------------------------------------------------------
*/
__global__ void intersect(double *polyPX, double *polyPY, double *polyQX, double *polyQY, double *dev_intersectionsP, double *dev_intersectionsQ, int sizeP, int sizeQ){
    int id = threadIdx.x;
    if (id >= sizeP * sizeQ) return;  // fixed: was `>`, which let id == sizeP*sizeQ run

    double alpha;
    double beta;
    point I;

    point P1, P2, Q1, Q2;
    int pid = id / sizeQ;        // edge index in P
    int qid = (id + 1) % sizeQ;  // edge index in Q
    // NOTE(review): `(id+1)%sizeQ` never pairs some (P-edge, Q-edge)
    // combinations and double-covers others; `id % sizeQ` would cover each
    // pair exactly once -- confirm intent before changing.

    // neighbor types = {P1->0, Q1->1, I_P->2, I_Q->3}
    // per-thread scratch for neighbour bookkeeping (device-heap allocation)
    int *neighborP = (int *) malloc(sizeP * sizeQ * sizeof(int));
    int *neighborQ = (int *) malloc(sizeP * sizeQ * sizeof(int));

    // Wrap-aware successor indices. The original read polyPX[pid+1] /
    // polyQX[qid+1] unconditionally (out of bounds on the last edge) and
    // only afterwards overwrote the value with vertex 0 -- fixed here.
    int pnext = (pid == sizeP - 1) ? 0 : pid + 1;
    int qnext = (qid == sizeQ - 1) ? 0 : qid + 1;

    P1.x = polyPX[pid];
    P1.y = polyPY[pid];
    P2.x = polyPX[pnext];
    P2.y = polyPY[pnext];
    Q1.x = polyQX[qid];
    Q1.y = polyQY[qid];
    Q2.x = polyQX[qnext];
    Q2.y = polyQY[qnext];

    //
    // determine intersection or overlap type
    //
    int i = getIntersectType(P1, P2, Q1, Q2, alpha, beta);
    printf("type %d\n", i);

    switch(i) {
    //
    // X-intersection: proper crossing in the interior of both edges
    //
    case 1:
        // I = (1-alpha)*P1 + alpha*P2
        I = add(mulScalar((1.0-alpha), P1), mulScalar(alpha, P2));
        dev_intersectionsP[id*3]   = I.x;
        dev_intersectionsP[id*3+1] = I.y;
        dev_intersectionsP[id*3+2] = alpha;
        neighborP[id] = 3;
        dev_intersectionsQ[id*3]   = I.x;
        dev_intersectionsQ[id*3+1] = I.y;
        dev_intersectionsQ[id*3+2] = beta;
        neighborQ[id] = 2;
        break;
    //
    // X-overlap: collinear, each start vertex interior to the other edge
    //
    case 5:
        dev_intersectionsQ[id*3]   = P1.x;
        dev_intersectionsQ[id*3+1] = P1.y;
        dev_intersectionsQ[id*3+2] = beta;
        neighborQ[id] = 0;
        dev_intersectionsP[id*3]   = Q1.x;
        dev_intersectionsP[id*3+1] = Q1.y;
        dev_intersectionsP[id*3+2] = alpha;
        neighborP[id] = 1;
        break;
    //
    // T-intersection or T-overlap on Q: P1 lies on edge [Q1,Q2]
    //
    case 2:
    case 6:
        dev_intersectionsQ[id*3]   = P1.x;
        dev_intersectionsQ[id*3+1] = P1.y;
        dev_intersectionsQ[id*3+2] = beta;
        neighborQ[id] = 0;
        break;
    //
    // T-intersection or T-overlap on P: Q1 lies on edge [P1,P2]
    //
    case 3:
    case 7:
        dev_intersectionsP[id*3]   = Q1.x;
        dev_intersectionsP[id*3+1] = Q1.y;
        dev_intersectionsP[id*3+2] = alpha;
        neighborP[id] = 1;
        break;
    //
    // V-intersection or V-overlap: shared vertex P1 == Q1
    //
    case 4:
    case 8:
        neighborP[id] = 1;
        neighborQ[id] = 0;
        break;
    }

    // fixed: the original leaked these per-thread device-heap allocations
    free(neighborP);
    free(neighborQ);
}
/*
-----------------------------------------------------------------
Function to label all instersection points
Runs in GPU
Called from Device
-------------------------------------------------------------------
*/
/*
-----------------------------------------------------------------
 Device helper meant to label each intersection point (entry/exit) from
 the local configuration of its predecessor/successor on P and on Q.
 neighbor codes: {0 -> vertex of P, 1 -> vertex of Q, 2 -> I_P, 3 -> I_Q}
 NOTE(review): this function is unfinished -- it gathers the neighbouring
 points but never computes or stores a label. Fixed here so it compiles
 (missing semicolon; undeclared `intersectionsP`/`intersectionsQ` now use
 the dev_* parameters) for the follow-up work to build on.
-------------------------------------------------------------------
*/
__device__ void labelIntersectionPoints(double *polyPX, double *polyPY, double *polyQX, double *polyQY, double *dev_intersectionsP, double *dev_intersectionsQ, int *neighborP, int *neighborQ, int sizeP, int sizeQ){
    int id = threadIdx.x;
    // intersection point handled by this thread
    point I;
    I.x = dev_intersectionsP[id*3];    // fixed: was `intersectionsP` (undeclared)
    I.y = dev_intersectionsP[id*3+1];
    // determine local configuration at this intersection vertex
    point P_m, P_p, Q_m, Q_p;          // fixed: missing semicolon
    P_m.x = polyPX[id];                // P-, predecessor of I on P
    P_m.y = polyPY[id];
    P_p.x = polyPX[id+1];              // P+, successor of I on P
    P_p.y = polyPY[id+1];
    // pick Q-, Q+ from wherever the neighbour actually lives
    if (neighborP[id] == 0) {
        Q_m.x = polyPX[id-1];          // Q-, predecessor of I on Q
        Q_m.y = polyPY[id-1];
        Q_p.x = polyPX[id+1];          // Q+, successor of I on Q
        Q_p.y = polyPY[id+1];
    } else if (neighborP[id] == 1) {
        Q_m.x = polyQX[id-1];
        Q_m.y = polyQY[id-1];
        Q_p.x = polyQX[id+1];
        Q_p.y = polyQY[id+1];
    } else if (neighborP[id] == 2) {
        // NOTE(review): the intersection arrays are strided 3 doubles per
        // entry and y sits at +1; these flat id-1/id+1 indices (and using
        // the same slot for x and y) look wrong -- confirm the layout.
        Q_m.x = dev_intersectionsP[id-1];  // fixed: was `intersectionsP`
        Q_m.y = dev_intersectionsP[id-1];
        Q_p.x = dev_intersectionsP[id+1];
        Q_p.y = dev_intersectionsP[id+1];
    } else if (neighborP[id] == 3) {
        Q_m.x = dev_intersectionsQ[id-1];  // fixed: was `intersectionsQ`
        Q_m.y = dev_intersectionsQ[id-1];
        Q_p.x = dev_intersectionsQ[id+1];
        Q_p.y = dev_intersectionsQ[id+1];
    }
    // TODO: derive and store the actual entry/exit label from P_m/P_p/Q_m/Q_p.
}
// Sanity-check kernel: prints from the device and stores a+10 into *x.
// Every launched thread writes the same value, so any launch shape works.
__global__ void hellworld(int a, int *x) {
    printf("HIIIII \n");
    *x = 10 + a;
}
// Host driver: copies both polygons (x/y coordinate arrays of sizeP and
// sizeQ vertices) to the device, launches one thread per (P-edge, Q-edge)
// pair to classify intersections, and prints the per-pair results
// (x, y, parametric position) copied back from the device.
void calculateIntersections(double *polyPX, double *polyPY, double *polyQX, double *polyQY, int sizeP, int sizeQ){
double *dev_polyPX, *dev_polyPY, *dev_polyQX, *dev_polyQY;
double *dev_intersectionsP, *dev_intersectionsQ;
double intersectionsP[sizeP*sizeQ*3], intersectionsQ[sizeP*sizeQ*3];
// Allocate memory in device
cudaMalloc((void **) &dev_polyPX, sizeP*sizeof(double));
cudaMalloc((void **) &dev_polyPY, sizeP*sizeof(double));
cudaMalloc((void **) &dev_polyQX, sizeQ*sizeof(double));
cudaMalloc((void **) &dev_polyQY, sizeQ*sizeof(double));
cudaMalloc((void **) &dev_intersectionsP, 3*sizeP*sizeQ*sizeof(double));
cudaMalloc((void **) &dev_intersectionsQ, 3*sizeP*sizeQ*sizeof(double));
// Copy input vectors from host memory to GPU buffers.
cudaMemcpy(dev_polyPX, polyPX, sizeP*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_polyPY, polyPY, sizeP*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_polyQX, polyQX, sizeQ*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_polyQY, polyQY, sizeQ*sizeof(double), cudaMemcpyHostToDevice);
printf("in cpu before\n");
// NOTE(review): a single block caps this at the per-block thread limit
// (1024 edge pairs on current hardware); reshape into multiple blocks
// for larger polygons.
intersect <<<1, sizeP*sizeQ>>> (dev_polyPX, dev_polyPY, dev_polyQX, dev_polyQY, dev_intersectionsP, dev_intersectionsQ, sizeP, sizeQ);
// blocking copies: the kernel has finished before the host reads results
cudaMemcpy(&intersectionsP, dev_intersectionsP, 3*sizeP*sizeQ*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(&intersectionsQ, dev_intersectionsQ, 3*sizeP*sizeQ*sizeof(double), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
printf("in cpu after\n");
int j;
for(j=0; sizeP*sizeQ>j; ++j){
printf(">> %f %f %f\n", intersectionsP[j*3], intersectionsP[j*3+1], intersectionsP[j*3+2]);
}
cudaFree(dev_polyPX);
cudaFree(dev_polyPY);
cudaFree(dev_polyQX);
cudaFree(dev_polyQY);
// fixed: these two result buffers were leaked
cudaFree(dev_intersectionsP);
cudaFree(dev_intersectionsQ);
}
// Minimal launch/readback smoke test for the hellworld kernel.
// Launches 2 threads, copies back the single int the kernel writes, prints it.
void testhello() {
printf("in cpu before\n");
int x;
int *dev_x;
cudaMalloc((void**)&dev_x, sizeof(int));
hellworld <<<1,2>>> (11, dev_x);
// cudaMemcpy is blocking, so the kernel has finished before we read x.
cudaMemcpy(&x, dev_x, sizeof(int), cudaMemcpyDeviceToHost);
printf("***== %d\n", x);
cudaDeviceSynchronize();
printf("in cpu after\n");
cudaFree(dev_x);  // fixed: dev_x was leaked
}
// int main(){
// testhello();
// // cudaDeviceSynchronize();
// return 0;
// } |
77e0dd04164f39b7684883c3382ad066a48bfb8e.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
// Block index
// int bx = blockIdx.x;
// int by = blockIdx.y;
// Thread index
// int tx = threadIdx.x;
// int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
// float aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
// float aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
// float aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
// float bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
// float bStep = BLOCK_SIZE * wB;
float sum = 00.1;
float fsum = 00.2;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
// for (int a = aBegin, b = bBegin;
// a <= aEnd;
// a += aStep, b += bStep)
// {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
// __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
// __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
// As[ty][tx] = A[a + wA * ty + tx];
// Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
//__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
int16_t result;
float qqq =0;
float x_counter = 0.0;
asm(".reg .f32 t1;\n\t");
asm(".reg .u16 t2, t3, t4;\n\t");
if (0) {
for (int k = 0; k < BLOCK_SIZE; ++k) {
//for (float k = 0.1; k < 32.9; k = k+0.99)
//{
while (x_counter < 1000000) {
asm("mul.f32 %0, %3, t1, %2;\n\t"
"mul.lo.u16 t2, t3, t4;\n\t"
"mul.f32 t1, %0, t1, %3;\n\t"
"mul.f32 t1, t1, t1, %2;\n\t"
"mul.lo.u16 t2, t3, t4;\n\t"
"mul.f32 t1, %0, t1, %0;\n\t"
"mul.f32 %0, t1, t1, %0;\n\t"
"mul.f32 t1, %0, t1, %0;\n\t"
"mul.f32 t1, t1, t1, %2;\n\t"
"mul.f32 t1, %0, t1, %0;\n\t"
"mul.lo.u16 t4, t3, t2;\n\t"
"mul.f32 %0, t1, %0, %3;\n\t"
"mul.f32 t1, %0, %3, %0;\n\t"
"mul.f32 t1, t1, %2, %0;\n\t"
"mul.f32 t1, %0, %0, %3;\n\t"
"mul.lo.u16 t2, t2, t4;\n\t"
"mul.f32 %0, t1, %0, t1;\n\t"
"mul.f32 t1, t1, %0, %0;\n\t"
"mul.lo.u16 %1, t2, t4;\n\t"
"mul.f32 %0, t1, %0, t1;\n\t": "=f"(qqq), "=h"(result): "f"(sum), "f"(fsum) );
x_counter += 1.0;
// }
//qqq += k*k;
//sum += qqq*qqq/(qqq*2.3);
//sum += (a+b+k)*qqq;
//Csub += As[ty][k] * Bs[k][tx] + sum;
}
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
//__syncthreads();
}
if (0) {
//if (threadIdx.y % 2 == 0) {
//for (int k = 0; k < BLOCK_SIZE; ++k) {
//for (float k = 0.1; k < 32.9; k = k+0.99)
//{
while (x_counter < 10000000) {
asm("mul.lo.u16 t2, %1, t4;\n\t"
"mul.f32 %0, t1, %0;\n\t"
"mul.lo.u16 t2, t2, t4;\n\t"
"mul.f32 t1, t1, %0;\n\t"
"mul.lo.u16 t2, t2, t4;\n\t"
"mul.f32 t1, t1, %0;\n\t"
"mul.lo.u16 t4, t3, t2;\n\t"
"mul.f32 %0, t1, %0;\n\t"
"mul.lo.u16 %1, t2, t4;\n\t"
"mul.f32 %0, t1, %2;\n\t": "=f"(qqq), "=h"(result): "f"(sum), "f"(fsum) );
x_counter += 1.0;
}
//}
} else if (1) {
//for (int k = 0; k < BLOCK_SIZE; ++k) {
//for (float k = 0.1; k < 32.9; k = k+0.99)
//{
while (x_counter < 10000000) {
asm("mul.lo.u16 t2, %1, t4;\n\t"
"mul.lo.u16 t4, t3, t2;\n\t"
"mul.lo.u16 t2, t2, t4;\n\t"
"mul.lo.u16 t3, t3, t2;\n\t"
"mul.lo.u16 t2, t2, t4;\n\t"
"mul.lo.u16 t2, t3, t4;\n\t"
"mul.lo.u16 t4, t3, t2;\n\t"
"mul.lo.u16 t2, t2, t4;\n\t"
"mul.lo.u16 t3, t3, t2;\n\t"
"mul.lo.u16 %1, t3, t2;\n\t": "=f"(qqq), "=h"(result): "f"(sum), "f"(fsum) );
x_counter += 1.0;
}
//}
}else {
//for (int k = 0; k < BLOCK_SIZE; ++k) {
//for (float k = 0.1; k < 32.9; k = k+0.99)
//{
while (x_counter < 10000000) {
asm("mul.f32 t1, %0, t1;\n\t"
"mul.f32 %0, t1, %0;\n\t"
"mul.f32 t1, %0, %0;\n\t"
"mul.f32 %2, t1, %0;\n\t"
"mul.f32 %2, t1, %0;\n\t"
"mul.f32 %0, t1, %0;\n\t"
"mul.f32 t1, t1, %2;\n\t"
"mul.f32 %2, t1, %0;\n\t"
"mul.f32 %2, t1, %0;\n\t"
"mul.f32 %0, t1, %2;\n\t": "=f"(qqq), "=h"(result): "f"(sum), "f"(fsum) );
x_counter += 1.0;
}
//}
}
// Write the block sub-matrix to device memory;
// each thread writes one element
//int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
//C[c + wB * ty + tx] = Csub;
C[0] = qqq+result;
}
// Fill the first `size` floats of `data` with the constant `val`.
void constantInit(float *data, int size, float val)
{
    for (float *p = data, *end = data + size; p != end; ++p)
        *p = val;
}
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA_int(int *C, int *A, int *B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
int sum = 0;
int fsum = 0;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
int Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ int As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ int Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
//__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
int qqq =0;
int x_counter = 0;
asm(".reg .u32 t1;\n\t");
for (int k = 0; k < BLOCK_SIZE; ++k)
{
while (x_counter < 1000000) {
asm("mul.u32 %0, %1, %2;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, %0, t1;\n\t": "=r"(qqq): "r"(As[ty][k]), "r"(Bs[k][tx]) );
x_counter += 1;
}
//qqq += k*k;
//fsum += qqq*qqq/(qqq*3);
//sum += a+b+k;
//Csub += As[ty][k] * Bs[k][tx]+sum;
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
//__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
//int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
//C[c + wB * ty + tx] = Csub;
}
// Fill the first `size` ints of `data` with the constant `val`.
void constantInit_int(int *data, int size, int val)
{
    for (int *p = data, *end = data + size; p != end; ++p)
        *p = val;
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
int streamNum = 1;
if (checkCmdLineFlag(argc, (const char **)argv, "streams"))
{
streamNum = getCmdLineArgumentInt(argc, (const char **)argv, "streams");
}
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
unsigned int mem_size_A_double = sizeof(int) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
unsigned int mem_size_B_double = sizeof(int) * size_B;
float *h_B = (float *)malloc(mem_size_B);
int *h_A_double = (int *)malloc(mem_size_A_double);
int *h_B_double = (int *)malloc(mem_size_B_double);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
constantInit_int(h_A_double, size_A, 2);
constantInit_int(h_B_double, size_B, 23);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate device memory
int *d_A_double, *d_B_double, *d_C_double;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
unsigned int mem_size_C_double = dimsC.x * dimsC.y * sizeof(int);
int *h_C_double = (int *) malloc(mem_size_C_double);
// allocate and initialize an array of stream handles
hipStream_t *streams = (hipStream_t *) malloc(streamNum * sizeof(hipStream_t));
for (int i = 0; i < streamNum; i++)
{
checkCudaErrors(hipStreamCreate(&(streams[i])));
}
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
hipError_t error;
error = hipMalloc((void **) &d_A, mem_size_A);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_B, mem_size_B);
if (error != hipSuccess)
{
printf("hipMalloc d_B returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_C, mem_size_C);
if (error != hipSuccess)
{
printf("hipMalloc d_C returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_A_double, mem_size_A_double);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_B_double, mem_size_B_double);
if (error != hipSuccess)
{
printf("hipMalloc d_B returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_C_double, mem_size_C_double);
if (error != hipSuccess)
{
printf("hipMalloc d_C returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = hipMemcpy(d_A_double, h_A_double, mem_size_A_double, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_B_double, h_B_double, mem_size_B_double, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16)
{
// matrixMulCUDA<16><<< grid, threads, 0,streams[0] >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
// matrixMulCUDA<32><<< grid, threads, 0, streams[0] >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
printf("done\n");
hipDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
int nIter = 4;
for (int j = 0; j < nIter; j++)
{
if (block_size == 16)
{
hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads),0, streams[j%streamNum] , d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads),0, streams[j%streamNum] , d_C, d_A, d_B, dimsA.x, dimsB.x);
}
if (block_size == 16)
{
//matrixMulCUDA_int<16><<< grid, threads,0, streams[(j+1)%streamNum] >>>(d_C_double, d_A_double, d_B_double, dimsA.x, dimsB.x);
}
else
{
// matrixMulCUDA_int<32><<< grid, threads,0, streams[(j+1)%streamNum] >>>(d_C_double, d_A_double, d_B_double, dimsA.x, dimsB.x);
}
}
// Record the start event
error = hipEventRecord(start, NULL);
// Record the stop event
error = hipEventRecord(stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6 ; // machine zero
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err/abs_val/dot_length ;
if (rel_err > eps)
{
// printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(h_A_double);
free(h_B_double);
free(h_C_double);
hipFree(d_A_double);
hipFree(d_B_double);
hipFree(d_C_double);
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
/**
* Program main
*/
/**
 * Program main (HIP-dialect build of the CUDA matrixMul sample).
 *
 * Parses command-line flags, selects and validates a device, sets up the
 * default matrix dimensions, and dispatches to matrixMultiply().
 *
 * Fixes vs. the original sample code:
 *  - the return value of hipSetDevice() is now checked;
 *  - deviceProp is only inspected AFTER hipGetDeviceProperties() is known to
 *    have succeeded (previously computeMode was read from a possibly
 *    uninitialized struct).
 */
int main(int argc, char **argv)
{
    printf("[Matrix Multiply Using CUDA] - Starting...\n");
    // Print usage and exit when help is requested.
    if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
        checkCmdLineFlag(argc, (const char **)argv, "?"))
    {
        printf("Usage -device=n (n >= 0 for deviceID)\n");
        printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
        printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
        printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
        exit(EXIT_SUCCESS);
    }
    // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
    hipError_t error;
    int devID = 0;
    if (checkCmdLineFlag(argc, (const char **)argv, "device"))
    {
        devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
        error = hipSetDevice(devID);
        if (error != hipSuccess)
        {
            printf("hipSetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
        }
    }
    hipDeviceProp_t deviceProp;
    error = hipGetDevice(&devID);
    if (error != hipSuccess)
    {
        printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
    }
    error = hipGetDeviceProperties(&deviceProp, devID);
    if (error != hipSuccess)
    {
        // deviceProp is not valid on failure; do not inspect it below.
        printf("hipGetDeviceProperties returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
    }
    else
    {
        if (deviceProp.computeMode == hipComputeModeProhibited)
        {
            fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
            exit(EXIT_SUCCESS);
        }
        printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
        if ((deviceProp.concurrentKernels == 0))
        {
            printf("> GPU does not support concurrent kernel execution\n");
            printf(" CUDA kernel runs will be serialized\n");
        }
        printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
               deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
    }
    int block_size = 32;
    // Default sizes: A is 320x320, B is 640x320 (for block_size == 32).
    dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
    dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
    // width of Matrix A
    if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
    {
        dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
    }
    // height of Matrix A
    if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
    {
        dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
    }
    // width of Matrix B
    if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
    {
        dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
    }
    // height of Matrix B
    if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
    {
        dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
    }
    // Inner dimensions must agree for A*B to be defined.
    if (dimsA.x != dimsB.y)
    {
        printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
               dimsA.x, dimsB.y);
        exit(EXIT_FAILURE);
    }
    printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
    int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
    exit(matrix_result);
}
| 77e0dd04164f39b7684883c3382ad066a48bfb8e.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
/**
 * Synthetic benchmark kernel derived from the CUDA matrixMul sample.
 *
 * NOTE(review): despite its name, this kernel no longer multiplies matrices.
 * The original tiled-multiply logic is commented out; what remains is a
 * busy-loop of hand-written inline PTX `mul` instructions, apparently to
 * stress the multiplier pipelines (presumably for a concurrency/power
 * experiment — confirm with the author). Only C[0] is written.
 *
 * The `.reg` declarations below are emitted by separate asm() statements and
 * rely on nvcc emitting them into the same PTX body as the later asm blocks
 * that use t1..t4 — fragile, so the code is left byte-for-byte unchanged.
 */
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
// Block index
// int bx = blockIdx.x;
// int by = blockIdx.y;
// Thread index
// int tx = threadIdx.x;
// int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
// float aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
// float aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
// float aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
// float bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
// float bStep = BLOCK_SIZE * wB;
// Seed values fed into the PTX blocks below ("00.1" parses as 0.1).
float sum = 00.1;
float fsum = 00.2;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
// for (int a = aBegin, b = bBegin;
// a <= aEnd;
// a += aStep, b += bStep)
// {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
// __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
// __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
// As[ty][tx] = A[a + wA * ty + tx];
// Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
//__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
int16_t result;
float qqq =0;
float x_counter = 0.0;
// Declare PTX scratch registers shared by the asm blocks below.
asm(".reg .f32 t1;\n\t");
asm(".reg .u16 t2, t3, t4;\n\t");
// Dead branch kept for experimentation (mixed f32/u16 multiply workload).
// NOTE(review): several "mul.f32" lines here carry three source operands,
// which is not valid PTX mul syntax — likely meant to be mad/fma; confirm
// whether this variant ever assembled.
if (0) {
for (int k = 0; k < BLOCK_SIZE; ++k) {
//for (float k = 0.1; k < 32.9; k = k+0.99)
//{
while (x_counter < 1000000) {
asm("mul.f32 %0, %3, t1, %2;\n\t"
"mul.lo.u16 t2, t3, t4;\n\t"
"mul.f32 t1, %0, t1, %3;\n\t"
"mul.f32 t1, t1, t1, %2;\n\t"
"mul.lo.u16 t2, t3, t4;\n\t"
"mul.f32 t1, %0, t1, %0;\n\t"
"mul.f32 %0, t1, t1, %0;\n\t"
"mul.f32 t1, %0, t1, %0;\n\t"
"mul.f32 t1, t1, t1, %2;\n\t"
"mul.f32 t1, %0, t1, %0;\n\t"
"mul.lo.u16 t4, t3, t2;\n\t"
"mul.f32 %0, t1, %0, %3;\n\t"
"mul.f32 t1, %0, %3, %0;\n\t"
"mul.f32 t1, t1, %2, %0;\n\t"
"mul.f32 t1, %0, %0, %3;\n\t"
"mul.lo.u16 t2, t2, t4;\n\t"
"mul.f32 %0, t1, %0, t1;\n\t"
"mul.f32 t1, t1, %0, %0;\n\t"
"mul.lo.u16 %1, t2, t4;\n\t"
"mul.f32 %0, t1, %0, t1;\n\t": "=f"(qqq), "=h"(result): "f"(sum), "f"(fsum) );
x_counter += 1.0;
// }
//qqq += k*k;
//sum += qqq*qqq/(qqq*2.3);
//sum += (a+b+k)*qqq;
//Csub += As[ty][k] * Bs[k][tx] + sum;
}
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
//__syncthreads();
}
// Dead branch: interleaved u16/f32 multiply workload.
if (0) {
//if (threadIdx.y % 2 == 0) {
//for (int k = 0; k < BLOCK_SIZE; ++k) {
//for (float k = 0.1; k < 32.9; k = k+0.99)
//{
while (x_counter < 10000000) {
asm("mul.lo.u16 t2, %1, t4;\n\t"
"mul.f32 %0, t1, %0;\n\t"
"mul.lo.u16 t2, t2, t4;\n\t"
"mul.f32 t1, t1, %0;\n\t"
"mul.lo.u16 t2, t2, t4;\n\t"
"mul.f32 t1, t1, %0;\n\t"
"mul.lo.u16 t4, t3, t2;\n\t"
"mul.f32 %0, t1, %0;\n\t"
"mul.lo.u16 %1, t2, t4;\n\t"
"mul.f32 %0, t1, %2;\n\t": "=f"(qqq), "=h"(result): "f"(sum), "f"(fsum) );
x_counter += 1.0;
}
//}
// Active branch: pure 16-bit integer multiply busy loop (10M iterations).
} else if (1) {
//for (int k = 0; k < BLOCK_SIZE; ++k) {
//for (float k = 0.1; k < 32.9; k = k+0.99)
//{
while (x_counter < 10000000) {
asm("mul.lo.u16 t2, %1, t4;\n\t"
"mul.lo.u16 t4, t3, t2;\n\t"
"mul.lo.u16 t2, t2, t4;\n\t"
"mul.lo.u16 t3, t3, t2;\n\t"
"mul.lo.u16 t2, t2, t4;\n\t"
"mul.lo.u16 t2, t3, t4;\n\t"
"mul.lo.u16 t4, t3, t2;\n\t"
"mul.lo.u16 t2, t2, t4;\n\t"
"mul.lo.u16 t3, t3, t2;\n\t"
"mul.lo.u16 %1, t3, t2;\n\t": "=f"(qqq), "=h"(result): "f"(sum), "f"(fsum) );
x_counter += 1.0;
}
//}
// Unreachable branch: pure f32 multiply busy loop.
}else {
//for (int k = 0; k < BLOCK_SIZE; ++k) {
//for (float k = 0.1; k < 32.9; k = k+0.99)
//{
while (x_counter < 10000000) {
asm("mul.f32 t1, %0, t1;\n\t"
"mul.f32 %0, t1, %0;\n\t"
"mul.f32 t1, %0, %0;\n\t"
"mul.f32 %2, t1, %0;\n\t"
"mul.f32 %2, t1, %0;\n\t"
"mul.f32 %0, t1, %0;\n\t"
"mul.f32 t1, t1, %2;\n\t"
"mul.f32 %2, t1, %0;\n\t"
"mul.f32 %2, t1, %0;\n\t"
"mul.f32 %0, t1, %2;\n\t": "=f"(qqq), "=h"(result): "f"(sum), "f"(fsum) );
x_counter += 1.0;
}
//}
}
// Write the block sub-matrix to device memory;
// each thread writes one element
//int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
//C[c + wB * ty + tx] = Csub;
// Sink the results so the busy loops are not optimized away.
// NOTE(review): every thread races on C[0]; harmless for a throughput
// experiment but not a meaningful output.
C[0] = qqq+result;
}
// Fill the first `size` elements of `data` with the constant `val`.
// `size` <= 0 leaves the buffer untouched.
void constantInit(float *data, int size, float val)
{
    float *end = data + size;
    for (float *p = data; p < end; ++p)
    {
        *p = val;
    }
}
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
/**
 * Integer variant of the synthetic multiply benchmark kernel.
 *
 * The tiled-multiply scaffolding (shared-memory tiles, loop over tiles) is
 * kept, but the inner product is replaced by a long chain of inline PTX
 * 32-bit integer multiplies executed in a busy loop — apparently a
 * multiplier-throughput workload (confirm intent with the author).
 * Nothing is written to C; the kernel has no observable output.
 *
 * Fix vs. original: `#pragma unroll` previously preceded a *declaration*
 * (`int qqq = 0;`), where it is ignored/diagnosed by nvcc; it must
 * immediately precede a loop, so it has been moved onto the `for (k ...)`
 * loop it was meant to annotate.
 *
 * NOTE(review): x_counter is declared outside the k-loop, so the busy loop
 * only runs during the first k iteration of each tile — presumably
 * intentional for the experiment, but worth confirming.
 */
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA_int(int *C, int *A, int *B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
int sum = 0;
int fsum = 0;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
int Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ int As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ int Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
//__syncthreads();
int qqq =0;
int x_counter = 0;
// PTX scratch register used by the asm block below.
asm(".reg .u32 t1;\n\t");
// FIX: #pragma unroll must immediately precede the loop it annotates.
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
while (x_counter < 1000000) {
asm("mul.u32 %0, %1, %2;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, %0, %1;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 t1, t1, %2;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, t1, %0;\n\t"
"mul.u32 t1, %0, %0;\n\t"
"mul.u32 %0, %0, t1;\n\t": "=r"(qqq): "r"(As[ty][k]), "r"(Bs[k][tx]) );
x_counter += 1;
}
//qqq += k*k;
//fsum += qqq*qqq/(qqq*3);
//sum += a+b+k;
//Csub += As[ty][k] * Bs[k][tx]+sum;
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
//__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
//int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
//C[c + wB * ty + tx] = Csub;
}
// Set every one of the first `size` entries of the integer buffer `data`
// to the constant `val`. A non-positive `size` is a no-op.
void constantInit_int(int *data, int size, int val)
{
    int idx = 0;
    while (idx < size)
    {
        data[idx] = val;
        ++idx;
    }
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
/**
 * Run a simple test of matrix multiplication using CUDA.
 *
 * Allocates host/device buffers for A, B, C (float) plus a parallel set of
 * int buffers, launches matrixMulCUDA nIter times across `-streams=N`
 * streams, times the launches with CUDA events, and verifies the result
 * against the analytic value dimsA.x * valB.
 *
 * Returns EXIT_SUCCESS when the result check passes, EXIT_FAILURE otherwise.
 *
 * Fixes vs. original:
 *  - the start event is now recorded BEFORE the kernel loop (it was recorded
 *    after the loop, immediately before the stop event, so the reported time
 *    measured nothing);
 *  - events and streams are destroyed and the stream array freed (they
 *    leaked on every call).
 */
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
    // Number of streams the kernel launches are distributed over.
    int streamNum = 1;
    if (checkCmdLineFlag(argc, (const char **)argv, "streams"))
    {
        streamNum = getCmdLineArgumentInt(argc, (const char **)argv, "streams");
    }
    // Allocate host memory for matrices A and B
    unsigned int size_A = dimsA.x * dimsA.y;
    unsigned int mem_size_A = sizeof(float) * size_A;
    // NOTE(review): the "_double" buffers actually hold int (historical name).
    unsigned int mem_size_A_double = sizeof(int) * size_A;
    float *h_A = (float *)malloc(mem_size_A);
    unsigned int size_B = dimsB.x * dimsB.y;
    unsigned int mem_size_B = sizeof(float) * size_B;
    unsigned int mem_size_B_double = sizeof(int) * size_B;
    float *h_B = (float *)malloc(mem_size_B);
    int *h_A_double = (int *)malloc(mem_size_A_double);
    int *h_B_double = (int *)malloc(mem_size_B_double);
    // Initialize host memory: A is all ones, B is all valB, so every element
    // of C should equal dimsA.x * valB.
    const float valB = 0.01f;
    constantInit(h_A, size_A, 1.0f);
    constantInit(h_B, size_B, valB);
    constantInit_int(h_A_double, size_A, 2);
    constantInit_int(h_B_double, size_B, 23);
    // Allocate device memory
    float *d_A, *d_B, *d_C;
    int *d_A_double, *d_B_double, *d_C_double;
    // Allocate host matrix C
    dim3 dimsC(dimsB.x, dimsA.y, 1);
    unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
    float *h_C = (float *) malloc(mem_size_C);
    unsigned int mem_size_C_double = dimsC.x * dimsC.y * sizeof(int);
    int *h_C_double = (int *) malloc(mem_size_C_double);
    // allocate and initialize an array of stream handles
    cudaStream_t *streams = (cudaStream_t *) malloc(streamNum * sizeof(cudaStream_t));
    for (int i = 0; i < streamNum; i++)
    {
        checkCudaErrors(cudaStreamCreate(&(streams[i])));
    }
    if (h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host matrix C!\n");
        exit(EXIT_FAILURE);
    }
    cudaError_t error;
    error = cudaMalloc((void **) &d_A, mem_size_A);
    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_A returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = cudaMalloc((void **) &d_B, mem_size_B);
    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_B returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = cudaMalloc((void **) &d_C, mem_size_C);
    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_C returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    // copy host memory to device
    error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = cudaMalloc((void **) &d_A_double, mem_size_A_double);
    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_A returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = cudaMalloc((void **) &d_B_double, mem_size_B_double);
    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_B returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = cudaMalloc((void **) &d_C_double, mem_size_C_double);
    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_C returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    // copy host memory to device
    error = cudaMemcpy(d_A_double, h_A_double, mem_size_A_double, cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = cudaMemcpy(d_B_double, h_B_double, mem_size_B_double, cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    // Setup execution parameters: one thread per output element, tiled by
    // block_size x block_size blocks.
    dim3 threads(block_size, block_size);
    dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
    // Create and start timer
    printf("Computing result using CUDA Kernel...\n");
    // Performs warmup operation using matrixMul CUDA kernel
    // (left disabled as in the original experiment)
    if (block_size == 16)
    {
        // matrixMulCUDA<16><<< grid, threads, 0,streams[0] >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
    }
    else
    {
        // matrixMulCUDA<32><<< grid, threads, 0, streams[0] >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
    }
    printf("done\n");
    cudaDeviceSynchronize();
    // Allocate CUDA events that we'll use for timing
    cudaEvent_t start;
    error = cudaEventCreate(&start);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    cudaEvent_t stop;
    error = cudaEventCreate(&stop);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Record the start event BEFORE launching the kernels so the elapsed
    // time actually covers the launches below.
    error = cudaEventRecord(start, NULL);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Execute the kernel
    int nIter = 4;
    for (int j = 0; j < nIter; j++)
    {
        if (block_size == 16)
        {
            matrixMulCUDA<16><<< grid, threads,0, streams[j%streamNum] >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
        }
        else
        {
            matrixMulCUDA<32><<< grid, threads,0, streams[j%streamNum] >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
        }
        // The integer variant is kept for the concurrency experiment but
        // currently disabled.
        if (block_size == 16)
        {
            //matrixMulCUDA_int<16><<< grid, threads,0, streams[(j+1)%streamNum] >>>(d_C_double, d_A_double, d_B_double, dimsA.x, dimsB.x);
        }
        else
        {
            // matrixMulCUDA_int<32><<< grid, threads,0, streams[(j+1)%streamNum] >>>(d_C_double, d_A_double, d_B_double, dimsA.x, dimsB.x);
        }
    }
    // Record the stop event
    error = cudaEventRecord(stop, NULL);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Wait for the stop event to complete
    error = cudaEventSynchronize(stop);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    float msecTotal = 0.0f;
    error = cudaEventElapsedTime(&msecTotal, start, stop);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Compute and print the performance
    float msecPerMatrixMul = msecTotal / nIter;
    double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
    double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
    printf(
        "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
        gigaFlops,
        msecPerMatrixMul,
        flopsPerMatrixMul,
        threads.x * threads.y);
    // Copy result from device to host
    error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    printf("Checking computed result for correctness: ");
    bool correct = true;
    // test relative error by the formula
    // |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
    double eps = 1.e-6 ; // machine zero
    for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
    {
        double abs_err = fabs(h_C[i] - (dimsA.x * valB));
        double dot_length = dimsA.x;
        double abs_val = fabs(h_C[i]);
        double rel_err = abs_err/abs_val/dot_length ;
        if (rel_err > eps)
        {
            // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
            correct = false;
        }
    }
    printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
    // Clean up memory
    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A_double);
    free(h_B_double);
    free(h_C_double);
    cudaFree(d_A_double);
    cudaFree(d_B_double);
    cudaFree(d_C_double);
    // Release timing events and streams (previously leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    for (int i = 0; i < streamNum; i++)
    {
        cudaStreamDestroy(streams[i]);
    }
    free(streams);
    printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
    if (correct)
    {
        return EXIT_SUCCESS;
    }
    else
    {
        return EXIT_FAILURE;
    }
}
/**
* Program main
*/
/**
 * Program main.
 *
 * Parses command-line flags, selects and validates a CUDA device, sets up
 * the default matrix dimensions, and dispatches to matrixMultiply().
 *
 * Fixes vs. the original sample code:
 *  - the return value of cudaSetDevice() is now checked;
 *  - deviceProp is only inspected AFTER cudaGetDeviceProperties() is known
 *    to have succeeded (previously computeMode was read from a possibly
 *    uninitialized struct).
 */
int main(int argc, char **argv)
{
    printf("[Matrix Multiply Using CUDA] - Starting...\n");
    // Print usage and exit when help is requested.
    if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
        checkCmdLineFlag(argc, (const char **)argv, "?"))
    {
        printf("Usage -device=n (n >= 0 for deviceID)\n");
        printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
        printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
        printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
        exit(EXIT_SUCCESS);
    }
    // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
    cudaError_t error;
    int devID = 0;
    if (checkCmdLineFlag(argc, (const char **)argv, "device"))
    {
        devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
        error = cudaSetDevice(devID);
        if (error != cudaSuccess)
        {
            printf("cudaSetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        }
    }
    cudaDeviceProp deviceProp;
    error = cudaGetDevice(&devID);
    if (error != cudaSuccess)
    {
        printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
    }
    error = cudaGetDeviceProperties(&deviceProp, devID);
    if (error != cudaSuccess)
    {
        // deviceProp is not valid on failure; do not inspect it below.
        printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
    }
    else
    {
        if (deviceProp.computeMode == cudaComputeModeProhibited)
        {
            fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
            exit(EXIT_SUCCESS);
        }
        printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
        if ((deviceProp.concurrentKernels == 0))
        {
            printf("> GPU does not support concurrent kernel execution\n");
            printf(" CUDA kernel runs will be serialized\n");
        }
        printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
               deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
    }
    int block_size = 32;
    // Default sizes: A is 320x320, B is 640x320 (for block_size == 32).
    dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
    dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
    // width of Matrix A
    if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
    {
        dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
    }
    // height of Matrix A
    if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
    {
        dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
    }
    // width of Matrix B
    if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
    {
        dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
    }
    // height of Matrix B
    if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
    {
        dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
    }
    // Inner dimensions must agree for A*B to be defined.
    if (dimsA.x != dimsB.y)
    {
        printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
               dimsA.x, dimsB.y);
        exit(EXIT_FAILURE);
    }
    printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
    int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
    exit(matrix_result);
}
|
9116152e86925289a739bbd4e71a8ed7df99a1d1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "CHECK.h"
#include "h_vecScalarMult.h"
/* h_vecScalarMult
Uses the CPU (the host) to multiply a vector by a scalar.
A is a pointer to the input vector.
K is the scalar value to use in the multiply
The result is stored in the vector pointed to by R.
n is the length of the vectors.
returns the amount of time it takes to perform the
vector scalar multiply
*/
/* h_vecScalarMult
   Uses the CPU (the host) to multiply a vector by a scalar.
   A is a pointer to the input vector.
   K is the scalar value to use in the multiply.
   The result is stored in the vector pointed to by R.
   n is the length of the vectors.
   Returns the amount of time (in milliseconds) it takes to perform the
   vector scalar multiply.

   Fix vs. original: the two timing events are now destroyed before
   returning (previously they leaked on every call).
*/
float h_vecScalarMult(float* A, float * R, float K, int n)
{
    hipEvent_t start_cpu, stop_cpu;
    float cpuMsecTime = -1;
    //Use cuda functions to do the timing
    //create event objects
    CHECK(hipEventCreate(&start_cpu));
    CHECK(hipEventCreate(&stop_cpu));
    //record the starting time
    CHECK(hipEventRecord(start_cpu));
    // Element-wise scale on the host.
    int i;
    for (i = 0; i < n; i++)
    {
        R[i] = A[i] * K;
    }
    //record the ending time and wait for event to complete
    CHECK(hipEventRecord(stop_cpu));
    CHECK(hipEventSynchronize(stop_cpu));
    //calculate the elapsed time between the two events
    CHECK(hipEventElapsedTime(&cpuMsecTime, start_cpu, stop_cpu));
    //release the event objects (previously leaked)
    CHECK(hipEventDestroy(start_cpu));
    CHECK(hipEventDestroy(stop_cpu));
    return cpuMsecTime;
}
| 9116152e86925289a739bbd4e71a8ed7df99a1d1.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "CHECK.h"
#include "h_vecScalarMult.h"
/* h_vecScalarMult
Uses the CPU (the host) to multiply a vector by a scalar.
A is a pointer to the input vector.
K is the scalar value to use in the multiply
The result is stored in the vector pointed to by R.
n is the length of the vectors.
returns the amount of time it takes to perform the
vector scalar multiply
*/
/* h_vecScalarMult
   Uses the CPU (the host) to multiply a vector by a scalar.
   A is a pointer to the input vector.
   K is the scalar value to use in the multiply.
   The result is stored in the vector pointed to by R.
   n is the length of the vectors.
   Returns the amount of time (in milliseconds) it takes to perform the
   vector scalar multiply.

   Fix vs. original: the two timing events are now destroyed before
   returning (previously they leaked on every call).
*/
float h_vecScalarMult(float* A, float * R, float K, int n)
{
    cudaEvent_t start_cpu, stop_cpu;
    float cpuMsecTime = -1;
    //Use cuda functions to do the timing
    //create event objects
    CHECK(cudaEventCreate(&start_cpu));
    CHECK(cudaEventCreate(&stop_cpu));
    //record the starting time
    CHECK(cudaEventRecord(start_cpu));
    // Element-wise scale on the host.
    int i;
    for (i = 0; i < n; i++)
    {
        R[i] = A[i] * K;
    }
    //record the ending time and wait for event to complete
    CHECK(cudaEventRecord(stop_cpu));
    CHECK(cudaEventSynchronize(stop_cpu));
    //calculate the elapsed time between the two events
    CHECK(cudaEventElapsedTime(&cpuMsecTime, start_cpu, stop_cpu));
    //release the event objects (previously leaked)
    CHECK(cudaEventDestroy(start_cpu));
    CHECK(cudaEventDestroy(stop_cpu));
    return cpuMsecTime;
}
|
7b5e023ee5dc065f347418c5100d59005874e926.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <ctime>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "hipsparse.h"
#include <iostream>
#include <iomanip>
#include <cmath>
#include "mmio.h"
#include <float.h>
#include <omp.h>
//#include "anonymouslib_cuda.h"
#include <hip/hip_runtime_api.h>
#include "spmv_kernel.h"
#include <limits>
using namespace std;
// Print a human-readable name for a hipSPARSE status code to stdout.
// HIPSPARSE_STATUS_SUCCESS prints nothing (matching the original behavior);
// statuses without a known name fall through to a numeric report so
// failures are never silently swallowed.
void print_error(hipsparseStatus_t status) {
    if (status == HIPSPARSE_STATUS_SUCCESS)
        return; // nothing to report
    if (status == HIPSPARSE_STATUS_NOT_INITIALIZED)
        cout << "HIPSPARSE_STATUS_NOT_INITIALIZED" << endl;
    else if (status == HIPSPARSE_STATUS_ALLOC_FAILED)
        cout << "HIPSPARSE_STATUS_ALLOC_FAILED" << endl;
    else if (status == HIPSPARSE_STATUS_INVALID_VALUE)
        cout << "HIPSPARSE_STATUS_INVALID_VALUE" << endl;
    else if (status == HIPSPARSE_STATUS_ARCH_MISMATCH)
        cout << "HIPSPARSE_STATUS_ARCH_MISMATCH" << endl;
    else if (status == HIPSPARSE_STATUS_MAPPING_ERROR)
        cout << "HIPSPARSE_STATUS_MAPPING_ERROR" << endl;
    else if (status == HIPSPARSE_STATUS_EXECUTION_FAILED)
        cout << "HIPSPARSE_STATUS_EXECUTION_FAILED" << endl;
    else if (status == HIPSPARSE_STATUS_INTERNAL_ERROR)
        cout << "HIPSPARSE_STATUS_INTERNAL_ERROR" << endl;
    else if (status == HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED)
        cout << "HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED" << endl;
    else
        cout << "Unrecognized hipsparseStatus_t value: " << static_cast<int>(status) << endl;
}
int main(int argc, char *argv[]) {
if (argc < 6) {
cout << "Incorrect number of arguments!" << endl;
cout << "Usage ./spmv [input matrix file] [number of GPU(s)] [number of test(s)] [kernel version (1-3)] [data type ('f' or 'b')]" << endl;
return -1;
}
char input_type = argv[1][0];
char * filename = argv[2];
int ngpu = atoi(argv[3]);
int repeat_test = atoi(argv[4]);
int kernel_version = atoi(argv[5]);
//int divide = atoi(argv[7]);
//int copy_of_workspace = atoi(argv[8]);
int ret_code;
MM_typecode matcode;
FILE *f;
int m, n;
long long nnz;
int * cooRowIndex;
int * cooColIndex;
double * cooVal;
long long * csrRowPtr;
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount < ngpu) {
cout << "Error: Not enough number of GPUs. Only " << deviceCount << "available." << endl;
return -1;
}
if (ngpu <= 0) {
cout << "Error: Number of GPU(s) needs to be greater than 0." << endl;
return -1;
}
if (kernel_version != 1 && kernel_version != 2 && kernel_version != 3) {
cout << "Error: The kernel version can only be: 1, 2, or 3." << endl;
return -1;
}
// if (divide <= 0) {
// cout << "Error: Number of tasks needs to be greater than 0." << endl;
// return -1;
// }
// if (copy_of_workspace <= 0) {
// cout << "Error: Number of Hyper-Q needs to be greater than 0." << endl;
// return -1;
// }
cout << "Using " << ngpu << " GPU(s)." << endl;
cout << "Kernel #" << kernel_version << " is selected." << endl;
//cout << divide << "total task(s) will be generated for version 2 with "<< copy_of_workspace << " Hyper-Q(s) on each GPU." << endl;
if (input_type == 'f') {
cout << "Loading input matrix from " << filename << endl;
if ((f = fopen(filename, "r")) == NULL) {
exit(1);
}
if (mm_read_banner(f, &matcode) != 0) {
printf("Could not process Matrix Market banner.\n");
exit(1);
}
int nnz_int;
if ((ret_code = mm_read_mtx_crd_size(f, &m, &n, &nnz_int)) !=0) {
exit(1);
}
nnz = nnz_int;
cout << "m: " << m << " n: " << n << " nnz: " << nnz << endl;
hipHostMalloc((void **)&cooRowIndex, nnz * sizeof(int));
hipHostMalloc((void **)&cooColIndex, nnz * sizeof(int));
hipHostMalloc((void **)&cooVal, nnz * sizeof(double));
char data_type = argv[6][0];
// Read matrix from file into COO format
for (int i = 0; i < nnz; i++) {
if (data_type == 'b') { // binary input
fscanf(f, "%d %d\n", &cooRowIndex[i], &cooColIndex[i]);
cooVal[i] = 0.00001;
} else if (data_type == 'f'){ // float input
fscanf(f, "%d %d %lg\n", &cooRowIndex[i], &cooColIndex[i], &cooVal[i]);
}
cooRowIndex[i]--;
cooColIndex[i]--;
if (cooRowIndex[i] < 0 || cooColIndex[i] < 0) { // report error
cout << "i = " << i << " [" <<cooRowIndex[i] << ", " << cooColIndex[i] << "] = " << cooVal[i] << endl;
}
}
} else if(input_type == 'g') { // generate data
//int n = 10000;
n = atoi(filename);
m = n;
int nb = m / 8;
double r;
double r1 = 0.9;
double r2 = 0.01;
long long p = 0;
for (int i = 0; i < m; i += nb) {
if (i == 0) {
r = r1;
} else {
r = r2;
}
for (int ii = i; ii < i + nb; ii++) {
for (int j = 0; j < n * r; j++) {
p++;
}
}
}
nnz = p;
cout << "m: " << m << " n: " << n << " nnz: " << nnz << endl;
hipHostMalloc((void **)&cooRowIndex, nnz * sizeof(int));
hipHostMalloc((void **)&cooColIndex, nnz * sizeof(int));
hipHostMalloc((void **)&cooVal, nnz * sizeof(double));
p = 0;
cout << "Start generating data " << std::flush;
for (int i = 0; i < m; i += nb) {
cout << "." << std::flush;
if (i == 0) {
r = r1;
} else {
r = r2;
}
//cout << "Matrix:" << endl;
for (int ii = i; ii < i + nb; ii++) {
for (int j = 0; j < n * r; j++) {
//if (p > nnz) { cout << "error" << endl; break;}
//else {
cooRowIndex[p] = ii;
cooColIndex[p] = j;
cooVal[p] = (double) rand() / (RAND_MAX);
p++;
//cout << 1 << " ";
//}
}
//cout << endl;
}
}
cout << endl;
//cout << "m: " << m << " n: " << n << " nnz: " << p << endl;
cout << "Done generating data." << endl;
}
// Convert COO to CSR
//csrRowPtr = (int *) malloc((m+1) * sizeof(int));
hipHostMalloc((void **)&csrRowPtr, (m+1) * sizeof(long long));
//cout << "m: " << m << " n: " << n << " nnz: " << nnz << endl;
long long matrix_data_space = nnz * sizeof(double) + nnz * sizeof(int) + (m+1) * sizeof(int);
//cout << matrix_data_space << endl;
double matrix_size_in_gb = (double)matrix_data_space / 1e9;
cout << "Matrix space size: " << matrix_size_in_gb << " GB." << endl;
int * counter = new int[m];
for (int i = 0; i < m; i++) {
counter[i] = 0;
}
for (int i = 0; i < nnz; i++) {
counter[cooRowIndex[i]]++;
}
//cout << "nnz: " << nnz << endl;
//cout << "counter: ";
int t = 0;
for (int i = 0; i < m; i++) {
//cout << counter[i] << ", ";
t += counter[i];
}
//cout << t << endl;
//cout << endl;
//cout << "csrRowPtr: ";
csrRowPtr[0] = 0;
for (int i = 1; i <= m; i++) {
csrRowPtr[i] = csrRowPtr[i - 1] + counter[i - 1];
//cout << "csrRowPtr[" << i <<"] = "<<csrRowPtr[i] << endl;
}
double * x;
double * y1;
double * y2;
double * y3;
//x = (double *)malloc(n * sizeof(double));
//y1 = (double *)malloc(m * sizeof(double));
y2 = (double *)malloc(m * sizeof(double));
//y3 = (double *)malloc(m * sizeof(double));
hipHostMalloc((void **)&x, n * sizeof(double));
hipHostMalloc((void **)&y1, m * sizeof(double));
//hipHostMalloc((void **)&y2, m * sizeof(double));
hipHostMalloc((void **)&y3, m * sizeof(double));
for (int i = 0; i < n; i++)
{
x[i] = 1.0;//((double) rand() / (RAND_MAX));
}
for (int i = 0; i < m; i++)
{
y1[i] = 0.0;
y2[i] = 0.0;
y3[i] = 0.0;
}
double ALPHA = (double) rand() / (RAND_MAX);
double BETA = (double) rand() / (RAND_MAX);
double time_baseline = 0.0;
double time_v1 = 0.0;
double time_v2 = 0.0;
double avg_time_baseline = 0.0;
double avg_time_v1 = 0.0;
double avg_time_v2 = 0.0;
double curr_time = 0.0;
int warm_up_iter = 1;
double profile_time = 0.0;
double min_profile_time = numeric_limits<double>::max();
double best_dev_count = 0.0;
double best_copy = 0.0;
cout << "Warming up GPU(s)..." << endl;
for (int i = 0; i < warm_up_iter; i++) {
spMV_mgpu_v1(m, n, nnz, &ALPHA,
cooVal, csrRowPtr, cooColIndex,
x, &BETA,
y2,
ngpu,
kernel_version);
}
for (int d = 1; d <= ngpu; d*=2) {
for (int c = 1; c <= 8; c*=2) {
curr_time = get_time();
spMV_mgpu_v2(m, n, nnz, &ALPHA,
cooVal, csrRowPtr, cooColIndex,
x, &BETA,
y3,
d,
kernel_version,
nnz / (d * c),
c);
profile_time = get_time() - curr_time;
if (profile_time < min_profile_time) {
min_profile_time = profile_time;
best_dev_count = d;
best_copy = c;
}
}
}
int ret1 = 0;
int ret2 = 0;
int ret3 = 0;
cout << "Starting tests..." << endl;
//hipProfilerStart();
cout << " Test No. Baseline Version 1 Pass Version 2 Pass" << endl;
cout << " Time(s) Time(s) Time(s) " << endl;
cout << "=======================================================================" << endl;
for (int i = 0; i < repeat_test; i++) {
for (int i = 0; i < m; i++)
{
y1[i] = 0.0;
y2[i] = 0.0;
y3[i] = 0.0;
}
curr_time = get_time();
ret1 = spMV_mgpu_baseline(m, n, nnz, &ALPHA,
cooVal, csrRowPtr, cooColIndex,
x, &BETA,
y1,
ngpu);
time_baseline = get_time() - curr_time;
curr_time = get_time();
ret2 = spMV_mgpu_v1(m, n, nnz, &ALPHA,
cooVal, csrRowPtr, cooColIndex,
x, &BETA,
y2,
ngpu,
kernel_version);
time_v1 = get_time() - curr_time;
//hipProfilerStart();
curr_time = get_time();
ret3 = spMV_mgpu_v2(m, n, nnz, &ALPHA,
cooVal, csrRowPtr, cooColIndex,
x, &BETA,
y3,
best_dev_count,
kernel_version,
nnz / (best_dev_count * best_copy),
best_copy);
time_v2 = get_time() - curr_time;
avg_time_baseline += time_baseline;
avg_time_v1 += time_v1;
avg_time_v2 += time_v2;
bool correct1 = true;
bool correct2 = true;
for(int i = 0; i < m; i++) {
//cout << y1[i] << " - " << y2[i] << " - "<< y3[i] << endl;
if (abs(y1[i] - y2[i]) > 1e-3) {
//cout << y1[i] << " - " << y3[i] << endl;
correct1 = false;
}
if (abs(y1[i] - y3[i]) > 1e-3) {
correct2 = false;
}
}
cout << setw(10) << i+1;
if (ret1 == 0) {
cout << setw(11) << time_baseline;
} else {
cout << setw(11) << "Failed";
}
if (ret2 == 0) {
cout << setw(13) << time_v1;
} else {
cout << setw(13) << "Failed";
}
if (ret1 == 0) {
if (correct1) cout << setw(9) <<"Y";
else cout << setw(9) << "N";
} else {
cout << setw(9) <<"N/A";
}
if (ret3 == 0) {
cout << setw(14) << time_v2;
} else {
cout << setw(14) << "Failed.";
}
if (ret1 == 0) {
if (correct2) cout << setw(9) <<"Y";
else cout << setw(9) << "N";
} else {
cout << setw(9) <<"N/A";
}
cout << endl;
}
//hipProfilerStop();
avg_time_baseline/=repeat_test;
avg_time_v1/=repeat_test;
avg_time_v2/=repeat_test;
cout << "......................................................................." << endl;
cout << setw(10) << "Average" << " ";
if (ret1 == 0) {
cout << setw(11) << avg_time_baseline;
} else {
cout << setw(11) << "Failed";
}
if (ret2 == 0) {
cout << setw(13) << avg_time_v1;
} else {
cout << setw(13) << "Failed";
}
if (ret3 == 0) {
cout << setw(23) << avg_time_v2;
} else {
cout << setw(23) << "Failed";
}
cout << endl;
hipHostFree(cooRowIndex);
hipHostFree(cooColIndex);
hipHostFree(cooVal);
hipHostFree(csrRowPtr);
}
| 7b5e023ee5dc065f347418c5100d59005874e926.cu | #include <stdio.h>
#include <stdlib.h>
#include <ctime>
#include <sys/time.h>
#include <cuda_runtime.h>
#include "cusparse.h"
#include <iostream>
#include <iomanip>
#include <cmath>
#include "mmio.h"
#include <float.h>
#include <omp.h>
//#include "anonymouslib_cuda.h"
#include <cuda_profiler_api.h>
#include "spmv_kernel.h"
#include <limits>
using namespace std;
void print_error(cusparseStatus_t status) {
if (status == CUSPARSE_STATUS_NOT_INITIALIZED)
cout << "CUSPARSE_STATUS_NOT_INITIALIZED" << endl;
else if (status == CUSPARSE_STATUS_ALLOC_FAILED)
cout << "CUSPARSE_STATUS_ALLOC_FAILED" << endl;
else if (status == CUSPARSE_STATUS_INVALID_VALUE)
cout << "CUSPARSE_STATUS_INVALID_VALUE" << endl;
else if (status == CUSPARSE_STATUS_ARCH_MISMATCH)
cout << "CUSPARSE_STATUS_ARCH_MISMATCH" << endl;
else if (status == CUSPARSE_STATUS_INTERNAL_ERROR)
cout << "CUSPARSE_STATUS_INTERNAL_ERROR" << endl;
else if (status == CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED)
cout << "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED" << endl;
}
int main(int argc, char *argv[]) {
if (argc < 6) {
cout << "Incorrect number of arguments!" << endl;
cout << "Usage ./spmv [input matrix file] [number of GPU(s)] [number of test(s)] [kernel version (1-3)] [data type ('f' or 'b')]" << endl;
return -1;
}
char input_type = argv[1][0];
char * filename = argv[2];
int ngpu = atoi(argv[3]);
int repeat_test = atoi(argv[4]);
int kernel_version = atoi(argv[5]);
//int divide = atoi(argv[7]);
//int copy_of_workspace = atoi(argv[8]);
int ret_code;
MM_typecode matcode;
FILE *f;
int m, n;
long long nnz;
int * cooRowIndex;
int * cooColIndex;
double * cooVal;
long long * csrRowPtr;
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount < ngpu) {
cout << "Error: Not enough number of GPUs. Only " << deviceCount << "available." << endl;
return -1;
}
if (ngpu <= 0) {
cout << "Error: Number of GPU(s) needs to be greater than 0." << endl;
return -1;
}
if (kernel_version != 1 && kernel_version != 2 && kernel_version != 3) {
cout << "Error: The kernel version can only be: 1, 2, or 3." << endl;
return -1;
}
// if (divide <= 0) {
// cout << "Error: Number of tasks needs to be greater than 0." << endl;
// return -1;
// }
// if (copy_of_workspace <= 0) {
// cout << "Error: Number of Hyper-Q needs to be greater than 0." << endl;
// return -1;
// }
cout << "Using " << ngpu << " GPU(s)." << endl;
cout << "Kernel #" << kernel_version << " is selected." << endl;
//cout << divide << "total task(s) will be generated for version 2 with "<< copy_of_workspace << " Hyper-Q(s) on each GPU." << endl;
if (input_type == 'f') {
cout << "Loading input matrix from " << filename << endl;
if ((f = fopen(filename, "r")) == NULL) {
exit(1);
}
if (mm_read_banner(f, &matcode) != 0) {
printf("Could not process Matrix Market banner.\n");
exit(1);
}
int nnz_int;
if ((ret_code = mm_read_mtx_crd_size(f, &m, &n, &nnz_int)) !=0) {
exit(1);
}
nnz = nnz_int;
cout << "m: " << m << " n: " << n << " nnz: " << nnz << endl;
cudaMallocHost((void **)&cooRowIndex, nnz * sizeof(int));
cudaMallocHost((void **)&cooColIndex, nnz * sizeof(int));
cudaMallocHost((void **)&cooVal, nnz * sizeof(double));
char data_type = argv[6][0];
// Read matrix from file into COO format
for (int i = 0; i < nnz; i++) {
if (data_type == 'b') { // binary input
fscanf(f, "%d %d\n", &cooRowIndex[i], &cooColIndex[i]);
cooVal[i] = 0.00001;
} else if (data_type == 'f'){ // float input
fscanf(f, "%d %d %lg\n", &cooRowIndex[i], &cooColIndex[i], &cooVal[i]);
}
cooRowIndex[i]--;
cooColIndex[i]--;
if (cooRowIndex[i] < 0 || cooColIndex[i] < 0) { // report error
cout << "i = " << i << " [" <<cooRowIndex[i] << ", " << cooColIndex[i] << "] = " << cooVal[i] << endl;
}
}
} else if(input_type == 'g') { // generate data
//int n = 10000;
n = atoi(filename);
m = n;
int nb = m / 8;
double r;
double r1 = 0.9;
double r2 = 0.01;
long long p = 0;
for (int i = 0; i < m; i += nb) {
if (i == 0) {
r = r1;
} else {
r = r2;
}
for (int ii = i; ii < i + nb; ii++) {
for (int j = 0; j < n * r; j++) {
p++;
}
}
}
nnz = p;
cout << "m: " << m << " n: " << n << " nnz: " << nnz << endl;
cudaMallocHost((void **)&cooRowIndex, nnz * sizeof(int));
cudaMallocHost((void **)&cooColIndex, nnz * sizeof(int));
cudaMallocHost((void **)&cooVal, nnz * sizeof(double));
p = 0;
cout << "Start generating data " << std::flush;
for (int i = 0; i < m; i += nb) {
cout << "." << std::flush;
if (i == 0) {
r = r1;
} else {
r = r2;
}
//cout << "Matrix:" << endl;
for (int ii = i; ii < i + nb; ii++) {
for (int j = 0; j < n * r; j++) {
//if (p > nnz) { cout << "error" << endl; break;}
//else {
cooRowIndex[p] = ii;
cooColIndex[p] = j;
cooVal[p] = (double) rand() / (RAND_MAX);
p++;
//cout << 1 << " ";
//}
}
//cout << endl;
}
}
cout << endl;
//cout << "m: " << m << " n: " << n << " nnz: " << p << endl;
cout << "Done generating data." << endl;
}
// Convert COO to CSR
//csrRowPtr = (int *) malloc((m+1) * sizeof(int));
cudaMallocHost((void **)&csrRowPtr, (m+1) * sizeof(long long));
//cout << "m: " << m << " n: " << n << " nnz: " << nnz << endl;
long long matrix_data_space = nnz * sizeof(double) + nnz * sizeof(int) + (m+1) * sizeof(int);
//cout << matrix_data_space << endl;
double matrix_size_in_gb = (double)matrix_data_space / 1e9;
cout << "Matrix space size: " << matrix_size_in_gb << " GB." << endl;
int * counter = new int[m];
for (int i = 0; i < m; i++) {
counter[i] = 0;
}
for (int i = 0; i < nnz; i++) {
counter[cooRowIndex[i]]++;
}
//cout << "nnz: " << nnz << endl;
//cout << "counter: ";
int t = 0;
for (int i = 0; i < m; i++) {
//cout << counter[i] << ", ";
t += counter[i];
}
//cout << t << endl;
//cout << endl;
//cout << "csrRowPtr: ";
csrRowPtr[0] = 0;
for (int i = 1; i <= m; i++) {
csrRowPtr[i] = csrRowPtr[i - 1] + counter[i - 1];
//cout << "csrRowPtr[" << i <<"] = "<<csrRowPtr[i] << endl;
}
double * x;
double * y1;
double * y2;
double * y3;
//x = (double *)malloc(n * sizeof(double));
//y1 = (double *)malloc(m * sizeof(double));
y2 = (double *)malloc(m * sizeof(double));
//y3 = (double *)malloc(m * sizeof(double));
cudaMallocHost((void **)&x, n * sizeof(double));
cudaMallocHost((void **)&y1, m * sizeof(double));
//cudaMallocHost((void **)&y2, m * sizeof(double));
cudaMallocHost((void **)&y3, m * sizeof(double));
for (int i = 0; i < n; i++)
{
x[i] = 1.0;//((double) rand() / (RAND_MAX));
}
for (int i = 0; i < m; i++)
{
y1[i] = 0.0;
y2[i] = 0.0;
y3[i] = 0.0;
}
double ALPHA = (double) rand() / (RAND_MAX);
double BETA = (double) rand() / (RAND_MAX);
double time_baseline = 0.0;
double time_v1 = 0.0;
double time_v2 = 0.0;
double avg_time_baseline = 0.0;
double avg_time_v1 = 0.0;
double avg_time_v2 = 0.0;
double curr_time = 0.0;
int warm_up_iter = 1;
double profile_time = 0.0;
double min_profile_time = numeric_limits<double>::max();
double best_dev_count = 0.0;
double best_copy = 0.0;
cout << "Warming up GPU(s)..." << endl;
for (int i = 0; i < warm_up_iter; i++) {
spMV_mgpu_v1(m, n, nnz, &ALPHA,
cooVal, csrRowPtr, cooColIndex,
x, &BETA,
y2,
ngpu,
kernel_version);
}
for (int d = 1; d <= ngpu; d*=2) {
for (int c = 1; c <= 8; c*=2) {
curr_time = get_time();
spMV_mgpu_v2(m, n, nnz, &ALPHA,
cooVal, csrRowPtr, cooColIndex,
x, &BETA,
y3,
d,
kernel_version,
nnz / (d * c),
c);
profile_time = get_time() - curr_time;
if (profile_time < min_profile_time) {
min_profile_time = profile_time;
best_dev_count = d;
best_copy = c;
}
}
}
int ret1 = 0;
int ret2 = 0;
int ret3 = 0;
cout << "Starting tests..." << endl;
//cudaProfilerStart();
cout << " Test No. Baseline Version 1 Pass Version 2 Pass" << endl;
cout << " Time(s) Time(s) Time(s) " << endl;
cout << "=======================================================================" << endl;
for (int i = 0; i < repeat_test; i++) {
for (int i = 0; i < m; i++)
{
y1[i] = 0.0;
y2[i] = 0.0;
y3[i] = 0.0;
}
curr_time = get_time();
ret1 = spMV_mgpu_baseline(m, n, nnz, &ALPHA,
cooVal, csrRowPtr, cooColIndex,
x, &BETA,
y1,
ngpu);
time_baseline = get_time() - curr_time;
curr_time = get_time();
ret2 = spMV_mgpu_v1(m, n, nnz, &ALPHA,
cooVal, csrRowPtr, cooColIndex,
x, &BETA,
y2,
ngpu,
kernel_version);
time_v1 = get_time() - curr_time;
//cudaProfilerStart();
curr_time = get_time();
ret3 = spMV_mgpu_v2(m, n, nnz, &ALPHA,
cooVal, csrRowPtr, cooColIndex,
x, &BETA,
y3,
best_dev_count,
kernel_version,
nnz / (best_dev_count * best_copy),
best_copy);
time_v2 = get_time() - curr_time;
avg_time_baseline += time_baseline;
avg_time_v1 += time_v1;
avg_time_v2 += time_v2;
bool correct1 = true;
bool correct2 = true;
for(int i = 0; i < m; i++) {
//cout << y1[i] << " - " << y2[i] << " - "<< y3[i] << endl;
if (abs(y1[i] - y2[i]) > 1e-3) {
//cout << y1[i] << " - " << y3[i] << endl;
correct1 = false;
}
if (abs(y1[i] - y3[i]) > 1e-3) {
correct2 = false;
}
}
cout << setw(10) << i+1;
if (ret1 == 0) {
cout << setw(11) << time_baseline;
} else {
cout << setw(11) << "Failed";
}
if (ret2 == 0) {
cout << setw(13) << time_v1;
} else {
cout << setw(13) << "Failed";
}
if (ret1 == 0) {
if (correct1) cout << setw(9) <<"Y";
else cout << setw(9) << "N";
} else {
cout << setw(9) <<"N/A";
}
if (ret3 == 0) {
cout << setw(14) << time_v2;
} else {
cout << setw(14) << "Failed.";
}
if (ret1 == 0) {
if (correct2) cout << setw(9) <<"Y";
else cout << setw(9) << "N";
} else {
cout << setw(9) <<"N/A";
}
cout << endl;
}
//cudaProfilerStop();
avg_time_baseline/=repeat_test;
avg_time_v1/=repeat_test;
avg_time_v2/=repeat_test;
cout << "......................................................................." << endl;
cout << setw(10) << "Average" << " ";
if (ret1 == 0) {
cout << setw(11) << avg_time_baseline;
} else {
cout << setw(11) << "Failed";
}
if (ret2 == 0) {
cout << setw(13) << avg_time_v1;
} else {
cout << setw(13) << "Failed";
}
if (ret3 == 0) {
cout << setw(23) << avg_time_v2;
} else {
cout << setw(23) << "Failed";
}
cout << endl;
cudaFreeHost(cooRowIndex);
cudaFreeHost(cooColIndex);
cudaFreeHost(cooVal);
cudaFreeHost(csrRowPtr);
}
|
44e6e59164f6c5a6a5ebc1584764a96d9fac5393.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2018~2020 XGBoost contributors
*/
#include <xgboost/logging.h>
#include <thrust/copy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
#include <memory>
#include <mutex>
#include <utility>
#include <vector>
#include "device_helpers_hip.cuh"
#include "hist_util.h"
#include "hist_util_hip.cuh"
#include "math.h" // NOLINT
#include "quantile.h"
#include "xgboost/host_device_vector.h"
namespace xgboost {
namespace common {
constexpr float SketchContainer::kFactor;
namespace detail {
// Count the entries in each column and exclusive scan
void ExtractCutsSparse(int device, common::Span<SketchContainer::OffsetT const> cuts_ptr,
Span<Entry const> sorted_data,
Span<size_t const> column_sizes_scan,
Span<SketchEntry> out_cuts) {
dh::LaunchN(device, out_cuts.size(), [=] __device__(size_t idx) {
// Each thread is responsible for obtaining one cut from the sorted input
size_t column_idx = dh::SegmentId(cuts_ptr, idx);
size_t column_size =
column_sizes_scan[column_idx + 1] - column_sizes_scan[column_idx];
size_t num_available_cuts = cuts_ptr[column_idx + 1] - cuts_ptr[column_idx];
size_t cut_idx = idx - cuts_ptr[column_idx];
Span<Entry const> column_entries =
sorted_data.subspan(column_sizes_scan[column_idx], column_size);
size_t rank = (column_entries.size() * cut_idx) /
static_cast<float>(num_available_cuts);
out_cuts[idx] = WQSketch::Entry(rank, rank + 1, 1,
column_entries[rank].fvalue);
});
}
void ExtractWeightedCutsSparse(int device,
common::Span<SketchContainer::OffsetT const> cuts_ptr,
Span<Entry> sorted_data,
Span<float> weights_scan,
Span<size_t> column_sizes_scan,
Span<SketchEntry> cuts) {
dh::LaunchN(device, cuts.size(), [=] __device__(size_t idx) {
// Each thread is responsible for obtaining one cut from the sorted input
size_t column_idx = dh::SegmentId(cuts_ptr, idx);
size_t column_size =
column_sizes_scan[column_idx + 1] - column_sizes_scan[column_idx];
size_t num_available_cuts = cuts_ptr[column_idx + 1] - cuts_ptr[column_idx];
size_t cut_idx = idx - cuts_ptr[column_idx];
Span<Entry> column_entries =
sorted_data.subspan(column_sizes_scan[column_idx], column_size);
Span<float> column_weights_scan =
weights_scan.subspan(column_sizes_scan[column_idx], column_size);
float total_column_weight = column_weights_scan.back();
size_t sample_idx = 0;
if (cut_idx == 0) {
// First cut
sample_idx = 0;
} else if (cut_idx == num_available_cuts) {
// Last cut
sample_idx = column_entries.size() - 1;
} else if (num_available_cuts == column_size) {
// There are less samples available than our buffer
// Take every available sample
sample_idx = cut_idx;
} else {
bst_float rank = (total_column_weight * cut_idx) /
static_cast<float>(num_available_cuts);
sample_idx = thrust::upper_bound(thrust::seq,
column_weights_scan.begin(),
column_weights_scan.end(),
rank) -
column_weights_scan.begin();
sample_idx =
max(static_cast<size_t>(0),
min(sample_idx, column_entries.size() - 1));
}
// repeated values will be filtered out later.
bst_float rmin = sample_idx > 0 ? column_weights_scan[sample_idx - 1] : 0.0f;
bst_float rmax = column_weights_scan[sample_idx];
cuts[idx] = WQSketch::Entry(rmin, rmax, rmax - rmin,
column_entries[sample_idx].fvalue);
});
}
size_t RequiredSampleCutsPerColumn(int max_bins, size_t num_rows) {
double eps = 1.0 / (WQSketch::kFactor * max_bins);
size_t dummy_nlevel;
size_t num_cuts;
WQuantileSketch<bst_float, bst_float>::LimitSizeLevel(
num_rows, eps, &dummy_nlevel, &num_cuts);
return ::min(num_cuts, num_rows);
}
size_t RequiredSampleCuts(bst_row_t num_rows, bst_feature_t num_columns,
size_t max_bins, size_t nnz) {
auto per_column = RequiredSampleCutsPerColumn(max_bins, num_rows);
auto if_dense = num_columns * per_column;
auto result = ::min(nnz, if_dense);
return result;
}
size_t RequiredMemory(bst_row_t num_rows, bst_feature_t num_columns, size_t nnz,
size_t num_bins, bool with_weights) {
size_t peak = 0;
// 0. Allocate cut pointer in quantile container by increasing: n_columns + 1
size_t total = (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 1. Copy and sort: 2 * bytes_per_element * shape
total += BytesPerElement(with_weights) * num_rows * num_columns;
peak = ::max(peak, total);
// 2. Deallocate bytes_per_element * shape due to reusing memory in sort.
total -= BytesPerElement(with_weights) * num_rows * num_columns / 2;
// 3. Allocate colomn size scan by increasing: n_columns + 1
total += (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 4. Allocate cut pointer by increasing: n_columns + 1
total += (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 5. Allocate cuts: assuming rows is greater than bins: n_columns * limit_size
total += RequiredSampleCuts(num_rows, num_bins, num_bins, nnz) * sizeof(SketchEntry);
// 6. Deallocate copied entries by reducing: bytes_per_element * shape.
peak = ::max(peak, total);
total -= (BytesPerElement(with_weights) * num_rows * num_columns) / 2;
// 7. Deallocate column size scan.
peak = ::max(peak, total);
total -= (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 8. Deallocate cut size scan.
total -= (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 9. Allocate final cut values, min values, cut ptrs: ::min(rows, bins + 1) *
// n_columns + n_columns + n_columns + 1
total += ::min(num_rows, num_bins) * num_columns * sizeof(float);
total += num_columns *
sizeof(std::remove_reference_t<decltype(
std::declval<HistogramCuts>().MinValues())>::value_type);
total += (num_columns + 1) *
sizeof(std::remove_reference_t<decltype(
std::declval<HistogramCuts>().Ptrs())>::value_type);
peak = ::max(peak, total);
return peak;
}
size_t SketchBatchNumElements(size_t sketch_batch_num_elements,
bst_row_t num_rows, size_t columns, size_t nnz, int device,
size_t num_cuts, bool has_weight) {
if (sketch_batch_num_elements == 0) {
auto required_memory = RequiredMemory(num_rows, columns, nnz, num_cuts, has_weight);
// use up to 80% of available space
sketch_batch_num_elements = (dh::AvailableMemory(device) -
required_memory * 0.8);
}
return sketch_batch_num_elements;
}
void SortByWeight(dh::XGBCachingDeviceAllocator<char>* alloc,
dh::caching_device_vector<float>* weights,
dh::caching_device_vector<Entry>* sorted_entries) {
// Sort both entries and wegihts.
thrust::sort_by_key(thrust::hip::par(*alloc), sorted_entries->begin(),
sorted_entries->end(), weights->begin(),
detail::EntryCompareOp());
// Scan weights
thrust::inclusive_scan_by_key(thrust::hip::par(*alloc),
sorted_entries->begin(), sorted_entries->end(),
weights->begin(), weights->begin(),
[=] __device__(const Entry& a, const Entry& b) {
return a.index == b.index;
});
}
} // namespace detail
void ProcessBatch(int device, const SparsePage &page, size_t begin, size_t end,
SketchContainer *sketch_container, int num_cuts_per_feature,
size_t num_columns) {
dh::XGBCachingDeviceAllocator<char> alloc;
const auto& host_data = page.data.ConstHostVector();
dh::caching_device_vector<Entry> sorted_entries(host_data.begin() + begin,
host_data.begin() + end);
thrust::sort(thrust::hip::par(alloc), sorted_entries.begin(),
sorted_entries.end(), detail::EntryCompareOp());
HostDeviceVector<SketchContainer::OffsetT> cuts_ptr;
dh::caching_device_vector<size_t> column_sizes_scan;
data::IsValidFunctor dummy_is_valid(std::numeric_limits<float>::quiet_NaN());
auto batch_it = dh::MakeTransformIterator<data::COOTuple>(
sorted_entries.data().get(),
[] __device__(Entry const &e) -> data::COOTuple {
return {0, e.index, e.fvalue}; // row_idx is not needed for scanning column size.
});
detail::GetColumnSizesScan(device, num_columns, num_cuts_per_feature,
batch_it, dummy_is_valid,
0, sorted_entries.size(),
&cuts_ptr, &column_sizes_scan);
auto const& h_cuts_ptr = cuts_ptr.ConstHostVector();
dh::caching_device_vector<SketchEntry> cuts(h_cuts_ptr.back());
auto d_cuts_ptr = cuts_ptr.ConstDeviceSpan();
CHECK_EQ(d_cuts_ptr.size(), column_sizes_scan.size());
detail::ExtractCutsSparse(device, d_cuts_ptr, dh::ToSpan(sorted_entries),
dh::ToSpan(column_sizes_scan), dh::ToSpan(cuts));
// add cuts into sketches
sorted_entries.clear();
sorted_entries.shrink_to_fit();
CHECK_EQ(sorted_entries.capacity(), 0);
CHECK_NE(cuts_ptr.Size(), 0);
sketch_container->Push(cuts_ptr.ConstDeviceSpan(), &cuts);
}
void ProcessWeightedBatch(int device, const SparsePage& page,
Span<const float> weights, size_t begin, size_t end,
SketchContainer* sketch_container, int num_cuts_per_feature,
size_t num_columns,
bool is_ranking, Span<bst_group_t const> d_group_ptr) {
dh::XGBCachingDeviceAllocator<char> alloc;
const auto& host_data = page.data.ConstHostVector();
dh::caching_device_vector<Entry> sorted_entries(host_data.begin() + begin,
host_data.begin() + end);
// Binary search to assign weights to each element
dh::caching_device_vector<float> temp_weights(sorted_entries.size());
auto d_temp_weights = temp_weights.data().get();
page.offset.SetDevice(device);
auto row_ptrs = page.offset.ConstDeviceSpan();
size_t base_rowid = page.base_rowid;
if (is_ranking) {
CHECK_GE(d_group_ptr.size(), 2)
<< "Must have at least 1 group for ranking.";
CHECK_EQ(weights.size(), d_group_ptr.size() - 1)
<< "Weight size should equal to number of groups.";
dh::LaunchN(device, temp_weights.size(), [=] __device__(size_t idx) {
size_t element_idx = idx + begin;
size_t ridx = thrust::upper_bound(thrust::seq, row_ptrs.begin(),
row_ptrs.end(), element_idx) -
row_ptrs.begin() - 1;
auto it =
thrust::upper_bound(thrust::seq,
d_group_ptr.cbegin(), d_group_ptr.cend(),
ridx + base_rowid) - 1;
bst_group_t group = thrust::distance(d_group_ptr.cbegin(), it);
d_temp_weights[idx] = weights[group];
});
} else {
CHECK_EQ(weights.size(), page.offset.Size() - 1);
dh::LaunchN(device, temp_weights.size(), [=] __device__(size_t idx) {
size_t element_idx = idx + begin;
size_t ridx = thrust::upper_bound(thrust::seq, row_ptrs.begin(),
row_ptrs.end(), element_idx) -
row_ptrs.begin() - 1;
d_temp_weights[idx] = weights[ridx + base_rowid];
});
}
detail::SortByWeight(&alloc, &temp_weights, &sorted_entries);
HostDeviceVector<SketchContainer::OffsetT> cuts_ptr;
dh::caching_device_vector<size_t> column_sizes_scan;
data::IsValidFunctor dummy_is_valid(std::numeric_limits<float>::quiet_NaN());
auto batch_it = dh::MakeTransformIterator<data::COOTuple>(
sorted_entries.data().get(),
[] __device__(Entry const &e) -> data::COOTuple {
return {0, e.index, e.fvalue}; // row_idx is not needed for scaning column size.
});
detail::GetColumnSizesScan(device, num_columns, num_cuts_per_feature,
batch_it, dummy_is_valid,
0, sorted_entries.size(),
&cuts_ptr, &column_sizes_scan);
auto const& h_cuts_ptr = cuts_ptr.ConstHostVector();
dh::caching_device_vector<SketchEntry> cuts(h_cuts_ptr.back());
auto d_cuts_ptr = cuts_ptr.ConstDeviceSpan();
// Extract cuts
detail::ExtractWeightedCutsSparse(device, d_cuts_ptr,
dh::ToSpan(sorted_entries),
dh::ToSpan(temp_weights),
dh::ToSpan(column_sizes_scan),
dh::ToSpan(cuts));
// add cuts into sketches
sketch_container->Push(cuts_ptr.ConstDeviceSpan(), &cuts);
}
HistogramCuts DeviceSketch(int device, DMatrix* dmat, int max_bins,
size_t sketch_batch_num_elements) {
// Configure batch size based on available memory
bool has_weights = dmat->Info().weights_.Size() > 0;
size_t num_cuts_per_feature =
detail::RequiredSampleCutsPerColumn(max_bins, dmat->Info().num_row_);
sketch_batch_num_elements = detail::SketchBatchNumElements(
sketch_batch_num_elements,
dmat->Info().num_row_,
dmat->Info().num_col_,
dmat->Info().num_nonzero_,
device, num_cuts_per_feature, has_weights);
HistogramCuts cuts;
DenseCuts dense_cuts(&cuts);
SketchContainer sketch_container(max_bins, dmat->Info().num_col_,
dmat->Info().num_row_, device);
dmat->Info().weights_.SetDevice(device);
for (const auto& batch : dmat->GetBatches<SparsePage>()) {
size_t batch_nnz = batch.data.Size();
auto const& info = dmat->Info();
for (auto begin = 0ull; begin < batch_nnz; begin += sketch_batch_num_elements) {
size_t end = ::min(batch_nnz, size_t(begin + sketch_batch_num_elements));
if (has_weights) {
bool is_ranking = CutsBuilder::UseGroup(dmat);
dh::caching_device_vector<uint32_t> groups(info.group_ptr_.cbegin(),
info.group_ptr_.cend());
ProcessWeightedBatch(
device, batch, dmat->Info().weights_.ConstDeviceSpan(), begin, end,
&sketch_container,
num_cuts_per_feature,
dmat->Info().num_col_,
is_ranking, dh::ToSpan(groups));
} else {
ProcessBatch(device, batch, begin, end, &sketch_container, num_cuts_per_feature,
dmat->Info().num_col_);
}
}
}
sketch_container.MakeCuts(&cuts);
return cuts;
}
} // namespace common
} // namespace xgboost
| 44e6e59164f6c5a6a5ebc1584764a96d9fac5393.cu | /*!
* Copyright 2018~2020 XGBoost contributors
*/
#include <xgboost/logging.h>
#include <thrust/copy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
#include <memory>
#include <mutex>
#include <utility>
#include <vector>
#include "device_helpers.cuh"
#include "hist_util.h"
#include "hist_util.cuh"
#include "math.h" // NOLINT
#include "quantile.h"
#include "xgboost/host_device_vector.h"
namespace xgboost {
namespace common {
constexpr float SketchContainer::kFactor;
namespace detail {
// Count the entries in each column and exclusive scan
void ExtractCutsSparse(int device, common::Span<SketchContainer::OffsetT const> cuts_ptr,
Span<Entry const> sorted_data,
Span<size_t const> column_sizes_scan,
Span<SketchEntry> out_cuts) {
dh::LaunchN(device, out_cuts.size(), [=] __device__(size_t idx) {
// Each thread is responsible for obtaining one cut from the sorted input
size_t column_idx = dh::SegmentId(cuts_ptr, idx);
size_t column_size =
column_sizes_scan[column_idx + 1] - column_sizes_scan[column_idx];
size_t num_available_cuts = cuts_ptr[column_idx + 1] - cuts_ptr[column_idx];
size_t cut_idx = idx - cuts_ptr[column_idx];
Span<Entry const> column_entries =
sorted_data.subspan(column_sizes_scan[column_idx], column_size);
size_t rank = (column_entries.size() * cut_idx) /
static_cast<float>(num_available_cuts);
out_cuts[idx] = WQSketch::Entry(rank, rank + 1, 1,
column_entries[rank].fvalue);
});
}
void ExtractWeightedCutsSparse(int device,
common::Span<SketchContainer::OffsetT const> cuts_ptr,
Span<Entry> sorted_data,
Span<float> weights_scan,
Span<size_t> column_sizes_scan,
Span<SketchEntry> cuts) {
dh::LaunchN(device, cuts.size(), [=] __device__(size_t idx) {
// Each thread is responsible for obtaining one cut from the sorted input
size_t column_idx = dh::SegmentId(cuts_ptr, idx);
size_t column_size =
column_sizes_scan[column_idx + 1] - column_sizes_scan[column_idx];
size_t num_available_cuts = cuts_ptr[column_idx + 1] - cuts_ptr[column_idx];
size_t cut_idx = idx - cuts_ptr[column_idx];
Span<Entry> column_entries =
sorted_data.subspan(column_sizes_scan[column_idx], column_size);
Span<float> column_weights_scan =
weights_scan.subspan(column_sizes_scan[column_idx], column_size);
float total_column_weight = column_weights_scan.back();
size_t sample_idx = 0;
if (cut_idx == 0) {
// First cut
sample_idx = 0;
} else if (cut_idx == num_available_cuts) {
// Last cut
sample_idx = column_entries.size() - 1;
} else if (num_available_cuts == column_size) {
// There are less samples available than our buffer
// Take every available sample
sample_idx = cut_idx;
} else {
bst_float rank = (total_column_weight * cut_idx) /
static_cast<float>(num_available_cuts);
sample_idx = thrust::upper_bound(thrust::seq,
column_weights_scan.begin(),
column_weights_scan.end(),
rank) -
column_weights_scan.begin();
sample_idx =
max(static_cast<size_t>(0),
min(sample_idx, column_entries.size() - 1));
}
// repeated values will be filtered out later.
bst_float rmin = sample_idx > 0 ? column_weights_scan[sample_idx - 1] : 0.0f;
bst_float rmax = column_weights_scan[sample_idx];
cuts[idx] = WQSketch::Entry(rmin, rmax, rmax - rmin,
column_entries[sample_idx].fvalue);
});
}
size_t RequiredSampleCutsPerColumn(int max_bins, size_t num_rows) {
double eps = 1.0 / (WQSketch::kFactor * max_bins);
size_t dummy_nlevel;
size_t num_cuts;
WQuantileSketch<bst_float, bst_float>::LimitSizeLevel(
num_rows, eps, &dummy_nlevel, &num_cuts);
return std::min(num_cuts, num_rows);
}
size_t RequiredSampleCuts(bst_row_t num_rows, bst_feature_t num_columns,
size_t max_bins, size_t nnz) {
auto per_column = RequiredSampleCutsPerColumn(max_bins, num_rows);
auto if_dense = num_columns * per_column;
auto result = std::min(nnz, if_dense);
return result;
}
// Estimates the peak device memory (in bytes) needed by one sketching pass,
// by replaying the allocate/deallocate sequence of the pipeline below and
// tracking the running total's maximum.
size_t RequiredMemory(bst_row_t num_rows, bst_feature_t num_columns, size_t nnz,
size_t num_bins, bool with_weights) {
size_t peak = 0;
// 0. Allocate cut pointer in quantile container by increasing: n_columns + 1
size_t total = (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 1. Copy and sort: 2 * bytes_per_element * shape
total += BytesPerElement(with_weights) * num_rows * num_columns;
peak = std::max(peak, total);
// 2. Deallocate bytes_per_element * shape due to reusing memory in sort.
total -= BytesPerElement(with_weights) * num_rows * num_columns / 2;
// 3. Allocate column size scan by increasing: n_columns + 1
total += (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 4. Allocate cut pointer by increasing: n_columns + 1
total += (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 5. Allocate cuts: assuming rows is greater than bins: n_columns * limit_size
// NOTE(review): RequiredSampleCuts is declared (num_rows, num_columns,
// max_bins, nnz) but num_bins is passed for both the column count and the
// bin count here -- confirm num_columns was not intended for the 2nd arg.
total += RequiredSampleCuts(num_rows, num_bins, num_bins, nnz) * sizeof(SketchEntry);
// 6. Deallocate copied entries by reducing: bytes_per_element * shape.
peak = std::max(peak, total);
total -= (BytesPerElement(with_weights) * num_rows * num_columns) / 2;
// 7. Deallocate column size scan.
peak = std::max(peak, total);
total -= (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 8. Deallocate cut size scan.
total -= (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 9. Allocate final cut values, min values, cut ptrs: std::min(rows, bins + 1) *
// n_columns + n_columns + n_columns + 1
total += std::min(num_rows, num_bins) * num_columns * sizeof(float);
total += num_columns *
sizeof(std::remove_reference_t<decltype(
std::declval<HistogramCuts>().MinValues())>::value_type);
total += (num_columns + 1) *
sizeof(std::remove_reference_t<decltype(
std::declval<HistogramCuts>().Ptrs())>::value_type);
peak = std::max(peak, total);
return peak;
}
// Chooses how many matrix elements one sketching pass may process.
// A caller-supplied value of 0 requests auto-configuration from the
// device's currently available memory.
size_t SketchBatchNumElements(size_t sketch_batch_num_elements,
bst_row_t num_rows, size_t columns, size_t nnz, int device,
size_t num_cuts, bool has_weight) {
if (sketch_batch_num_elements == 0) {
auto required_memory = RequiredMemory(num_rows, columns, nnz, num_cuts, has_weight);
// use up to 80% of available space
// NOTE(review): operator precedence makes this `available - 0.8 * required`,
// not 80% of the available space as the comment above says; the result is
// also a byte count reused as an element count -- confirm the intent.
sketch_batch_num_elements = (dh::AvailableMemory(device) -
required_memory * 0.8);
}
return sketch_batch_num_elements;
}
// Sorts `sorted_entries` by the feature-major entry ordering while keeping
// `weights` aligned with them, then replaces each weight by a running
// (inclusive) prefix sum within its feature column. Both vectors are
// modified in place on the device.
void SortByWeight(dh::XGBCachingDeviceAllocator<char>* alloc,
dh::caching_device_vector<float>* weights,
dh::caching_device_vector<Entry>* sorted_entries) {
// Sort both entries and weights together (weights follow the entry keys).
thrust::sort_by_key(thrust::cuda::par(*alloc), sorted_entries->begin(),
sorted_entries->end(), weights->begin(),
detail::EntryCompareOp());
// Scan weights in place, restarting the sum whenever the feature index
// changes (the equality predicate defines the key segments).
thrust::inclusive_scan_by_key(thrust::cuda::par(*alloc),
sorted_entries->begin(), sorted_entries->end(),
weights->begin(), weights->begin(),
[=] __device__(const Entry& a, const Entry& b) {
return a.index == b.index;
});
}
} // namespace detail
// Sketches one unweighted slice [begin, end) of a SparsePage: copies the
// entries to the device, sorts them feature-major, samples per-column cut
// candidates, and pushes them into the sketch container.
void ProcessBatch(int device, const SparsePage &page, size_t begin, size_t end,
SketchContainer *sketch_container, int num_cuts_per_feature,
size_t num_columns) {
dh::XGBCachingDeviceAllocator<char> alloc;
const auto& host_data = page.data.ConstHostVector();
// Device copy of the requested slice of entries.
dh::caching_device_vector<Entry> sorted_entries(host_data.begin() + begin,
host_data.begin() + end);
thrust::sort(thrust::cuda::par(alloc), sorted_entries.begin(),
sorted_entries.end(), detail::EntryCompareOp());
HostDeviceVector<SketchContainer::OffsetT> cuts_ptr;
dh::caching_device_vector<size_t> column_sizes_scan;
// NaN threshold means every entry is treated as valid here.
data::IsValidFunctor dummy_is_valid(std::numeric_limits<float>::quiet_NaN());
auto batch_it = dh::MakeTransformIterator<data::COOTuple>(
sorted_entries.data().get(),
[] __device__(Entry const &e) -> data::COOTuple {
return {0, e.index, e.fvalue}; // row_idx is not needed for scanning column size.
});
// Per-column entry counts (prefix-summed) and per-column cut offsets.
detail::GetColumnSizesScan(device, num_columns, num_cuts_per_feature,
batch_it, dummy_is_valid,
0, sorted_entries.size(),
&cuts_ptr, &column_sizes_scan);
auto const& h_cuts_ptr = cuts_ptr.ConstHostVector();
// Total number of cut candidates is the last element of the offset scan.
dh::caching_device_vector<SketchEntry> cuts(h_cuts_ptr.back());
auto d_cuts_ptr = cuts_ptr.ConstDeviceSpan();
CHECK_EQ(d_cuts_ptr.size(), column_sizes_scan.size());
detail::ExtractCutsSparse(device, d_cuts_ptr, dh::ToSpan(sorted_entries),
dh::ToSpan(column_sizes_scan), dh::ToSpan(cuts));
// Release the sorted entries before pushing, to cap peak device memory.
sorted_entries.clear();
sorted_entries.shrink_to_fit();
CHECK_EQ(sorted_entries.capacity(), 0);
CHECK_NE(cuts_ptr.Size(), 0);
// add cuts into sketches
sketch_container->Push(cuts_ptr.ConstDeviceSpan(), &cuts);
}
// Weighted counterpart of ProcessBatch: assigns each entry the weight of its
// row (or of its query group when is_ranking), sorts entries with their
// weights, prefix-sums the weights per column, and extracts weighted cut
// candidates into the sketch container.
void ProcessWeightedBatch(int device, const SparsePage& page,
Span<const float> weights, size_t begin, size_t end,
SketchContainer* sketch_container, int num_cuts_per_feature,
size_t num_columns,
bool is_ranking, Span<bst_group_t const> d_group_ptr) {
dh::XGBCachingDeviceAllocator<char> alloc;
const auto& host_data = page.data.ConstHostVector();
dh::caching_device_vector<Entry> sorted_entries(host_data.begin() + begin,
host_data.begin() + end);
// Binary search to assign weights to each element
dh::caching_device_vector<float> temp_weights(sorted_entries.size());
auto d_temp_weights = temp_weights.data().get();
page.offset.SetDevice(device);
auto row_ptrs = page.offset.ConstDeviceSpan();
size_t base_rowid = page.base_rowid;
if (is_ranking) {
CHECK_GE(d_group_ptr.size(), 2)
<< "Must have at least 1 group for ranking.";
CHECK_EQ(weights.size(), d_group_ptr.size() - 1)
<< "Weight size should equal to number of groups.";
// Ranking: map entry -> row via row_ptrs, then row -> group via
// d_group_ptr, and take the group's weight.
dh::LaunchN(device, temp_weights.size(), [=] __device__(size_t idx) {
size_t element_idx = idx + begin;
size_t ridx = thrust::upper_bound(thrust::seq, row_ptrs.begin(),
row_ptrs.end(), element_idx) -
row_ptrs.begin() - 1;
auto it =
thrust::upper_bound(thrust::seq,
d_group_ptr.cbegin(), d_group_ptr.cend(),
ridx + base_rowid) - 1;
bst_group_t group = thrust::distance(d_group_ptr.cbegin(), it);
d_temp_weights[idx] = weights[group];
});
} else {
CHECK_EQ(weights.size(), page.offset.Size() - 1);
// Non-ranking: each entry simply inherits its row's weight.
dh::LaunchN(device, temp_weights.size(), [=] __device__(size_t idx) {
size_t element_idx = idx + begin;
size_t ridx = thrust::upper_bound(thrust::seq, row_ptrs.begin(),
row_ptrs.end(), element_idx) -
row_ptrs.begin() - 1;
d_temp_weights[idx] = weights[ridx + base_rowid];
});
}
// Sort entries with weights, then prefix-sum weights per feature column.
detail::SortByWeight(&alloc, &temp_weights, &sorted_entries);
HostDeviceVector<SketchContainer::OffsetT> cuts_ptr;
dh::caching_device_vector<size_t> column_sizes_scan;
// NaN threshold means every entry is treated as valid here.
data::IsValidFunctor dummy_is_valid(std::numeric_limits<float>::quiet_NaN());
auto batch_it = dh::MakeTransformIterator<data::COOTuple>(
sorted_entries.data().get(),
[] __device__(Entry const &e) -> data::COOTuple {
return {0, e.index, e.fvalue}; // row_idx is not needed for scanning column size.
});
detail::GetColumnSizesScan(device, num_columns, num_cuts_per_feature,
batch_it, dummy_is_valid,
0, sorted_entries.size(),
&cuts_ptr, &column_sizes_scan);
auto const& h_cuts_ptr = cuts_ptr.ConstHostVector();
dh::caching_device_vector<SketchEntry> cuts(h_cuts_ptr.back());
auto d_cuts_ptr = cuts_ptr.ConstDeviceSpan();
// Extract cuts
detail::ExtractWeightedCutsSparse(device, d_cuts_ptr,
dh::ToSpan(sorted_entries),
dh::ToSpan(temp_weights),
dh::ToSpan(column_sizes_scan),
dh::ToSpan(cuts));
// add cuts into sketches
sketch_container->Push(cuts_ptr.ConstDeviceSpan(), &cuts);
}
// Builds histogram cuts for the whole DMatrix on the given device.
// Batches the matrix's entries into memory-bounded slices (unless the caller
// fixes sketch_batch_num_elements), sketches each slice (weighted or not),
// and finally materializes the cuts from the sketch container.
HistogramCuts DeviceSketch(int device, DMatrix* dmat, int max_bins,
size_t sketch_batch_num_elements) {
// Configure batch size based on available memory
bool has_weights = dmat->Info().weights_.Size() > 0;
size_t num_cuts_per_feature =
detail::RequiredSampleCutsPerColumn(max_bins, dmat->Info().num_row_);
sketch_batch_num_elements = detail::SketchBatchNumElements(
sketch_batch_num_elements,
dmat->Info().num_row_,
dmat->Info().num_col_,
dmat->Info().num_nonzero_,
device, num_cuts_per_feature, has_weights);
HistogramCuts cuts;
DenseCuts dense_cuts(&cuts);
SketchContainer sketch_container(max_bins, dmat->Info().num_col_,
dmat->Info().num_row_, device);
dmat->Info().weights_.SetDevice(device);
for (const auto& batch : dmat->GetBatches<SparsePage>()) {
size_t batch_nnz = batch.data.Size();
auto const& info = dmat->Info();
// Slice each page into at most sketch_batch_num_elements entries.
for (auto begin = 0ull; begin < batch_nnz; begin += sketch_batch_num_elements) {
size_t end = std::min(batch_nnz, size_t(begin + sketch_batch_num_elements));
if (has_weights) {
// Ranking tasks weight entries by query group rather than by row.
bool is_ranking = CutsBuilder::UseGroup(dmat);
dh::caching_device_vector<uint32_t> groups(info.group_ptr_.cbegin(),
info.group_ptr_.cend());
ProcessWeightedBatch(
device, batch, dmat->Info().weights_.ConstDeviceSpan(), begin, end,
&sketch_container,
num_cuts_per_feature,
dmat->Info().num_col_,
is_ranking, dh::ToSpan(groups));
} else {
ProcessBatch(device, batch, begin, end, &sketch_container, num_cuts_per_feature,
dmat->Info().num_col_);
}
}
}
sketch_container.MakeCuts(&cuts);
return cuts;
}
} // namespace common
} // namespace xgboost
|
a54ab1bb629de5428a39508cac14f77e272bb83b.hip | // !!! This is a file automatically generated by hipify!!!
#include "NvInfer.h"
#include "plugin.h"
// #include "common.h"
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstring>
#include <cublasLt.h>
#include <hip/hip_runtime.h>
#include <vector>
#include"hip/hip_fp16.h"
// template<typename T>
// __global__ void Slice (const T* input_data,const T* input_slice,T* output)
// {
// int i= blockIdx.x*gridDim.y+blockIdx.y*blockDim.y + threadIdx.x;
// // printf("index %d input_data %f input_slice %f\n",i,input_data[i],input_slice[i]);
// // printf("blockDim.x %d blockDim %d input_slice %d\n",blockDim.x ,gridDim.y,gridDim.x);
// // output[i]=input_data[i];
// // const T in=(input_data[i]+input_slice[i]);
// output[i]=(input_data[i]);
// // *(output+i)=*(input_data+i);
// //*(output+i)=*(input_data+i)+*(input_slice+i);
// }
// __global__ void Slice (const half* input_data,const half* input_slice,half* output)
// {
// int i= blockIdx.x*gridDim.y+blockIdx.y*blockDim.y + threadIdx.x;
// output[i]=__hadd(input_data[i],input_slice[i]);
// // *(output+i)=*(input_data+i);
// //*(output+i)=*(input_data+i)+*(input_slice+i);
// }
// pluginStatus_t Sliceinference(hipStream_t stream,const int x1,const int y1,const int z1,const int x2,const int y2,
// const int z2,const float* input_data,const float* input_slice,float* output)
// {
// dim3 dimBlock(x1,y1);
// dim3 dimthread(z1);
// std::cout<<x1<<" "<<y1<<" "<<z1<<std::endl;
// Slice<float><<<dimBlock,dimthread,0,stream>>>(input_data, input_slice, output);
// return STATUS_SUCCESS;
// }
// pluginStatus_t Sliceinference(hipStream_t stream,const int x1,const int y1,const int z1,const int x2,const int y2,
// const int z2, const half* input_data, const half* input_slice,half* output)
// {
// dim3 dimBlock(x1,y1);
// dim3 dimthread(z1);
// // std::cout<<x1<<" "<<y1<<" "<<z1<<std::endl;
// // const half2* input_data_ = reinterpret_cast<const half2*>(input_data);
// // const half2* input_slice_ = reinterpret_cast<const half2*>(input_slice);
// // half2* output_ = reinterpret_cast<half2*>(output);
// Slice<<<dimBlock,dimthread,0,stream>>>(input_data, input_slice, output);
// return STATUS_SUCCESS;
// }
// Broadcast-add kernel: one block per batch element. input_slice is indexed
// per-(batch, sample) while input_data is indexed per-sample only, so the
// same per-sample tensor is added to every batch element.
template<typename T>
__global__ void Slice (const T* input_data,const T* input_slice,T* output)
{
// Offset of this thread within a single batch element.
int intra = threadIdx.x*blockDim.y + threadIdx.y;
// Global offset across the whole batch (blockIdx.x selects the element).
int idx = blockIdx.x*blockDim.y*blockDim.x + intra;
output[idx] = input_slice[idx] + input_data[intra];
}
// FP16 overload of the broadcast-add kernel above; uses __hadd for the
// half-precision addition. Same indexing scheme: one block per batch element.
__global__ void Slice (const half* input_data,const half* input_slice,half* output)
{
// Offset of this thread within a single batch element.
int intra = threadIdx.x*blockDim.y + threadIdx.y;
// Global offset across the whole batch (blockIdx.x selects the element).
int idx = blockIdx.x*blockDim.y*blockDim.x + intra;
output[idx] = __hadd(input_data[intra], input_slice[idx]);
}
// Launches the FP32 broadcast-add Slice kernel: one block per batch element,
// block shaped (y2, z2).
// NOTE(review): the half overload below uses (y1, z1) for the block shape --
// confirm the asymmetry is intentional.
pluginStatus_t Sliceinference(hipStream_t stream,const int x1,const int y1,const int z1,const int x2,const int y2,
const int z2,const float* input_data,const float* input_slice,float* output,int batchsize)
{
dim3 dimBlock(y2,z2);
dim3 dimgrid(batchsize);
// Debug trace printed on every invocation (left in from development).
std::cout<<"cuda"<<y2<<" "<<z2<<" "<<batchsize<<std::endl;
hipLaunchKernelGGL(( Slice<float>), dim3(dimgrid),dim3(dimBlock),0,stream, input_data, input_slice, output);
return STATUS_SUCCESS;
}
// Launches the FP16 broadcast-add Slice kernel: one block per batch element,
// block shaped (y1, z1).
// NOTE(review): the float overload above uses (y2, z2) -- confirm intended.
pluginStatus_t Sliceinference(hipStream_t stream,const int x1,const int y1,const int z1,const int x2,const int y2,
const int z2, const half* input_data, const half* input_slice,half* output,int batchsize)
{
dim3 dimBlock(y1,z1);
dim3 dimgrid(batchsize);
hipLaunchKernelGGL(( Slice), dim3(dimgrid),dim3(dimBlock),0,stream, input_data, input_slice, output);
return STATUS_SUCCESS;
} | a54ab1bb629de5428a39508cac14f77e272bb83b.cu | #include "NvInfer.h"
#include "plugin.h"
// #include "common.h"
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstring>
#include <cublasLt.h>
#include <cuda_runtime.h>
#include <vector>
#include"cuda_fp16.h"
// template<typename T>
// __global__ void Slice (const T* input_data,const T* input_slice,T* output)
// {
// int i= blockIdx.x*gridDim.y+blockIdx.y*blockDim.y + threadIdx.x;
// // printf("index %d input_data %f input_slice %f\n",i,input_data[i],input_slice[i]);
// // printf("blockDim.x %d blockDim %d input_slice %d\n",blockDim.x ,gridDim.y,gridDim.x);
// // output[i]=input_data[i];
// // const T in=(input_data[i]+input_slice[i]);
// output[i]=(input_data[i]);
// // *(output+i)=*(input_data+i);
// //*(output+i)=*(input_data+i)+*(input_slice+i);
// }
// __global__ void Slice (const half* input_data,const half* input_slice,half* output)
// {
// int i= blockIdx.x*gridDim.y+blockIdx.y*blockDim.y + threadIdx.x;
// output[i]=__hadd(input_data[i],input_slice[i]);
// // *(output+i)=*(input_data+i);
// //*(output+i)=*(input_data+i)+*(input_slice+i);
// }
// pluginStatus_t Sliceinference(cudaStream_t stream,const int x1,const int y1,const int z1,const int x2,const int y2,
// const int z2,const float* input_data,const float* input_slice,float* output)
// {
// dim3 dimBlock(x1,y1);
// dim3 dimthread(z1);
// std::cout<<x1<<" "<<y1<<" "<<z1<<std::endl;
// Slice<float><<<dimBlock,dimthread,0,stream>>>(input_data, input_slice, output);
// return STATUS_SUCCESS;
// }
// pluginStatus_t Sliceinference(cudaStream_t stream,const int x1,const int y1,const int z1,const int x2,const int y2,
// const int z2, const half* input_data, const half* input_slice,half* output)
// {
// dim3 dimBlock(x1,y1);
// dim3 dimthread(z1);
// // std::cout<<x1<<" "<<y1<<" "<<z1<<std::endl;
// // const half2* input_data_ = reinterpret_cast<const half2*>(input_data);
// // const half2* input_slice_ = reinterpret_cast<const half2*>(input_slice);
// // half2* output_ = reinterpret_cast<half2*>(output);
// Slice<<<dimBlock,dimthread,0,stream>>>(input_data, input_slice, output);
// return STATUS_SUCCESS;
// }
// Broadcast-add kernel: one block per batch element. input_slice is indexed
// per-(batch, sample) while input_data is indexed per-sample only, so the
// same per-sample tensor is added to every batch element.
template<typename T>
__global__ void Slice (const T* input_data,const T* input_slice,T* output)
{
// Offset of this thread within a single batch element.
int intra = threadIdx.x*blockDim.y + threadIdx.y;
// Global offset across the whole batch (blockIdx.x selects the element).
int idx = blockIdx.x*blockDim.y*blockDim.x + intra;
output[idx] = input_slice[idx] + input_data[intra];
}
// FP16 overload of the broadcast-add kernel above; uses __hadd for the
// half-precision addition. Same indexing scheme: one block per batch element.
__global__ void Slice (const half* input_data,const half* input_slice,half* output)
{
// Offset of this thread within a single batch element.
int intra = threadIdx.x*blockDim.y + threadIdx.y;
// Global offset across the whole batch (blockIdx.x selects the element).
int idx = blockIdx.x*blockDim.y*blockDim.x + intra;
output[idx] = __hadd(input_data[intra], input_slice[idx]);
}
// Launches the FP32 broadcast-add Slice kernel: one block per batch element,
// block shaped (y2, z2).
// NOTE(review): the half overload below uses (y1, z1) for the block shape --
// confirm the asymmetry is intentional.
pluginStatus_t Sliceinference(cudaStream_t stream,const int x1,const int y1,const int z1,const int x2,const int y2,
const int z2,const float* input_data,const float* input_slice,float* output,int batchsize)
{
dim3 dimBlock(y2,z2);
dim3 dimgrid(batchsize);
// Debug trace printed on every invocation (left in from development).
std::cout<<"cuda"<<y2<<" "<<z2<<" "<<batchsize<<std::endl;
Slice<float><<<dimgrid,dimBlock,0,stream>>>(input_data, input_slice, output);
return STATUS_SUCCESS;
}
// Launches the FP16 broadcast-add Slice kernel: one block per batch element,
// block shaped (y1, z1).
// NOTE(review): the float overload above uses (y2, z2) -- confirm intended.
pluginStatus_t Sliceinference(cudaStream_t stream,const int x1,const int y1,const int z1,const int x2,const int y2,
const int z2, const half* input_data, const half* input_slice,half* output,int batchsize)
{
dim3 dimBlock(y1,z1);
dim3 dimgrid(batchsize);
Slice<<<dimgrid,dimBlock,0,stream>>>(input_data, input_slice, output);
return STATUS_SUCCESS;
}
111db727f5ca531cc15afe8f4e0b3ffe10a8a968.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zmgeelltmv.cu normal z -> s, Tue Sep 2 12:38:33 2014
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
// ELLPACK SpMV with multiple right-hand sides:
// y(:,v) = alpha * A * x(:,v) + beta * y(:,v) for v = 0..num_vecs-1.
// One thread per matrix row. The ELL arrays are stored column-major with
// stride num_rows (entry n of row r lives at index num_rows*n + r).
// Dynamic shared memory holds num_vecs partial dot products per thread
// (num_vecs * blockDim.x floats).
__global__ void
smgeelltmv_kernel( int num_rows,
int num_cols,
int num_vecs,
int num_cols_per_row,
float alpha,
float *d_val,
magma_index_t *d_colind,
float *d_x,
float beta,
float *d_y)
{
extern __shared__ float dot[];
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
// Zero this thread's num_vecs accumulators.
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = d_colind [ num_rows * n + row ];
float val = d_val [ num_rows * n + row ];
// ELL padding entries have value 0 and are skipped.
if( val != 0){
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x + i*blockDim.x ] +=
val * d_x[col + i * num_cols ];
}
}
// Combine with the existing y: alpha * (A*x) + beta * y, per vector.
for( int i=0; i<num_vecs; i++ )
d_y[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
* alpha + beta * d_y [ row + i*num_cols ];
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELL.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
num_vecs mama_int_t
number of vectors
@param
nnz_per_row magma_int_t
number of elements in the longest row
@param
alpha float
scalar multiplier
@param
d_val float*
array containing values of A in ELL
@param
d_colind magma_int_t*
columnindices of A in ELL
@param
d_x float*
input vector x
@param
beta float
scalar multiplier
@param
d_y float*
input/output vector y
@ingroup magmasparse_sblas
********************************************************************/
// Host wrapper for smgeelltmv_kernel: grid is ceil(m / BLOCK_SIZE) blocks,
// with num_vecs * BLOCK_SIZE floats of dynamic shared memory for the
// per-thread partial sums. Note: transA is accepted but not referenced --
// the non-transposed product is computed regardless.
extern "C" magma_int_t
magma_smgeelltmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t nnz_per_row,
float alpha,
float *d_val,
magma_index_t *d_colind,
float *d_x,
float beta,
float *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( float ); // num_vecs vectors
hipLaunchKernelGGL(( smgeelltmv_kernel), dim3(grid), dim3(BLOCK_SIZE), MEM_SIZE , 0,
m, n, num_vecs, nnz_per_row, alpha, d_val, d_colind, d_x, beta, d_y );
return MAGMA_SUCCESS;
}
| 111db727f5ca531cc15afe8f4e0b3ffe10a8a968.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zmgeelltmv.cu normal z -> s, Tue Sep 2 12:38:33 2014
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
// ELLPACK SpMV with multiple right-hand sides:
// y(:,v) = alpha * A * x(:,v) + beta * y(:,v) for v = 0..num_vecs-1.
// One thread per matrix row. The ELL arrays are stored column-major with
// stride num_rows (entry n of row r lives at index num_rows*n + r).
// Dynamic shared memory holds num_vecs partial dot products per thread
// (num_vecs * blockDim.x floats).
__global__ void
smgeelltmv_kernel( int num_rows,
int num_cols,
int num_vecs,
int num_cols_per_row,
float alpha,
float *d_val,
magma_index_t *d_colind,
float *d_x,
float beta,
float *d_y)
{
extern __shared__ float dot[];
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
// Zero this thread's num_vecs accumulators.
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = d_colind [ num_rows * n + row ];
float val = d_val [ num_rows * n + row ];
// ELL padding entries have value 0 and are skipped.
if( val != 0){
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x + i*blockDim.x ] +=
val * d_x[col + i * num_cols ];
}
}
// Combine with the existing y: alpha * (A*x) + beta * y, per vector.
for( int i=0; i<num_vecs; i++ )
d_y[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
* alpha + beta * d_y [ row + i*num_cols ];
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELL.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
num_vecs mama_int_t
number of vectors
@param
nnz_per_row magma_int_t
number of elements in the longest row
@param
alpha float
scalar multiplier
@param
d_val float*
array containing values of A in ELL
@param
d_colind magma_int_t*
columnindices of A in ELL
@param
d_x float*
input vector x
@param
beta float
scalar multiplier
@param
d_y float*
input/output vector y
@ingroup magmasparse_sblas
********************************************************************/
// Host wrapper for smgeelltmv_kernel: grid is ceil(m / BLOCK_SIZE) blocks,
// with num_vecs * BLOCK_SIZE floats of dynamic shared memory for the
// per-thread partial sums. Note: transA is accepted but not referenced --
// the non-transposed product is computed regardless.
extern "C" magma_int_t
magma_smgeelltmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t nnz_per_row,
float alpha,
float *d_val,
magma_index_t *d_colind,
float *d_x,
float beta,
float *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( float ); // num_vecs vectors
smgeelltmv_kernel<<< grid, BLOCK_SIZE, MEM_SIZE >>>
( m, n, num_vecs, nnz_per_row, alpha, d_val, d_colind, d_x, beta, d_y );
return MAGMA_SUCCESS;
}
8d09c491498f4d07f95d4bc3a97ab8b829abe117.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/DistributionTemplates.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_kernel.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <THH/THHGeneral.h>
#include <THH/THHDeviceUtils.cuh>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
namespace at { namespace native {
// Fills `self` with Bernoulli samples using the per-element probabilities in
// `p_`, drawing random state from `gen_` (or the default CUDA generator).
void bernoulli_tensor_kernel(Tensor& self, const Tensor& p_, c10::optional<Generator> gen_) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::bernoulli_kernel(self, p_, generator);
}
// Fills `self` with Bernoulli samples of a single probability `p`, via a
// nullary TensorIterator over `self` and the given (or default) generator.
void bernoulli_scalar_kernel(Tensor& self, double p, c10::optional<Generator> gen) {
auto iter = TensorIterator::borrowing_nullary_op(self);
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::bernoulli_kernel(iter, p, generator);
}
REGISTER_DISPATCH(bernoulli_tensor_stub, &bernoulli_tensor_kernel);
REGISTER_DISPATCH(bernoulli_scalar_stub, &bernoulli_scalar_kernel);
}} // namespace at::native
| 8d09c491498f4d07f95d4bc3a97ab8b829abe117.cu | #include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/DistributionTemplates.h>
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <THC/THCGeneral.h>
#include <THC/THCDeviceUtils.cuh>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
namespace at { namespace native {
void bernoulli_tensor_kernel(Tensor& self, const Tensor& p_, c10::optional<Generator> gen_) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::bernoulli_kernel(self, p_, generator);
}
// Fills `self` with Bernoulli samples of a single probability `p`, via a
// nullary TensorIterator over `self` and the given (or default) generator.
void bernoulli_scalar_kernel(Tensor& self, double p, c10::optional<Generator> gen) {
auto iter = TensorIterator::borrowing_nullary_op(self);
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::bernoulli_kernel(iter, p, generator);
}
REGISTER_DISPATCH(bernoulli_tensor_stub, &bernoulli_tensor_kernel);
REGISTER_DISPATCH(bernoulli_scalar_stub, &bernoulli_scalar_kernel);
}} // namespace at::native
|
5e6e4a0902a74245768011a524a70241871e7a72.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* CSR_Graph.cu
*
* Created on: Dec 12, 2014
* Author: pakij
*/
#include "CSR_Graph.h"
// atomicMin for doubles (no hardware support): emulated with an atomicCAS
// loop on the value's 64-bit integer representation. Returns the value that
// was stored at *address before this call, matching built-in atomics.
__device__ double atomicMin(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
// Retry until no other thread modified *address between read and CAS.
old = atomicCAS(address_as_ull, assumed, __double_as_longlong( fmin(val, __longlong_as_double(assumed))) );
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
// Bellman-Ford phase 1: one thread per vertex relaxes all of the vertex's
// outgoing edges into the temporary distance array via atomicMin, recording
// a candidate predecessor and clearing *finished when any distance improves.
__global__ void BellmanFord_split1cuda(int * finished, int V, int E, int *offsets, int *edge_dests,
double *weights, int * preds, int * temp_preds, double * path_weights, double * temp_path_weights){
int my_vert = blockIdx.x *blockDim.x + threadIdx.x;
if(my_vert < V) {
double my_dist;
int first_target_index, last_target_index, target_index, target;
double new_dist;
my_dist = path_weights[my_vert];
//Find bounds of adjacency list
first_target_index = offsets[my_vert];
if(my_vert != V-1){
last_target_index = offsets[my_vert+1];
}
else{
// Last vertex's list runs to the end of the edge array.
last_target_index = E;
}
for(target_index = first_target_index; target_index < last_target_index; target_index++){
target = edge_dests[target_index];
new_dist = my_dist + weights[target_index];
// atomicMin returns the previous minimum; we won only if we beat it.
// NOTE(review): the temp_preds write is not atomic with the atomicMin,
// so concurrent winners may pair a predecessor with a distance written
// by another thread -- phase 2 recomputes the distance from the stored
// predecessor, but confirm this resolution is acceptable.
if(new_dist < atomicMin(&temp_path_weights[target], new_dist) ){
temp_preds[target] = my_vert;
*finished = 0;
}
}
}
}
// Bellman-Ford phase 2: one thread per vertex commits the predecessor chosen
// in phase 1 and recomputes its tentative distance directly from that
// predecessor's distance plus the connecting edge weight, resolving the
// pred/distance pairing race from phase 1.
__global__ void BellmanFord_split2cuda(int V, int E, int *offsets, int *edge_dests, double *weights, int * preds, int * temp_preds, double * path_weights, double * temp_path_weights){
int my_vert = blockIdx.x *blockDim.x + threadIdx.x;
int first_target_index, last_target_index;
int pred_vert;
if(my_vert < V){
pred_vert = temp_preds[my_vert];
// Skip vertices with no candidate (-1) or a self-loop predecessor.
if(pred_vert >= 0 && pred_vert != my_vert){
//Update predecessors
preds[my_vert] = pred_vert;
//Find bounds of adjacency list of pred_vert, want to find edge that leads to my_vert
first_target_index = offsets[pred_vert];
if(pred_vert != V-1){
last_target_index = offsets[pred_vert+1];
}
else{
last_target_index = E;
}
// Linear scan of pred_vert's edges for the one ending at my_vert.
//Update path_weights
for(int i=first_target_index; i < last_target_index; i++){
if(edge_dests[i] == my_vert){
temp_path_weights[my_vert] = path_weights[pred_vert] + weights[i];
break;
}
}
}
}
}
/*
 * Single-source shortest paths via Bellman-Ford on the GPU using the
 * two-phase "split" scheme: phase 1 relaxes every outgoing edge into
 * temporary distance/predecessor arrays (atomicMin), phase 2 commits each
 * vertex's winning predecessor and recomputes its tentative distance.
 * Iterates until no distance improves, or at most E rounds.
 *
 * source_      : index of the source vertex.
 * predecessors : out-param; predecessor tree (-1 for unreachable vertices,
 *                predecessors[source_] == source_).
 * path_weight  : out-param; shortest-path weights (infinity if unreachable).
 * Returns the wall time of the relaxation loop, in seconds.
 *
 * Fix: d_temp_path_weight and d_finished were allocated with hipMalloc but
 * never freed (device memory leak on every call); both are now released.
 */
double CSR_Graph::BellmanFordGPU_Split(int source_, std::vector <int> &predecessors, std::vector <double> &path_weight){
int num_blocks = (V + threads_per_block - 1) / threads_per_block;
// Initialize predecessor tree; E*max_weight acts as "infinity" on the
// device and is swapped for a real infinity for unreachable vertices below.
predecessors.clear();
path_weight.clear();
double inf = std::numeric_limits<double>::infinity();
predecessors.resize(V,-1);
path_weight.resize(V,E*max_weight);
predecessors[source_]=source_;
path_weight[source_]=0;
int finished;
boost::timer::auto_cpu_timer t;
// Device copies of the CSR graph and the double-buffered search state.
int * d_offsets;
int * d_edge_dests;
double * d_weights;
int * d_predecessors;
double * d_path_weight;
int * d_temp_predecessors;
double * d_temp_path_weight;
int * d_finished;
// Sizes of the CSR graph arrays.
int offsets_size = V*sizeof(int);
int edge_dests_size = E*sizeof(int);
int weights_size = E*sizeof(double);
// Sizes of the predecessor-tree arrays.
int predecessors_size = V*sizeof(int);
int temp_predecessors_size = V*sizeof(int);
int path_weight_size = V*sizeof(double);
// Allocate device memory.
hipMalloc((void **) & d_offsets, offsets_size);
hipMalloc((void **) & d_edge_dests, edge_dests_size);
hipMalloc((void **) & d_weights, weights_size);
hipMalloc((void **) & d_predecessors, predecessors_size);
hipMalloc((void **) & d_temp_predecessors, temp_predecessors_size);
hipMalloc((void **) & d_path_weight, path_weight_size);
hipMalloc((void **) & d_temp_path_weight, path_weight_size);
hipMalloc((void **) & d_finished, sizeof(int));
std::cout<<"Transferring to GPU"<<std::endl;
hipMemcpy(d_offsets, (int *) &offsets[0], offsets_size, hipMemcpyHostToDevice);
hipMemcpy(d_edge_dests, (int *) &edge_dests[0], edge_dests_size, hipMemcpyHostToDevice);
hipMemcpy(d_weights, (double *) &weights[0], weights_size, hipMemcpyHostToDevice);
hipMemcpy(d_predecessors, (int *) &predecessors[0], predecessors_size, hipMemcpyHostToDevice);
hipMemcpy(d_temp_predecessors, (int *) &predecessors[0], temp_predecessors_size, hipMemcpyHostToDevice);
hipMemcpy(d_path_weight, (double *) &path_weight[0], path_weight_size, hipMemcpyHostToDevice);
hipMemcpy(d_temp_path_weight, (double *) &path_weight[0], path_weight_size, hipMemcpyHostToDevice);
std::cout<<"Running kernel with <<<" << num_blocks << ", " << threads_per_block << ">>>" <<std::endl;
int iter=0;
finished=0;
boost::timer::cpu_timer timer;
// Relax until a full pass makes no improvement (or E passes elapse).
while(finished == 0 && iter < E) {
finished=1;
hipMemcpy(d_finished, &finished, sizeof(int), hipMemcpyHostToDevice);
hipDeviceSynchronize();
// Phase 1: relax all edges into the temp buffers, flag any improvement.
hipLaunchKernelGGL(( BellmanFord_split1cuda), dim3(num_blocks), dim3(threads_per_block), 0, 0, d_finished, V, E, d_offsets,d_edge_dests,
d_weights,d_predecessors,d_temp_predecessors,d_path_weight, d_temp_path_weight);
hipDeviceSynchronize();
hipMemcpy(&finished, d_finished, sizeof(int), hipMemcpyDeviceToHost);
// Phase 2: commit predecessors and recompute consistent distances.
hipLaunchKernelGGL(( BellmanFord_split2cuda), dim3(num_blocks), dim3(threads_per_block), 0, 0, V, E, d_offsets,d_edge_dests,
d_weights,d_predecessors,d_temp_predecessors,d_path_weight, d_temp_path_weight);
hipDeviceSynchronize();
// Promote the temp distances to the current distances for the next pass.
hipMemcpy(d_path_weight, d_temp_path_weight, path_weight_size, hipMemcpyDeviceToDevice);
hipDeviceSynchronize();
hipMemcpy((double *) &path_weight[0], d_path_weight, path_weight_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
iter++;
}
timer.stop();
std::cout<<"Iterations = "<<iter<<std::endl;
// Copy the final predecessor tree and distances back to the host.
hipMemcpy((int *) &predecessors[0], d_predecessors, predecessors_size, hipMemcpyDeviceToHost);
hipMemcpy((double *) &path_weight[0], d_path_weight, path_weight_size, hipMemcpyDeviceToHost);
// Cleanup: release every device allocation made above (d_temp_path_weight
// and d_finished were previously leaked).
hipFree(d_offsets); hipFree(d_edge_dests); hipFree(d_weights);
hipFree(d_predecessors); hipFree(d_path_weight); hipFree(d_temp_predecessors);
hipFree(d_temp_path_weight); hipFree(d_finished);
// Replace the sentinel distance with a true infinity for unreachable nodes.
for(int i=0; i<V; i++){
if(path_weight[i] == E*max_weight){
path_weight[i] = inf;
}
}
return (double) timer.elapsed().wall / 1000000000.0;
}
// Naive Bellman-Ford relaxation: one thread per destination vertex scans the
// entire edge list for edges ending at its vertex, tracking the source
// vertex implicitly via the (source-sorted) CSR offsets, and relaxes its own
// distance/predecessor. O(E) work per vertex per call; the host loops V
// times. Reads of path_weights[source_vert] race with concurrent writers
// (see original author's note) but only ever observe valid tentative values.
//
// Fix: my_dist is now refreshed after each accepted relaxation. Previously
// it kept the stale starting distance, so a later edge whose trial distance
// beat the *original* value could overwrite an already-better result found
// earlier in the same pass.
__global__ void BellmanFord_cuda(int V, int E, int *offsets, int *edge_dests, double *weights, int * preds, double * path_weights){
int my_vert = blockIdx.x *blockDim.x + threadIdx.x;
if(my_vert < V) {
int source_vert;
double my_dist = path_weights[my_vert];
double trial_dist;
source_vert=0;
for(int i=0; i<E; i++){
if(edge_dests[i] == my_vert){
//we can keep track of what the source vertex could be, since the edge list is sorted by them
while(source_vert != V-1 && offsets[source_vert+1] <= i){
source_vert++;
}
trial_dist = weights[i] + path_weights[source_vert]; //Data race, possibly benign?
if(trial_dist < my_dist){
path_weights[my_vert] = trial_dist;
preds[my_vert] = source_vert;
my_dist = trial_dist; // keep the running best so a worse edge cannot overwrite it
}
}
}
}
}
/*
 * Single-source shortest paths via the naive one-kernel Bellman-Ford: runs
 * BellmanFord_cuda V times (the classic iteration bound) with no early-exit
 * check. Outputs the predecessor tree and path weights through the
 * reference parameters; returns the kernel-loop wall time in seconds.
 * NOTE(review): the graph arrays (offsets/edge_dests/weights) are copied
 * back to the host after the loop even though the kernels never modify
 * them, and hip* return codes are unchecked -- confirm/clean up.
 */
double CSR_Graph::BellmanFordGPU(int source_, std::vector <int> &predecessors, std::vector <double> &path_weight){
int num_blocks = (V + threads_per_block - 1) / threads_per_block;
// Initialize predecessor tree; E*max_weight acts as "infinity" on device.
predecessors.clear();
path_weight.clear();
double inf = std::numeric_limits<double>::infinity();
predecessors.resize(V,-1);
path_weight.resize(V,E*max_weight);
predecessors[source_]=source_;
path_weight[source_]=0;
boost::timer::auto_cpu_timer t;
// Device copies of the CSR graph and the search state.
int * d_offsets;
int * d_edge_dests;
double * d_weights;
int * d_predecessors;
double * d_path_weight;
// Sizes of the CSR graph arrays.
int offsets_size = V*sizeof(int);
int edge_dests_size = E*sizeof(int);
int weights_size = E*sizeof(double);
// Sizes of the predecessor-tree arrays.
int predecessors_size = V*sizeof(int);
int path_weight_size = V*sizeof(double);
// Allocate device memory.
hipMalloc((void **) & d_offsets, offsets_size);
hipMalloc((void **) & d_edge_dests, edge_dests_size);
hipMalloc((void **) & d_weights, weights_size);
hipMalloc((void **) & d_predecessors, predecessors_size);
hipMalloc((void **) & d_path_weight, path_weight_size);
std::cout<<"Transferring to GPU"<<std::endl;
hipMemcpy(d_offsets, (int *) &offsets[0], offsets_size, hipMemcpyHostToDevice);
hipMemcpy(d_edge_dests, (int *) &edge_dests[0], edge_dests_size, hipMemcpyHostToDevice);
hipMemcpy(d_weights, (double *) &weights[0], weights_size, hipMemcpyHostToDevice);
hipMemcpy(d_predecessors, (int *) &predecessors[0], predecessors_size, hipMemcpyHostToDevice);
hipMemcpy(d_path_weight, (double *) &path_weight[0], path_weight_size, hipMemcpyHostToDevice);
std::cout<<"Running kernel with <<<" << num_blocks << ", " << threads_per_block << ">>>" <<std::endl;
boost::timer::cpu_timer timer;
// V relaxation passes guarantee convergence (no negative cycles assumed).
for(int iter=0; iter<V; iter++){
hipLaunchKernelGGL(( BellmanFord_cuda), dim3(num_blocks), dim3(threads_per_block), 0, 0, V, E, d_offsets,d_edge_dests,d_weights,d_predecessors,d_path_weight);
hipDeviceSynchronize();
}
timer.stop();
//Copy results back to host
hipMemcpy((int *) &offsets[0], d_offsets, offsets_size, hipMemcpyDeviceToHost);
hipMemcpy((int *) &edge_dests[0], d_edge_dests, edge_dests_size, hipMemcpyDeviceToHost);
hipMemcpy((double *) &weights[0], d_weights, weights_size, hipMemcpyDeviceToHost);
hipMemcpy((int *) &predecessors[0], d_predecessors, predecessors_size, hipMemcpyDeviceToHost);
hipMemcpy((double *) &path_weight[0], d_path_weight, path_weight_size, hipMemcpyDeviceToHost);
//cleanup
hipFree(d_offsets); hipFree(d_edge_dests); hipFree(d_weights);
hipFree(d_predecessors); hipFree(d_path_weight);
return (double) timer.elapsed().wall / 1000000000.0;
}
//Simple test code
__global__ void test_add(int *a, int *b, int *c){
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
bool CSR_Graph::test_cuda(){
int N=1000;
int *a, *b, *c;
int *d_a, *d_b, *d_c;
bool result = true;
int size=N*sizeof(int);
a = (int *) malloc(size);
b = (int *) malloc(size);
c = (int *) malloc(size);
hipMalloc((void **) & d_a, size);
hipMalloc((void **) & d_b, size);
hipMalloc((void **) & d_c, size);
std::cout<<std::endl<<"GPU output"<<std::endl;
for(int i=0; i<N; i++){
a[i]=i;
b[i]=i*i;
}
//Copy inputs to device
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( test_add), dim3(N),dim3(1), 0, 0, d_a,d_b,d_c);
//Copy results back to host
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
std::cout<<std::endl<<"GPU output"<<std::endl;
for(int i=0; i<N; i++){
//std::cout<<c[i]<<" ?= "<<a[i]+b[i]<<std::endl;
if(c[i] != a[i] + b[i]){
result = false;
}
}
//cleanup
hipFree(d_a); hipFree(d_b); hipFree(d_c);
free(a); free(b); free(c);
return result;
}
| 5e6e4a0902a74245768011a524a70241871e7a72.cu | /*
* CSR_Graph.cu
*
* Created on: Dec 12, 2014
* Author: pakij
*/
#include "CSR_Graph.h"
__device__ double atomicMin(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong( fmin(val, __longlong_as_double(assumed))) );
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
__global__ void BellmanFord_split1cuda(int * finished, int V, int E, int *offsets, int *edge_dests,
double *weights, int * preds, int * temp_preds, double * path_weights, double * temp_path_weights){
//int my_vert = blockIdx.x;
int my_vert = blockIdx.x *blockDim.x + threadIdx.x;
if(my_vert < V) {
double my_dist;
int first_target_index, last_target_index, target_index, target;
double new_dist;
my_dist = path_weights[my_vert];
//Find bounds of adjacency list
first_target_index = offsets[my_vert];
if(my_vert != V-1){
last_target_index = offsets[my_vert+1];
}
else{
last_target_index = E;
}
for(target_index = first_target_index; target_index < last_target_index; target_index++){
target = edge_dests[target_index];
new_dist = my_dist + weights[target_index];
// need to change path_weights[target] and update predecessors[target]
// Try to make this atomic
if(new_dist < atomicMin(&temp_path_weights[target], new_dist) ){
temp_preds[target] = my_vert;
//preds[target] = my_vert;
*finished = 0;
//temp_path_weights[target] = new_dist;
}
}
}
}
__global__ void BellmanFord_split2cuda(int V, int E, int *offsets, int *edge_dests, double *weights, int * preds, int * temp_preds, double * path_weights, double * temp_path_weights){
//int my_vert = blockIdx.x;
int my_vert = blockIdx.x *blockDim.x + threadIdx.x;
int first_target_index, last_target_index;
int pred_vert;
if(my_vert < V){
pred_vert = temp_preds[my_vert];
if(pred_vert >= 0 && pred_vert != my_vert){
//Update predecessors
preds[my_vert] = pred_vert;
//Find bounds of adjacency list of pred_vert, want to find edge that leads to my_vert
first_target_index = offsets[pred_vert];
if(pred_vert != V-1){
last_target_index = offsets[pred_vert+1];
}
else{
last_target_index = E;
}
//Update path_weights
for(int i=first_target_index; i < last_target_index; i++){
if(edge_dests[i] == my_vert){
temp_path_weights[my_vert] = path_weights[pred_vert] + weights[i];
break;
}
}
}
}
}
double CSR_Graph::BellmanFordGPU_Split(int source_, std::vector <int> &predecessors, std::vector <double> &path_weight){
int num_blocks = (V + threads_per_block - 1) / threads_per_block;
//Initialize predecessor tree
predecessors.clear();
path_weight.clear();
double inf = std::numeric_limits<double>::infinity();
predecessors.resize(V,-1);
path_weight.resize(V,E*max_weight);
predecessors[source_]=source_;
path_weight[source_]=0;
int finished;
boost::timer::auto_cpu_timer t;
//GPU pointers
int * d_offsets;
int * d_edge_dests;
double * d_weights;
int * d_predecessors;
double * d_path_weight;
int * d_temp_predecessors;
double * d_temp_path_weight;
int * d_finished;
double * temp;
//Size of CSR graph
int offsets_size = V*sizeof(int);
int edge_dests_size = E*sizeof(int);
int weights_size = E*sizeof(double);
//Size of predecessor tree into
int predecessors_size = V*sizeof(int);
int temp_predecessors_size = V*sizeof(int);
int path_weight_size = V*sizeof(double);
//Allocate memory on device
cudaMalloc((void **) & d_offsets, offsets_size);
cudaMalloc((void **) & d_edge_dests, edge_dests_size);
cudaMalloc((void **) & d_weights, weights_size);
cudaMalloc((void **) & d_predecessors, predecessors_size);
cudaMalloc((void **) & d_temp_predecessors, temp_predecessors_size);
cudaMalloc((void **) & d_path_weight, path_weight_size);
cudaMalloc((void **) & d_temp_path_weight, path_weight_size);
cudaMalloc((void **) & d_finished, sizeof(int));
std::cout<<"Transferring to GPU"<<std::endl;
cudaMemcpy(d_offsets, (int *) &offsets[0], offsets_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_edge_dests, (int *) &edge_dests[0], edge_dests_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_weights, (double *) &weights[0], weights_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_predecessors, (int *) &predecessors[0], predecessors_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_temp_predecessors, (int *) &predecessors[0], temp_predecessors_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_path_weight, (double *) &path_weight[0], path_weight_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_temp_path_weight, (double *) &path_weight[0], path_weight_size, cudaMemcpyHostToDevice);
std::cout<<"Running kernel with <<<" << num_blocks << ", " << threads_per_block << ">>>" <<std::endl;
int iter=0;
finished=0;
boost::timer::cpu_timer timer;
// for(int iter=0; iter<V; iter++){
while(finished == 0 && iter < E) {
//std::cout<<"Iter = "<<iter<<std::endl;
finished=1;
//std::cout<<finished<<std::endl;
cudaMemcpy(d_finished, &finished, sizeof(int), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
BellmanFord_split1cuda<<<num_blocks, threads_per_block>>>(d_finished, V, E, d_offsets,d_edge_dests,
d_weights,d_predecessors,d_temp_predecessors,d_path_weight, d_temp_path_weight);
cudaDeviceSynchronize();
cudaMemcpy(&finished, d_finished, sizeof(int), cudaMemcpyDeviceToHost);
BellmanFord_split2cuda<<<num_blocks, threads_per_block>>>(V, E, d_offsets,d_edge_dests,
d_weights,d_predecessors,d_temp_predecessors,d_path_weight, d_temp_path_weight);
cudaDeviceSynchronize();
cudaMemcpy(d_path_weight, d_temp_path_weight, path_weight_size, cudaMemcpyDeviceToDevice);
cudaDeviceSynchronize();
cudaMemcpy((double *) &path_weight[0], d_path_weight, path_weight_size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// for(int i=0; i<V; i++) {
// std::cout<<"V: "<<i<<",\t PW: "<<path_weight[i]<<std::endl;
// }
//std::cout<<finished<<std::endl;
// temp=d_path_weight;
// d_path_weight = d_temp_path_weight;
// d_temp_path_weight = temp;
iter++;
}
// }
timer.stop();
std::cout<<"Iterations = "<<iter<<std::endl;
//Copy results back to host
//cudaMemcpy((int *) &offsets[0], d_offsets, offsets_size, cudaMemcpyDeviceToHost);
//cudaMemcpy((int *) &edge_dests[0], d_edge_dests, edge_dests_size, cudaMemcpyDeviceToHost);
//cudaMemcpy((double *) &weights[0], d_weights, weights_size, cudaMemcpyDeviceToHost);
cudaMemcpy((int *) &predecessors[0], d_predecessors, predecessors_size, cudaMemcpyDeviceToHost);
cudaMemcpy((double *) &path_weight[0], d_path_weight, path_weight_size, cudaMemcpyDeviceToHost);
//cleanup
cudaFree(d_offsets); cudaFree(d_edge_dests); cudaFree(d_weights);
cudaFree(d_predecessors); cudaFree(d_path_weight); cudaFree(d_temp_predecessors);
for(int i=0; i<V; i++){
if(path_weight[i] == E*max_weight){
path_weight[i] = inf;
}
}
return (double) timer.elapsed().wall / 1000000000.0;
}
__global__ void BellmanFord_cuda(int V, int E, int *offsets, int *edge_dests, double *weights, int * preds, double * path_weights){
//int my_vert = blockIdx.x;
int my_vert = blockIdx.x *blockDim.x + threadIdx.x;
//int my_vert = threadIdx.x;
if(my_vert < V) {
int source_vert;
double my_dist = path_weights[my_vert];
double trial_dist;
source_vert=0;
for(int i=0; i<E; i++){
if(edge_dests[i] == my_vert){
//we can keep track of what the source vertex could be, since the edge list is sorted by them
while(source_vert != V-1 && offsets[source_vert+1] <= i){
source_vert++;
}
trial_dist = weights[i] + path_weights[source_vert]; //Data race, possibly benign?
if(trial_dist < my_dist){
path_weights[my_vert] = trial_dist;
preds[my_vert] = source_vert;
}
}
}
}
}
double CSR_Graph::BellmanFordGPU(int source_, std::vector <int> &predecessors, std::vector <double> &path_weight){
int num_blocks = (V + threads_per_block - 1) / threads_per_block;
//Initialize predecessor tree
predecessors.clear();
path_weight.clear();
double inf = std::numeric_limits<double>::infinity();
predecessors.resize(V,-1);
path_weight.resize(V,E*max_weight);
predecessors[source_]=source_;
path_weight[source_]=0;
boost::timer::auto_cpu_timer t;
//GPU pointers
int * d_offsets;
int * d_edge_dests;
double * d_weights;
int * d_predecessors;
double * d_path_weight;
//Size of CSR graph
int offsets_size = V*sizeof(int);
int edge_dests_size = E*sizeof(int);
int weights_size = E*sizeof(double);
//Size of predecessor tree into
int predecessors_size = V*sizeof(int);
int path_weight_size = V*sizeof(double);
//Allocate memory on device
cudaMalloc((void **) & d_offsets, offsets_size);
cudaMalloc((void **) & d_edge_dests, edge_dests_size);
cudaMalloc((void **) & d_weights, weights_size);
cudaMalloc((void **) & d_predecessors, predecessors_size);
cudaMalloc((void **) & d_path_weight, path_weight_size);
std::cout<<"Transferring to GPU"<<std::endl;
cudaMemcpy(d_offsets, (int *) &offsets[0], offsets_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_edge_dests, (int *) &edge_dests[0], edge_dests_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_weights, (double *) &weights[0], weights_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_predecessors, (int *) &predecessors[0], predecessors_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_path_weight, (double *) &path_weight[0], path_weight_size, cudaMemcpyHostToDevice);
std::cout<<"Running kernel with <<<" << num_blocks << ", " << threads_per_block << ">>>" <<std::endl;
boost::timer::cpu_timer timer;
for(int iter=0; iter<V; iter++){
//std::cout<<iter<<std::endl;
BellmanFord_cuda<<<num_blocks, threads_per_block>>>(V, E, d_offsets,d_edge_dests,d_weights,d_predecessors,d_path_weight);
cudaDeviceSynchronize();
}
timer.stop();
//Copy results back to host
cudaMemcpy((int *) &offsets[0], d_offsets, offsets_size, cudaMemcpyDeviceToHost);
cudaMemcpy((int *) &edge_dests[0], d_edge_dests, edge_dests_size, cudaMemcpyDeviceToHost);
cudaMemcpy((double *) &weights[0], d_weights, weights_size, cudaMemcpyDeviceToHost);
cudaMemcpy((int *) &predecessors[0], d_predecessors, predecessors_size, cudaMemcpyDeviceToHost);
cudaMemcpy((double *) &path_weight[0], d_path_weight, path_weight_size, cudaMemcpyDeviceToHost);
//cleanup
cudaFree(d_offsets); cudaFree(d_edge_dests); cudaFree(d_weights);
cudaFree(d_predecessors); cudaFree(d_path_weight);
return (double) timer.elapsed().wall / 1000000000.0;
}
//Simple test code
__global__ void test_add(int *a, int *b, int *c){
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
bool CSR_Graph::test_cuda(){
int N=1000;
int *a, *b, *c;
int *d_a, *d_b, *d_c;
bool result = true;
int size=N*sizeof(int);
a = (int *) malloc(size);
b = (int *) malloc(size);
c = (int *) malloc(size);
cudaMalloc((void **) & d_a, size);
cudaMalloc((void **) & d_b, size);
cudaMalloc((void **) & d_c, size);
std::cout<<std::endl<<"GPU output"<<std::endl;
for(int i=0; i<N; i++){
a[i]=i;
b[i]=i*i;
}
//Copy inputs to device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
test_add<<<N,1>>>(d_a,d_b,d_c);
//Copy results back to host
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
std::cout<<std::endl<<"GPU output"<<std::endl;
for(int i=0; i<N; i++){
//std::cout<<c[i]<<" ?= "<<a[i]+b[i]<<std::endl;
if(c[i] != a[i] + b[i]){
result = false;
}
}
//cleanup
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
free(a); free(b); free(c);
return result;
}
|
8942b5c8c0d8675f42e3f01a3a36295c2a09af7d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "funset.h"
#include <iostream>
#include "book.h"
#include "cpu_bitmap.h"
#include "gpu_anim.h"
using namespace std;
int test1();//
int test2();//GPU
int test3();//
int test4();//JuliaCUDA
int test5();//
int test6();//
int test7();//rippleCUDA
int test8();//CUDA
int test9();//JuliaCUDA__syncthreads()
int test10();//(Ray Tracing)+GPU
int test11();//(Ray Tracing)+GPU
int test12();//
int test13();//
int test14();//rippleCUDA+OpenGL
int test15();//,CUDA+OpenGL
int test16();//atomicAdd
int test17();//
int test18();//stream
int test19();//stream
int test20();//
int test21();//GPU
int main(int argc, char* argv[])
{
//test1();
test2();
cout<<"ok!"<<endl;
return 0;
}
int test1()
{
int a = 2, b = 3, c = 0;
int* dev_c = NULL;
HANDLE_ERROR(hipMalloc((void**)&dev_c, sizeof(int)));
//CUDA
//,
//
hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, a, b, dev_c);
HANDLE_ERROR(hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost));
printf("%d + %d = %d\n", a, b, c);
hipFree(dev_c);
return 0;
}
int test2()
{
int count = -1;
HANDLE_ERROR(hipGetDeviceCount(&count));
printf("device count: %d\n", count);
hipDeviceProp_t prop;
for (int i = 0; i < count; i++) {
HANDLE_ERROR(hipGetDeviceProperties(&prop, i));
printf(" --- General Information for device %d ---\n", i);
printf("Name: %s\n", prop.name);
printf("Compute capability: %d.%d\n", prop.major, prop.minor);
printf("Clock rate: %d\n", prop.clockRate);
printf("Device copy overlap: ");
if (prop.deviceOverlap) printf("Enabled\n");
else printf("Disabled\n");
printf("Kernel execution timeout : ");
if (prop.kernelExecTimeoutEnabled) printf("Enabled\n");
else printf("Disabled\n");
printf(" --- Memory Information for device %d ---\n", i);
printf("Total global mem: %ld\n", prop.totalGlobalMem);
printf("Total constant Mem: %ld\n", prop.totalConstMem);
printf("Max mem pitch: %ld\n", prop.memPitch);
printf("Texture Alignment: %ld\n", prop.textureAlignment);
printf(" --- MP Information for device %d ---\n", i);
printf("Multiprocessor count: %d\n", prop.multiProcessorCount);
printf("Shared mem per mp: %ld\n", prop.sharedMemPerBlock);
printf("Registers per mp: %d\n", prop.regsPerBlock);
printf("Threads in warp: %d\n", prop.warpSize);
printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
printf("Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0],
prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0],
prop.maxGridSize[1], prop.maxGridSize[2]);
printf("\n");
}
int dev;
HANDLE_ERROR(hipGetDevice(&dev));
printf("ID of current CUDA device: %d\n", dev);
memset(&prop, 0, sizeof(hipDeviceProp_t));
prop.major = 1;
prop.minor = 3;
HANDLE_ERROR(hipChooseDevice(&dev, &prop));
printf("ID of CUDA device closest to revision %d.%d: %d\n", prop.major, prop.minor, dev);
HANDLE_ERROR(hipSetDevice(dev));
return 0;
}
int test3()
{
int a[NUM] = {0}, b[NUM] = {0}, c[NUM] = {0};
int *dev_a = NULL, *dev_b = NULL, *dev_c = NULL;
//allocate the memory on the GPU
HANDLE_ERROR(hipMalloc((void**)&dev_a, NUM * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_b, NUM * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_c, NUM * sizeof(int)));
//fill the arrays 'a' and 'b' on the CPU
for (int i=0; i<NUM; i++) {
a[i] = -i;
b[i] = i * i;
}
//copy the arrays 'a' and 'b' to the GPU
HANDLE_ERROR(hipMemcpy(dev_a, a, NUM * sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_b, b, NUM * sizeof(int), hipMemcpyHostToDevice));
//
hipLaunchKernelGGL(( add_blockIdx), dim3(NUM),dim3(1), 0, 0, dev_a, dev_b, dev_c );
//copy the array 'c' back from the GPU to the CPU
HANDLE_ERROR(hipMemcpy(c, dev_c, NUM * sizeof(int), hipMemcpyDeviceToHost));
//display the results
for (int i=0; i<NUM; i++) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
//free the memory allocated on the GPU
HANDLE_ERROR(hipFree(dev_a));
HANDLE_ERROR(hipFree(dev_b));
HANDLE_ERROR(hipFree(dev_c));
return 0;
}
int test4()
{
//globals needed by the update routine
struct DataBlock {
unsigned char* dev_bitmap;
};
DataBlock data;
CPUBitmap bitmap(DIM, DIM, &data);
unsigned char* dev_bitmap;
HANDLE_ERROR(hipMalloc((void**)&dev_bitmap, bitmap.image_size()));
data.dev_bitmap = dev_bitmap;
//
//dim3
//dim3CUDA31
dim3 grid(DIM, DIM);
hipLaunchKernelGGL(( kernel_julia), dim3(grid),dim3(1), 0, 0, dev_bitmap);
HANDLE_ERROR(hipMemcpy(bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipFree(dev_bitmap));
bitmap.display_and_exit();
return 0;
}
int test5()
{
int a[NUM], b[NUM], c[NUM];
int *dev_a = NULL, *dev_b = NULL, *dev_c = NULL;
//GPU
HANDLE_ERROR(hipMalloc((void**)&dev_a, NUM * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_b, NUM * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_c, NUM * sizeof(int)));
//CPU'a''b'
for (int i = 0; i < NUM; i++) {
a[i] = i;
b[i] = i * i;
}
//'a''b'GPU
HANDLE_ERROR(hipMemcpy(dev_a, a, NUM * sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_b, b, NUM * sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( add_threadIdx), dim3(1), dim3(NUM), 0, 0, dev_a, dev_b, dev_c);
//'c'GPUCPU
HANDLE_ERROR(hipMemcpy(c, dev_c, NUM * sizeof(int), hipMemcpyDeviceToHost));
//
for (int i = 0; i < NUM; i++) {
printf("%d + %d = %d\n", a[i], b[i], c[i]);
}
//GPU
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
int test6()
{
int a[NUM], b[NUM], c[NUM];
int *dev_a = NULL, *dev_b = NULL, *dev_c = NULL;
//GPU
HANDLE_ERROR(hipMalloc((void**)&dev_a, NUM * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_b, NUM * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_c, NUM * sizeof(int)));
//CPU'a''b'
for (int i = 0; i < NUM; i++) {
a[i] = i;
b[i] = i * i / 10;
}
//'a''b'GPU
HANDLE_ERROR(hipMemcpy(dev_a, a, NUM * sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_b, b, NUM * sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( add_blockIdx_threadIdx), dim3(128), dim3(128), 0, 0, dev_a, dev_b, dev_c);
//'c'GPUCPU
HANDLE_ERROR(hipMemcpy(c, dev_c, NUM * sizeof(int), hipMemcpyDeviceToHost));
//GPU
bool success = true;
for (int i = 0; i < NUM; i++) {
if ((a[i] + b[i]) != c[i]) {
printf("error: %d + %d != %d\n", a[i], b[i], c[i]);
success = false;
}
}
if (success)
printf("we did it!\n");
//GPU
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
int test7()
{
DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
HANDLE_ERROR(hipMalloc((void**)&data.dev_bitmap, bitmap.image_size()));
bitmap.anim_and_exit((void(*)(void*,int))generate_frame, (void(*)(void*))cleanup);
return 0;
}
void generate_frame(DataBlock *d, int ticks)
{
dim3 blocks(DIM/16, DIM/16);
dim3 threads(16, 16);
hipLaunchKernelGGL(( ripple_kernel), dim3(blocks),dim3(threads), 0, 0, d->dev_bitmap, ticks);
HANDLE_ERROR(hipMemcpy(d->bitmap->get_ptr(), d->dev_bitmap, d->bitmap->image_size(), hipMemcpyDeviceToHost));
}
//clean up memory allocated on the GPU
void cleanup(DataBlock *d)
{
HANDLE_ERROR(hipFree(d->dev_bitmap));
}
int test8()
{
float *a, *b, c, *partial_c;
float *dev_a, *dev_b, *dev_partial_c;
//allocate memory on the cpu side
a = (float*)malloc(NUM * sizeof(float));
b = (float*)malloc(NUM * sizeof(float));
partial_c = (float*)malloc(blocksPerGrid * sizeof(float));
//allocate the memory on the GPU
HANDLE_ERROR(hipMalloc((void**)&dev_a, NUM * sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&dev_b, NUM * sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&dev_partial_c, blocksPerGrid*sizeof(float)));
//fill in the host memory with data
for (int i = 0; i < NUM; i++) {
a[i] = i;
b[i] = i*2;
}
//copy the arrays 'a' and 'b' to the GPU
HANDLE_ERROR(hipMemcpy(dev_a, a, NUM * sizeof(float), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_b, b, NUM * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( dot_kernel), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_partial_c);
//copy the array 'c' back from the GPU to the CPU
HANDLE_ERROR(hipMemcpy(partial_c, dev_partial_c, blocksPerGrid * sizeof(float), hipMemcpyDeviceToHost));
//finish up on the CPU side
c = 0;
for (int i = 0; i < blocksPerGrid; i++) {
c += partial_c[i];
}
//0NUM-12
//
#define sum_squares(x) (x * (x + 1) * (2 * x + 1) / 6)
printf("Does GPU value %.6g = %.6g?\n", c, 2 * sum_squares((float)(NUM - 1)));
//free memory on the gpu side
HANDLE_ERROR(hipFree(dev_a));
HANDLE_ERROR(hipFree(dev_b));
HANDLE_ERROR(hipFree(dev_partial_c));
//free memory on the cpu side
free(a);
free(b);
free(partial_c);
return 0;
}
int test9()
{
DataBlock data;
CPUBitmap bitmap(DIM, DIM, &data);
unsigned char *dev_bitmap;
HANDLE_ERROR(hipMalloc((void**)&dev_bitmap, bitmap.image_size()));
data.dev_bitmap = dev_bitmap;
dim3 grids(DIM / 16, DIM / 16);
dim3 threads(16,16);
hipLaunchKernelGGL(( julia_kernel), dim3(grids), dim3(threads), 0, 0, dev_bitmap);
HANDLE_ERROR(hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipFree(dev_bitmap));
bitmap.display_and_exit();
return 0;
}
int test10()
{
DataBlock data;
//capture the start time
hipEvent_t start, stop;
HANDLE_ERROR(hipEventCreate(&start));
HANDLE_ERROR(hipEventCreate(&stop));
HANDLE_ERROR(hipEventRecord(start, 0));
CPUBitmap bitmap(DIM, DIM, &data);
unsigned char *dev_bitmap;
Sphere *s;
//allocate memory on the GPU for the output bitmap
HANDLE_ERROR(hipMalloc((void**)&dev_bitmap, bitmap.image_size()));
//allocate memory for the Sphere dataset
HANDLE_ERROR(hipMalloc((void**)&s, sizeof(Sphere) * SPHERES));
//allocate temp memory, initialize it, copy to memory on the GPU, then free our temp memory
Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere) * SPHERES);
for (int i = 0; i < SPHERES; i++) {
temp_s[i].r = rnd(1.0f);
temp_s[i].g = rnd(1.0f);
temp_s[i].b = rnd(1.0f);
temp_s[i].x = rnd(1000.0f) - 500;
temp_s[i].y = rnd(1000.0f) - 500;
temp_s[i].z = rnd(1000.0f) - 500;
temp_s[i].radius = rnd(100.0f) + 20;
}
HANDLE_ERROR(hipMemcpy( s, temp_s, sizeof(Sphere) * SPHERES, hipMemcpyHostToDevice));
free(temp_s);
//generate a bitmap from our sphere data
dim3 grids(DIM / 16, DIM / 16);
dim3 threads(16, 16);
hipLaunchKernelGGL(( RayTracing_kernel), dim3(grids), dim3(threads), 0, 0, s, dev_bitmap);
//copy our bitmap back from the GPU for display
HANDLE_ERROR(hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost));
//get stop time, and display the timing results
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize(stop));
float elapsedTime;
HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop));
printf("Time to generate: %3.1f ms\n", elapsedTime);
HANDLE_ERROR(hipEventDestroy(start));
HANDLE_ERROR(hipEventDestroy(stop));
HANDLE_ERROR(hipFree(dev_bitmap));
HANDLE_ERROR(hipFree(s));
// display
bitmap.display_and_exit();
return 0;
}
int test11()
{
DataBlock data;
//capture the start time
hipEvent_t start, stop;
HANDLE_ERROR(hipEventCreate(&start));
HANDLE_ERROR(hipEventCreate(&stop));
HANDLE_ERROR(hipEventRecord(start, 0));
CPUBitmap bitmap(DIM, DIM, &data);
unsigned char *dev_bitmap;
//allocate memory on the GPU for the output bitmap
HANDLE_ERROR(hipMalloc((void**)&dev_bitmap, bitmap.image_size()));
//allocate temp memory, initialize it, copy to constant memory on the GPU, then free temp memory
Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere) * SPHERES);
for (int i = 0; i < SPHERES; i++) {
temp_s[i].r = rnd(1.0f);
temp_s[i].g = rnd(1.0f);
temp_s[i].b = rnd(1.0f);
temp_s[i].x = rnd(1000.0f) - 500;
temp_s[i].y = rnd(1000.0f) - 500;
temp_s[i].z = rnd(1000.0f) - 500;
temp_s[i].radius = rnd(100.0f) + 20;
}
HANDLE_ERROR(hipMemcpyToSymbol(s, temp_s, sizeof(Sphere) * SPHERES));
free(temp_s);
//generate a bitmap from our sphere data
dim3 grids(DIM / 16, DIM / 16);
dim3 threads(16, 16);
hipLaunchKernelGGL(( RayTracing_kernel), dim3(grids), dim3(threads), 0, 0, dev_bitmap);
//copy our bitmap back from the GPU for display
HANDLE_ERROR(hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost));
//get stop time, and display the timing results
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize(stop));
float elapsedTime;
HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop));
printf("Time to generate: %3.1f ms\n", elapsedTime);
HANDLE_ERROR(hipEventDestroy(start));
HANDLE_ERROR(hipEventDestroy(stop));
HANDLE_ERROR(hipFree(dev_bitmap));
//display
bitmap.display_and_exit();
return 0;
}
int test12()
{
Heat_DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
data.totalTime = 0;
data.frames = 0;
HANDLE_ERROR(hipEventCreate(&data.start));
HANDLE_ERROR(hipEventCreate(&data.stop));
int imageSize = bitmap.image_size();
HANDLE_ERROR(hipMalloc((void**)&data.output_bitmap, imageSize));
//assume float == 4 chars in size (ie rgba)
HANDLE_ERROR(hipMalloc((void**)&data.dev_inSrc, imageSize));
HANDLE_ERROR(hipMalloc((void**)&data.dev_outSrc, imageSize));
HANDLE_ERROR(hipMalloc((void**)&data.dev_constSrc, imageSize));
HANDLE_ERROR(hipBindTexture(NULL, texConstSrc, data.dev_constSrc, imageSize));
HANDLE_ERROR(hipBindTexture(NULL, texIn, data.dev_inSrc, imageSize));
HANDLE_ERROR(hipBindTexture(NULL, texOut, data.dev_outSrc, imageSize));
//intialize the constant data
float *temp = (float*)malloc(imageSize);
for (int i = 0; i < DIM*DIM; i++) {
temp[i] = 0;
int x = i % DIM;
int y = i / DIM;
if ((x>300) && (x<600) && (y>310) && (y<601))
temp[i] = MAX_TEMP;
}
temp[DIM * 100 + 100] = (MAX_TEMP + MIN_TEMP) / 2;
temp[DIM * 700 + 100] = MIN_TEMP;
temp[DIM * 300 + 300] = MIN_TEMP;
temp[DIM * 200 + 700] = MIN_TEMP;
for (int y = 800; y < 900; y++) {
for (int x = 400; x < 500; x++) {
temp[x + y * DIM] = MIN_TEMP;
}
}
HANDLE_ERROR(hipMemcpy(data.dev_constSrc, temp, imageSize, hipMemcpyHostToDevice));
//initialize the input data
for (int y = 800; y < DIM; y++) {
for (int x = 0; x < 200; x++) {
temp[x+y*DIM] = MAX_TEMP;
}
}
HANDLE_ERROR(hipMemcpy(data.dev_inSrc, temp,imageSize, hipMemcpyHostToDevice));
free(temp);
bitmap.anim_and_exit((void (*)(void*,int))Heat_anim_gpu, (void (*)(void*))Heat_anim_exit);
return 0;
}
int test13()
{
Heat_DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
data.totalTime = 0;
data.frames = 0;
HANDLE_ERROR(hipEventCreate(&data.start));
HANDLE_ERROR(hipEventCreate(&data.stop));
int imageSize = bitmap.image_size();
HANDLE_ERROR(hipMalloc((void**)&data.output_bitmap, imageSize));
//assume float == 4 chars in size (ie rgba)
HANDLE_ERROR(hipMalloc((void**)&data.dev_inSrc, imageSize));
HANDLE_ERROR(hipMalloc((void**)&data.dev_outSrc, imageSize));
HANDLE_ERROR(hipMalloc((void**)&data.dev_constSrc, imageSize));
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
HANDLE_ERROR(hipBindTexture2D(NULL, texConstSrc2, data.dev_constSrc, desc, DIM, DIM, sizeof(float) * DIM));
HANDLE_ERROR(hipBindTexture2D(NULL, texIn2, data.dev_inSrc, desc, DIM, DIM, sizeof(float) * DIM));
HANDLE_ERROR(hipBindTexture2D(NULL, texOut2, data.dev_outSrc, desc, DIM, DIM, sizeof(float) * DIM));
//initialize the constant data
float *temp = (float*)malloc(imageSize);
for (int i = 0; i < DIM*DIM; i++) {
temp[i] = 0;
int x = i % DIM;
int y = i / DIM;
if ((x > 300) && ( x < 600) && (y > 310) && (y < 601))
temp[i] = MAX_TEMP;
}
temp[DIM * 100 + 100] = (MAX_TEMP + MIN_TEMP) / 2;
temp[DIM * 700 + 100] = MIN_TEMP;
temp[DIM * 300 + 300] = MIN_TEMP;
temp[DIM * 200 + 700] = MIN_TEMP;
for (int y = 800; y < 900; y++) {
for (int x = 400; x < 500; x++) {
temp[x + y * DIM] = MIN_TEMP;
}
}
HANDLE_ERROR(hipMemcpy(data.dev_constSrc, temp, imageSize, hipMemcpyHostToDevice));
//initialize the input data
for (int y = 800; y < DIM; y++) {
for (int x = 0; x < 200; x++) {
temp[x + y * DIM] = MAX_TEMP;
}
}
HANDLE_ERROR(hipMemcpy(data.dev_inSrc, temp,imageSize, hipMemcpyHostToDevice));
free(temp);
bitmap.anim_and_exit((void (*)(void*,int))anim_gpu, (void (*)(void*))anim_exit);
return 0;
}
void Heat_anim_gpu(Heat_DataBlock *d, int ticks)
{
HANDLE_ERROR(hipEventRecord(d->start, 0));
dim3 blocks(DIM / 16, DIM / 16);
dim3 threads(16, 16);
CPUAnimBitmap *bitmap = d->bitmap;
//since tex is global and bound, we have to use a flag to
//select which is in/out per iteration
volatile bool dstOut = true;
for (int i = 0; i < 90; i++) {
float *in, *out;
if (dstOut) {
in = d->dev_inSrc;
out = d->dev_outSrc;
} else {
out = d->dev_inSrc;
in = d->dev_outSrc;
}
hipLaunchKernelGGL(( Heat_copy_const_kernel), dim3(blocks), dim3(threads), 0, 0, in);
hipLaunchKernelGGL(( Heat_blend_kernel), dim3(blocks), dim3(threads), 0, 0, out, dstOut);
dstOut = !dstOut;
}
hipLaunchKernelGGL(( float_to_color), dim3(blocks), dim3(threads), 0, 0, d->output_bitmap, d->dev_inSrc);
HANDLE_ERROR(hipMemcpy(bitmap->get_ptr(), d->output_bitmap, bitmap->image_size(), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipEventRecord(d->stop, 0));
HANDLE_ERROR(hipEventSynchronize(d->stop));
float elapsedTime;
HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, d->start, d->stop));
d->totalTime += elapsedTime;
++d->frames;
printf( "Average Time per frame: %3.1f ms\n", d->totalTime/d->frames );
}
void anim_gpu(Heat_DataBlock *d, int ticks)
{
HANDLE_ERROR(hipEventRecord(d->start, 0));
dim3 blocks(DIM / 16, DIM / 16);
dim3 threads(16, 16);
CPUAnimBitmap *bitmap = d->bitmap;
//since tex is global and bound, we have to use a flag to
//select which is in/out per iteration
volatile bool dstOut = true;
for (int i = 0; i < 90; i++) {
float *in, *out;
if (dstOut) {
in = d->dev_inSrc;
out = d->dev_outSrc;
} else {
out = d->dev_inSrc;
in = d->dev_outSrc;
}
hipLaunchKernelGGL(( copy_const_kernel), dim3(blocks), dim3(threads), 0, 0, in);
hipLaunchKernelGGL(( blend_kernel), dim3(blocks), dim3(threads), 0, 0, out, dstOut);
dstOut = !dstOut;
}
hipLaunchKernelGGL(( float_to_color), dim3(blocks), dim3(threads), 0, 0, d->output_bitmap, d->dev_inSrc);
HANDLE_ERROR(hipMemcpy(bitmap->get_ptr(), d->output_bitmap, bitmap->image_size(), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipEventRecord(d->stop, 0));
HANDLE_ERROR(hipEventSynchronize(d->stop));
float elapsedTime;
HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, d->start, d->stop));
d->totalTime += elapsedTime;
++d->frames;
printf("Average Time per frame: %3.1f ms\n", d->totalTime/d->frames);
}
void Heat_anim_exit(Heat_DataBlock *d)
{
hipUnbindTexture(texIn);
hipUnbindTexture(texOut);
hipUnbindTexture(texConstSrc);
HANDLE_ERROR(hipFree(d->dev_inSrc));
HANDLE_ERROR(hipFree(d->dev_outSrc));
HANDLE_ERROR(hipFree(d->dev_constSrc));
HANDLE_ERROR(hipEventDestroy(d->start));
HANDLE_ERROR(hipEventDestroy(d->stop));
}
//clean up memory allocated on the GPU
void anim_exit(Heat_DataBlock *d)
{
hipUnbindTexture(texIn2);
hipUnbindTexture(texOut2);
hipUnbindTexture(texConstSrc2);
HANDLE_ERROR(hipFree(d->dev_inSrc));
HANDLE_ERROR(hipFree(d->dev_outSrc));
HANDLE_ERROR(hipFree(d->dev_constSrc));
HANDLE_ERROR(hipEventDestroy(d->start));
HANDLE_ERROR(hipEventDestroy(d->stop));
}
//Ripple demo rendered directly via OpenGL interop: GPUAnimBitmap's animation
//loop calls generate_frame_opengl each frame; no exit callback is needed
//because the bitmap owns no extra device allocations here (data arg is NULL).
int test14()
{
 GPUAnimBitmap bitmap(DIM, DIM, NULL);
 bitmap.anim_and_exit((void (*)(uchar4*, void*, int))generate_frame_opengl, NULL);
 return 0;
}
//Heat-transfer simulation with OpenGL display: allocates the in/out/const
//temperature grids on the device, binds them to the texture references,
//seeds heaters (MAX_TEMP regions) and sinks (MIN_TEMP spots) in the constant
//grid, then hands control to the animation loop (anim_gpu_opengl per frame,
//anim_exit_opengl on quit).
int test15()
{
 DataBlock_opengl data;
 GPUAnimBitmap bitmap(DIM, DIM, &data);
 data.totalTime = 0;
 data.frames = 0;
 HANDLE_ERROR(hipEventCreate(&data.start));
 HANDLE_ERROR(hipEventCreate(&data.stop));
 int imageSize = bitmap.image_size();
 //assume float == 4 chars in size (ie rgba)
 HANDLE_ERROR(hipMalloc((void**)&data.dev_inSrc, imageSize));
 HANDLE_ERROR(hipMalloc((void**)&data.dev_outSrc, imageSize));
 HANDLE_ERROR(hipMalloc((void**)&data.dev_constSrc, imageSize));
 HANDLE_ERROR(hipBindTexture(NULL, texConstSrc ,data.dev_constSrc, imageSize));
 HANDLE_ERROR(hipBindTexture(NULL, texIn, data.dev_inSrc, imageSize));
 HANDLE_ERROR(hipBindTexture(NULL, texOut, data.dev_outSrc, imageSize));
 //intialize the constant data: a large rectangular heater plus a few point sinks
 float *temp = (float*)malloc(imageSize);
 for (int i = 0; i < DIM*DIM; i++) {
  temp[i] = 0;
  int x = i % DIM;
  int y = i / DIM;
  if ((x>300) && (x<600) && (y>310) && (y<601))
   temp[i] = MAX_TEMP;
 }
 temp[DIM*100+100] = (MAX_TEMP + MIN_TEMP)/2;
 temp[DIM*700+100] = MIN_TEMP;
 temp[DIM*300+300] = MIN_TEMP;
 temp[DIM*200+700] = MIN_TEMP;
 //a 100x100 cold block
 for (int y = 800; y < 900; y++) {
  for (int x = 400; x < 500; x++) {
   temp[x+y*DIM] = MIN_TEMP;
  }
 }
 HANDLE_ERROR(hipMemcpy(data.dev_constSrc, temp, imageSize, hipMemcpyHostToDevice));
 //initialize the input data: reuse 'temp' and add a hot strip along the bottom-left
 for (int y = 800; y < DIM; y++) {
  for (int x = 0; x < 200; x++) {
   temp[x+y*DIM] = MAX_TEMP;
  }
 }
 HANDLE_ERROR(hipMemcpy(data.dev_inSrc, temp, imageSize, hipMemcpyHostToDevice));
 free(temp);
 bitmap.anim_and_exit((void (*)(uchar4*, void*, int))anim_gpu_opengl, (void (*)(void*))anim_exit_opengl);
 return 0;
}
//Per-frame callback for the OpenGL heat demo: 90 ping-pong simulation steps,
//then colorize directly into the OpenGL-mapped output buffer (no device->host
//copy needed, unlike anim_gpu); prints the running average frame time.
void anim_gpu_opengl(uchar4* outputBitmap, DataBlock_opengl *d, int ticks)
{
 HANDLE_ERROR(hipEventRecord(d->start, 0));
 dim3 blocks(DIM / 16, DIM / 16);
 dim3 threads(16, 16);
 //since tex is global and bound, we have to use a flag to select which is in/out per iteration
 volatile bool dstOut = true;
 for (int i = 0; i < 90; i++) {
  float *in, *out;
  if (dstOut) {
   in = d->dev_inSrc;
   out = d->dev_outSrc;
  } else {
   out = d->dev_inSrc;
   in = d->dev_outSrc;
  }
  hipLaunchKernelGGL(( Heat_copy_const_kernel_opengl), dim3(blocks), dim3(threads), 0, 0, in);
  hipLaunchKernelGGL(( Heat_blend_kernel_opengl), dim3(blocks), dim3(threads), 0, 0, out, dstOut);
  dstOut = !dstOut;
 }
 //after an even number of iterations the latest result is in dev_inSrc
 hipLaunchKernelGGL(( float_to_color), dim3(blocks), dim3(threads), 0, 0, outputBitmap, d->dev_inSrc);
 HANDLE_ERROR(hipEventRecord(d->stop, 0));
 HANDLE_ERROR(hipEventSynchronize(d->stop));
 float elapsedTime;
 HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, d->start, d->stop));
 d->totalTime += elapsedTime;
 ++d->frames;
 printf("Average Time per frame: %3.1f ms\n", d->totalTime/d->frames);
}
//Exit callback for the OpenGL heat demo: unbind textures, free the three
//device grids, destroy the timing events. All calls are error-checked.
void anim_exit_opengl(DataBlock_opengl *d)
{
 HANDLE_ERROR(hipUnbindTexture(texIn));
 HANDLE_ERROR(hipUnbindTexture(texOut));
 HANDLE_ERROR(hipUnbindTexture(texConstSrc));
 HANDLE_ERROR(hipFree(d->dev_inSrc));
 HANDLE_ERROR(hipFree(d->dev_outSrc));
 HANDLE_ERROR(hipFree(d->dev_constSrc));
 HANDLE_ERROR(hipEventDestroy(d->start));
 HANDLE_ERROR(hipEventDestroy(d->stop));
}
//Histogram of SIZE random bytes computed on the GPU (histo_kernel uses
//atomicAdd), timed with events and verified against a CPU pass that
//decrements each bucket back to zero.
int test16()
{
 unsigned char *buffer = (unsigned char*)big_random_block(SIZE);
 //capture the start time starting the timer here so that we include the cost of
 //all of the operations on the GPU. if the data were already on the GPU and we just
 //timed the kernel the timing would drop from 74 ms to 15 ms. Very fast.
 hipEvent_t start, stop;
 HANDLE_ERROR(hipEventCreate(&start));
 HANDLE_ERROR(hipEventCreate(&stop));
 HANDLE_ERROR(hipEventRecord(start, 0));
 //allocate memory on the GPU for the file's data
 unsigned char *dev_buffer;
 unsigned int *dev_histo;
 HANDLE_ERROR(hipMalloc((void**)&dev_buffer, SIZE));
 HANDLE_ERROR(hipMemcpy(dev_buffer, buffer, SIZE, hipMemcpyHostToDevice));
 HANDLE_ERROR(hipMalloc((void**)&dev_histo, 256 * sizeof(int)));
 HANDLE_ERROR(hipMemset(dev_histo, 0, 256 * sizeof(int)));
 //kernel launch - 2x the number of mps gave best timing
 hipDeviceProp_t prop;
 HANDLE_ERROR(hipGetDeviceProperties(&prop, 0));
 int blocks = prop.multiProcessorCount;
 hipLaunchKernelGGL(( histo_kernel), dim3(blocks*2), dim3(256), 0, 0, dev_buffer, SIZE, dev_histo);
 unsigned int histo[256];
 HANDLE_ERROR(hipMemcpy(histo, dev_histo, 256 * sizeof(int), hipMemcpyDeviceToHost));
 //get stop time, and display the timing results
 HANDLE_ERROR(hipEventRecord(stop, 0));
 HANDLE_ERROR(hipEventSynchronize(stop));
 float elapsedTime;
 HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop));
 printf("Time to generate: %3.1f ms\n", elapsedTime);
 long histoCount = 0;
 for (int i=0; i<256; i++) {
  histoCount += histo[i];
 }
 printf("Histogram Sum: %ld\n", histoCount);
 //verify that we have the same counts via CPU: every bucket must return to 0
 for (int i = 0; i < SIZE; i++)
  histo[buffer[i]]--;
 for (int i = 0; i < 256; i++) {
  if (histo[i] != 0)
   printf("Failure at %d!\n", i);
 }
 HANDLE_ERROR(hipEventDestroy(start));
 HANDLE_ERROR(hipEventDestroy(stop));
 //wrap the frees in HANDLE_ERROR for consistency with the rest of this
 //function; previously their return codes were ignored
 HANDLE_ERROR(hipFree(dev_histo));
 HANDLE_ERROR(hipFree(dev_buffer));
 free(buffer);
 return 0;
}
float cuda_malloc_test(int size, bool up)
{
hipEvent_t start, stop;
int *a, *dev_a;
float elapsedTime;
HANDLE_ERROR(hipEventCreate(&start));
HANDLE_ERROR(hipEventCreate(&stop));
a = (int*)malloc(size * sizeof(*a));
HANDLE_NULL(a);
HANDLE_ERROR(hipMalloc((void**)&dev_a,size * sizeof(*dev_a)));
HANDLE_ERROR(hipEventRecord(start, 0));
for (int i=0; i<100; i++) {
if (up)
HANDLE_ERROR(hipMemcpy(dev_a, a, size * sizeof( *dev_a ), hipMemcpyHostToDevice));
else
HANDLE_ERROR(hipMemcpy(a, dev_a, size * sizeof(*dev_a), hipMemcpyDeviceToHost));
}
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize(stop));
HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop));
free(a);
HANDLE_ERROR(hipFree(dev_a));
HANDLE_ERROR(hipEventDestroy(start));
HANDLE_ERROR(hipEventDestroy(stop));
return elapsedTime;
}
//Same benchmark as cuda_malloc_test but with pinned (page-locked) host memory
//allocated via hipHostMalloc; returns the elapsed time in milliseconds.
//'up' selects the copy direction (true: host->device).
float cuda_host_alloc_test(int size, bool up)
{
 hipEvent_t start, stop;
 int *a, *dev_a;
 float elapsedTime;
 HANDLE_ERROR(hipEventCreate(&start));
 HANDLE_ERROR(hipEventCreate(&stop));
 //pinned allocation: enables faster DMA transfers than pageable malloc
 HANDLE_ERROR(hipHostMalloc((void**)&a,size * sizeof(*a), hipHostMallocDefault));
 HANDLE_ERROR(hipMalloc((void**)&dev_a, size * sizeof(*dev_a)));
 HANDLE_ERROR(hipEventRecord(start, 0));
 for (int i=0; i<100; i++) {
  if (up)
   HANDLE_ERROR(hipMemcpy(dev_a, a,size * sizeof(*a), hipMemcpyHostToDevice));
  else
   HANDLE_ERROR(hipMemcpy(a, dev_a,size * sizeof(*a), hipMemcpyDeviceToHost));
 }
 HANDLE_ERROR(hipEventRecord(stop, 0));
 HANDLE_ERROR(hipEventSynchronize(stop));
 HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop));
 //pinned memory must be released with hipHostFree, not free()
 HANDLE_ERROR(hipHostFree(a));
 HANDLE_ERROR(hipFree(dev_a));
 HANDLE_ERROR(hipEventDestroy(start));
 HANDLE_ERROR(hipEventDestroy(stop));
 return elapsedTime;
}
//Compare copy bandwidth of pageable (hipMalloc path) vs pinned
//(hipHostMalloc) host memory, in both directions, printing MB/s for each.
int test17()
{
 float elapsedTime;
 //total bytes moved: 100 iterations of SIZE ints, expressed in MB
 float MB = (float)100 * SIZE * sizeof(int) / 1024 / 1024;
 //try it with hipMalloc
 elapsedTime = cuda_malloc_test(SIZE, true);
 printf("Time using hipMalloc: %3.1f ms\n", elapsedTime);
 printf("\tMB/s during copy up: %3.1f\n", MB/(elapsedTime/1000));
 elapsedTime = cuda_malloc_test(SIZE, false);
 printf("Time using hipMalloc: %3.1f ms\n", elapsedTime);
 printf("\tMB/s during copy down: %3.1f\n", MB/(elapsedTime/1000));
 //now try it with hipHostMalloc
 elapsedTime = cuda_host_alloc_test(SIZE, true);
 printf("Time using hipHostMalloc: %3.1f ms\n", elapsedTime);
 printf("\tMB/s during copy up: %3.1f\n", MB/(elapsedTime/1000));
 elapsedTime = cuda_host_alloc_test(SIZE, false);
 printf("Time using hipHostMalloc: %3.1f ms\n", elapsedTime);
 printf("\tMB/s during copy down: %3.1f\n", MB/(elapsedTime/1000));
 return 0;
}
//Single-stream demo: process FULL_DATA_SIZE ints in NUM-sized chunks, each
//chunk enqueued as async H2D copies + kernel + async D2H copy on one stream,
//overlapping transfers with compute. Requires pinned host buffers.
int test18()
{
 hipDeviceProp_t prop;
 int whichDevice;
 HANDLE_ERROR(hipGetDevice(&whichDevice));
 HANDLE_ERROR(hipGetDeviceProperties(&prop, whichDevice));
 //overlap of copy and kernel execution is required for any speedup
 if (!prop.deviceOverlap) {
  printf("Device will not handle overlaps, so no speed up from streams\n");
  return 0;
 }
 hipEvent_t start, stop;
 float elapsedTime;
 hipStream_t stream;
 int *host_a, *host_b, *host_c;
 int *dev_a, *dev_b, *dev_c;
 //start the timers
 HANDLE_ERROR(hipEventCreate(&start));
 HANDLE_ERROR(hipEventCreate(&stop));
 //initialize the stream
 HANDLE_ERROR(hipStreamCreate(&stream));
 //allocate the memory on the GPU (one chunk's worth only)
 HANDLE_ERROR(hipMalloc((void**)&dev_a, NUM * sizeof(int)));
 HANDLE_ERROR(hipMalloc((void**)&dev_b, NUM * sizeof(int)));
 HANDLE_ERROR(hipMalloc((void**)&dev_c, NUM * sizeof(int)));
 //allocate host locked memory, used to stream (async copies require pinned memory)
 HANDLE_ERROR(hipHostMalloc((void**)&host_a, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault));
 HANDLE_ERROR(hipHostMalloc((void**)&host_b, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault));
 HANDLE_ERROR(hipHostMalloc((void**)&host_c, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault));
 for (int i=0; i<FULL_DATA_SIZE; i++) {
  host_a[i] = rand();
  host_b[i] = rand();
 }
 HANDLE_ERROR(hipEventRecord(start, 0));
 //now loop over full data, in bite-sized chunks
 for (int i=0; i<FULL_DATA_SIZE; i+= NUM) {
  //copy the locked memory to the device, async
  HANDLE_ERROR(hipMemcpyAsync(dev_a, host_a+i, NUM * sizeof(int), hipMemcpyHostToDevice, stream));
  HANDLE_ERROR(hipMemcpyAsync(dev_b, host_b+i, NUM * sizeof(int), hipMemcpyHostToDevice, stream));
  hipLaunchKernelGGL(( singlestream_kernel), dim3(NUM/256), dim3(256), 0, stream, dev_a, dev_b, dev_c);
  //copy the data from device to locked memory
  HANDLE_ERROR(hipMemcpyAsync(host_c+i, dev_c, NUM * sizeof(int), hipMemcpyDeviceToHost, stream));
 }
 //drain the stream before reading results / stopping the timer
 HANDLE_ERROR(hipStreamSynchronize(stream));
 HANDLE_ERROR(hipEventRecord(stop, 0));
 HANDLE_ERROR(hipEventSynchronize(stop));
 HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop));
 printf("Time taken: %3.1f ms\n", elapsedTime);
 //cleanup the streams and memory
 HANDLE_ERROR(hipHostFree(host_a));
 HANDLE_ERROR(hipHostFree(host_b));
 HANDLE_ERROR(hipHostFree(host_c));
 HANDLE_ERROR(hipFree(dev_a));
 HANDLE_ERROR(hipFree(dev_b));
 HANDLE_ERROR(hipFree(dev_c));
 HANDLE_ERROR(hipStreamDestroy(stream));
 return 0;
}
//Two-stream demo: like test18 but alternates chunks between stream0 and
//stream1 (each with its own device buffers) so copies in one stream can
//overlap kernel execution in the other.
int test19()
{
 hipDeviceProp_t prop;
 int whichDevice;
 HANDLE_ERROR(hipGetDevice(&whichDevice));
 HANDLE_ERROR(hipGetDeviceProperties(&prop, whichDevice));
 if (!prop.deviceOverlap) {
  printf( "Device will not handle overlaps, so no speed up from streams\n" );
  return 0;
 }
 //start the timers
 hipEvent_t start, stop;
 HANDLE_ERROR(hipEventCreate(&start));
 HANDLE_ERROR(hipEventCreate(&stop));
 //initialize the streams
 hipStream_t stream0, stream1;
 HANDLE_ERROR(hipStreamCreate(&stream0));
 HANDLE_ERROR(hipStreamCreate(&stream1));
 int *host_a, *host_b, *host_c;
 int *dev_a0, *dev_b0, *dev_c0;//device buffers for stream0
 int *dev_a1, *dev_b1, *dev_c1;//device buffers for stream1
 //allocate the memory on the GPU
 HANDLE_ERROR(hipMalloc((void**)&dev_a0, NUM * sizeof(int)));
 HANDLE_ERROR(hipMalloc((void**)&dev_b0, NUM * sizeof(int)));
 HANDLE_ERROR(hipMalloc((void**)&dev_c0, NUM * sizeof(int)));
 HANDLE_ERROR(hipMalloc((void**)&dev_a1, NUM * sizeof(int)));
 HANDLE_ERROR(hipMalloc((void**)&dev_b1, NUM * sizeof(int)));
 HANDLE_ERROR(hipMalloc((void**)&dev_c1, NUM * sizeof(int)));
 //allocate host locked memory, used to stream
 HANDLE_ERROR(hipHostMalloc((void**)&host_a, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault));
 HANDLE_ERROR(hipHostMalloc((void**)&host_b, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault));
 HANDLE_ERROR(hipHostMalloc((void**)&host_c, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault));
 for (int i=0; i<FULL_DATA_SIZE; i++) {
  host_a[i] = rand();
  host_b[i] = rand();
 }
 HANDLE_ERROR(hipEventRecord(start, 0));
 //now loop over full data, in bite-sized chunks; each pass consumes two chunks
 for (int i=0; i<FULL_DATA_SIZE; i+= NUM*2) {
  //enqueue copies of a in stream0 and stream1
  //interleaving the enqueues (a0/a1, b0/b1, ...) helps the hardware overlap work
  HANDLE_ERROR(hipMemcpyAsync(dev_a0, host_a+i, NUM * sizeof(int), hipMemcpyHostToDevice, stream0));
  HANDLE_ERROR(hipMemcpyAsync(dev_a1, host_a+i+NUM, NUM * sizeof(int), hipMemcpyHostToDevice, stream1));
  //enqueue copies of b in stream0 and stream1
  HANDLE_ERROR(hipMemcpyAsync(dev_b0, host_b+i, NUM * sizeof(int), hipMemcpyHostToDevice, stream0));
  HANDLE_ERROR(hipMemcpyAsync(dev_b1, host_b+i+NUM, NUM * sizeof(int), hipMemcpyHostToDevice, stream1));
  //enqueue kernels in stream0 and stream1
  hipLaunchKernelGGL(( singlestream_kernel), dim3(NUM/256), dim3(256), 0, stream0, dev_a0, dev_b0, dev_c0);
  hipLaunchKernelGGL(( singlestream_kernel), dim3(NUM/256), dim3(256), 0, stream1, dev_a1, dev_b1, dev_c1);
  //enqueue copies of c from device to locked memory
  HANDLE_ERROR(hipMemcpyAsync(host_c+i, dev_c0, NUM * sizeof(int), hipMemcpyDeviceToHost, stream0));
  HANDLE_ERROR(hipMemcpyAsync(host_c+i+NUM, dev_c1, NUM * sizeof(int), hipMemcpyDeviceToHost, stream1));
 }
 float elapsedTime;
 //both streams must drain before stopping the timer
 HANDLE_ERROR(hipStreamSynchronize(stream0));
 HANDLE_ERROR(hipStreamSynchronize(stream1));
 HANDLE_ERROR(hipEventRecord(stop, 0));
 HANDLE_ERROR(hipEventSynchronize(stop));
 HANDLE_ERROR(hipEventElapsedTime(&elapsedTime,start, stop));
 printf( "Time taken: %3.1f ms\n", elapsedTime );
 //cleanup the streams and memory
 HANDLE_ERROR(hipHostFree(host_a));
 HANDLE_ERROR(hipHostFree(host_b));
 HANDLE_ERROR(hipHostFree(host_c));
 HANDLE_ERROR(hipFree(dev_a0));
 HANDLE_ERROR(hipFree(dev_b0));
 HANDLE_ERROR(hipFree(dev_c0));
 HANDLE_ERROR(hipFree(dev_a1));
 HANDLE_ERROR(hipFree(dev_b1));
 HANDLE_ERROR(hipFree(dev_c1));
 HANDLE_ERROR(hipStreamDestroy(stream0));
 HANDLE_ERROR(hipStreamDestroy(stream1));
 return 0;
}
//Dot-product benchmark using explicit hipMemcpy of device buffers:
//fills a[i]=i, b[i]=2i, launches dot_kernel producing per-block partial sums,
//finishes the reduction on the CPU, prints the result, and returns elapsed ms.
float malloc_test(int size)
{
 hipEvent_t start, stop;
 float *a, *b, c, *partial_c;
 float *dev_a, *dev_b, *dev_partial_c;
 float elapsedTime;
 HANDLE_ERROR(hipEventCreate(&start));
 HANDLE_ERROR(hipEventCreate(&stop));
 //allocate memory on the CPU side
 a = (float*)malloc(size * sizeof(float));
 b = (float*)malloc(size * sizeof(float));
 partial_c = (float*)malloc(blocksPerGrid * sizeof(float));
 //allocate the memory on the GPU
 HANDLE_ERROR(hipMalloc((void**)&dev_a, size * sizeof(float)));
 HANDLE_ERROR(hipMalloc((void**)&dev_b, size * sizeof(float)));
 HANDLE_ERROR(hipMalloc((void**)&dev_partial_c, blocksPerGrid * sizeof(float)));
 //fill in the host memory with data
 for (int i=0; i<size; i++) {
  a[i] = i;
  b[i] = i * 2;
 }
 HANDLE_ERROR(hipEventRecord(start, 0));
 //copy the arrays 'a' and 'b' to the GPU
 HANDLE_ERROR(hipMemcpy(dev_a, a, size * sizeof(float), hipMemcpyHostToDevice));
 HANDLE_ERROR(hipMemcpy(dev_b, b, size * sizeof(float), hipMemcpyHostToDevice));
 hipLaunchKernelGGL(( dot_kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, size, dev_a, dev_b, dev_partial_c);
 //copy the array 'c' back from the GPU to the CPU
 HANDLE_ERROR(hipMemcpy(partial_c, dev_partial_c,blocksPerGrid*sizeof(float), hipMemcpyDeviceToHost));
 HANDLE_ERROR(hipEventRecord(stop, 0));
 HANDLE_ERROR(hipEventSynchronize(stop));
 HANDLE_ERROR(hipEventElapsedTime(&elapsedTime,start, stop));
 //finish up on the CPU side: sum the per-block partials
 c = 0;
 for (int i=0; i<blocksPerGrid; i++) {
  c += partial_c[i];
 }
 HANDLE_ERROR(hipFree(dev_a));
 HANDLE_ERROR(hipFree(dev_b));
 HANDLE_ERROR(hipFree(dev_partial_c));
 //free memory on the CPU side
 free(a);
 free(b);
 free(partial_c);
 //free events
 HANDLE_ERROR(hipEventDestroy(start));
 HANDLE_ERROR(hipEventDestroy(stop));
 printf("Value calculated: %f\n", c);
 return elapsedTime;
}
//Dot-product benchmark using zero-copy (mapped, pinned) host memory:
//the kernel reads a/b and writes partial_c directly through device pointers
//obtained with hipHostGetDevicePointer, so no explicit hipMemcpy is needed.
//Returns elapsed ms. Requires test20's hipSetDeviceFlags(hipDeviceMapHost).
float cuda_host_alloc_test(int size)
{
 hipEvent_t start, stop;
 float *a, *b, c, *partial_c;
 float *dev_a, *dev_b, *dev_partial_c;
 float elapsedTime;
 HANDLE_ERROR(hipEventCreate(&start));
 HANDLE_ERROR(hipEventCreate(&stop));
 //allocate the memory on the CPU: write-combined is fast for GPU reads but
 //slow for CPU reads, so only the inputs a/b use it
 HANDLE_ERROR(hipHostMalloc((void**)&a, size*sizeof(float), hipHostMallocWriteCombined |hipHostMallocMapped));
 HANDLE_ERROR(hipHostMalloc((void**)&b, size*sizeof(float), hipHostMallocWriteCombined |hipHostMallocMapped));
 HANDLE_ERROR(hipHostMalloc((void**)&partial_c, blocksPerGrid*sizeof(float), hipHostMallocMapped));
 //find out the GPU pointers
 HANDLE_ERROR(hipHostGetDevicePointer(&dev_a, a, 0));
 HANDLE_ERROR(hipHostGetDevicePointer(&dev_b, b, 0));
 HANDLE_ERROR( hipHostGetDevicePointer(&dev_partial_c, partial_c, 0));
 //fill in the host memory with data
 for (int i=0; i<size; i++) {
  a[i] = i;
  b[i] = i*2;
 }
 HANDLE_ERROR(hipEventRecord(start, 0));
 hipLaunchKernelGGL(( dot_kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, size, dev_a, dev_b, dev_partial_c);
 //must synchronize before the CPU reads partial_c written by the kernel
 HANDLE_ERROR(hipDeviceSynchronize());
 HANDLE_ERROR(hipEventRecord(stop, 0));
 HANDLE_ERROR(hipEventSynchronize(stop));
 HANDLE_ERROR(hipEventElapsedTime(&elapsedTime,start, stop));
 //finish up on the CPU side
 c = 0;
 for (int i=0; i<blocksPerGrid; i++) {
  c += partial_c[i];
 }
 HANDLE_ERROR(hipHostFree(a));
 HANDLE_ERROR(hipHostFree(b));
 HANDLE_ERROR(hipHostFree(partial_c));
 // free events
 HANDLE_ERROR(hipEventDestroy(start));
 HANDLE_ERROR(hipEventDestroy(stop));
 printf("Value calculated: %f\n", c);
 return elapsedTime;
}
//Compare the explicit-copy dot product (malloc_test) against the zero-copy
//version (cuda_host_alloc_test). Requires a device that can map host memory;
//hipDeviceMapHost must be set before any pinned+mapped allocations.
int test20()
{
 hipDeviceProp_t prop;
 int whichDevice;
 HANDLE_ERROR(hipGetDevice(&whichDevice));
 HANDLE_ERROR(hipGetDeviceProperties(&prop, whichDevice));
 if (prop.canMapHostMemory != 1) {
  printf( "Device can not map memory.\n" );
  return 0;
 }
 HANDLE_ERROR(hipSetDeviceFlags(hipDeviceMapHost));
 //try it with malloc
 float elapsedTime = malloc_test(NUM);
 printf("Time using hipMalloc: %3.1f ms\n", elapsedTime);
 //now try it with hipHostMalloc
 elapsedTime = cuda_host_alloc_test(NUM);
 printf("Time using hipHostMalloc: %3.1f ms\n", elapsedTime);
 return 0;
}
void* routine(void *pvoidData)
{
DataStruct *data = (DataStruct*)pvoidData;
HANDLE_ERROR(hipSetDevice(data->deviceID));
int size = data->size;
float *a, *b, c, *partial_c;
float *dev_a, *dev_b, *dev_partial_c;
//allocate memory on the CPU side
a = data->a;
b = data->b;
partial_c = (float*)malloc(blocksPerGrid * sizeof(float));
//allocate the memory on the GPU
HANDLE_ERROR(hipMalloc((void**)&dev_a, size * sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&dev_b, size * sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&dev_partial_c, blocksPerGrid*sizeof(float)));
//copy the arrays 'a' and 'b' to the GPU
HANDLE_ERROR(hipMemcpy(dev_a, a, size*sizeof(float), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_b, b, size*sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( dot_kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, size, dev_a, dev_b, dev_partial_c);
//copy the array 'c' back from the GPU to the CPU
HANDLE_ERROR(hipMemcpy( partial_c, dev_partial_c, blocksPerGrid * sizeof(float), hipMemcpyDeviceToHost));
//finish up on the CPU side
c = 0;
for (int i=0; i<blocksPerGrid; i++) {
c += partial_c[i];
}
HANDLE_ERROR(hipFree(dev_a));
HANDLE_ERROR(hipFree(dev_b));
HANDLE_ERROR(hipFree(dev_partial_c));
//free memory on the CPU side
free(partial_c);
data->returnValue = c;
return 0;
}
//Multi-GPU dot product: splits the NUM-element arrays in half, runs routine()
//for device 0 on a worker thread and for device 1 on the calling thread,
//then sums the two partial results.
int test21()
{
 int deviceCount;
 HANDLE_ERROR(hipGetDeviceCount(&deviceCount));
 if (deviceCount < 2) {
  printf("We need at least two compute 1.0 or greater devices, but only found %d\n", deviceCount);
  return 0;
 }
 float *a = (float*)malloc(sizeof(float) * NUM);
 HANDLE_NULL(a);
 float *b = (float*)malloc(sizeof(float) * NUM);
 HANDLE_NULL(b);
 //fill in the host memory with data
 for (int i=0; i<NUM; i++) {
  a[i] = i;
  b[i] = i*2;
 }
 //prepare for multithread: each DataStruct describes one device's half
 DataStruct data[2];
 data[0].deviceID = 0;
 data[0].size = NUM/2;
 data[0].a = a;
 data[0].b = b;
 data[1].deviceID = 1;
 data[1].size = NUM/2;
 data[1].a = a + NUM/2;
 data[1].b = b + NUM/2;
 CUTThread thread = start_thread(routine, &(data[0]));
 routine(&(data[1]));
 //wait for the worker thread before reading data[0].returnValue
 end_thread(thread);
 //free memory on the CPU side
 free(a);
 free(b);
 printf("Value calculated: %f\n", data[0].returnValue + data[1].returnValue);
 return 0;
}
| 8942b5c8c0d8675f42e3f01a3a36295c2a09af7d.cu | #include "funset.h"
#include <iostream>
#include "book.h"
#include "cpu_bitmap.h"
#include "gpu_anim.h"
using namespace std;
int test1();//simple addition of two numbers
int test2();//query GPU device properties
int test3();//sum two vectors using the block index
int test4();//CUDA implementation of the Julia set
int test5();//sum two vectors using the thread index
int test6();//sum two vectors using both block and thread indices
int test7();//CUDA implementation of the ripple animation
int test8();//CUDA implementation of the dot product
int test9();//Julia set with the thread barrier __syncthreads()
int test10();//ray tracing without constant memory + event-based GPU timing
int test11();//ray tracing with constant memory + event-based GPU timing
int test12();//heat-transfer simulation using texture memory (has some issues)
int test13();//heat-transfer simulation using 2D texture memory (has some issues)
int test14();//CUDA+OpenGL implementation of the ripple animation
int test15();//heat-transfer simulation, CUDA+OpenGL (has some issues)
int test16();//histogram computed with the atomic function atomicAdd
int test17();//demonstrates pinned (page-locked) host memory
int test18();//single CUDA stream usage
int test19();//multiple CUDA streams usage
int test20();//dot product via zero-copy (mapped host) memory
int test21();//dot product using multiple GPUs
//Entry point: runs the device-property demo (test2); the other tests are
//left commented out and can be enabled one at a time.
int main(int argc, char* argv[])
{
	//test1();
	test2();
	cout<<"ok!"<<endl;
	return 0;
}
//Minimal CUDA example: add two ints on the device and copy the result back.
int test1()
{
	int a = 2, b = 3, c = 0;
	int* dev_c = NULL;
	HANDLE_ERROR(cudaMalloc((void**)&dev_c, sizeof(int)));
	//The angle brackets pass configuration to the CUDA runtime system,
	//not to the device code: they tell the runtime how to launch the kernel
	//(grid and block dimensions). Arguments for the device code itself are
	//passed in the parentheses, just like an ordinary function call.
	add<<<1, 1>>>(a, b, dev_c);
	HANDLE_ERROR(cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost));
	printf("%d + %d = %d\n", a, b, c);
	//check the free as well, consistent with the HANDLE_ERROR pattern above
	HANDLE_ERROR(cudaFree(dev_c));
	return 0;
}
//Enumerate all CUDA devices and print their general, memory, and
//multiprocessor properties; then demonstrate cudaChooseDevice by asking for
//the device closest to compute capability 1.3 and selecting it.
int test2()
{
	int count = -1;
	HANDLE_ERROR(cudaGetDeviceCount(&count));
	printf("device count: %d\n", count);
	cudaDeviceProp prop;
	for (int i = 0; i < count; i++) {
		HANDLE_ERROR(cudaGetDeviceProperties(&prop, i));
		printf("   --- General Information for device %d ---\n", i);
		printf("Name:  %s\n", prop.name);
		printf("Compute capability:  %d.%d\n", prop.major, prop.minor);
		printf("Clock rate:  %d\n", prop.clockRate);
		printf("Device copy overlap:  ");
		if (prop.deviceOverlap) printf("Enabled\n");
		else printf("Disabled\n");
		printf("Kernel execution timeout :  ");
		if (prop.kernelExecTimeoutEnabled) printf("Enabled\n");
		else printf("Disabled\n");
		printf("   --- Memory Information for device %d ---\n", i);
		printf("Total global mem:  %ld\n", prop.totalGlobalMem);
		printf("Total constant Mem:  %ld\n", prop.totalConstMem);
		printf("Max mem pitch:  %ld\n", prop.memPitch);
		printf("Texture Alignment:  %ld\n", prop.textureAlignment);
		printf("   --- MP Information for device %d ---\n", i);
		printf("Multiprocessor count:  %d\n", prop.multiProcessorCount);
		printf("Shared mem per mp:  %ld\n", prop.sharedMemPerBlock);
		printf("Registers per mp:  %d\n", prop.regsPerBlock);
		printf("Threads in warp:  %d\n", prop.warpSize);
		printf("Max threads per block:  %d\n", prop.maxThreadsPerBlock);
		printf("Max thread dimensions:  (%d, %d, %d)\n", prop.maxThreadsDim[0],
			prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
		printf("Max grid dimensions:  (%d, %d, %d)\n", prop.maxGridSize[0],
			prop.maxGridSize[1], prop.maxGridSize[2]);
		printf("\n");
	}
	int dev;
	HANDLE_ERROR(cudaGetDevice(&dev));
	printf("ID of current CUDA device:  %d\n", dev);
	//ask the runtime for the device closest to compute capability 1.3
	memset(&prop, 0, sizeof(cudaDeviceProp));
	prop.major = 1;
	prop.minor = 3;
	HANDLE_ERROR(cudaChooseDevice(&dev, &prop));
	printf("ID of CUDA device closest to revision %d.%d:  %d\n", prop.major, prop.minor, dev);
	HANDLE_ERROR(cudaSetDevice(dev));
	return 0;
}
//Vector addition where each thread block handles one element
//(add_blockIdx is launched with NUM blocks of 1 thread each).
int test3()
{
	int a[NUM] = {0}, b[NUM] = {0}, c[NUM] = {0};
	int *dev_a = NULL, *dev_b = NULL, *dev_c = NULL;
	//allocate the memory on the GPU
	HANDLE_ERROR(cudaMalloc((void**)&dev_a, NUM * sizeof(int)));
	HANDLE_ERROR(cudaMalloc((void**)&dev_b, NUM * sizeof(int)));
	HANDLE_ERROR(cudaMalloc((void**)&dev_c, NUM * sizeof(int)));
	//fill the arrays 'a' and 'b' on the CPU
	for (int i=0; i<NUM; i++) {
		a[i] = -i;
		b[i] = i * i;
	}
	//copy the arrays 'a' and 'b' to the GPU
	HANDLE_ERROR(cudaMemcpy(dev_a, a, NUM * sizeof(int), cudaMemcpyHostToDevice));
	HANDLE_ERROR(cudaMemcpy(dev_b, b, NUM * sizeof(int), cudaMemcpyHostToDevice));
	//the first parameter in the angle brackets is the number of parallel
	//thread blocks the device uses to execute the kernel
	add_blockIdx<<<NUM,1>>>( dev_a, dev_b, dev_c );
	//copy the array 'c' back from the GPU to the CPU
	HANDLE_ERROR(cudaMemcpy(c, dev_c, NUM * sizeof(int), cudaMemcpyDeviceToHost));
	//display the results
	for (int i=0; i<NUM; i++) {
		printf( "%d + %d = %d\n", a[i], b[i], c[i] );
	}
	//free the memory allocated on the GPU
	HANDLE_ERROR(cudaFree(dev_a));
	HANDLE_ERROR(cudaFree(dev_b));
	HANDLE_ERROR(cudaFree(dev_c));
	return 0;
}
//Render the Julia set: one thread block per pixel via a 2D grid,
//then copy the bitmap back to the host and display it.
int test4()
{
	//globals needed by the update routine
	struct DataBlock {
		unsigned char* dev_bitmap;
	};
	DataBlock data;
	CPUBitmap bitmap(DIM, DIM, &data);
	unsigned char* dev_bitmap;
	HANDLE_ERROR(cudaMalloc((void**)&dev_bitmap, bitmap.image_size()));
	data.dev_bitmap = dev_bitmap;
	//declare a two-dimensional grid of thread blocks;
	//dim3 is a three-component vector type used to specify launch dimensions:
	//when initialized with two values the runtime sets the third dimension to 1
	dim3 grid(DIM, DIM);
	kernel_julia<<<grid,1>>>(dev_bitmap);
	HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(), dev_bitmap,
		bitmap.image_size(), cudaMemcpyDeviceToHost));
	HANDLE_ERROR(cudaFree(dev_bitmap));
	bitmap.display_and_exit();
	return 0;
}
//Vector addition where a single block's threads each handle one element
//(add_threadIdx is launched with 1 block of NUM threads).
int test5()
{
	int a[NUM], b[NUM], c[NUM];
	int *dev_a = NULL, *dev_b = NULL, *dev_c = NULL;
	//allocate memory on the GPU
	HANDLE_ERROR(cudaMalloc((void**)&dev_a, NUM * sizeof(int)));
	HANDLE_ERROR(cudaMalloc((void**)&dev_b, NUM * sizeof(int)));
	HANDLE_ERROR(cudaMalloc((void**)&dev_c, NUM * sizeof(int)));
	//fill the arrays 'a' and 'b' on the CPU
	for (int i = 0; i < NUM; i++) {
		a[i] = i;
		b[i] = i * i;
	}
	//copy the arrays 'a' and 'b' to the GPU
	HANDLE_ERROR(cudaMemcpy(dev_a, a, NUM * sizeof(int), cudaMemcpyHostToDevice));
	HANDLE_ERROR(cudaMemcpy(dev_b, b, NUM * sizeof(int), cudaMemcpyHostToDevice));
	add_threadIdx<<<1, NUM>>>(dev_a, dev_b, dev_c);
	//copy the array 'c' from the GPU back to the CPU
	HANDLE_ERROR(cudaMemcpy(c, dev_c, NUM * sizeof(int), cudaMemcpyDeviceToHost));
	//display the results
	for (int i = 0; i < NUM; i++) {
		printf("%d + %d = %d\n", a[i], b[i], c[i]);
	}
	//free the memory allocated on the GPU; the frees are now error-checked
	//for consistency with every other runtime call in this function
	HANDLE_ERROR(cudaFree(dev_a));
	HANDLE_ERROR(cudaFree(dev_b));
	HANDLE_ERROR(cudaFree(dev_c));
	return 0;
}
//Vector addition combining block and thread indices: 128 blocks of 128
//threads; the result is verified element-by-element on the CPU.
int test6()
{
	int a[NUM], b[NUM], c[NUM];
	int *dev_a = NULL, *dev_b = NULL, *dev_c = NULL;
	//allocate memory on the GPU
	HANDLE_ERROR(cudaMalloc((void**)&dev_a, NUM * sizeof(int)));
	HANDLE_ERROR(cudaMalloc((void**)&dev_b, NUM * sizeof(int)));
	HANDLE_ERROR(cudaMalloc((void**)&dev_c, NUM * sizeof(int)));
	//fill the arrays 'a' and 'b' on the CPU
	for (int i = 0; i < NUM; i++) {
		a[i] = i;
		b[i] = i * i / 10;
	}
	//copy the arrays 'a' and 'b' to the GPU
	HANDLE_ERROR(cudaMemcpy(dev_a, a, NUM * sizeof(int), cudaMemcpyHostToDevice));
	HANDLE_ERROR(cudaMemcpy(dev_b, b, NUM * sizeof(int), cudaMemcpyHostToDevice));
	add_blockIdx_threadIdx<<<128, 128>>>(dev_a, dev_b, dev_c);
	//copy the array 'c' from the GPU back to the CPU
	HANDLE_ERROR(cudaMemcpy(c, dev_c, NUM * sizeof(int), cudaMemcpyDeviceToHost));
	//verify that the GPU really did the work we asked for
	bool success = true;
	for (int i = 0; i < NUM; i++) {
		if ((a[i] + b[i]) != c[i]) {
			printf("error: %d + %d != %d\n", a[i], b[i], c[i]);
			success = false;
		}
	}
	if (success)
		printf("we did it!\n");
	//free the memory allocated on the GPU
	cudaFree(dev_a);
	cudaFree(dev_b);
	cudaFree(dev_c);
	return 0;
}
//Ripple animation on the CPU-displayed bitmap: allocates the device frame
//buffer and hands control to the animation loop (generate_frame per frame,
//cleanup on exit).
int test7()
{
	DataBlock data;
	CPUAnimBitmap bitmap(DIM, DIM, &data);
	data.bitmap = &bitmap;
	HANDLE_ERROR(cudaMalloc((void**)&data.dev_bitmap, bitmap.image_size()));
	bitmap.anim_and_exit((void(*)(void*,int))generate_frame, (void(*)(void*))cleanup);
	return 0;
}
//Per-frame callback for the ripple demo: render the frame for time 'ticks'
//into the device bitmap, then copy it to the host for display.
void generate_frame(DataBlock *d, int ticks)
{
	dim3 blocks(DIM/16, DIM/16);
	dim3 threads(16, 16);
	ripple_kernel<<<blocks,threads>>>(d->dev_bitmap, ticks);
	HANDLE_ERROR(cudaMemcpy(d->bitmap->get_ptr(), d->dev_bitmap, d->bitmap->image_size(), cudaMemcpyDeviceToHost));
}
//clean up memory allocated on the GPU when the ripple animation exits
void cleanup(DataBlock *d)
{
	HANDLE_ERROR(cudaFree(d->dev_bitmap));
}
//Dot product on the GPU: dot_kernel produces one partial sum per block,
//the CPU finishes the reduction and checks it against the closed-form value.
int test8()
{
	float *a, *b, c, *partial_c;
	float *dev_a, *dev_b, *dev_partial_c;
	//allocate memory on the cpu side
	a = (float*)malloc(NUM * sizeof(float));
	b = (float*)malloc(NUM * sizeof(float));
	partial_c = (float*)malloc(blocksPerGrid * sizeof(float));
	//allocate the memory on the GPU
	HANDLE_ERROR(cudaMalloc((void**)&dev_a, NUM * sizeof(float)));
	HANDLE_ERROR(cudaMalloc((void**)&dev_b, NUM * sizeof(float)));
	HANDLE_ERROR(cudaMalloc((void**)&dev_partial_c, blocksPerGrid*sizeof(float)));
	//fill in the host memory with data
	for (int i = 0; i < NUM; i++) {
		a[i] = i;
		b[i] = i*2;
	}
	//copy the arrays 'a' and 'b' to the GPU
	HANDLE_ERROR(cudaMemcpy(dev_a, a, NUM * sizeof(float), cudaMemcpyHostToDevice));
	HANDLE_ERROR(cudaMemcpy(dev_b, b, NUM * sizeof(float), cudaMemcpyHostToDevice));
	dot_kernel<<<blocksPerGrid,threadsPerBlock>>>(dev_a, dev_b, dev_partial_c);
	//copy the array 'c' back from the GPU to the CPU
	HANDLE_ERROR(cudaMemcpy(partial_c, dev_partial_c, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost));
	//finish up on the CPU side
	c = 0;
	for (int i = 0; i < blocksPerGrid; i++) {
		c += partial_c[i];
	}
	//the dot product should equal twice the sum of the squares of 0..NUM-1
	//(closed-form solution for the sum of squares)
	#define sum_squares(x) (x * (x + 1) * (2 * x + 1) / 6)
	printf("Does GPU value %.6g = %.6g?\n", c, 2 * sum_squares((float)(NUM - 1)));
	//free memory on the gpu side
	HANDLE_ERROR(cudaFree(dev_a));
	HANDLE_ERROR(cudaFree(dev_b));
	HANDLE_ERROR(cudaFree(dev_partial_c));
	//free memory on the cpu side
	free(a);
	free(b);
	free(partial_c);
	return 0;
}
//Julia set rendered with a 2D grid of 16x16 thread blocks
//(julia_kernel — the variant that uses __syncthreads()).
int test9()
{
	DataBlock data;
	CPUBitmap bitmap(DIM, DIM, &data);
	unsigned char *dev_bitmap;
	HANDLE_ERROR(cudaMalloc((void**)&dev_bitmap, bitmap.image_size()));
	data.dev_bitmap = dev_bitmap;
	dim3 grids(DIM / 16, DIM / 16);
	dim3 threads(16,16);
	julia_kernel<<<grids, threads>>>(dev_bitmap);
	HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost));
	HANDLE_ERROR(cudaFree(dev_bitmap));
	bitmap.display_and_exit();
	return 0;
}
//Ray tracing WITHOUT constant memory: the sphere array lives in global
//memory and is passed to the kernel as a pointer. The whole sequence
//(allocation, copy, kernel, copy back) is timed with CUDA events.
int test10()
{
	DataBlock data;
	//capture the start time
	cudaEvent_t start, stop;
	HANDLE_ERROR(cudaEventCreate(&start));
	HANDLE_ERROR(cudaEventCreate(&stop));
	HANDLE_ERROR(cudaEventRecord(start, 0));
	CPUBitmap bitmap(DIM, DIM, &data);
	unsigned char *dev_bitmap;
	Sphere *s;
	//allocate memory on the GPU for the output bitmap
	HANDLE_ERROR(cudaMalloc((void**)&dev_bitmap, bitmap.image_size()));
	//allocate memory for the Sphere dataset
	HANDLE_ERROR(cudaMalloc((void**)&s, sizeof(Sphere) * SPHERES));
	//allocate temp memory, initialize it, copy to memory on the GPU, then free our temp memory
	Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere) * SPHERES);
	for (int i = 0; i < SPHERES; i++) {
		temp_s[i].r = rnd(1.0f);
		temp_s[i].g = rnd(1.0f);
		temp_s[i].b = rnd(1.0f);
		temp_s[i].x = rnd(1000.0f) - 500;
		temp_s[i].y = rnd(1000.0f) - 500;
		temp_s[i].z = rnd(1000.0f) - 500;
		temp_s[i].radius = rnd(100.0f) + 20;
	}
	HANDLE_ERROR(cudaMemcpy( s, temp_s, sizeof(Sphere) * SPHERES, cudaMemcpyHostToDevice));
	free(temp_s);
	//generate a bitmap from our sphere data
	dim3 grids(DIM / 16, DIM / 16);
	dim3 threads(16, 16);
	RayTracing_kernel<<<grids, threads>>>(s, dev_bitmap);
	//copy our bitmap back from the GPU for display
	HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost));
	//get stop time, and display the timing results
	HANDLE_ERROR(cudaEventRecord(stop, 0));
	HANDLE_ERROR(cudaEventSynchronize(stop));
	float elapsedTime;
	HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop));
	printf("Time to generate: %3.1f ms\n", elapsedTime);
	HANDLE_ERROR(cudaEventDestroy(start));
	HANDLE_ERROR(cudaEventDestroy(stop));
	HANDLE_ERROR(cudaFree(dev_bitmap));
	HANDLE_ERROR(cudaFree(s));
	// display
	bitmap.display_and_exit();
	return 0;
}
//Ray tracing WITH constant memory: the sphere array is uploaded into the
//__constant__ symbol 's' via cudaMemcpyToSymbol, so the kernel takes only
//the bitmap pointer. Timed with CUDA events for comparison against test10.
int test11()
{
	DataBlock data;
	//capture the start time
	cudaEvent_t start, stop;
	HANDLE_ERROR(cudaEventCreate(&start));
	HANDLE_ERROR(cudaEventCreate(&stop));
	HANDLE_ERROR(cudaEventRecord(start, 0));
	CPUBitmap bitmap(DIM, DIM, &data);
	unsigned char *dev_bitmap;
	//allocate memory on the GPU for the output bitmap
	HANDLE_ERROR(cudaMalloc((void**)&dev_bitmap, bitmap.image_size()));
	//allocate temp memory, initialize it, copy to constant memory on the GPU, then free temp memory
	Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere) * SPHERES);
	for (int i = 0; i < SPHERES; i++) {
		temp_s[i].r = rnd(1.0f);
		temp_s[i].g = rnd(1.0f);
		temp_s[i].b = rnd(1.0f);
		temp_s[i].x = rnd(1000.0f) - 500;
		temp_s[i].y = rnd(1000.0f) - 500;
		temp_s[i].z = rnd(1000.0f) - 500;
		temp_s[i].radius = rnd(100.0f) + 20;
	}
	HANDLE_ERROR(cudaMemcpyToSymbol(s, temp_s, sizeof(Sphere) * SPHERES));
	free(temp_s);
	//generate a bitmap from our sphere data
	dim3 grids(DIM / 16, DIM / 16);
	dim3 threads(16, 16);
	RayTracing_kernel<<<grids, threads>>>(dev_bitmap);
	//copy our bitmap back from the GPU for display
	HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost));
	//get stop time, and display the timing results
	HANDLE_ERROR(cudaEventRecord(stop, 0));
	HANDLE_ERROR(cudaEventSynchronize(stop));
	float elapsedTime;
	HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop));
	printf("Time to generate: %3.1f ms\n", elapsedTime);
	HANDLE_ERROR(cudaEventDestroy(start));
	HANDLE_ERROR(cudaEventDestroy(stop));
	HANDLE_ERROR(cudaFree(dev_bitmap));
	//display
	bitmap.display_and_exit();
	return 0;
}
// Heat-transfer animation using 1D texture references (texConstSrc/texIn/
// texOut): seeds a constant heater pattern and an initial temperature
// field, then enters the animation loop (Heat_anim_gpu per frame,
// Heat_anim_exit on shutdown).
// NOTE(review): the hard-coded cell coordinates reach row 899, so this
// assumes DIM >= 900 (presumably DIM is 1024) -- confirm against the #define.
int test12()
{
Heat_DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
data.totalTime = 0;
data.frames = 0;
HANDLE_ERROR(cudaEventCreate(&data.start));
HANDLE_ERROR(cudaEventCreate(&data.stop));
int imageSize = bitmap.image_size();
HANDLE_ERROR(cudaMalloc((void**)&data.output_bitmap, imageSize));
//assume float == 4 chars in size (ie rgba)
HANDLE_ERROR(cudaMalloc((void**)&data.dev_inSrc, imageSize));
HANDLE_ERROR(cudaMalloc((void**)&data.dev_outSrc, imageSize));
HANDLE_ERROR(cudaMalloc((void**)&data.dev_constSrc, imageSize));
// bind each device buffer to its linear (1D) texture reference
HANDLE_ERROR(cudaBindTexture(NULL, texConstSrc, data.dev_constSrc, imageSize));
HANDLE_ERROR(cudaBindTexture(NULL, texIn, data.dev_inSrc, imageSize));
HANDLE_ERROR(cudaBindTexture(NULL, texOut, data.dev_outSrc, imageSize));
//intialize the constant data
float *temp = (float*)malloc(imageSize);
for (int i = 0; i < DIM*DIM; i++) {
temp[i] = 0;
int x = i % DIM;
int y = i / DIM;
if ((x>300) && (x<600) && (y>310) && (y<601))
temp[i] = MAX_TEMP;
}
// a few point heaters/coolers plus one cold square region
temp[DIM * 100 + 100] = (MAX_TEMP + MIN_TEMP) / 2;
temp[DIM * 700 + 100] = MIN_TEMP;
temp[DIM * 300 + 300] = MIN_TEMP;
temp[DIM * 200 + 700] = MIN_TEMP;
for (int y = 800; y < 900; y++) {
for (int x = 400; x < 500; x++) {
temp[x + y * DIM] = MIN_TEMP;
}
}
HANDLE_ERROR(cudaMemcpy(data.dev_constSrc, temp, imageSize, cudaMemcpyHostToDevice));
//initialize the input data
// reuse the same host buffer: add a hot strip along the bottom-left
for (int y = 800; y < DIM; y++) {
for (int x = 0; x < 200; x++) {
temp[x+y*DIM] = MAX_TEMP;
}
}
HANDLE_ERROR(cudaMemcpy(data.dev_inSrc, temp,imageSize, cudaMemcpyHostToDevice));
free(temp);
bitmap.anim_and_exit((void (*)(void*,int))Heat_anim_gpu, (void (*)(void*))Heat_anim_exit);
return 0;
}
// Same heat-transfer setup as test12, but using 2D texture references
// (texConstSrc2/texIn2/texOut2) bound via cudaBindTexture2D with a float
// channel descriptor; runs with the anim_gpu / anim_exit callbacks.
// NOTE(review): as in test12, the heater coordinates assume DIM >= 900.
int test13()
{
Heat_DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
data.totalTime = 0;
data.frames = 0;
HANDLE_ERROR(cudaEventCreate(&data.start));
HANDLE_ERROR(cudaEventCreate(&data.stop));
int imageSize = bitmap.image_size();
HANDLE_ERROR(cudaMalloc((void**)&data.output_bitmap, imageSize));
//assume float == 4 chars in size (ie rgba)
HANDLE_ERROR(cudaMalloc((void**)&data.dev_inSrc, imageSize));
HANDLE_ERROR(cudaMalloc((void**)&data.dev_outSrc, imageSize));
HANDLE_ERROR(cudaMalloc((void**)&data.dev_constSrc, imageSize));
// 2D bindings need a channel descriptor and a row pitch (tightly packed here)
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
HANDLE_ERROR(cudaBindTexture2D(NULL, texConstSrc2, data.dev_constSrc, desc, DIM, DIM, sizeof(float) * DIM));
HANDLE_ERROR(cudaBindTexture2D(NULL, texIn2, data.dev_inSrc, desc, DIM, DIM, sizeof(float) * DIM));
HANDLE_ERROR(cudaBindTexture2D(NULL, texOut2, data.dev_outSrc, desc, DIM, DIM, sizeof(float) * DIM));
//initialize the constant data
float *temp = (float*)malloc(imageSize);
for (int i = 0; i < DIM*DIM; i++) {
temp[i] = 0;
int x = i % DIM;
int y = i / DIM;
if ((x > 300) && ( x < 600) && (y > 310) && (y < 601))
temp[i] = MAX_TEMP;
}
// point heaters/coolers plus a cold square, as in test12
temp[DIM * 100 + 100] = (MAX_TEMP + MIN_TEMP) / 2;
temp[DIM * 700 + 100] = MIN_TEMP;
temp[DIM * 300 + 300] = MIN_TEMP;
temp[DIM * 200 + 700] = MIN_TEMP;
for (int y = 800; y < 900; y++) {
for (int x = 400; x < 500; x++) {
temp[x + y * DIM] = MIN_TEMP;
}
}
HANDLE_ERROR(cudaMemcpy(data.dev_constSrc, temp, imageSize, cudaMemcpyHostToDevice));
//initialize the input data
for (int y = 800; y < DIM; y++) {
for (int x = 0; x < 200; x++) {
temp[x + y * DIM] = MAX_TEMP;
}
}
HANDLE_ERROR(cudaMemcpy(data.dev_inSrc, temp,imageSize, cudaMemcpyHostToDevice));
free(temp);
bitmap.anim_and_exit((void (*)(void*,int))anim_gpu, (void (*)(void*))anim_exit);
return 0;
}
// Per-frame callback for the 1D-texture heat demo: runs 90 update steps,
// ping-ponging between dev_inSrc and dev_outSrc via the dstOut flag, then
// colorizes the field and copies it into the host bitmap. Accumulates GPU
// time across frames and prints the running average.
void Heat_anim_gpu(Heat_DataBlock *d, int ticks)
{
HANDLE_ERROR(cudaEventRecord(d->start, 0));
dim3 blocks(DIM / 16, DIM / 16);
dim3 threads(16, 16);
CPUAnimBitmap *bitmap = d->bitmap;
//since tex is global and bound, we have to use a flag to
//select which is in/out per iteration
volatile bool dstOut = true;
for (int i = 0; i < 90; i++) {
float *in, *out;
if (dstOut) {
in = d->dev_inSrc;
out = d->dev_outSrc;
} else {
out = d->dev_inSrc;
in = d->dev_outSrc;
}
// re-apply the constant heater cells, then compute one blend step
Heat_copy_const_kernel<<<blocks, threads>>>(in);
Heat_blend_kernel<<<blocks, threads>>>(out, dstOut);
dstOut = !dstOut;
}
// 90 iterations is even, so the final result ends up back in dev_inSrc
float_to_color<<<blocks, threads>>>(d->output_bitmap, d->dev_inSrc);
HANDLE_ERROR(cudaMemcpy(bitmap->get_ptr(), d->output_bitmap, bitmap->image_size(), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaEventRecord(d->stop, 0));
HANDLE_ERROR(cudaEventSynchronize(d->stop));
float elapsedTime;
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, d->start, d->stop));
d->totalTime += elapsedTime;
++d->frames;
printf( "Average Time per frame: %3.1f ms\n", d->totalTime/d->frames );
}
// Per-frame callback for the 2D-texture heat demo; identical structure to
// Heat_anim_gpu but drives the 2D-texture kernels (copy_const_kernel /
// blend_kernel). 90 ping-pong steps, colorize, copy to host, report timing.
void anim_gpu(Heat_DataBlock *d, int ticks)
{
HANDLE_ERROR(cudaEventRecord(d->start, 0));
dim3 blocks(DIM / 16, DIM / 16);
dim3 threads(16, 16);
CPUAnimBitmap *bitmap = d->bitmap;
//since tex is global and bound, we have to use a flag to
//select which is in/out per iteration
volatile bool dstOut = true;
for (int i = 0; i < 90; i++) {
float *in, *out;
if (dstOut) {
in = d->dev_inSrc;
out = d->dev_outSrc;
} else {
out = d->dev_inSrc;
in = d->dev_outSrc;
}
copy_const_kernel<<<blocks, threads>>>(in);
blend_kernel<<<blocks, threads>>>(out, dstOut);
dstOut = !dstOut;
}
// even number of steps: the latest field is back in dev_inSrc
float_to_color<<<blocks, threads>>>(d->output_bitmap, d->dev_inSrc);
HANDLE_ERROR(cudaMemcpy(bitmap->get_ptr(), d->output_bitmap, bitmap->image_size(), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaEventRecord(d->stop, 0));
HANDLE_ERROR(cudaEventSynchronize(d->stop));
float elapsedTime;
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, d->start, d->stop));
d->totalTime += elapsedTime;
++d->frames;
printf("Average Time per frame: %3.1f ms\n", d->totalTime/d->frames);
}
// Teardown for the 1D-texture heat demo: unbind the texture references,
// release the device buffers, and destroy the timing events.
void Heat_anim_exit(Heat_DataBlock *d)
{
// Wrap the unbinds in HANDLE_ERROR for consistency with anim_exit_opengl;
// previously their return codes were silently discarded.
HANDLE_ERROR(cudaUnbindTexture(texIn));
HANDLE_ERROR(cudaUnbindTexture(texOut));
HANDLE_ERROR(cudaUnbindTexture(texConstSrc));
HANDLE_ERROR(cudaFree(d->dev_inSrc));
HANDLE_ERROR(cudaFree(d->dev_outSrc));
HANDLE_ERROR(cudaFree(d->dev_constSrc));
HANDLE_ERROR(cudaEventDestroy(d->start));
HANDLE_ERROR(cudaEventDestroy(d->stop));
}
//clean up memory allocated on the GPU
// Teardown for the 2D-texture heat demo: unbind the 2D texture references,
// release the device buffers, and destroy the timing events.
void anim_exit(Heat_DataBlock *d)
{
// Wrap the unbinds in HANDLE_ERROR for consistency with anim_exit_opengl;
// previously their return codes were silently discarded.
HANDLE_ERROR(cudaUnbindTexture(texIn2));
HANDLE_ERROR(cudaUnbindTexture(texOut2));
HANDLE_ERROR(cudaUnbindTexture(texConstSrc2));
HANDLE_ERROR(cudaFree(d->dev_inSrc));
HANDLE_ERROR(cudaFree(d->dev_outSrc));
HANDLE_ERROR(cudaFree(d->dev_constSrc));
HANDLE_ERROR(cudaEventDestroy(d->start));
HANDLE_ERROR(cudaEventDestroy(d->stop));
}
// Minimal OpenGL-interop animation: GPUAnimBitmap calls
// generate_frame_opengl each frame and renders directly from GPU memory,
// so there is no per-frame host copy and no extra state to tear down.
int test14()
{
GPUAnimBitmap animBitmap(DIM, DIM, NULL);
animBitmap.anim_and_exit((void (*)(uchar4*, void*, int))generate_frame_opengl, NULL);
return 0;
}
// OpenGL-interop version of the heat demo: same 1D texture setup and
// initial field as test12, but the frame is rendered straight into the
// OpenGL buffer by anim_gpu_opengl (no output_bitmap / host copy needed).
int test15()
{
DataBlock_opengl data;
GPUAnimBitmap bitmap(DIM, DIM, &data);
data.totalTime = 0;
data.frames = 0;
HANDLE_ERROR(cudaEventCreate(&data.start));
HANDLE_ERROR(cudaEventCreate(&data.stop));
int imageSize = bitmap.image_size();
//assume float == 4 chars in size (ie rgba)
HANDLE_ERROR(cudaMalloc((void**)&data.dev_inSrc, imageSize));
HANDLE_ERROR(cudaMalloc((void**)&data.dev_outSrc, imageSize));
HANDLE_ERROR(cudaMalloc((void**)&data.dev_constSrc, imageSize));
HANDLE_ERROR(cudaBindTexture(NULL, texConstSrc ,data.dev_constSrc, imageSize));
HANDLE_ERROR(cudaBindTexture(NULL, texIn, data.dev_inSrc, imageSize));
HANDLE_ERROR(cudaBindTexture(NULL, texOut, data.dev_outSrc, imageSize));
//intialize the constant data
float *temp = (float*)malloc(imageSize);
for (int i = 0; i < DIM*DIM; i++) {
temp[i] = 0;
int x = i % DIM;
int y = i / DIM;
if ((x>300) && (x<600) && (y>310) && (y<601))
temp[i] = MAX_TEMP;
}
// point heaters/coolers plus a cold square, as in test12/test13
temp[DIM*100+100] = (MAX_TEMP + MIN_TEMP)/2;
temp[DIM*700+100] = MIN_TEMP;
temp[DIM*300+300] = MIN_TEMP;
temp[DIM*200+700] = MIN_TEMP;
for (int y = 800; y < 900; y++) {
for (int x = 400; x < 500; x++) {
temp[x+y*DIM] = MIN_TEMP;
}
}
HANDLE_ERROR(cudaMemcpy(data.dev_constSrc, temp, imageSize, cudaMemcpyHostToDevice));
//initialize the input data
for (int y = 800; y < DIM; y++) {
for (int x = 0; x < 200; x++) {
temp[x+y*DIM] = MAX_TEMP;
}
}
HANDLE_ERROR(cudaMemcpy(data.dev_inSrc, temp, imageSize, cudaMemcpyHostToDevice));
free(temp);
bitmap.anim_and_exit((void (*)(uchar4*, void*, int))anim_gpu_opengl, (void (*)(void*))anim_exit_opengl);
return 0;
}
// Per-frame callback for the OpenGL-interop heat demo: same 90-step
// ping-pong as Heat_anim_gpu, but float_to_color writes directly into the
// OpenGL-mapped output buffer, so no device-to-host copy is needed.
void anim_gpu_opengl(uchar4* outputBitmap, DataBlock_opengl *d, int ticks)
{
HANDLE_ERROR(cudaEventRecord(d->start, 0));
dim3 blocks(DIM / 16, DIM / 16);
dim3 threads(16, 16);
//since tex is global and bound, we have to use a flag to select which is in/out per iteration
volatile bool dstOut = true;
for (int i = 0; i < 90; i++) {
float *in, *out;
if (dstOut) {
in = d->dev_inSrc;
out = d->dev_outSrc;
} else {
out = d->dev_inSrc;
in = d->dev_outSrc;
}
Heat_copy_const_kernel_opengl<<<blocks, threads>>>(in);
Heat_blend_kernel_opengl<<<blocks, threads>>>(out, dstOut);
dstOut = !dstOut;
}
// even number of steps: the latest field is back in dev_inSrc
float_to_color<<<blocks, threads>>>(outputBitmap, d->dev_inSrc);
HANDLE_ERROR(cudaEventRecord(d->stop, 0));
HANDLE_ERROR(cudaEventSynchronize(d->stop));
float elapsedTime;
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, d->start, d->stop));
d->totalTime += elapsedTime;
++d->frames;
printf("Average Time per frame: %3.1f ms\n", d->totalTime/d->frames);
}
// Teardown for the OpenGL-interop heat demo: unbind the texture
// references, free the device buffers, and destroy the timing events.
// All return codes are routed through HANDLE_ERROR.
void anim_exit_opengl(DataBlock_opengl *d)
{
HANDLE_ERROR(cudaUnbindTexture(texIn));
HANDLE_ERROR(cudaUnbindTexture(texOut));
HANDLE_ERROR(cudaUnbindTexture(texConstSrc));
HANDLE_ERROR(cudaFree(d->dev_inSrc));
HANDLE_ERROR(cudaFree(d->dev_outSrc));
HANDLE_ERROR(cudaFree(d->dev_constSrc));
HANDLE_ERROR(cudaEventDestroy(d->start));
HANDLE_ERROR(cudaEventDestroy(d->stop));
}
// Histogram demo: builds a 256-bin histogram of SIZE random bytes on the
// GPU, times the full round trip (transfer + kernel + copy back), then
// verifies the counts against a CPU pass by decrementing each bin.
int test16()
{
unsigned char *buffer = (unsigned char*)big_random_block(SIZE);
//capture the start time starting the timer here so that we include the cost of
//all of the operations on the GPU. if the data were already on the GPU and we just
//timed the kernel the timing would drop from 74 ms to 15 ms. Very fast.
cudaEvent_t start, stop;
HANDLE_ERROR( cudaEventCreate( &start ) );
HANDLE_ERROR( cudaEventCreate( &stop ) );
HANDLE_ERROR( cudaEventRecord( start, 0 ) );
// allocate memory on the GPU for the file's data
unsigned char *dev_buffer;
unsigned int *dev_histo;
HANDLE_ERROR(cudaMalloc((void**)&dev_buffer, SIZE));
HANDLE_ERROR(cudaMemcpy(dev_buffer, buffer, SIZE, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMalloc((void**)&dev_histo, 256 * sizeof(int)));
HANDLE_ERROR(cudaMemset(dev_histo, 0, 256 * sizeof(int)));
//kernel launch - 2x the number of mps gave best timing
cudaDeviceProp prop;
HANDLE_ERROR(cudaGetDeviceProperties(&prop, 0));
int blocks = prop.multiProcessorCount;
histo_kernel<<<blocks*2, 256>>>(dev_buffer, SIZE, dev_histo);
// a kernel launch returns no status; query it explicitly so a bad
// configuration does not fail silently (previously unchecked)
HANDLE_ERROR(cudaGetLastError());
unsigned int histo[256];
HANDLE_ERROR(cudaMemcpy(histo, dev_histo, 256 * sizeof(int), cudaMemcpyDeviceToHost));
//get stop time, and display the timing results
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
float elapsedTime;
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("Time to generate: %3.1f ms\n", elapsedTime);
long histoCount = 0;
for (int i=0; i<256; i++) {
histoCount += histo[i];
}
printf("Histogram Sum: %ld\n", histoCount);
//verify that we have the same counts via CPU
for (int i = 0; i < SIZE; i++)
histo[buffer[i]]--;
for (int i = 0; i < 256; i++) {
if (histo[i] != 0)
printf("Failure at %d!\n", i);
}
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(stop));
// route the frees through HANDLE_ERROR for consistency with the rest of
// this function (their return codes were previously discarded)
HANDLE_ERROR(cudaFree(dev_histo));
HANDLE_ERROR(cudaFree(dev_buffer));
free(buffer);
return 0;
}
// Bandwidth benchmark: performs 100 cudaMemcpy transfers of `size` ints
// between a pageable (malloc'd) host buffer and the device. `up` selects
// the direction: true = host-to-device, false = device-to-host. Returns
// the elapsed time in ms. The buffer contents are never initialized --
// only transfer speed is measured.
float cuda_malloc_test(int size, bool up)
{
cudaEvent_t start, stop;
int *a, *dev_a;
float elapsedTime;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
a = (int*)malloc(size * sizeof(*a));
HANDLE_NULL(a);
HANDLE_ERROR(cudaMalloc((void**)&dev_a,size * sizeof(*dev_a)));
HANDLE_ERROR(cudaEventRecord(start, 0));
for (int i=0; i<100; i++) {
if (up)
HANDLE_ERROR(cudaMemcpy(dev_a, a, size * sizeof( *dev_a ), cudaMemcpyHostToDevice));
else
HANDLE_ERROR(cudaMemcpy(a, dev_a, size * sizeof(*dev_a), cudaMemcpyDeviceToHost));
}
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop));
free(a);
HANDLE_ERROR(cudaFree(dev_a));
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(stop));
return elapsedTime;
}
// Same bandwidth benchmark as cuda_malloc_test, but the host buffer is
// page-locked (cudaHostAlloc), which permits faster DMA transfers.
// `up` selects the copy direction; returns elapsed ms. The pinned buffer
// must be released with cudaFreeHost, not free().
float cuda_host_alloc_test(int size, bool up)
{
cudaEvent_t start, stop;
int *a, *dev_a;
float elapsedTime;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
HANDLE_ERROR(cudaHostAlloc((void**)&a,size * sizeof(*a), cudaHostAllocDefault));
HANDLE_ERROR(cudaMalloc((void**)&dev_a, size * sizeof(*dev_a)));
HANDLE_ERROR(cudaEventRecord(start, 0));
for (int i=0; i<100; i++) {
if (up)
HANDLE_ERROR(cudaMemcpy(dev_a, a,size * sizeof(*a), cudaMemcpyHostToDevice));
else
HANDLE_ERROR(cudaMemcpy(a, dev_a,size * sizeof(*a), cudaMemcpyDeviceToHost));
}
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop));
HANDLE_ERROR(cudaFreeHost(a));
HANDLE_ERROR(cudaFree(dev_a));
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(stop));
return elapsedTime;
}
// Bandwidth comparison: times 100 transfers of SIZE ints with pageable
// (malloc) versus page-locked (cudaHostAlloc) host memory, in both
// directions, and reports each measurement in ms and MB/s.
int test17()
{
// total data moved per measurement: 100 copies of SIZE ints, in MB
const float MB = (float)100 * SIZE * sizeof(int) / 1024 / 1024;
float ms;
// pageable host memory
ms = cuda_malloc_test(SIZE, true);
printf("Time using cudaMalloc: %3.1f ms\n", ms);
printf("\tMB/s during copy up: %3.1f\n", MB/(ms/1000));
ms = cuda_malloc_test(SIZE, false);
printf("Time using cudaMalloc: %3.1f ms\n", ms);
printf("\tMB/s during copy down: %3.1f\n", MB/(ms/1000));
// page-locked host memory
ms = cuda_host_alloc_test(SIZE, true);
printf("Time using cudaHostAlloc: %3.1f ms\n", ms);
printf("\tMB/s during copy up: %3.1f\n", MB/(ms/1000));
ms = cuda_host_alloc_test(SIZE, false);
printf("Time using cudaHostAlloc: %3.1f ms\n", ms);
printf("\tMB/s during copy down: %3.1f\n", MB/(ms/1000));
return 0;
}
// Single-stream chunked pipeline: processes FULL_DATA_SIZE ints in chunks
// of NUM, enqueueing async H2D copies, a kernel, and an async D2H copy in
// one stream, then synchronizing once at the end. Requires page-locked
// host memory for cudaMemcpyAsync; bails out if the device cannot overlap
// copy and compute.
// NOTE(review): the loop assumes FULL_DATA_SIZE is a multiple of NUM and
// NUM a multiple of 256 -- confirm against the #defines.
int test18()
{
cudaDeviceProp prop;
int whichDevice;
HANDLE_ERROR(cudaGetDevice(&whichDevice));
HANDLE_ERROR(cudaGetDeviceProperties(&prop, whichDevice));
if (!prop.deviceOverlap) {
printf("Device will not handle overlaps, so no speed up from streams\n");
return 0;
}
cudaEvent_t start, stop;
float elapsedTime;
cudaStream_t stream;
int *host_a, *host_b, *host_c;
int *dev_a, *dev_b, *dev_c;
//start the timers
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
//initialize the stream
HANDLE_ERROR(cudaStreamCreate(&stream));
//allocate the memory on the GPU
// only one chunk (NUM ints) lives on the device at a time
HANDLE_ERROR(cudaMalloc((void**)&dev_a, NUM * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_b, NUM * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_c, NUM * sizeof(int)));
//allocate host locked memory, used to stream
HANDLE_ERROR(cudaHostAlloc((void**)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault));
HANDLE_ERROR(cudaHostAlloc((void**)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault));
HANDLE_ERROR(cudaHostAlloc((void**)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault));
for (int i=0; i<FULL_DATA_SIZE; i++) {
host_a[i] = rand();
host_b[i] = rand();
}
HANDLE_ERROR(cudaEventRecord(start, 0));
//now loop over full data, in bite-sized chunks
for (int i=0; i<FULL_DATA_SIZE; i+= NUM) {
//copy the locked memory to the device, async
HANDLE_ERROR(cudaMemcpyAsync(dev_a, host_a+i, NUM * sizeof(int), cudaMemcpyHostToDevice, stream));
HANDLE_ERROR(cudaMemcpyAsync(dev_b, host_b+i, NUM * sizeof(int), cudaMemcpyHostToDevice, stream));
singlestream_kernel<<<NUM/256, 256, 0, stream>>>(dev_a, dev_b, dev_c);
//copy the data from device to locked memory
HANDLE_ERROR(cudaMemcpyAsync(host_c+i, dev_c, NUM * sizeof(int), cudaMemcpyDeviceToHost, stream));
}
// copy result chunk from locked to full buffer
// drain the stream before reading host_c or stopping the timer
HANDLE_ERROR(cudaStreamSynchronize(stream));
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("Time taken: %3.1f ms\n", elapsedTime);
//cleanup the streams and memory
HANDLE_ERROR(cudaFreeHost(host_a));
HANDLE_ERROR(cudaFreeHost(host_b));
HANDLE_ERROR(cudaFreeHost(host_c));
HANDLE_ERROR(cudaFree(dev_a));
HANDLE_ERROR(cudaFree(dev_b));
HANDLE_ERROR(cudaFree(dev_c));
HANDLE_ERROR(cudaStreamDestroy(stream));
return 0;
}
// Two-stream chunked pipeline: like test18, but alternates chunks between
// stream0 and stream1, with operations interleaved breadth-first (copy a,
// copy b, kernel, copy c for both streams) so the hardware can overlap
// one stream's copies with the other's kernel.
// NOTE(review): assumes FULL_DATA_SIZE is a multiple of 2*NUM.
int test19()
{
cudaDeviceProp prop;
int whichDevice;
HANDLE_ERROR(cudaGetDevice(&whichDevice));
HANDLE_ERROR(cudaGetDeviceProperties(&prop, whichDevice));
if (!prop.deviceOverlap) {
printf( "Device will not handle overlaps, so no speed up from streams\n" );
return 0;
}
//start the timers
cudaEvent_t start, stop;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
//initialize the streams
cudaStream_t stream0, stream1;
HANDLE_ERROR(cudaStreamCreate(&stream0));
HANDLE_ERROR(cudaStreamCreate(&stream1));
int *host_a, *host_b, *host_c;
int *dev_a0, *dev_b0, *dev_c0;// GPU memory allocated for stream 0
int *dev_a1, *dev_b1, *dev_c1;// GPU memory allocated for stream 1
//allocate the memory on the GPU
HANDLE_ERROR(cudaMalloc((void**)&dev_a0, NUM * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_b0, NUM * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_c0, NUM * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_a1, NUM * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_b1, NUM * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_c1, NUM * sizeof(int)));
//allocate host locked memory, used to stream
HANDLE_ERROR(cudaHostAlloc((void**)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault));
HANDLE_ERROR(cudaHostAlloc((void**)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault));
HANDLE_ERROR(cudaHostAlloc((void**)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault));
for (int i=0; i<FULL_DATA_SIZE; i++) {
host_a[i] = rand();
host_b[i] = rand();
}
HANDLE_ERROR(cudaEventRecord(start, 0));
//now loop over full data, in bite-sized chunks
for (int i=0; i<FULL_DATA_SIZE; i+= NUM*2) {
//enqueue copies of a in stream0 and stream1
// copy the page-locked memory to the device asynchronously
HANDLE_ERROR(cudaMemcpyAsync(dev_a0, host_a+i, NUM * sizeof(int), cudaMemcpyHostToDevice, stream0));
HANDLE_ERROR(cudaMemcpyAsync(dev_a1, host_a+i+NUM, NUM * sizeof(int), cudaMemcpyHostToDevice, stream1));
//enqueue copies of b in stream0 and stream1
HANDLE_ERROR(cudaMemcpyAsync(dev_b0, host_b+i, NUM * sizeof(int), cudaMemcpyHostToDevice, stream0));
HANDLE_ERROR(cudaMemcpyAsync(dev_b1, host_b+i+NUM, NUM * sizeof(int), cudaMemcpyHostToDevice, stream1));
//enqueue kernels in stream0 and stream1
singlestream_kernel<<<NUM/256, 256, 0, stream0>>>(dev_a0, dev_b0, dev_c0);
singlestream_kernel<<<NUM/256, 256, 0, stream1>>>(dev_a1, dev_b1, dev_c1);
//enqueue copies of c from device to locked memory
HANDLE_ERROR(cudaMemcpyAsync(host_c+i, dev_c0, NUM * sizeof(int), cudaMemcpyDeviceToHost, stream0));
HANDLE_ERROR(cudaMemcpyAsync(host_c+i+NUM, dev_c1, NUM * sizeof(int), cudaMemcpyDeviceToHost, stream1));
}
float elapsedTime;
// drain both streams before stopping the timer
HANDLE_ERROR(cudaStreamSynchronize(stream0));
HANDLE_ERROR(cudaStreamSynchronize(stream1));
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime,start, stop));
printf( "Time taken: %3.1f ms\n", elapsedTime );
//cleanup the streams and memory
HANDLE_ERROR(cudaFreeHost(host_a));
HANDLE_ERROR(cudaFreeHost(host_b));
HANDLE_ERROR(cudaFreeHost(host_c));
HANDLE_ERROR(cudaFree(dev_a0));
HANDLE_ERROR(cudaFree(dev_b0));
HANDLE_ERROR(cudaFree(dev_c0));
HANDLE_ERROR(cudaFree(dev_a1));
HANDLE_ERROR(cudaFree(dev_b1));
HANDLE_ERROR(cudaFree(dev_c1));
HANDLE_ERROR(cudaStreamDestroy(stream0));
HANDLE_ERROR(cudaStreamDestroy(stream1));
return 0;
}
// Dot-product timing with conventional device memory: copies a and b to
// the GPU, launches dot_kernel to produce blocksPerGrid partial sums,
// copies those back, and finishes the reduction on the CPU. Returns the
// elapsed GPU time in ms and prints the computed dot value.
float malloc_test(int size)
{
cudaEvent_t start, stop;
float *a, *b, c, *partial_c;
float *dev_a, *dev_b, *dev_partial_c;
float elapsedTime;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
//allocate memory on the CPU side
a = (float*)malloc(size * sizeof(float));
b = (float*)malloc(size * sizeof(float));
partial_c = (float*)malloc(blocksPerGrid * sizeof(float));
//allocate the memory on the GPU
HANDLE_ERROR(cudaMalloc((void**)&dev_a, size * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&dev_b, size * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&dev_partial_c, blocksPerGrid * sizeof(float)));
//fill in the host memory with data
for (int i=0; i<size; i++) {
a[i] = i;
b[i] = i * 2;
}
HANDLE_ERROR(cudaEventRecord(start, 0));
//copy the arrays 'a' and 'b' to the GPU
HANDLE_ERROR(cudaMemcpy(dev_a, a, size * sizeof(float), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_b, b, size * sizeof(float), cudaMemcpyHostToDevice));
dot_kernel<<<blocksPerGrid, threadsPerBlock>>>(size, dev_a, dev_b, dev_partial_c);
//copy the array 'c' back from the GPU to the CPU
// this blocking copy also waits for the kernel to finish
HANDLE_ERROR(cudaMemcpy(partial_c, dev_partial_c,blocksPerGrid*sizeof(float), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime,start, stop));
//finish up on the CPU side
c = 0;
for (int i=0; i<blocksPerGrid; i++) {
c += partial_c[i];
}
HANDLE_ERROR(cudaFree(dev_a));
HANDLE_ERROR(cudaFree(dev_b));
HANDLE_ERROR(cudaFree(dev_partial_c));
//free memory on the CPU side
free(a);
free(b);
free(partial_c);
//free events
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(stop));
printf("Value calculated: %f\n", c);
return elapsedTime;
}
// Zero-copy dot product: the host buffers are mapped, page-locked memory,
// so the kernel reads them directly over the bus -- no explicit cudaMemcpy.
// Returns the elapsed GPU time in ms and prints the computed dot value.
float cuda_host_alloc_test(int size)
{
cudaEvent_t start, stop;
float *a, *b, c, *partial_c;
float *dev_a, *dev_b, *dev_partial_c;
float elapsedTime;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
//allocate the memory on the CPU
// a and b are write-combined (fast for GPU reads, slow for host reads);
// partial_c stays cacheable because the host reads it back
HANDLE_ERROR(cudaHostAlloc((void**)&a, size*sizeof(float), cudaHostAllocWriteCombined |cudaHostAllocMapped));
HANDLE_ERROR(cudaHostAlloc((void**)&b, size*sizeof(float), cudaHostAllocWriteCombined |cudaHostAllocMapped));
HANDLE_ERROR(cudaHostAlloc((void**)&partial_c, blocksPerGrid*sizeof(float), cudaHostAllocMapped));
//find out the GPU pointers
HANDLE_ERROR(cudaHostGetDevicePointer(&dev_a, a, 0));
HANDLE_ERROR(cudaHostGetDevicePointer(&dev_b, b, 0));
HANDLE_ERROR( cudaHostGetDevicePointer(&dev_partial_c, partial_c, 0));
//fill in the host memory with data
for (int i=0; i<size; i++) {
a[i] = i;
b[i] = i*2;
}
HANDLE_ERROR(cudaEventRecord(start, 0));
dot_kernel<<<blocksPerGrid, threadsPerBlock>>>(size, dev_a, dev_b, dev_partial_c);
// the kernel writes straight into mapped host memory, so we must
// synchronize before reading partial_c. cudaDeviceSynchronize replaces
// the deprecated cudaThreadSynchronize (same semantics, current API).
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime,start, stop));
//finish up on the CPU side
c = 0;
for (int i=0; i<blocksPerGrid; i++) {
c += partial_c[i];
}
HANDLE_ERROR(cudaFreeHost(a));
HANDLE_ERROR(cudaFreeHost(b));
HANDLE_ERROR(cudaFreeHost(partial_c));
// free events
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(stop));
printf("Value calculated: %f\n", c);
return elapsedTime;
}
// Compares the dot product with explicit copies (malloc_test) against the
// zero-copy mapped-memory version (cuda_host_alloc_test). Requires a
// device that can map host memory; cudaDeviceMapHost must be set before
// any mapped allocations are made.
int test20()
{
cudaDeviceProp prop;
int whichDevice;
HANDLE_ERROR(cudaGetDevice(&whichDevice));
HANDLE_ERROR(cudaGetDeviceProperties(&prop, whichDevice));
if (prop.canMapHostMemory != 1) {
printf( "Device can not map memory.\n" );
return 0;
}
HANDLE_ERROR(cudaSetDeviceFlags(cudaDeviceMapHost));
//try it with malloc
float elapsedTime = malloc_test(NUM);
printf("Time using cudaMalloc: %3.1f ms\n", elapsedTime);
//now try it with cudaHostAlloc
elapsedTime = cuda_host_alloc_test(NUM);
printf("Time using cudaHostAlloc: %3.1f ms\n", elapsedTime);
return 0;
}
// Thread entry point for the multi-GPU dot product: binds this host
// thread to data->deviceID, computes the dot product of the thread's
// slice of a and b (GPU partial sums, CPU final reduction), and stores
// the result in data->returnValue. cudaSetDevice is called before any
// allocation so all device memory lands on the selected GPU.
void* routine(void *pvoidData)
{
DataStruct *data = (DataStruct*)pvoidData;
HANDLE_ERROR(cudaSetDevice(data->deviceID));
int size = data->size;
float *a, *b, c, *partial_c;
float *dev_a, *dev_b, *dev_partial_c;
//allocate memory on the CPU side
// a and b point into buffers owned by the caller; only partial_c is ours
a = data->a;
b = data->b;
partial_c = (float*)malloc(blocksPerGrid * sizeof(float));
//allocate the memory on the GPU
HANDLE_ERROR(cudaMalloc((void**)&dev_a, size * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&dev_b, size * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&dev_partial_c, blocksPerGrid*sizeof(float)));
//copy the arrays 'a' and 'b' to the GPU
HANDLE_ERROR(cudaMemcpy(dev_a, a, size*sizeof(float), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_b, b, size*sizeof(float), cudaMemcpyHostToDevice));
dot_kernel<<<blocksPerGrid, threadsPerBlock>>>(size, dev_a, dev_b, dev_partial_c);
//copy the array 'c' back from the GPU to the CPU
HANDLE_ERROR(cudaMemcpy( partial_c, dev_partial_c, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost));
//finish up on the CPU side
c = 0;
for (int i=0; i<blocksPerGrid; i++) {
c += partial_c[i];
}
HANDLE_ERROR(cudaFree(dev_a));
HANDLE_ERROR(cudaFree(dev_b));
HANDLE_ERROR(cudaFree(dev_partial_c));
//free memory on the CPU side
free(partial_c);
data->returnValue = c;
return 0;
}
// Multi-GPU dot product: splits a and b in half, runs `routine` for the
// first half on device 0 in a worker thread while this thread handles the
// second half on device 1, then sums the two partial results. Requires at
// least two CUDA devices.
int test21()
{
int deviceCount;
HANDLE_ERROR(cudaGetDeviceCount(&deviceCount));
if (deviceCount < 2) {
printf("We need at least two compute 1.0 or greater devices, but only found %d\n", deviceCount);
return 0;
}
float *a = (float*)malloc(sizeof(float) * NUM);
HANDLE_NULL(a);
float *b = (float*)malloc(sizeof(float) * NUM);
HANDLE_NULL(b);
//fill in the host memory with data
for (int i=0; i<NUM; i++) {
a[i] = i;
b[i] = i*2;
}
//prepare for multithread
// each DataStruct describes one device's half of the input
DataStruct data[2];
data[0].deviceID = 0;
data[0].size = NUM/2;
data[0].a = a;
data[0].b = b;
data[1].deviceID = 1;
data[1].size = NUM/2;
data[1].a = a + NUM/2;
data[1].b = b + NUM/2;
// worker thread takes device 0; this thread computes device 1's share
CUTThread thread = start_thread(routine, &(data[0]));
routine(&(data[1]));
end_thread(thread);
//free memory on the CPU side
free(a);
free(b);
printf("Value calculated: %f\n", data[0].returnValue + data[1].returnValue);
return 0;
}
|
121ace40d1907b40b4aeefcfd9989d343b7d7a03.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
#define SIZE (16)
// Kernel definition, see also section 4.2.3 of Nvidia Cuda Programming Guide
// Element-wise vector add, C = A + B, for a single-block launch where
// blockDim.x equals the array length: each thread handles exactly one
// element, indexed by its position within the block.
__global__ void arrAdd(float *A, float *B, float *C)
{
const int tid = threadIdx.x; // this thread's element (block 0 only)
C[tid] = A[tid] + B[tid];
}
// Host driver (HIP): fills two SIZE-element arrays with random values in
// [0, 99], adds them on the GPU with a single block of SIZE threads, and
// prints the per-element results.
// NOTE(review): no HIP call here is error-checked; a failed launch or copy
// would surface only as wrong printed output.
int main(void)
{
float A[SIZE], B[SIZE], C[SIZE];
float *devPtrA;
float *devPtrB;
float *devPtrC;
int memsize = SIZE * sizeof(float);
// Initialize arrays
srand (time(NULL));
for(int i=0; i < SIZE; i++)
{
A[i]=rand() % 100;
B[i]=rand() % 100;
}
hipSetDevice(0); // Select GPU device (can be 0 to 3)
// Allocate device memory for A, B and C arrays
hipMalloc((void**)&devPtrA, memsize);
hipMalloc((void**)&devPtrB, memsize);
hipMalloc((void**)&devPtrC, memsize);
// Copy data (data to process) from host to device (from CPU to GPU)
hipMemcpy(devPtrA, A, memsize, hipMemcpyHostToDevice);
hipMemcpy(devPtrB, B, memsize, hipMemcpyHostToDevice);
// Execute the Kernell
hipLaunchKernelGGL(( arrAdd) , dim3(1), dim3(SIZE), 0, 0, devPtrA, devPtrB, devPtrC); // launch 1 block with SIZE threads
// Copy data from device (results) back to host
// this blocking copy also waits for the kernel to finish
hipMemcpy(C, devPtrC, memsize, hipMemcpyDeviceToHost);
// Show results
printf(" A B C\n");
for (int i=0; i < SIZE; i++)
{
printf("%2d: %4.1f + %4.1f = %5.1f\n", i, A[i], B[i], C[i]);
}
// Free device memory
hipFree(devPtrA);
hipFree(devPtrB);
hipFree(devPtrC);
}
| 121ace40d1907b40b4aeefcfd9989d343b7d7a03.cu | #include <stdio.h>
#include <time.h>
#define SIZE (16)
// Kernel definition, see also section 4.2.3 of Nvidia Cuda Programming Guide
// Element-wise sum C = A + B. Intended for a single-block launch with
// blockDim.x == SIZE, so threadIdx.x alone indexes the arrays; there is
// no bounds check, so the launch must not exceed the array length.
__global__ void arrAdd(float *A, float *B, float *C)
{
// threadIdx.x is a built-in variable provided by CUDA at runtime
// It represents the thread index inside the block
int id = threadIdx.x; // id: unique thread identifier
C[id] = A[id] + B[id];
}
// Host driver: fills two SIZE-element arrays with random values in
// [0, 99], adds them on the GPU with a single block of SIZE threads,
// prints the per-element results, and frees device memory.
int main(void)
{
float A[SIZE], B[SIZE], C[SIZE];
float *devPtrA;
float *devPtrB;
float *devPtrC;
int memsize = SIZE * sizeof(float);
// Initialize arrays
srand (time(NULL));
for(int i=0; i < SIZE; i++)
{
A[i]=rand() % 100;
B[i]=rand() % 100;
}
cudaSetDevice(0); // Select GPU device (can be 0 to 3)
// Allocate device memory for A, B and C arrays
cudaMalloc((void**)&devPtrA, memsize);
cudaMalloc((void**)&devPtrB, memsize);
cudaMalloc((void**)&devPtrC, memsize);
// Copy data (data to process) from host to device (from CPU to GPU)
cudaMemcpy(devPtrA, A, memsize, cudaMemcpyHostToDevice);
cudaMemcpy(devPtrB, B, memsize, cudaMemcpyHostToDevice);
// Execute the Kernell
arrAdd <<<1, SIZE>>> (devPtrA, devPtrB, devPtrC); // launch 1 block with SIZE threads
// A kernel launch returns no status; previously a bad launch (or any
// earlier sticky CUDA error) was silently ignored and garbage printed.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
return 1;
}
// Copy data from device (results) back to host
// (this blocking copy also waits for the kernel to finish)
cudaMemcpy(C, devPtrC, memsize, cudaMemcpyDeviceToHost);
// Show results
printf(" A B C\n");
for (int i=0; i < SIZE; i++)
{
printf("%2d: %4.1f + %4.1f = %5.1f\n", i, A[i], B[i], C[i]);
}
// Free device memory
cudaFree(devPtrA);
cudaFree(devPtrB);
cudaFree(devPtrC);
return 0;
}
|
c663efd088f100f76986c6300c2ed0c083b44881.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
# include "../shared/timing.h"
# include <stdlib.h>
# include <stdio.h>
# include <math.h>
# define BSIZE 16
// device code for block MxM
// Omit the d_ prefix because all variables are on device
// Tiled matrix multiply C = A * B for square N x N matrices.
// Preconditions (not checked in-kernel): N is a multiple of BSIZE,
// blockDim == (BSIZE, BSIZE), gridDim == (N/BSIZE, N/BSIZE). Each block
// computes one BSIZE x BSIZE tile of C; each thread computes one element.
__global__ void MxM_block(double* A, double* B, double* C, const int N) {
// store sub-matrix in the shared memory
// They should be in the L1 cache of each streaming processor
__shared__ double A_tile[BSIZE][BSIZE];
__shared__ double B_tile[BSIZE][BSIZE];
__shared__ double C_tile[BSIZE][BSIZE];
// each thread computes one element of the block sub-matrix
// initialize the element to 0
C_tile[threadIdx.y][threadIdx.x] = 0.0;
// pseudo code:
// C[i1:i2][j1:j2] += A[i1:i2][k1:k2] * B[k1:k2][j1:j2]
// For each block, i,j is known:
// (i ranges from i1 to i2, j ranges from j1 to j2)
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
// Only k is changing, so we loop over k:
// each iteration (k1,k2) -> (k1+BSIZE,k2+BSIZE)
for (int tileIdx = 0; tileIdx < N/BSIZE; tileIdx++){
// Load the matrices from global memory to shared memory
// each thread loads one element
// note that we use 1D index for global matrix but 2D index for a tile
int k_A = tileIdx*BSIZE+threadIdx.x; // k is the row index for A
int k_B = tileIdx*BSIZE+threadIdx.y; // k is the column index for B
A_tile[threadIdx.y][threadIdx.x] = A[i*N + k_A];
B_tile[threadIdx.y][threadIdx.x] = B[k_B*N + j];
// Synchronize to make sure the entire tile is loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element of the block sub-matrix
for (int k = 0; k < BSIZE; k++) {
C_tile[threadIdx.y][threadIdx.x] +=
A_tile[threadIdx.y][k] * B_tile[k][threadIdx.x];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// write back to global memory
// each thread writes one element
C[i*N + j] = C_tile[threadIdx.y][threadIdx.x];
// Final note:
// It seems that using C_temp (a scalar)
// instead of C_tile[threadIdx.y, threadIdx.x]
// is more memory-efficient.
// because C_temp can be stored in the register of each thread.
// So we can save more shared memory for A_tile and B_tile
// But to follow the homework instruction we still allocate
// shared memory for C_tile
}
// host code is almost the same as the naive one
// Host driver for the tiled matrix multiply: builds two N x N matrices of
// ones, multiplies them on the GPU, and reports the first element of the
// result (expected to equal N) plus the elapsed device-side time.
int main() {
// set up problem size; N must be a multiple of BSIZE for the kernel's
// tiling (and its lack of bounds checks) to be correct
int N = 1 << 10; // 2^10, computed in integer arithmetic instead of pow()
int size = N * N;
// malloc host memory and initialize data
double* h_A = (double*)malloc(sizeof(double) * size);
double* h_B = (double*)malloc(sizeof(double) * size);
double* h_C = (double*)malloc(sizeof(double) * size);
for (int i = 0; i < size; i++) {
h_A[i] = 1.0;
h_B[i] = 1.0;
}
// timing all the device operations
double iStart, iElaps;
iStart = seconds();
// malloc device global memory and transfer data from host to device
double *d_A, *d_B, *d_C;
hipMalloc((void **)&d_A, size * sizeof(double));
hipMalloc((void **)&d_B, size * sizeof(double));
hipMalloc((void **)&d_C, size * sizeof(double));
hipMemcpy(d_A, h_A, size*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size*sizeof(double), hipMemcpyHostToDevice);
// Invoke kernel at host side
// One thread-block calculates one block of matrix C
// Inside the block, one thread handles one element
dim3 dimBlock(BSIZE, BSIZE);
dim3 dimGrid(N/dimBlock.x, N/dimBlock.y);
// execute the kernel function
hipLaunchKernelGGL(( MxM_block), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, N);
// copy kernel result back to host side (this blocking copy also waits
// for the kernel to finish)
hipMemcpy(h_C, d_C, size*sizeof(double), hipMemcpyDeviceToHost);
// end of timing
iElaps = seconds() - iStart;
// print the results
printf("First element = %f \n", h_C[0]);
printf("Time elapse = %f ms \n", iElaps*1000.0);
// clean up device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// free host buffers (previously leaked)
free(h_A);
free(h_B);
free(h_C);
return(0);
}
| c663efd088f100f76986c6300c2ed0c083b44881.cu | # include "../shared/timing.h"
# include <stdlib.h>
# include <stdio.h>
# include <math.h>
# define BSIZE 16
// device code for block MxM
// Omit the d_ prefix because all variables are on device
// Tiled matrix multiply: C = A * B for square N x N row-major matrices.
// One thread block computes one BSIZE x BSIZE tile of C; each thread owns
// exactly one element of that tile. Precondition: N is an exact multiple
// of BSIZE (there are no edge guards).
__global__ void MxM_block(double* A, double* B, double* C, const int N) {
    // Per-block staging buffers in shared memory (on-chip scratchpad).
    __shared__ double sA[BSIZE][BSIZE];
    __shared__ double sB[BSIZE][BSIZE];
    __shared__ double sC[BSIZE][BSIZE];

    const int tx = threadIdx.x;
    const int ty = threadIdx.y;

    // Global row/column of the C element this thread produces.
    const int row = blockIdx.y * blockDim.y + ty;
    const int col = blockIdx.x * blockDim.x + tx;

    // Running dot product for C[row][col], kept in shared memory.
    sC[ty][tx] = 0.0;

    // March a pair of tiles along the shared dimension.
    const int numTiles = N / BSIZE;
    for (int t = 0; t < numTiles; t++) {
        // Each thread stages one element of A and one element of B;
        // global matrices use 1D indexing, tiles use 2D indexing.
        sA[ty][tx] = A[row * N + (t * BSIZE + tx)];
        sB[ty][tx] = B[(t * BSIZE + ty) * N + col];
        // Tiles must be fully loaded before anyone reads them.
        __syncthreads();

        // Partial dot product over this tile.
        for (int k = 0; k < BSIZE; k++) {
            sC[ty][tx] += sA[ty][k] * sB[k][tx];
        }
        // Finish all reads before the tiles are overwritten next iteration.
        __syncthreads();
    }

    // Commit the finished element to global memory.
    C[row * N + col] = sC[ty][tx];
    // Note: a per-thread register accumulator would be cheaper than sC and
    // would free shared memory for larger A/B tiles, but shared memory is
    // used here to follow the original homework instructions.
}
// host code is almost the same as the naive one
int main() {
    // Problem size: N x N matrices. N must be a multiple of BSIZE because
    // the kernel grid tiles the matrix exactly (no in-kernel bounds guards).
    int N = 1 << 10;  // 2^10; integer shift avoids double->int truncation from pow()
    int size = N * N;

    // Allocate and initialize host matrices (all ones).
    double* h_A = (double*)malloc(sizeof(double) * size);
    double* h_B = (double*)malloc(sizeof(double) * size);
    double* h_C = (double*)malloc(sizeof(double) * size);
    if (h_A == NULL || h_B == NULL || h_C == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    for (int i = 0; i < size; i++) {
        h_A[i] = 1.0;
        h_B[i] = 1.0;
    }

    // Time all device operations (alloc + transfers + kernel + copy-back).
    double iStart, iElaps;
    iStart = seconds();

    // Device allocations and host-to-device transfers.
    double *d_A, *d_B, *d_C;
    cudaMalloc((void **)&d_A, size * sizeof(double));
    cudaMalloc((void **)&d_B, size * sizeof(double));
    cudaMalloc((void **)&d_C, size * sizeof(double));
    cudaMemcpy(d_A, h_A, size*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size*sizeof(double), cudaMemcpyHostToDevice);

    // One thread block computes one BSIZE x BSIZE tile of C;
    // inside the block, one thread handles one element.
    dim3 dimBlock(BSIZE, BSIZE);
    dim3 dimGrid(N/dimBlock.x, N/dimBlock.y);
    MxM_block<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, N);

    // Surface launch-configuration errors instead of failing silently later.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }

    // Blocking device-to-host copy also synchronizes with the kernel.
    cudaMemcpy(h_C, d_C, size*sizeof(double), cudaMemcpyDeviceToHost);
    iElaps = seconds() - iStart;

    // Each element should equal N (dot product of all-ones row/column).
    printf("First element = %f \n", h_C[0]);
    printf("Time elapse = %f ms \n", iElaps*1000.0);

    // Release device AND host memory (host buffers were previously leaked).
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
    return(0);
}
|
3d95cf80cec6b790043d57d3e2ef5349f863c5b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include<math.h>
#include<iostream>
#include "gloveparser.cuh"
#include <stdio.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "point.h"
#include <time.h>
#include "simpleLinearScan.cuh"
#include "optimizedLinearScan.cuh"
#include "simHash.cuh"
#include "resultWriter.h"
#include "memOptimizedLinearScan.cuh"
#include "launchHelper.cuh"
#include "validation.h"
#include "weightedMinHash.cuh"
#include "simHashJL.cuh"
#include "launchHelper.cuh"
#include "lshFramework.cuh"
char* implementations[6] = { "OptimizedLinearScan", "MemOptimizedLinearScan", "SimHashLinearScan", "WeightedMinHash", "OneBit - WeightedMinHash", "SimHash Johnson Lindenstrauss"};
// Dispatch to the selected linear-scan implementation (ids match the
// `implementations` name table). Returns a value-initialized Result when
// `implementation` is out of range (previously returned uninitialized data).
Result linearScans(int implementation, int k, int d, int N_query, int N_data, float* data, float* queries, int sketchDim, int distanceFunc) {
    Result res = Result();  // value-initialize so the error path is defined
    switch (implementation)
    {
    case 1:
        res = runOptimizedLinearScan(k, d, N_query, N_data, data, queries);
        break;
    case 2:
        res = runMemOptimizedLinearScan(k, d, N_query, N_data, data, queries, distanceFunc);
        break;
    case 3:
        res = simHash::runSimHashLinearScan(k, d, sketchDim, N_query, N_data, data, queries);
        break;
    case 4:
    case 5:
        // 4 and 5 share the weighted-min-hash entry point; the id selects
        // the variant (full vs one-bit) inside runMinHash.
        res = weightedMinHash::runMinHash(k, d, sketchDim, N_query, N_data, data, queries, implementation);
        break;
    case 6:
        res = simHashJl::runSimHashJLLinearScan(k, d, sketchDim, N_query, N_data, data, queries);
        break;
    default:
        printf("Invalid implementation selected. \n");
        break;
    }
    return res;
}
// Thin forwarder: pairs a sketch launch DTO with its LSH bucket-key DTO and
// hands both to the LSH runner.
template<class T, class K>
Result executeLSH(LaunchDTO<T> params, LshLaunchDTO<K> lshParams) {
    Result outcome = runLsh(params, lshParams);
    return outcome;
}
// Select the bucket-key storage type for the chosen key implementation and
// run LSH. Key implementations 3, 5 and 7 use 16-bit keys, 4 uses 8-bit
// keys; anything else is rejected with a value-initialized Result.
// Fix: the rejection paths previously fell off the end of a non-void
// function (undefined behavior); they now return a defined value.
template<class T>
Result LshPipeline(LaunchDTO<T> params, int keysImplementation, int bucketKeyBits, int tables, bool runWithSketchedData) {
    switch (keysImplementation)
    {
    case 3:
        return executeLSH(params, setupLshLaunchDTO<unsigned short>(keysImplementation, bucketKeyBits, tables, params.N_data, params.N_queries, runWithSketchedData));
    case 4:
        return executeLSH(params, setupLshLaunchDTO<unsigned char>(keysImplementation, bucketKeyBits, tables, params.N_data, params.N_queries, runWithSketchedData));
    case 5:
        return executeLSH(params, setupLshLaunchDTO<unsigned short>(keysImplementation, bucketKeyBits, tables, params.N_data, params.N_queries, runWithSketchedData));
    case 7:
        return executeLSH(params, setupLshLaunchDTO<unsigned short>(keysImplementation, bucketKeyBits, tables, params.N_data, params.N_queries, runWithSketchedData));
    case 6:
    default:
        printf("Invalid implementation selected for LSH. \n");
        return Result();
    }
}
// Pick the sketch element type for the chosen sketching implementation and
// run the LSH pipeline. Returns a value-initialized Result on invalid input.
// Fix: the default path ended with a bare `return;` in a Result-returning
// function (ill-formed C++); it now returns a defined value.
Result LSH(int implementation, int keysImplementation, int k, int d, int N_query, int N_data, float* data, float* queries, int sketchDim, int distanceFunc, int bucketKeyBits, int tables, bool runWithSketchedData) {
    switch (implementation)
    {
    case 2:
        return LshPipeline(setupLaunchDTO<float>(implementation, distanceFunc, k, d, sketchDim, N_query, N_data, data, queries), keysImplementation, bucketKeyBits, tables, runWithSketchedData);
    case 3:
        return LshPipeline(setupLaunchDTO<unsigned int>(implementation, distanceFunc, k, d, sketchDim, N_query, N_data, data, queries), keysImplementation, bucketKeyBits, tables, runWithSketchedData);
    case 4:
        return LshPipeline(setupLaunchDTO<unsigned char>(implementation, distanceFunc, k, d, sketchDim, N_query, N_data, data, queries), keysImplementation, bucketKeyBits, tables, runWithSketchedData);
    case 5:
        return LshPipeline(setupLaunchDTO<unsigned int>(implementation, distanceFunc, k, d, sketchDim, N_query, N_data, data, queries), keysImplementation, bucketKeyBits, tables, runWithSketchedData);
    case 6:
        return LshPipeline(setupLaunchDTO<float>(implementation, distanceFunc, k, d, sketchDim, N_query, N_data, data, queries), keysImplementation, bucketKeyBits, tables, runWithSketchedData);
    default:
        printf("Invalid implementation selected for LSH. \n");
        return Result();
    }
}
int main(int argc, char **argv)
{
    // 15 positional arguments are required; bail out early instead of
    // reading past the end of argv (previously an out-of-bounds read).
    if (argc < 16) {
        printf("Expected 15 arguments: data queries truth runValidation writeRes k "
               "implementation sketchDim distanceFunc framework bucketKeyBits tables "
               "keysImplementation runWithSketchedData resultFile\n");
        return -1;
    }
    //In arguments.
    char* filepath_data = argv[1];
    char* filepath_queries = argv[2];
    char* filepath_truth = argv[3];
    int shouldRunValidation = atoi(argv[4]);
    int writeRes = atoi(argv[5]); //1 for yes, 0 for no.
    char* _k = argv[6];
    int implementation = atoi(argv[7]);
    int reportK = atoi(_k);
    int k = calculateK(reportK);
    int sketchDim = atoi(argv[8]);
    int distanceFunc = atoi(argv[9]);
    int framework = atoi(argv[10]);
    int bucketKeyBits = atoi(argv[11]);
    int tables = atoi(argv[12]);
    int keysImplementation = atoi(argv[13]);
    bool runWithSketchedData = (bool)atoi(argv[14]);
    char* result_file_path = argv[15];

    int N_data = 0;
    int N_query = 0;
    int d = 0;
    float *queries;
    float *data;

    // Parse the input files; the parsers set N_query/N_data/d.
    clock_t before = clock();
    queries = parseFile(filepath_queries, N_query, d);
    data = parseFile(filepath_data, N_data, d);
    clock_t time_lapsed = clock() - before;
    printf("Time to read data files: %d \n", (time_lapsed * 1000 / CLOCKS_PER_SEC));
    printf("Done parsing files. \n");
    printf("N_Query = %d \n", N_query);
    printf("N_Data = %d \n", N_data);
    printf("k is set to: %d\n", k);
    printf("Write res is set to %s \n", writeRes ? "True" : "False");
    printf("Validation is set to %s \n", shouldRunValidation ? "True" : "False");
    printf("Truth file is %s \n", filepath_truth);
    // Guard the name lookup: an invalid id previously indexed out of bounds.
    printf("Implementation selected = %s\n",
           (implementation >= 1 && implementation <= 6)
               ? implementations[implementation - 1] : "Unknown");

    //DTO's purely used for result writing.
    LaunchDTO<int> defaultDTO = setupLaunchDTO<int>(implementation, distanceFunc, k, d, sketchDim, N_query, N_data, data, queries);
    LshLaunchDTO<int> defaultLSHDTO = setupLshLaunchDTO<int>(keysImplementation, bucketKeyBits, tables, N_data, N_query, runWithSketchedData);

    // framework == 0 runs a plain linear scan, otherwise the LSH pipeline.
    Result res;
    if (framework == 0) {
        res = linearScans(implementation, k, d, N_query, N_data, data, queries, sketchDim, distanceFunc);
    }
    else {
        res = LSH(implementation, keysImplementation, k, d, N_query, N_data, data, queries, sketchDim, distanceFunc, bucketKeyBits, tables, runWithSketchedData);
    }

    float* container = (float*)malloc(2 * sizeof(float)); //Dummy to avoid LINK ERR on .cu to .cpp files
    // Default recall/avgDistance to zero so skipping validation does not
    // publish uninitialized memory in the performance results.
    container[0] = 0.0f;
    container[1] = 0.0f;
    if (shouldRunValidation) {
        printf("Running Validation: \n");
        //runValidationFromLargeFile(filepath_truth, container, res.results, N_query, k, reportK);
        runValidation(filepath_truth, container, res.results, N_query, k, reportK);
    }
    res.recall = container[0];
    res.avgDistance = container[1];
    if (writeRes) {
        printf("Writing results: \n");
        writeResult(res.results, k, N_query, reportK);
    }
    writePerformanceResults(res, defaultDTO, defaultLSHDTO, result_file_path);

    // Release host resources (`container` was previously leaked).
    free(container);
    free(queries);
    free(data);
    printf("Success. Program exiting. \n");
    free(res.results);
    return 0;
}
| 3d95cf80cec6b790043d57d3e2ef5349f863c5b5.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<math.h>
#include<iostream>
#include "gloveparser.cuh"
#include <stdio.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "point.h"
#include <time.h>
#include "simpleLinearScan.cuh"
#include "optimizedLinearScan.cuh"
#include "simHash.cuh"
#include "resultWriter.h"
#include "memOptimizedLinearScan.cuh"
#include "launchHelper.cuh"
#include "validation.h"
#include "weightedMinHash.cuh"
#include "simHashJL.cuh"
#include "launchHelper.cuh"
#include "lshFramework.cuh"
char* implementations[6] = { "OptimizedLinearScan", "MemOptimizedLinearScan", "SimHashLinearScan", "WeightedMinHash", "OneBit - WeightedMinHash", "SimHash Johnson Lindenstrauss"};
// Dispatch to the selected linear-scan implementation (ids match the
// `implementations` name table). Returns a value-initialized Result when
// `implementation` is out of range (previously returned uninitialized data).
Result linearScans(int implementation, int k, int d, int N_query, int N_data, float* data, float* queries, int sketchDim, int distanceFunc) {
    Result res = Result();  // value-initialize so the error path is defined
    switch (implementation)
    {
    case 1:
        res = runOptimizedLinearScan(k, d, N_query, N_data, data, queries);
        break;
    case 2:
        res = runMemOptimizedLinearScan(k, d, N_query, N_data, data, queries, distanceFunc);
        break;
    case 3:
        res = simHash::runSimHashLinearScan(k, d, sketchDim, N_query, N_data, data, queries);
        break;
    case 4:
    case 5:
        // 4 and 5 share the weighted-min-hash entry point; the id selects
        // the variant (full vs one-bit) inside runMinHash.
        res = weightedMinHash::runMinHash(k, d, sketchDim, N_query, N_data, data, queries, implementation);
        break;
    case 6:
        res = simHashJl::runSimHashJLLinearScan(k, d, sketchDim, N_query, N_data, data, queries);
        break;
    default:
        printf("Invalid implementation selected. \n");
        break;
    }
    return res;
}
// Thin forwarder: pairs a sketch launch DTO with its LSH bucket-key DTO and
// hands both to the LSH runner.
template<class T, class K>
Result executeLSH(LaunchDTO<T> params, LshLaunchDTO<K> lshParams) {
    Result outcome = runLsh(params, lshParams);
    return outcome;
}
// Select the bucket-key storage type for the chosen key implementation and
// run LSH. Key implementations 3, 5 and 7 use 16-bit keys, 4 uses 8-bit
// keys; anything else is rejected with a value-initialized Result.
// Fix: the rejection paths previously fell off the end of a non-void
// function (undefined behavior); they now return a defined value.
template<class T>
Result LshPipeline(LaunchDTO<T> params, int keysImplementation, int bucketKeyBits, int tables, bool runWithSketchedData) {
    switch (keysImplementation)
    {
    case 3:
        return executeLSH(params, setupLshLaunchDTO<unsigned short>(keysImplementation, bucketKeyBits, tables, params.N_data, params.N_queries, runWithSketchedData));
    case 4:
        return executeLSH(params, setupLshLaunchDTO<unsigned char>(keysImplementation, bucketKeyBits, tables, params.N_data, params.N_queries, runWithSketchedData));
    case 5:
        return executeLSH(params, setupLshLaunchDTO<unsigned short>(keysImplementation, bucketKeyBits, tables, params.N_data, params.N_queries, runWithSketchedData));
    case 7:
        return executeLSH(params, setupLshLaunchDTO<unsigned short>(keysImplementation, bucketKeyBits, tables, params.N_data, params.N_queries, runWithSketchedData));
    case 6:
    default:
        printf("Invalid implementation selected for LSH. \n");
        return Result();
    }
}
// Pick the sketch element type for the chosen sketching implementation and
// run the LSH pipeline. Returns a value-initialized Result on invalid input.
// Fix: the default path ended with a bare `return;` in a Result-returning
// function (ill-formed C++); it now returns a defined value.
Result LSH(int implementation, int keysImplementation, int k, int d, int N_query, int N_data, float* data, float* queries, int sketchDim, int distanceFunc, int bucketKeyBits, int tables, bool runWithSketchedData) {
    switch (implementation)
    {
    case 2:
        return LshPipeline(setupLaunchDTO<float>(implementation, distanceFunc, k, d, sketchDim, N_query, N_data, data, queries), keysImplementation, bucketKeyBits, tables, runWithSketchedData);
    case 3:
        return LshPipeline(setupLaunchDTO<unsigned int>(implementation, distanceFunc, k, d, sketchDim, N_query, N_data, data, queries), keysImplementation, bucketKeyBits, tables, runWithSketchedData);
    case 4:
        return LshPipeline(setupLaunchDTO<unsigned char>(implementation, distanceFunc, k, d, sketchDim, N_query, N_data, data, queries), keysImplementation, bucketKeyBits, tables, runWithSketchedData);
    case 5:
        return LshPipeline(setupLaunchDTO<unsigned int>(implementation, distanceFunc, k, d, sketchDim, N_query, N_data, data, queries), keysImplementation, bucketKeyBits, tables, runWithSketchedData);
    case 6:
        return LshPipeline(setupLaunchDTO<float>(implementation, distanceFunc, k, d, sketchDim, N_query, N_data, data, queries), keysImplementation, bucketKeyBits, tables, runWithSketchedData);
    default:
        printf("Invalid implementation selected for LSH. \n");
        return Result();
    }
}
int main(int argc, char **argv)
{
    // 15 positional arguments are required; bail out early instead of
    // reading past the end of argv (previously an out-of-bounds read).
    if (argc < 16) {
        printf("Expected 15 arguments: data queries truth runValidation writeRes k "
               "implementation sketchDim distanceFunc framework bucketKeyBits tables "
               "keysImplementation runWithSketchedData resultFile\n");
        return -1;
    }
    //In arguments.
    char* filepath_data = argv[1];
    char* filepath_queries = argv[2];
    char* filepath_truth = argv[3];
    int shouldRunValidation = atoi(argv[4]);
    int writeRes = atoi(argv[5]); //1 for yes, 0 for no.
    char* _k = argv[6];
    int implementation = atoi(argv[7]);
    int reportK = atoi(_k);
    int k = calculateK(reportK);
    int sketchDim = atoi(argv[8]);
    int distanceFunc = atoi(argv[9]);
    int framework = atoi(argv[10]);
    int bucketKeyBits = atoi(argv[11]);
    int tables = atoi(argv[12]);
    int keysImplementation = atoi(argv[13]);
    bool runWithSketchedData = (bool)atoi(argv[14]);
    char* result_file_path = argv[15];

    int N_data = 0;
    int N_query = 0;
    int d = 0;
    float *queries;
    float *data;

    // Parse the input files; the parsers set N_query/N_data/d.
    clock_t before = clock();
    queries = parseFile(filepath_queries, N_query, d);
    data = parseFile(filepath_data, N_data, d);
    clock_t time_lapsed = clock() - before;
    printf("Time to read data files: %d \n", (time_lapsed * 1000 / CLOCKS_PER_SEC));
    printf("Done parsing files. \n");
    printf("N_Query = %d \n", N_query);
    printf("N_Data = %d \n", N_data);
    printf("k is set to: %d\n", k);
    printf("Write res is set to %s \n", writeRes ? "True" : "False");
    printf("Validation is set to %s \n", shouldRunValidation ? "True" : "False");
    printf("Truth file is %s \n", filepath_truth);
    // Guard the name lookup: an invalid id previously indexed out of bounds.
    printf("Implementation selected = %s\n",
           (implementation >= 1 && implementation <= 6)
               ? implementations[implementation - 1] : "Unknown");

    //DTO's purely used for result writing.
    LaunchDTO<int> defaultDTO = setupLaunchDTO<int>(implementation, distanceFunc, k, d, sketchDim, N_query, N_data, data, queries);
    LshLaunchDTO<int> defaultLSHDTO = setupLshLaunchDTO<int>(keysImplementation, bucketKeyBits, tables, N_data, N_query, runWithSketchedData);

    // framework == 0 runs a plain linear scan, otherwise the LSH pipeline.
    Result res;
    if (framework == 0) {
        res = linearScans(implementation, k, d, N_query, N_data, data, queries, sketchDim, distanceFunc);
    }
    else {
        res = LSH(implementation, keysImplementation, k, d, N_query, N_data, data, queries, sketchDim, distanceFunc, bucketKeyBits, tables, runWithSketchedData);
    }

    float* container = (float*)malloc(2 * sizeof(float)); //Dummy to avoid LINK ERR on .cu to .cpp files
    // Default recall/avgDistance to zero so skipping validation does not
    // publish uninitialized memory in the performance results.
    container[0] = 0.0f;
    container[1] = 0.0f;
    if (shouldRunValidation) {
        printf("Running Validation: \n");
        //runValidationFromLargeFile(filepath_truth, container, res.results, N_query, k, reportK);
        runValidation(filepath_truth, container, res.results, N_query, k, reportK);
    }
    res.recall = container[0];
    res.avgDistance = container[1];
    if (writeRes) {
        printf("Writing results: \n");
        writeResult(res.results, k, N_query, reportK);
    }
    writePerformanceResults(res, defaultDTO, defaultLSHDTO, result_file_path);

    // Release host resources (`container` was previously leaked).
    free(container);
    free(queries);
    free(data);
    printf("Success. Program exiting. \n");
    free(res.results);
    return 0;
}
|
1098cca03fb918885d3b7919f44cf299a73c964e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
static __device__ float E = 2.718281828;
// Element-wise product: dst[i] = src1[i] * src2[i].
// There is no bounds guard, so the launch grid must cover exactly the
// array length.
// NOTE(review): `block_size` duplicates blockDim.x and is presumably equal
// to it — confirm at the launch site.
__global__ void multiplyElementKernel(float *src1, float *src2, float *dst, int block_size)
{
    // Flat global index of the element this thread handles.
    int di = blockIdx.x * block_size + threadIdx.x;
    dst[di] = src1[di] * src2[di];
} | 1098cca03fb918885d3b7919f44cf299a73c964e.cu | #include "includes.h"
static __device__ float E = 2.718281828;
// Element-wise product: dst[i] = src1[i] * src2[i].
// There is no bounds guard, so the launch grid must cover exactly the
// array length.
// NOTE(review): `block_size` duplicates blockDim.x and is presumably equal
// to it — confirm at the launch site.
__global__ void multiplyElementKernel(float *src1, float *src2, float *dst, int block_size)
{
    // Flat global index of the element this thread handles.
    int di = blockIdx.x * block_size + threadIdx.x;
    dst[di] = src1[di] * src2[di];
} |
c9fc9c80118ddf15e6a412a175ba83a1f3127654.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zgecsr5mv.cu, normal z -> c, Mon Jun 25 18:24:25 2018
@author Weifeng Liu
*/
// CSR5 SpMV kernel
// see paper by W. Liu and B. Vinter. (2015).
// "CSR5: An Efficient Storage Format for Cross-Platform
// Sparse Matrix-Vector Multiplication".
// 29th ACM International Conference on Supercomputing (ICS15). pp. 339-350.
#include "magmasparse_internal.h"
#include "atomicopsmagmaFloatComplex.h"
#include <hip/hip_runtime.h> // for TORCH_HIP_VERSION
#define MAGMA_CSR5_THREAD_GROUP 128
#define MAGMA_CSR5_THREAD_BUNCH 32
#if (defined( TORCH_HIP_VERSION ) && ( TORCH_HIP_VERSION >= 8000 ))
// Tree reduction over 32 shared-memory values: after the call s_sum[0]
// holds the sum of s_sum[0..31]. local_id is the lane id (0..31) within
// the warp.
// NOTE(review): relies on implicit warp-lockstep execution (no __syncwarp,
// s_sum not volatile) — valid on pre-Volta GPUs; confirm on architectures
// with independent thread scheduling.
__inline__ __device__ void
sum_32(
    magmaFloatComplex *s_sum,
    const int local_id)
{
    if (local_id < 16) s_sum[local_id] += s_sum[local_id + 16];
    if (local_id < 8) s_sum[local_id] += s_sum[local_id + 8];
    if (local_id < 4) s_sum[local_id] += s_sum[local_id + 4];
    if (local_id < 2) s_sum[local_id] += s_sum[local_id + 2];
    if (local_id < 1) s_sum[local_id] += s_sum[local_id + 1];
}
// In-place exclusive prefix sum (Blelloch up-sweep/down-sweep) over the 32
// shared-memory values s_scan[0..31]; local_id is the lane id (0..31).
// Only the lower lanes do work at each tree level.
// NOTE(review): warp-synchronous (no __syncwarp between levels) — valid on
// pre-Volta GPUs; confirm on newer architectures.
__inline__ __device__ void
scan_32(
    magmaFloatComplex *s_scan,
    const int local_id)
{
    int ai, bi;
    const int baseai = 2 * local_id + 1;
    const int basebi = baseai + 1;
    magmaFloatComplex temp;

    // up-sweep: build partial sums at power-of-two strides
    if (local_id < 16) { ai = baseai - 1; bi = basebi - 1;
        s_scan[bi] += s_scan[ai]; }
    if (local_id < 8) { ai = 2 * baseai - 1; bi = 2 * basebi - 1;
        s_scan[bi] += s_scan[ai]; }
    if (local_id < 4) { ai = 4 * baseai - 1; bi = 4 * basebi - 1;
        s_scan[bi] += s_scan[ai]; }
    if (local_id < 2) { ai = 8 * baseai - 1; bi = 8 * basebi - 1;
        s_scan[bi] += s_scan[ai]; }
    // seed the down-sweep: the right subtree root takes the left-half total
    if (local_id == 0) { s_scan[31] = s_scan[15]; s_scan[15] = MAGMA_C_ZERO; }
    // down-sweep: distribute prefixes back down the tree
    if (local_id < 2) { ai = 8 * baseai - 1; bi = 8 * basebi - 1;
        temp = s_scan[ai]; s_scan[ai] = s_scan[bi];
        s_scan[bi] += temp; }
    if (local_id < 4) { ai = 4 * baseai - 1; bi = 4 * basebi - 1;
        temp = s_scan[ai]; s_scan[ai] = s_scan[bi];
        s_scan[bi] += temp; }
    if (local_id < 8) { ai = 2 * baseai - 1; bi = 2 * basebi - 1;
        temp = s_scan[ai]; s_scan[ai] = s_scan[bi];
        s_scan[bi] += temp; }
    if (local_id < 16) { ai = baseai - 1; bi = basebi - 1;
        temp = s_scan[ai]; s_scan[ai] = s_scan[bi];
        s_scan[bi] += temp; }
}
// One nonzero's contribution to the SpMV: returns
// alpha * A_value[candidate_index] * x[column[candidate_index]]
// for the nonzero at candidate_index within the current tile.
__inline__ __device__ magmaFloatComplex
candidate(
    magmaFloatComplex *d_value_tile,
    magmaFloatComplex *d_x,
    const magma_index_t *d_column_index_tile,
    const magma_index_t candidate_index,
    const magmaFloatComplex alpha)
{
    magmaFloatComplex x = MAGMA_C_ZERO;
    // __ldg routes the irregular gather of x through the read-only data
    // cache on SM35+.
#if __CUDA_ARCH__ >= 350
    x = __ldg(&d_x[d_column_index_tile[candidate_index]]);
#else
    x = d_x[d_column_index_tile[candidate_index]];
#endif
    return d_value_tile[candidate_index] * x * alpha;
}
//template<typename vT>
//__forceinline__ __device__
//vT segmented_sum_shfl(vT tmp_sum,
// const int scansum_offset,
// const int lane_id)
//{
// vT sum = __shfl_down(tmp_sum, 1);
// sum = lane_id == MAGMA_CSR5_OMEGA - 1 ? 0 : sum;
// // inclusive scan
// vT scan_sum = scan_32_shfl(sum); //scan_32_shfl<vT>(sum, lane_id);
// tmp_sum = __shfl_down(scan_sum, scansum_offset);
// tmp_sum = tmp_sum - scan_sum + sum;
//
// return tmp_sum;
//}
// Segmented sum across one warp using shared memory. Each lane contributes
// tmp_sum; the value returned to a lane is the total of its segment, where
// scansum_offset is the lane distance to the end of the segment. s_sum is
// a MAGMA_CSR5_OMEGA-entry shared scratch buffer for this warp.
__forceinline__ __device__ magmaFloatComplex
segmented_sum(
    magmaFloatComplex tmp_sum,
    magmaFloatComplex *s_sum,
    const magma_index_t scansum_offset,
    const magma_index_t lane_id)
{
    // shift every lane's value one slot left; the last lane becomes zero
    if (lane_id)
        s_sum[lane_id - 1] = tmp_sum;
    s_sum[lane_id] = lane_id == MAGMA_CSR5_OMEGA - 1
                     ? MAGMA_C_ZERO : s_sum[lane_id];
    magmaFloatComplex sum = tmp_sum = s_sum[lane_id];
    scan_32(s_sum, lane_id); // exclusive scan
    s_sum[lane_id] += tmp_sum; // inclusive scan (exclusive scan+original val)
    // difference of inclusive prefixes at the segment boundary gives the
    // per-segment total
    tmp_sum = s_sum[lane_id + scansum_offset];
    tmp_sum = tmp_sum - s_sum[lane_id] + sum;
    return tmp_sum;
}
// Fast track for a tile whose nonzeros all belong to a single matrix row:
// no segmentation is needed, so each lane accumulates its c_sigma strided
// entries and the warp-wide total goes to the tile's calibrator slot (it
// is added into y later by the calibrate kernel).
template<int c_sigma>
__inline__ __device__ void
tile_fast_track(
    magmaFloatComplex *d_value_tile,
    magmaFloatComplex *d_x,
    const magma_index_t *d_column_index_tile,
    magmaFloatComplex *d_calibrator,
//#if __CUDA_ARCH__ < 300
    magmaFloatComplex *s_sum,  // 32-entry shared scratch for the reduction
//#endif
    const int lane_id,
    const magma_index_t par_id,
    const magmaFloatComplex alpha)
{
    magmaFloatComplex sum = MAGMA_C_ZERO;

    // tile storage is column-major: a lane walks its column at stride OMEGA
    #pragma unroll
    for (int i = 0; i < c_sigma; i++)
    {
        sum += candidate(d_value_tile, d_x, d_column_index_tile,
                         i * MAGMA_CSR5_OMEGA + lane_id, alpha);
    }

//#if __CUDA_ARCH__ >= 300 // use shfl intrinsic
//    sum = sum_32_shfl<vT>(sum);
//    if (!lane_id)
//        d_calibrator[par_id] = sum;
//#else // use smem
    s_sum[lane_id] = sum;
    sum_32(s_sum, lane_id);
    if (!lane_id)
    {
        d_calibrator[par_id] = s_sum[0];
    }
//#endif
}
// Normal track for a tile spanning several matrix rows. Decodes the tile's
// bit-packed descriptor (y_offset | scansum_offset | bit-flags marking row
// starts), performs a per-lane segmented sum over the lane's c_sigma
// entries, combines partial segments across lanes with segmented_sum, and
// writes completed rows directly to y. The tile's first/last partial row
// goes to the calibrator for the later calibration pass.
template<int c_sigma>
__inline__ __device__ void
tile_normal_track(
    const magma_index_t *d_column_index_tile,
    magmaFloatComplex *d_value_tile,
    magmaFloatComplex *d_x,
    const magma_uindex_t *d_tile_desc,
    const magma_index_t *d_tile_desc_offset_ptr,
    const magma_index_t *d_tile_desc_offset,
    magmaFloatComplex *d_calibrator,
    magmaFloatComplex *d_y,
//#if __CUDA_ARCH__ < 300
    magmaFloatComplex *s_sum,
    volatile int *s_scan,
//#endif
    const magma_index_t par_id,
    const int lane_id,
    const int bit_y_offset,
    const int bit_scansum_offset,
    const bool empty_rows,
    const magmaFloatComplex alpha)
{
    int start = 0;
    int stop = 0;
    bool local_bit;
    magmaFloatComplex sum = MAGMA_C_ZERO;

    // with empty rows, y indices are looked up through an indirection table
    magma_index_t offset_pointer = empty_rows ?
                                   d_tile_desc_offset_ptr[par_id] : 0;

    // unpack this lane's descriptor word:
    //   [bit_y_offset bits | bit_scansum_offset bits | bit-flag packet]
    magma_uindex_t descriptor = d_tile_desc[lane_id];
    magma_index_t y_offset = descriptor >> (32 - bit_y_offset);
    const int scansum_offset = (descriptor << bit_y_offset)
                               >> (32 - bit_scansum_offset);
    const int bit_bitflag = 32 - bit_y_offset - bit_scansum_offset;

    bool direct = false;

    magmaFloatComplex first_sum, last_sum;

    // step 1. thread-level seg sum
    int ly = 0;  // index of the descriptor packet currently being consumed

    // extract the first bit-flag packet
    descriptor = descriptor << (bit_y_offset + bit_scansum_offset);
    // lane 0's first entry always starts a new row
    descriptor = lane_id ? descriptor : descriptor | 0x80000000;

    local_bit = (descriptor >> 31) & 0x1;
    start = !local_bit;
    direct = local_bit & (bool)lane_id;

    sum = candidate(d_value_tile, d_x,
                    d_column_index_tile, lane_id, alpha);

    #pragma unroll
    for (int i = 1; i < c_sigma; i++)
    {
        // advance to the next descriptor packet when the current one is used up
        int norm_i = i - bit_bitflag;

        if (!(ly || norm_i) || (ly && !(31 & norm_i)))
        {
            ly++;
            descriptor = d_tile_desc[ly * MAGMA_CSR5_OMEGA + lane_id];
        }
        norm_i = !ly ? 31 & i : 31 & norm_i;
        norm_i = 31 - norm_i;

        local_bit = (descriptor >> norm_i) & 0x1;

        if (local_bit)
        {
            // a set bit marks a row start: flush the finished row
            if (direct)
                d_y[empty_rows ? d_tile_desc_offset[offset_pointer + y_offset]
                               : y_offset] += sum;
            else
                first_sum = sum;
        }

        y_offset += local_bit & direct;

        direct |= local_bit;
        sum = local_bit ? MAGMA_C_ZERO : sum;
        stop += local_bit;

        sum += candidate(d_value_tile, d_x, d_column_index_tile,
                         i * MAGMA_CSR5_OMEGA + lane_id, alpha);
    }

    first_sum = direct ? first_sum : sum;
    last_sum = sum;

    // step 2. segmented sum
    sum = start ? first_sum : MAGMA_C_ZERO;

//#if __CUDA_ARCH__ >= 300
//    sum = segmented_sum_shfl<vT>(sum, scansum_offset, lane_id);
//#else
    sum = segmented_sum(sum, s_sum, scansum_offset, lane_id);
//#endif

    // step 3-1. add s_sum to position stop
    last_sum += (start <= stop) ? sum : MAGMA_C_ZERO;

    // step 3-2. write sums to result array
    if (direct)
        d_y[empty_rows ? d_tile_desc_offset[offset_pointer + y_offset]
                       : y_offset] += last_sum;

    // the first/last value of the first thread goes to calibration
    if (!lane_id)
        d_calibrator[par_id] = direct ? first_sum : last_sum;
}
// Process one CSR5 tile with one warp. Reads the tile's row-pointer range
// from d_tile_ptr (top bit of the stop entry flags "tile has empty rows"):
// a tile fully inside one row takes the fast track (plain reduction into
// the calibrator); otherwise the normal segmented-sum track is taken and
// d_y is rebased to the tile's first complete row.
template<int c_sigma>
__inline__ __device__ void
spmv_tile(
    const magma_index_t *d_column_index_tile,
    magmaFloatComplex *d_value_tile,
    const magma_index_t *d_row_pointer,
    magmaFloatComplex *d_x,
    const magma_uindex_t *d_tile_ptr,
    const magma_uindex_t *d_tile_desc,
    const magma_index_t *d_tile_desc_offset_ptr,
    const magma_index_t *d_tile_desc_offset,
    magmaFloatComplex *d_calibrator,
    magmaFloatComplex *d_y,
    const magma_index_t par_id,
    const int lane_id,
    const int bunch_id,
    const int bit_y_offset,
    const int bit_scansum_offset,
    const magmaFloatComplex alpha)
{
//#if __CUDA_ARCH__ < 300
    // per-warp scratch for reductions/scans, partitioned by bunch_id
    __shared__ magmaFloatComplex
        s_sum[MAGMA_CSR5_THREAD_GROUP];
    volatile __shared__ int
        s_scan[(MAGMA_CSR5_OMEGA + 1) *
               (MAGMA_CSR5_THREAD_GROUP / MAGMA_CSR5_OMEGA)];
//#endif

    magma_uindex_t row_start, row_stop;

//#if __CUDA_ARCH__ >= 350
//    if (lane_id < 2)
//        row_start = __ldg(&d_tile_ptr[par_id + lane_id]);
//    row_stop = __shfl(row_start, 1);
//    row_start = __shfl(row_start, 0);
//    row_stop &= 0x7FFFFFFF;
//#else
    // stage the tile-pointer window for all warps of this block
    volatile __shared__ magma_uindex_t
        s_row_start_stop[MAGMA_CSR5_THREAD_GROUP / MAGMA_CSR5_OMEGA + 1];

    if (threadIdx.x < MAGMA_CSR5_THREAD_GROUP / MAGMA_CSR5_OMEGA + 1)
    {
        s_row_start_stop[threadIdx.x] = d_tile_ptr[par_id + threadIdx.x];
    }
    __syncthreads();

    row_start = s_row_start_stop[bunch_id];
    // mask off the empty-rows flag stored in the top bit
    row_stop = s_row_start_stop[bunch_id + 1] & 0x7FFFFFFF;
//#endif

    if (row_start == row_stop) // fast track through reduction
    {
        tile_fast_track<c_sigma>
            (d_value_tile, d_x, d_column_index_tile, d_calibrator,
//#if __CUDA_ARCH__ < 300
             &s_sum[bunch_id * MAGMA_CSR5_OMEGA],
//#endif
             lane_id, par_id, alpha);
    }
    else
    {
        const bool empty_rows = (row_start >> 31) & 0x1;
        row_start &= 0x7FFFFFFF;

        // rebase y so in-tile y_offsets index from the first complete row
        d_y = &d_y[row_start+1];

        tile_normal_track<c_sigma>
            (d_column_index_tile, d_value_tile, d_x,
             d_tile_desc, d_tile_desc_offset_ptr,
             d_tile_desc_offset, d_calibrator, d_y,
//#if __CUDA_ARCH__ < 300
             &s_sum[bunch_id * MAGMA_CSR5_OMEGA],
             &s_scan[bunch_id * (MAGMA_CSR5_OMEGA + 1)],
//#endif
             par_id, lane_id,
             bit_y_offset, bit_scansum_offset, empty_rows, alpha);
    }
}
// CSR5 SpMV main kernel: one warp (MAGMA_CSR5_OMEGA lanes) processes one
// OMEGA x c_sigma tile of nonzeros. p is the number of tiles; the last
// (partial) tile is skipped here and handled by the tail-tile kernel.
template<int c_sigma>
__global__ void
spmv_csr5_compute_kernel(
    const magma_index_t *d_column_index,
    magmaFloatComplex *d_value,
    const magma_index_t *d_row_pointer,
    magmaFloatComplex *d_x,
    const magma_uindex_t *d_tile_ptr,
    const magma_uindex_t *d_tile_desc,
    const magma_index_t *d_tile_desc_offset_ptr,
    const magma_index_t *d_tile_desc_offset,
    magmaFloatComplex *d_calibrator,
    magmaFloatComplex *d_y,
    const magma_index_t p,
    const int num_packet,
    const int bit_y_offset,
    const int bit_scansum_offset,
    const magmaFloatComplex alpha)
{
    // warp lane id
    const int lane_id = 31 & threadIdx.x; //threadIdx.x % CSR5_OMEGA;
    // warp global id == par_id
    const magma_index_t par_id = (blockIdx.x * blockDim.x + threadIdx.x)
                                 / MAGMA_CSR5_OMEGA;
    const int bunch_id = threadIdx.x / MAGMA_CSR5_OMEGA;

    // the tail tile is processed separately by spmv_csr5_tail_tile_kernel
    if (par_id >= p - 1)
        return;

    spmv_tile<c_sigma>
        (&d_column_index[par_id * MAGMA_CSR5_OMEGA * c_sigma],
         &d_value[par_id * MAGMA_CSR5_OMEGA * c_sigma],
         d_row_pointer, d_x, d_tile_ptr,
         &d_tile_desc[par_id * MAGMA_CSR5_OMEGA * num_packet],
         d_tile_desc_offset_ptr, d_tile_desc_offset,
         d_calibrator, d_y,
         par_id, lane_id, bunch_id, bit_y_offset, bit_scansum_offset, alpha);
}
// CSR5 calibration kernel: folds the per-tile calibrator values (partial
// first/last row sums produced by the compute kernel) into the correct
// rows of y. One thread loads one tile's calibrator entry; runs of tiles
// whose partial sums target the same row are combined by the first thread
// of each run.
__global__ void
spmv_csr5_calibrate_kernel(
    const magma_uindex_t *d_tile_ptr,
    const magmaFloatComplex *d_calibrator,
    magmaFloatComplex *d_y,
    const magma_index_t p)
{
    //const int lane_id = threadIdx.x % MAGMA_CSR5_THREAD_BUNCH;
    //const int bunch_id = threadIdx.x / MAGMA_CSR5_THREAD_BUNCH;
    const int local_id = threadIdx.x;
    const magma_index_t global_id = blockIdx.x * blockDim.x + threadIdx.x;

    magmaFloatComplex sum;

    volatile __shared__
        magma_index_t s_tile_ptr[MAGMA_CSR5_THREAD_GROUP+1];
    __shared__ magmaFloatComplex s_calibrator[MAGMA_CSR5_THREAD_GROUP];
    //volatile __shared__
    //    magmaFloatComplex s_sum[MAGMA_CSR5_THREAD_GROUP
    //                            / MAGMA_CSR5_THREAD_BUNCH];

    // stage tile row pointers (empty-rows flag masked off) and calibrator
    // values; threads past p-1 store a -1 sentinel / zero
    s_tile_ptr[local_id] = global_id < p-1 ?
                (magma_index_t)(d_tile_ptr[global_id] & 0x7FFFFFFF) : -1;
    s_calibrator[local_id] = sum = global_id < p-1 ?
                d_calibrator[global_id] : MAGMA_C_ZERO;
    __syncthreads();

    // do a fast track if all s_tile_ptr are the same
    if (s_tile_ptr[0] == s_tile_ptr[MAGMA_CSR5_THREAD_GROUP - 1])
    {
        //sum = sum_32_shfl<vT>(sum);
        //if (!lane_id)
        //    s_sum[bunch_id] = sum;
        //__syncthreads();
        //if (!bunch_id)
        //{
        //    sum = lane_id < (MAGMA_CSR5_THREAD_GROUP
        //                     / MAGMA_CSR5_THREAD_BUNCH) ? s_sum[lane_id] : 0;
        //    sum = sum_32_shfl<vT>(sum);
        //}
        // block-wide tree reduction (group size is 128 threads)
        // NOTE(review): the sub-warp steps below rely on implicit warp
        // synchrony and s_calibrator is not volatile — confirm on Volta+.
        if (local_id < 64) s_calibrator[local_id] += s_calibrator[local_id+64];
        __syncthreads();
        if (local_id < 32) s_calibrator[local_id] += s_calibrator[local_id+32];
        if (local_id < 16) s_calibrator[local_id] += s_calibrator[local_id+16];
        if (local_id < 8) s_calibrator[local_id] += s_calibrator[local_id+8];
        if (local_id < 4) s_calibrator[local_id] += s_calibrator[local_id+4];
        if (local_id < 2) s_calibrator[local_id] += s_calibrator[local_id+2];
        if (local_id < 1) s_calibrator[local_id] += s_calibrator[local_id+1];

        // the shared row may also be updated by other blocks -> atomic
        if (!local_id)
        {
            atomicAddmagmaFloatComplex(&d_y[s_tile_ptr[0]], s_calibrator[0]);
        }
        return;
    }

    int local_par_id = local_id;
    magma_index_t row_start_current, row_start_target, row_start_previous;
    sum = MAGMA_C_ZERO;

    // use (p - 1), due to the tail tile is dealt with CSR-vector method
    if (global_id < p - 1)
    {
        // only the first thread of each run of equal row pointers sums the
        // whole run and commits it to y
        row_start_previous = local_id ? s_tile_ptr[local_id-1] : -1;
        row_start_current = s_tile_ptr[local_id];

        if (row_start_previous != row_start_current)
        {
            row_start_target = row_start_current;

            while (row_start_target == row_start_current
                   && local_par_id < blockDim.x)
            {
                sum += s_calibrator[local_par_id];
                local_par_id++;
                row_start_current = s_tile_ptr[local_par_id];
            }

            // rows at the block boundary may be touched by neighboring
            // blocks as well, so those updates must be atomic
            if (row_start_target == s_tile_ptr[0]
                || row_start_target == s_tile_ptr[MAGMA_CSR5_THREAD_GROUP-1])
            {
                atomicAddmagmaFloatComplex(&d_y[row_start_target], sum);
            }
            else
                d_y[row_start_target] += sum;
        }
    }
}
// CSR-vector style kernel for the tail (partial) tile: one block of
// MAGMA_CSR5_OMEGA threads per row starting at tail_tile_start. The first
// tail row starts where the last full tile ends ((p-1)*OMEGA*sigma), not
// at its CSR row pointer, because part of it was already consumed by the
// main compute kernel.
__global__ void
spmv_csr5_tail_tile_kernel(
    const magma_index_t *d_row_pointer,
    const magma_index_t *d_column_index,
    magmaFloatComplex *d_value,
    magmaFloatComplex *d_x,
    magmaFloatComplex *d_y,
    const magma_index_t tail_tile_start,
    const magma_index_t p,
    const int sigma,
    const magmaFloatComplex alpha)
{
    const int local_id = threadIdx.x;

    const magma_index_t row_id    = tail_tile_start + blockIdx.x;
    const magma_index_t row_start = !blockIdx.x ? (p - 1)
                                                  * MAGMA_CSR5_OMEGA * sigma
                                                : d_row_pointer[row_id];
    const magma_index_t row_stop  = d_row_pointer[row_id + 1];

    // each thread strides over the row's nonzeros
    magmaFloatComplex sum = MAGMA_C_ZERO;

    for (magma_index_t idx = local_id + row_start;
         idx < row_stop; idx += MAGMA_CSR5_OMEGA)
    {
        sum += candidate(d_value, d_x, d_column_index, idx, alpha);
    }

//#if __CUDA_ARCH__ >= 300 // use shfl intrinsic
//    sum = sum_32_shfl<vT>(sum);
//#else
    __shared__ magmaFloatComplex s_sum[MAGMA_CSR5_OMEGA];
    s_sum[local_id] = sum;
    sum_32(s_sum, local_id);
//#endif

    if (!local_id)
        d_y[row_id] += s_sum[0]; //= !blockIdx.x ? d_y[row_id] + sum : sum;
}
// Scales the output vector: y = beta * y, one thread per row.
// A zero beta writes an explicit zero instead of multiplying, so the
// previous contents of dy never feed into the result.
__global__ void
cgecsr5mv_kernel_update_y(int num_rows,
                magmaFloatComplex beta,
                magmaFloatComplex * dy)
{
    const magma_index_t row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= num_rows)
        return;

    dy[row] = (beta == MAGMA_C_ZERO) ? MAGMA_C_ZERO
                                     : dy[row] * beta;
}
#endif
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
The input format is CSR5 (val (tile-wise column-major),
row_pointer,
col (tile-wise column-major),
tile_pointer,
tile_desc).
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
p magma_int_t
number of tiles in A
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
sigma magma_int_t
sigma in A in CSR5
@param[in]
bit_y_offset magma_int_t
bit_y_offset in A in CSR5
@param[in]
bit_scansum_offset magma_int_t
bit_scansum_offset in A in CSR5
@param[in]
num_packet magma_int_t
num_packet in A in CSR5
@param[in]
dtile_ptr magmaUIndex_ptr
tilepointer of A in CSR5
@param[in]
dtile_desc magmaUIndex_ptr
tiledescriptor of A in CSR5
@param[in]
dtile_desc_offset_ptr magmaIndex_ptr
tiledescriptor_offsetpointer of A in CSR5
@param[in]
dtile_desc_offset magmaIndex_ptr
                tiledescriptor_offset of A in CSR5
@param[in]
dcalibrator magmaFloatComplex_ptr
calibrator of A in CSR5
@param[in]
tail_tile_start magma_int_t
start of the last tile in A
    @param[in]
    dval        magmaFloatComplex_ptr
                array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
    @param[in,out]
    dy          magmaFloatComplex_ptr
                input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
// One switch case that instantiates the sigma-templated CSR5 compute
// kernel for the compile-time sigma value S. sigma is only known at
// runtime, so every supported value needs its own instantiation.
#define MAGMA_CGECSR5MV_CASE( S )                                              \
    case S:                                                                    \
        hipLaunchKernelGGL(( spmv_csr5_compute_kernel<S>)                      \
            , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,   \
            dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,                 \
            dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p,      \
            num_packet, bit_y_offset, bit_scansum_offset, alpha);              \
        break;

// Computes y = alpha * A * x + beta * y with A stored in CSR5.
// Four phases: (1) scale y by beta, (2) SpMV over the full CSR5 tiles,
// (3) add the per-tile calibrator partial sums into y, (4) CSR-vector
// SpMV over the incomplete tail tile. Returns MAGMA_ERR_NOT_SUPPORTED
// on devices below arch 600 or when built without a new-enough toolkit.
extern "C" magma_int_t
magma_cgecsr5mv(
    magma_trans_t           transA,
    magma_int_t             m,
    magma_int_t             n,
    magma_int_t             p,
    magmaFloatComplex       alpha,
    magma_int_t             sigma,
    magma_int_t             bit_y_offset,
    magma_int_t             bit_scansum_offset,
    magma_int_t             num_packet,
    magmaUIndex_ptr         dtile_ptr,
    magmaUIndex_ptr         dtile_desc,
    magmaIndex_ptr          dtile_desc_offset_ptr,
    magmaIndex_ptr          dtile_desc_offset,
    magmaFloatComplex_ptr   dcalibrator,
    magma_int_t             tail_tile_start,
    magmaFloatComplex_ptr   dval,
    magmaIndex_ptr          drowptr,
    magmaIndex_ptr          dcolind,
    magmaFloatComplex_ptr   dx,
    magmaFloatComplex       beta,
    magmaFloatComplex_ptr   dy,
    magma_queue_t           queue )
{
    int info = MAGMA_ERR_NOT_SUPPORTED;
#if (defined( TORCH_HIP_VERSION ) && ( TORCH_HIP_VERSION >= 8000 ))
    magma_int_t arch = magma_getdevice_arch();
    if ( arch >= 600 ) {
        // phase 1. update y: y = beta * y (one thread per row)
        magma_int_t num_threads = MAGMA_CSR5_THREAD_GROUP;
        magma_int_t num_blocks  = magma_ceildiv( m, num_threads );

        hipLaunchKernelGGL(( cgecsr5mv_kernel_update_y)
            , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
            m, beta, dy);

        // phase 2. spmv: y += alpha * A * x over the p-1 full tiles;
        // each tile is processed by a MAGMA_CSR5_OMEGA-wide thread slice.
        // The tail tile is handled separately in phase 4.
        num_threads = MAGMA_CSR5_THREAD_GROUP;
        num_blocks  = magma_ceildiv( p-1, num_threads / MAGMA_CSR5_OMEGA );

        switch (sigma)
        {
            MAGMA_CGECSR5MV_CASE(  4 )
            MAGMA_CGECSR5MV_CASE(  5 )
            MAGMA_CGECSR5MV_CASE(  6 )
            MAGMA_CGECSR5MV_CASE(  7 )
            MAGMA_CGECSR5MV_CASE(  8 )
            MAGMA_CGECSR5MV_CASE(  9 )
            MAGMA_CGECSR5MV_CASE( 10 )
            MAGMA_CGECSR5MV_CASE( 11 )
            MAGMA_CGECSR5MV_CASE( 12 )
            MAGMA_CGECSR5MV_CASE( 13 )
            MAGMA_CGECSR5MV_CASE( 14 )
            MAGMA_CGECSR5MV_CASE( 15 )
            MAGMA_CGECSR5MV_CASE( 16 )
            MAGMA_CGECSR5MV_CASE( 17 )
            MAGMA_CGECSR5MV_CASE( 18 )
            MAGMA_CGECSR5MV_CASE( 19 )
            MAGMA_CGECSR5MV_CASE( 20 )
            MAGMA_CGECSR5MV_CASE( 21 )
            MAGMA_CGECSR5MV_CASE( 22 )
            MAGMA_CGECSR5MV_CASE( 23 )
            MAGMA_CGECSR5MV_CASE( 24 )
            MAGMA_CGECSR5MV_CASE( 25 )
            MAGMA_CGECSR5MV_CASE( 26 )
            MAGMA_CGECSR5MV_CASE( 27 )
            MAGMA_CGECSR5MV_CASE( 28 )
            MAGMA_CGECSR5MV_CASE( 29 )
            MAGMA_CGECSR5MV_CASE( 30 )
            MAGMA_CGECSR5MV_CASE( 31 )
            MAGMA_CGECSR5MV_CASE( 32 )
            default:
                // sigma outside [4,32] has no kernel instantiation; keep
                // the original behavior (no compute kernel launched,
                // calibration and tail handling below still run).
                break;
        }

        // phase 3. calibration: fold the per-tile partial sums stored in
        // dcalibrator into y, one thread per tile.
        num_threads = MAGMA_CSR5_THREAD_GROUP;
        // integer ceiling division instead of float-cast ceil(), consistent
        // with the other launch-size computations and exact for large p
        num_blocks  = magma_ceildiv( p-1, num_threads );

        hipLaunchKernelGGL(( spmv_csr5_calibrate_kernel)
            , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
            dtile_ptr, dcalibrator, dy, p);

        // phase 4. tail tile: CSR-vector method, one block per tail row
        num_threads = MAGMA_CSR5_OMEGA;
        num_blocks  = m - tail_tile_start;

        hipLaunchKernelGGL(( spmv_csr5_tail_tile_kernel)
            , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() ,
            drowptr, dcolind, dval, dx, dy,
            tail_tile_start, p, sigma, alpha);

        info = MAGMA_SUCCESS;
    }
    else {
        info = MAGMA_ERR_NOT_SUPPORTED;
    }
#endif
    return info;
}

#undef MAGMA_CGECSR5MV_CASE
| c9fc9c80118ddf15e6a412a175ba83a1f3127654.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zgecsr5mv.cu, normal z -> c, Mon Jun 25 18:24:25 2018
@author Weifeng Liu
*/
// CSR5 SpMV kernel
// see paper by W. Liu and B. Vinter. (2015).
// "CSR5: An Efficient Storage Format for Cross-Platform
// Sparse Matrix-Vector Multiplication".
// 29th ACM International Conference on Supercomputing (ICS15). pp. 339-350.
#include "magmasparse_internal.h"
#include "atomicopsmagmaFloatComplex.h"
#include <cuda.h> // for CUDA_VERSION
#define MAGMA_CSR5_THREAD_GROUP 128
#define MAGMA_CSR5_THREAD_BUNCH 32
#if (defined( CUDA_VERSION ) && ( CUDA_VERSION >= 8000 ))
__inline__ __device__ void
sum_32(
magmaFloatComplex *s_sum,
const int local_id)
{
if (local_id < 16) s_sum[local_id] += s_sum[local_id + 16];
if (local_id < 8) s_sum[local_id] += s_sum[local_id + 8];
if (local_id < 4) s_sum[local_id] += s_sum[local_id + 4];
if (local_id < 2) s_sum[local_id] += s_sum[local_id + 2];
if (local_id < 1) s_sum[local_id] += s_sum[local_id + 1];
}
__inline__ __device__ void
scan_32(
magmaFloatComplex *s_scan,
const int local_id)
{
int ai, bi;
const int baseai = 2 * local_id + 1;
const int basebi = baseai + 1;
magmaFloatComplex temp;
if (local_id < 16) { ai = baseai - 1; bi = basebi - 1;
s_scan[bi] += s_scan[ai]; }
if (local_id < 8) { ai = 2 * baseai - 1; bi = 2 * basebi - 1;
s_scan[bi] += s_scan[ai]; }
if (local_id < 4) { ai = 4 * baseai - 1; bi = 4 * basebi - 1;
s_scan[bi] += s_scan[ai]; }
if (local_id < 2) { ai = 8 * baseai - 1; bi = 8 * basebi - 1;
s_scan[bi] += s_scan[ai]; }
if (local_id == 0) { s_scan[31] = s_scan[15]; s_scan[15] = MAGMA_C_ZERO; }
if (local_id < 2) { ai = 8 * baseai - 1; bi = 8 * basebi - 1;
temp = s_scan[ai]; s_scan[ai] = s_scan[bi];
s_scan[bi] += temp; }
if (local_id < 4) { ai = 4 * baseai - 1; bi = 4 * basebi - 1;
temp = s_scan[ai]; s_scan[ai] = s_scan[bi];
s_scan[bi] += temp; }
if (local_id < 8) { ai = 2 * baseai - 1; bi = 2 * basebi - 1;
temp = s_scan[ai]; s_scan[ai] = s_scan[bi];
s_scan[bi] += temp; }
if (local_id < 16) { ai = baseai - 1; bi = basebi - 1;
temp = s_scan[ai]; s_scan[ai] = s_scan[bi];
s_scan[bi] += temp; }
}
__inline__ __device__ magmaFloatComplex
candidate(
magmaFloatComplex *d_value_tile,
magmaFloatComplex *d_x,
const magma_index_t *d_column_index_tile,
const magma_index_t candidate_index,
const magmaFloatComplex alpha)
{
magmaFloatComplex x = MAGMA_C_ZERO;
#if __CUDA_ARCH__ >= 350
x = __ldg(&d_x[d_column_index_tile[candidate_index]]);
#else
x = d_x[d_column_index_tile[candidate_index]];
#endif
return d_value_tile[candidate_index] * x * alpha;
}
//template<typename vT>
//__forceinline__ __device__
//vT segmented_sum_shfl(vT tmp_sum,
// const int scansum_offset,
// const int lane_id)
//{
// vT sum = __shfl_down(tmp_sum, 1);
// sum = lane_id == MAGMA_CSR5_OMEGA - 1 ? 0 : sum;
// // inclusive scan
// vT scan_sum = scan_32_shfl(sum); //scan_32_shfl<vT>(sum, lane_id);
// tmp_sum = __shfl_down(scan_sum, scansum_offset);
// tmp_sum = tmp_sum - scan_sum + sum;
//
// return tmp_sum;
//}
__forceinline__ __device__ magmaFloatComplex
segmented_sum(
magmaFloatComplex tmp_sum,
magmaFloatComplex *s_sum,
const magma_index_t scansum_offset,
const magma_index_t lane_id)
{
if (lane_id)
s_sum[lane_id - 1] = tmp_sum;
s_sum[lane_id] = lane_id == MAGMA_CSR5_OMEGA - 1
? MAGMA_C_ZERO : s_sum[lane_id];
magmaFloatComplex sum = tmp_sum = s_sum[lane_id];
scan_32(s_sum, lane_id); // exclusive scan
s_sum[lane_id] += tmp_sum; // inclusive scan (exclusive scan+original val)
tmp_sum = s_sum[lane_id + scansum_offset];
tmp_sum = tmp_sum - s_sum[lane_id] + sum;
return tmp_sum;
}
template<int c_sigma>
__inline__ __device__ void
tile_fast_track(
magmaFloatComplex *d_value_tile,
magmaFloatComplex *d_x,
const magma_index_t *d_column_index_tile,
magmaFloatComplex *d_calibrator,
//#if __CUDA_ARCH__ < 300
magmaFloatComplex *s_sum,
//#endif
const int lane_id,
const magma_index_t par_id,
const magmaFloatComplex alpha)
{
magmaFloatComplex sum = MAGMA_C_ZERO;
#pragma unroll
for (int i = 0; i < c_sigma; i++)
{
sum += candidate(d_value_tile, d_x, d_column_index_tile,
i * MAGMA_CSR5_OMEGA + lane_id, alpha);
}
//#if __CUDA_ARCH__ >= 300 // use shfl intrinsic
// sum = sum_32_shfl<vT>(sum);
// if (!lane_id)
// d_calibrator[par_id] = sum;
//#else // use smem
s_sum[lane_id] = sum;
sum_32(s_sum, lane_id);
if (!lane_id)
{
d_calibrator[par_id] = s_sum[0];
}
//#endif
}
template<int c_sigma>
__inline__ __device__ void
tile_normal_track(
const magma_index_t *d_column_index_tile,
magmaFloatComplex *d_value_tile,
magmaFloatComplex *d_x,
const magma_uindex_t *d_tile_desc,
const magma_index_t *d_tile_desc_offset_ptr,
const magma_index_t *d_tile_desc_offset,
magmaFloatComplex *d_calibrator,
magmaFloatComplex *d_y,
//#if __CUDA_ARCH__ < 300
magmaFloatComplex *s_sum,
volatile int *s_scan,
//#endif
const magma_index_t par_id,
const int lane_id,
const int bit_y_offset,
const int bit_scansum_offset,
const bool empty_rows,
const magmaFloatComplex alpha)
{
int start = 0;
int stop = 0;
bool local_bit;
magmaFloatComplex sum = MAGMA_C_ZERO;
magma_index_t offset_pointer = empty_rows ?
d_tile_desc_offset_ptr[par_id] : 0;
magma_uindex_t descriptor = d_tile_desc[lane_id];
magma_index_t y_offset = descriptor >> (32 - bit_y_offset);
const int scansum_offset = (descriptor << bit_y_offset)
>> (32 - bit_scansum_offset);
const int bit_bitflag = 32 - bit_y_offset - bit_scansum_offset;
bool direct = false;
magmaFloatComplex first_sum, last_sum;
// step 1. thread-level seg sum
int ly = 0;
// extract the first bit-flag packet
descriptor = descriptor << (bit_y_offset + bit_scansum_offset);
descriptor = lane_id ? descriptor : descriptor | 0x80000000;
local_bit = (descriptor >> 31) & 0x1;
start = !local_bit;
direct = local_bit & (bool)lane_id;
sum = candidate(d_value_tile, d_x,
d_column_index_tile, lane_id, alpha);
#pragma unroll
for (int i = 1; i < c_sigma; i++)
{
int norm_i = i - bit_bitflag;
if (!(ly || norm_i) || (ly && !(31 & norm_i)))
{
ly++;
descriptor = d_tile_desc[ly * MAGMA_CSR5_OMEGA + lane_id];
}
norm_i = !ly ? 31 & i : 31 & norm_i;
norm_i = 31 - norm_i;
local_bit = (descriptor >> norm_i) & 0x1;
if (local_bit)
{
if (direct)
d_y[empty_rows ? d_tile_desc_offset[offset_pointer + y_offset]
: y_offset] += sum;
else
first_sum = sum;
}
y_offset += local_bit & direct;
direct |= local_bit;
sum = local_bit ? MAGMA_C_ZERO : sum;
stop += local_bit;
sum += candidate(d_value_tile, d_x, d_column_index_tile,
i * MAGMA_CSR5_OMEGA + lane_id, alpha);
}
first_sum = direct ? first_sum : sum;
last_sum = sum;
// step 2. segmented sum
sum = start ? first_sum : MAGMA_C_ZERO;
//#if __CUDA_ARCH__ >= 300
// sum = segmented_sum_shfl<vT>(sum, scansum_offset, lane_id);
//#else
sum = segmented_sum(sum, s_sum, scansum_offset, lane_id);
//#endif
// step 3-1. add s_sum to position stop
last_sum += (start <= stop) ? sum : MAGMA_C_ZERO;
// step 3-2. write sums to result array
if (direct)
d_y[empty_rows ? d_tile_desc_offset[offset_pointer + y_offset]
: y_offset] += last_sum;
// the first/last value of the first thread goes to calibration
if (!lane_id)
d_calibrator[par_id] = direct ? first_sum : last_sum;
}
template<int c_sigma>
__inline__ __device__ void
spmv_tile(
const magma_index_t *d_column_index_tile,
magmaFloatComplex *d_value_tile,
const magma_index_t *d_row_pointer,
magmaFloatComplex *d_x,
const magma_uindex_t *d_tile_ptr,
const magma_uindex_t *d_tile_desc,
const magma_index_t *d_tile_desc_offset_ptr,
const magma_index_t *d_tile_desc_offset,
magmaFloatComplex *d_calibrator,
magmaFloatComplex *d_y,
const magma_index_t par_id,
const int lane_id,
const int bunch_id,
const int bit_y_offset,
const int bit_scansum_offset,
const magmaFloatComplex alpha)
{
//#if __CUDA_ARCH__ < 300
__shared__ magmaFloatComplex
s_sum[MAGMA_CSR5_THREAD_GROUP];
volatile __shared__ int
s_scan[(MAGMA_CSR5_OMEGA + 1) *
(MAGMA_CSR5_THREAD_GROUP / MAGMA_CSR5_OMEGA)];
//#endif
magma_uindex_t row_start, row_stop;
//#if __CUDA_ARCH__ >= 350
// if (lane_id < 2)
// row_start = __ldg(&d_tile_ptr[par_id + lane_id]);
// row_stop = __shfl(row_start, 1);
// row_start = __shfl(row_start, 0);
// row_stop &= 0x7FFFFFFF;
//#else
volatile __shared__ magma_uindex_t
s_row_start_stop[MAGMA_CSR5_THREAD_GROUP / MAGMA_CSR5_OMEGA + 1];
if (threadIdx.x < MAGMA_CSR5_THREAD_GROUP / MAGMA_CSR5_OMEGA + 1)
{
s_row_start_stop[threadIdx.x] = d_tile_ptr[par_id + threadIdx.x];
}
__syncthreads();
row_start = s_row_start_stop[bunch_id];
row_stop = s_row_start_stop[bunch_id + 1] & 0x7FFFFFFF;
//#endif
if (row_start == row_stop) // fast track through reduction
{
tile_fast_track<c_sigma>
(d_value_tile, d_x, d_column_index_tile, d_calibrator,
//#if __CUDA_ARCH__ < 300
&s_sum[bunch_id * MAGMA_CSR5_OMEGA],
//#endif
lane_id, par_id, alpha);
}
else
{
const bool empty_rows = (row_start >> 31) & 0x1;
row_start &= 0x7FFFFFFF;
d_y = &d_y[row_start+1];
tile_normal_track<c_sigma>
(d_column_index_tile, d_value_tile, d_x,
d_tile_desc, d_tile_desc_offset_ptr,
d_tile_desc_offset, d_calibrator, d_y,
//#if __CUDA_ARCH__ < 300
&s_sum[bunch_id * MAGMA_CSR5_OMEGA],
&s_scan[bunch_id * (MAGMA_CSR5_OMEGA + 1)],
//#endif
par_id, lane_id,
bit_y_offset, bit_scansum_offset, empty_rows, alpha);
}
}
template<int c_sigma>
__global__ void
spmv_csr5_compute_kernel(
const magma_index_t *d_column_index,
magmaFloatComplex *d_value,
const magma_index_t *d_row_pointer,
magmaFloatComplex *d_x,
const magma_uindex_t *d_tile_ptr,
const magma_uindex_t *d_tile_desc,
const magma_index_t *d_tile_desc_offset_ptr,
const magma_index_t *d_tile_desc_offset,
magmaFloatComplex *d_calibrator,
magmaFloatComplex *d_y,
const magma_index_t p,
const int num_packet,
const int bit_y_offset,
const int bit_scansum_offset,
const magmaFloatComplex alpha)
{
// warp lane id
const int lane_id = 31 & threadIdx.x; //threadIdx.x % CSR5_OMEGA;
// warp global id == par_id
const magma_index_t par_id = (blockIdx.x * blockDim.x + threadIdx.x)
/ MAGMA_CSR5_OMEGA;
const int bunch_id = threadIdx.x / MAGMA_CSR5_OMEGA;
if (par_id >= p - 1)
return;
spmv_tile<c_sigma>
(&d_column_index[par_id * MAGMA_CSR5_OMEGA * c_sigma],
&d_value[par_id * MAGMA_CSR5_OMEGA * c_sigma],
d_row_pointer, d_x, d_tile_ptr,
&d_tile_desc[par_id * MAGMA_CSR5_OMEGA * num_packet],
d_tile_desc_offset_ptr, d_tile_desc_offset,
d_calibrator, d_y,
par_id, lane_id, bunch_id, bit_y_offset, bit_scansum_offset, alpha);
}
__global__ void
spmv_csr5_calibrate_kernel(
const magma_uindex_t *d_tile_ptr,
const magmaFloatComplex *d_calibrator,
magmaFloatComplex *d_y,
const magma_index_t p)
{
//const int lane_id = threadIdx.x % MAGMA_CSR5_THREAD_BUNCH;
//const int bunch_id = threadIdx.x / MAGMA_CSR5_THREAD_BUNCH;
const int local_id = threadIdx.x;
const magma_index_t global_id = blockIdx.x * blockDim.x + threadIdx.x;
magmaFloatComplex sum;
volatile __shared__
magma_index_t s_tile_ptr[MAGMA_CSR5_THREAD_GROUP+1];
__shared__ magmaFloatComplex s_calibrator[MAGMA_CSR5_THREAD_GROUP];
//volatile __shared__
// magmaFloatComplex s_sum[MAGMA_CSR5_THREAD_GROUP
// / MAGMA_CSR5_THREAD_BUNCH];
s_tile_ptr[local_id] = global_id < p-1 ?
(magma_index_t)(d_tile_ptr[global_id] & 0x7FFFFFFF) : -1;
s_calibrator[local_id] = sum = global_id < p-1 ?
d_calibrator[global_id] : MAGMA_C_ZERO;
__syncthreads();
// do a fast track if all s_tile_ptr are the same
if (s_tile_ptr[0] == s_tile_ptr[MAGMA_CSR5_THREAD_GROUP - 1])
{
//sum = sum_32_shfl<vT>(sum);
//if (!lane_id)
// s_sum[bunch_id] = sum;
//__syncthreads();
//if (!bunch_id)
//{
// sum = lane_id < (MAGMA_CSR5_THREAD_GROUP
// / MAGMA_CSR5_THREAD_BUNCH) ? s_sum[lane_id] : 0;
// sum = sum_32_shfl<vT>(sum);
//}
if (local_id < 64) s_calibrator[local_id] += s_calibrator[local_id+64];
__syncthreads();
if (local_id < 32) s_calibrator[local_id] += s_calibrator[local_id+32];
if (local_id < 16) s_calibrator[local_id] += s_calibrator[local_id+16];
if (local_id < 8) s_calibrator[local_id] += s_calibrator[local_id+8];
if (local_id < 4) s_calibrator[local_id] += s_calibrator[local_id+4];
if (local_id < 2) s_calibrator[local_id] += s_calibrator[local_id+2];
if (local_id < 1) s_calibrator[local_id] += s_calibrator[local_id+1];
if (!local_id)
{
atomicAddmagmaFloatComplex(&d_y[s_tile_ptr[0]], s_calibrator[0]);
}
return;
}
int local_par_id = local_id;
magma_index_t row_start_current, row_start_target, row_start_previous;
sum = MAGMA_C_ZERO;
// use (p - 1), due to the tail tile is dealt with CSR-vector method
if (global_id < p - 1)
{
row_start_previous = local_id ? s_tile_ptr[local_id-1] : -1;
row_start_current = s_tile_ptr[local_id];
if (row_start_previous != row_start_current)
{
row_start_target = row_start_current;
while (row_start_target == row_start_current
&& local_par_id < blockDim.x)
{
sum += s_calibrator[local_par_id];
local_par_id++;
row_start_current = s_tile_ptr[local_par_id];
}
if (row_start_target == s_tile_ptr[0]
|| row_start_target == s_tile_ptr[MAGMA_CSR5_THREAD_GROUP-1])
{
atomicAddmagmaFloatComplex(&d_y[row_start_target], sum);
}
else
d_y[row_start_target] += sum;
}
}
}
__global__ void
spmv_csr5_tail_tile_kernel(
const magma_index_t *d_row_pointer,
const magma_index_t *d_column_index,
magmaFloatComplex *d_value,
magmaFloatComplex *d_x,
magmaFloatComplex *d_y,
const magma_index_t tail_tile_start,
const magma_index_t p,
const int sigma,
const magmaFloatComplex alpha)
{
const int local_id = threadIdx.x;
const magma_index_t row_id = tail_tile_start + blockIdx.x;
const magma_index_t row_start = !blockIdx.x ? (p - 1)
* MAGMA_CSR5_OMEGA * sigma
: d_row_pointer[row_id];
const magma_index_t row_stop = d_row_pointer[row_id + 1];
magmaFloatComplex sum = MAGMA_C_ZERO;
for (magma_index_t idx = local_id + row_start;
idx < row_stop; idx += MAGMA_CSR5_OMEGA)
{
sum += candidate(d_value, d_x, d_column_index, idx, alpha);
}
//#if __CUDA_ARCH__ >= 300 // use shfl intrinsic
// sum = sum_32_shfl<vT>(sum);
//#else
__shared__ magmaFloatComplex s_sum[MAGMA_CSR5_OMEGA];
s_sum[local_id] = sum;
sum_32(s_sum, local_id);
//#endif
if (!local_id)
d_y[row_id] += s_sum[0]; //= !blockIdx.x ? d_y[row_id] + sum : sum;
}
__global__ void
cgecsr5mv_kernel_update_y(int num_rows,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
const magma_index_t row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < num_rows)
{
if (beta == MAGMA_C_ZERO)
dy[row] = MAGMA_C_ZERO;
else
dy[row] *= beta;
}
}
#endif
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
The input format is CSR5 (val (tile-wise column-major),
row_pointer,
col (tile-wise column-major),
tile_pointer,
tile_desc).
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
p magma_int_t
number of tiles in A
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
sigma magma_int_t
sigma in A in CSR5
@param[in]
bit_y_offset magma_int_t
bit_y_offset in A in CSR5
@param[in]
bit_scansum_offset magma_int_t
bit_scansum_offset in A in CSR5
@param[in]
num_packet magma_int_t
num_packet in A in CSR5
@param[in]
dtile_ptr magmaUIndex_ptr
tilepointer of A in CSR5
@param[in]
dtile_desc magmaUIndex_ptr
tiledescriptor of A in CSR5
@param[in]
dtile_desc_offset_ptr magmaIndex_ptr
tiledescriptor_offsetpointer of A in CSR5
@param[in]
dtile_desc_offset magmaIndex_ptr
tiledescriptor_offsetpointer of A in CSR5
@param[in]
dcalibrator magmaFloatComplex_ptr
calibrator of A in CSR5
@param[in]
tail_tile_start magma_int_t
start of the last tile in A
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in CSR
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cgecsr5mv(
    magma_trans_t transA,
    magma_int_t m,
    magma_int_t n,
    magma_int_t p,
    magmaFloatComplex alpha,
    magma_int_t sigma,
    magma_int_t bit_y_offset,
    magma_int_t bit_scansum_offset,
    magma_int_t num_packet,
    magmaUIndex_ptr dtile_ptr,
    magmaUIndex_ptr dtile_desc,
    magmaIndex_ptr dtile_desc_offset_ptr,
    magmaIndex_ptr dtile_desc_offset,
    magmaFloatComplex_ptr dcalibrator,
    magma_int_t tail_tile_start,
    magmaFloatComplex_ptr dval,
    magmaIndex_ptr drowptr,
    magmaIndex_ptr dcolind,
    magmaFloatComplex_ptr dx,
    magmaFloatComplex beta,
    magmaFloatComplex_ptr dy,
    magma_queue_t queue )
{
    int info = MAGMA_ERR_NOT_SUPPORTED;
#if (defined( CUDA_VERSION ) && ( CUDA_VERSION >= 8000 ))
    // CSR5 SpMV is only enabled on devices with arch >= 600.
    magma_int_t arch = magma_getdevice_arch();
    if ( arch >= 600 ) {
        // phase 1. update y: y = beta * y
        magma_int_t num_threads = MAGMA_CSR5_THREAD_GROUP;
        magma_int_t num_blocks = magma_ceildiv( m, num_threads );
        cgecsr5mv_kernel_update_y
            <<< num_blocks, num_threads, 0, queue->cuda_stream() >>>(m, beta, dy);

        // phase 2. spmv: y += alpha * A * x
        // sigma is a run-time value, but the compute kernel takes it as a
        // compile-time template argument, so dispatch over the supported
        // range [4, 32].  A sigma outside that range leaves y at beta * y,
        // exactly as the original hand-written switch did.
        num_threads = MAGMA_CSR5_THREAD_GROUP;
        num_blocks = magma_ceildiv( p-1, num_threads / MAGMA_CSR5_OMEGA );
#define MAGMA_CSR5MV_CASE(S)                                                \
        case S:                                                             \
            spmv_csr5_compute_kernel<S>                                     \
                <<< num_blocks, num_threads, 0, queue->cuda_stream() >>>    \
                (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc,         \
                 dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator,     \
                 dy, p, num_packet, bit_y_offset, bit_scansum_offset,       \
                 alpha);                                                    \
            break;
        switch (sigma)
        {
            MAGMA_CSR5MV_CASE( 4)
            MAGMA_CSR5MV_CASE( 5)
            MAGMA_CSR5MV_CASE( 6)
            MAGMA_CSR5MV_CASE( 7)
            MAGMA_CSR5MV_CASE( 8)
            MAGMA_CSR5MV_CASE( 9)
            MAGMA_CSR5MV_CASE(10)
            MAGMA_CSR5MV_CASE(11)
            MAGMA_CSR5MV_CASE(12)
            MAGMA_CSR5MV_CASE(13)
            MAGMA_CSR5MV_CASE(14)
            MAGMA_CSR5MV_CASE(15)
            MAGMA_CSR5MV_CASE(16)
            MAGMA_CSR5MV_CASE(17)
            MAGMA_CSR5MV_CASE(18)
            MAGMA_CSR5MV_CASE(19)
            MAGMA_CSR5MV_CASE(20)
            MAGMA_CSR5MV_CASE(21)
            MAGMA_CSR5MV_CASE(22)
            MAGMA_CSR5MV_CASE(23)
            MAGMA_CSR5MV_CASE(24)
            MAGMA_CSR5MV_CASE(25)
            MAGMA_CSR5MV_CASE(26)
            MAGMA_CSR5MV_CASE(27)
            MAGMA_CSR5MV_CASE(28)
            MAGMA_CSR5MV_CASE(29)
            MAGMA_CSR5MV_CASE(30)
            MAGMA_CSR5MV_CASE(31)
            MAGMA_CSR5MV_CASE(32)
        }
#undef MAGMA_CSR5MV_CASE

        // phase 3. fold the per-tile calibrators back into y.
        // Fix: use the integer magma_ceildiv (as everywhere else in this
        // routine) instead of ceil() on floats, whose rounding can produce a
        // wrong block count for large p.
        num_threads = MAGMA_CSR5_THREAD_GROUP;
        num_blocks = magma_ceildiv( p-1, num_threads );
        spmv_csr5_calibrate_kernel
            <<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
            (dtile_ptr, dcalibrator, dy, p);

        // phase 4. tail tile: one block per row not covered by a full tile.
        num_threads = MAGMA_CSR5_OMEGA;
        num_blocks = m - tail_tile_start;
        spmv_csr5_tail_tile_kernel
            <<< num_blocks, num_threads, 0, queue->cuda_stream() >>>
            (drowptr, dcolind, dval, dx, dy,
             tail_tile_start, p, sigma, alpha);

        info = MAGMA_SUCCESS;
    }
    else {
        info = MAGMA_ERR_NOT_SUPPORTED;
    }
#endif
    return info;
}
|
b8231af98ae38bb22d4ad27f320b8997baeeb97a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Computes the Euclidean (L2) norm of the difference vector between two
// nodes: twoNodesDistance[0] = sqrt(sum_i twoNodesDifference[i]^2) over
// vectorLength components.  Only the first global thread (threadId == 0)
// does any work, so the launch configuration does not affect the result.
__global__ void TwoNodesDistanceKernel( float *twoNodesDifference, float *twoNodesDistance, int vectorLength )
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
+ blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if(threadId < 1)
{
float sum = 0.00f;  // accumulator for the sum of squared components
float value;
for(int i = 0; i < vectorLength; i++)
{
value = twoNodesDifference[threadId * vectorLength + i];
sum += value*value;
}
twoNodesDistance[threadId] = sqrtf(sum);  // single-precision square root
}
} | b8231af98ae38bb22d4ad27f320b8997baeeb97a.cu | #include "includes.h"
// Reduces the squared components of a node-difference vector into its
// Euclidean (L2) norm: twoNodesDistance[0] = sqrt(sum of squares of the
// first vectorLength entries of twoNodesDifference).  Only the first
// global thread performs the reduction; all other threads exit at once.
__global__ void TwoNodesDistanceKernel( float *twoNodesDifference, float *twoNodesDistance, int vectorLength )
{
    // Flatten the 2D grid into one global thread id.
    int globalId = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
    if (globalId >= 1)
        return;
    float sumOfSquares = 0.0f;
    for (int k = 0; k < vectorLength; ++k)
    {
        float component = twoNodesDifference[globalId * vectorLength + k];
        sumOfSquares += component * component;
    }
    twoNodesDistance[globalId] = sqrtf(sumOfSquares);
} |
a749ea3bf1ffb607b9387bc7bc5b7eca15d41903.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include "sum.cuh"
// Sanity-check kernel: device-side printf of a greeting containing the
// value of sum(2, 3).  sum() comes from "sum.cuh" -- presumably a
// __device__ helper returning 5; confirm against sum.cuh.  Device printf
// output is flushed at the next synchronizing call.
__global__ void cuda_hello(){
printf("Hello World from GPU %d!\n", sum(2, 3));
}
// Launches the cuda_hello kernel on a single thread and blocks until it
// completes so the device-side printf output is flushed.
// Improvement: the launch status and the asynchronous execution status are
// now checked instead of being silently dropped (<cstdio> is already
// included by this file).
void hello_world_gpu() {
    hipLaunchKernelGGL(( cuda_hello), dim3(1),dim3(1), 0, 0, );
    hipError_t err = hipGetLastError();   // catches launch-configuration errors
    if (err != hipSuccess) {
        fprintf(stderr, "hello_world_gpu: launch failed: %s\n", hipGetErrorString(err));
        return;
    }
    err = hipDeviceSynchronize();         // catches asynchronous execution errors
    if (err != hipSuccess) {
        fprintf(stderr, "hello_world_gpu: execution failed: %s\n", hipGetErrorString(err));
    }
}
| a749ea3bf1ffb607b9387bc7bc5b7eca15d41903.cu | #include <cstdio>
#include "sum.cuh"
// Sanity-check kernel: device-side printf of a greeting containing the
// value of sum(2, 3).  sum() comes from "sum.cuh" -- presumably a
// __device__ helper returning 5; confirm against sum.cuh.  Device printf
// output is flushed at the next synchronizing call.
__global__ void cuda_hello(){
printf("Hello World from GPU %d!\n", sum(2, 3));
}
// Launches the cuda_hello kernel on a single thread and blocks until it
// completes so the device-side printf output is flushed.
// Improvement: the launch status and the asynchronous execution status are
// now checked instead of being silently dropped (<cstdio> is already
// included by this file).
void hello_world_gpu() {
    cuda_hello<<<1,1>>>();
    cudaError_t err = cudaGetLastError();  // catches launch-configuration errors
    if (err != cudaSuccess) {
        fprintf(stderr, "hello_world_gpu: launch failed: %s\n", cudaGetErrorString(err));
        return;
    }
    err = cudaDeviceSynchronize();         // catches asynchronous execution errors
    if (err != cudaSuccess) {
        fprintf(stderr, "hello_world_gpu: execution failed: %s\n", cudaGetErrorString(err));
    }
}
|
2236ee009545ed6377248ab475a8baebf5c79a15.hip | // !!! This is a file automatically generated by hipify!!!
/*
* dmv_main.cu -- DMV front-end program.
*
* Copyright (C) 2010-2012, Computing Systems Laboratory (CSLab)
* Copyright (C) 2010-2012, Vasileios Karakasis
*/
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "alloc.h"
#include "dmv.h"
#include "error.h"
#include "gpu_util.h"
#include "timer.h"
#ifndef VALUES_MAX
# define VALUES_MAX MAKE_VALUE_CONSTANT(1.)
#endif
#ifndef EPS
# define EPS MAKE_VALUE_CONSTANT(1.e-6)
#endif
#ifndef NR_ITER
# define NR_ITER 100
#endif
/*
 * Compare a computed vector against the serial reference within tolerance
 * EPS.  vec_equals() returns 0 on success or (failing index + 1) on the
 * first mismatch, so the printed index is i_fail - 1.
 */
static void check_result(const value_t *test, const value_t *orig, size_t n)
{
    printf("Checking ... ");
    size_t i_fail = vec_equals(test, orig, n, EPS);
    if (!i_fail) {
        printf("PASSED\n");
    } else {
        /* Fix: %zu is the correct conversion for size_t; the former %ld is
         * undefined behavior on platforms where long and size_t differ. */
        printf("FAILED (index: %zu)\n", i_fail - 1);
        printf("%" VALUE_FORMAT " != " "%" VALUE_FORMAT "\n",
               test[i_fail-1], orig[i_fail-1]);
    }
}
/*
 * Report the elapsed wall-clock time and the achieved Gflop/s rate for
 * NR_ITER dense matrix-vector products of size n x n (2*n*n flops each).
 */
static void report_results(xtimer_t *timer, size_t n)
{
    double seconds = timer_elapsed_time(timer);
    size_t total_flops = 2 * n * n * NR_ITER;
    printf("Elapsed time: %lf s\n", seconds);
    printf("Performance: %lf Gflop/s\n", total_flops * 1.e-9 / seconds);
}
/* Describe the command line, the recognized environment variables, and the
 * available GPU kernels (taken from the global gpu_kernels table). */
static void print_usage()
{
    printf("Usage: [GPU_KERNEL=<kernel_no>] [GPU_BLOCK_SIZE=<size>] "
           "%s <matrix size>\n", program_name);
    printf("GPU_KERNEL defaults to 0\n");
    printf("GPU_BLOCK_SIZE defaults to 256\n");
    printf("Available kernels [id:name]:\n");
    for (size_t i = 0; i < GPU_KERNEL_END; ++i)
        printf("\t%zd:%s\n", i, gpu_kernels[i].name);
}
int main(int argc, char **argv)
{
set_program_name(argv[0]);
if (argc < 2) {
warning(0, "too few arguments");
print_usage();
exit(EXIT_FAILURE);
}
size_t n = atoi(argv[1]);
if (!n)
error(0, "invalid argument: %s", argv[1]);
/* Read block size and kernel to launch from the environment */
const char *env_gpu_kernel = getenv("GPU_KERNEL");
const char *env_gpu_block_size = getenv("GPU_BLOCK_SIZE");
int kernel = (env_gpu_kernel) ? atoi(env_gpu_kernel) : GPU_NAIVE;
int block_size = (env_gpu_block_size) ? atoi(env_gpu_block_size) : 256;
size_t orig_n = n; // original matrix size
n = ((n + block_size - 1)/block_size) * block_size; //Adjusted matrix size
//int grid_size = 1; // FILLME: compute the grid size
/*
* FILLME: you can optionally adjust appropriately (increase
* only) the matrix size here if that helps you with your
* kernel code, e.g., to avoid divergent warps.
*/
printf("Matrix size: %zd\n", orig_n);
printf("Adjusted matrix size: %zd\n", n);
/*
* Allocate the structures.
*
* Initialization to zero is crucial if you adjusted the matrix
* size.
*/
value_t **A = (value_t **) calloc_2d(n, n, sizeof(**A));
if (!A)
error(1, "alloc_2d failed");
value_t *x = (value_t *) calloc(n, sizeof(*x));
if (!x)
error(1, "malloc failed");
value_t *y_serial = (value_t *) calloc(n, sizeof(*y_serial));
if (!y_serial)
error(1, "malloc failed");
value_t *y = (value_t *) calloc(n, sizeof(*y));
if (!y)
error(1, "malloc failed");
/* Initialize */
srand48(0);
mat_init_rand(A, orig_n, VALUES_MAX);
vec_init_rand(x, orig_n, VALUES_MAX);
vec_init(y_serial, orig_n, MAKE_VALUE_CONSTANT(0.0));
vec_init(y, orig_n, MAKE_VALUE_CONSTANT(0.0));
/* Setup timers */
xtimer_t timer;
/* Compute serial */
#ifdef SERIAL_KERNEL
printf(">>>> Begin of record <<<<\n");
printf("Serial version:\n");
timer_clear(&timer);
timer_start(&timer);
for (size_t i = 0; i < NR_ITER; ++i)
dmv_serial(A, x, y_serial, orig_n);
timer_stop(&timer);
report_results(&timer, orig_n);
printf(">>>> End of record <<<<\n");
#endif // SERIAL_KERNEL
#ifdef OPENMP_KERNEL
/* Compute OpenMP */
printf(">>>> Begin of record <<<<\n");
printf("OpenMP version:\n");
timer_clear(&timer);
timer_start(&timer);
for (size_t i = 0; i < NR_ITER; ++i)
dmv_omp(A, x, y, orig_n);
timer_stop(&timer);
#ifndef _NOCHECK_
check_result(y, y_serial, orig_n);
#endif
report_results(&timer, orig_n);
printf(">>>> End of record <<<<\n");
#endif // OPENMP_KERNEL
#ifdef GPU_KERNEL
/*
* FILLME: Set up the blocks, grid and shared memory depending on
* the kernel. Make any transformations to the input
* matrix here.
*/
dim3 gpu_block; // FILLME: set up the block dimensions
dim3 gpu_grid; // FILLME: set up the grid dimensions
size_t shmem_size; // FILLME: set up the shared memory size
switch(kernel) {
case 0:
gpu_block.x = 1;
gpu_block.y = block_size;
gpu_grid.x = 1;
gpu_grid.y = (n + block_size - 1)/block_size;
shmem_size = 0;
break;
case 1:
gpu_block.x = 1;
gpu_block.y = block_size;
gpu_grid.x = 1;
gpu_grid.y = (n + block_size - 1)/block_size;
shmem_size = 0;
mat_transpose(A,n);
break;
case 2:
gpu_block.x = 1;
gpu_block.y = block_size;
gpu_grid.x = 1;
gpu_grid.y = (n + block_size - 1)/block_size;
shmem_size = n * sizeof(value_t);
mat_transpose(A,n);
break;
}
printf(">>>> Begin of record <<<<\n");
printf("Block size: %dx%d\n", gpu_block.x, gpu_block.y);
printf("Grid size : %dx%d\n", gpu_grid.x, gpu_grid.y);
printf("Shared memory size: %ld bytes\n", shmem_size);
/* GPU allocations */
value_t *gpu_A = (value_t *) gpu_alloc(n*n*sizeof(*gpu_A));
if (!gpu_A)
error(0, "gpu_alloc failed: %s", gpu_get_last_errmsg());
value_t *gpu_x = (value_t *) gpu_alloc(n*sizeof(*gpu_x));
if (!gpu_x)
error(0, "gpu_alloc failed: %s", gpu_get_last_errmsg());
value_t *gpu_y = (value_t *) gpu_alloc(n*sizeof(*gpu_y));
if (!gpu_y)
error(0, "gpu_alloc failed: %s", gpu_get_last_errmsg());
/* Copy data to GPU */
if (copy_to_gpu(A[0], gpu_A, n*n*sizeof(*gpu_A)) < 0)
error(0, "copy_to_gpu failed: %s", gpu_get_last_errmsg());
if (copy_to_gpu(x, gpu_x, n*sizeof(*gpu_x)) < 0)
error(0, "copy_to_gpu failed: %s", gpu_get_last_errmsg());
/* Reset y and copy it to GPU */
vec_init(y, n, MAKE_VALUE_CONSTANT(0.0));
if (copy_to_gpu(y, gpu_y, n*sizeof(*gpu_y)) < 0)
error(0, "copy_to_gpu failed: %s", gpu_get_last_errmsg());
if (kernel >= GPU_KERNEL_END)
error(0, "the requested kernel does not exist");
printf("GPU kernel version: %s\n", gpu_kernels[kernel].name);
/* Execute and time the kernel */
timer_clear(&timer);
timer_start(&timer);
for (size_t i = 0; i < NR_ITER; ++i) {
gpu_kernels[kernel]hipLaunchKernelGGL((.fn), dim3(gpu_grid),dim3(gpu_block),shmem_size, 0,
gpu_A, gpu_x, gpu_y, n);
#ifdef _DEBUG_
hipError_t err;
if ( (err = hipGetLastError()) != hipSuccess)
error(0, "gpu kernel failed to launch: %s", gpu_get_errmsg(err));
#endif
hipDeviceSynchronize();
}
timer_stop(&timer);
/* Copy result back to host and check */
if (copy_from_gpu(y, gpu_y, n*sizeof(*y)) < 0)
error(0, "copy_from_gpu failed: %s", gpu_get_last_errmsg());
#ifndef _NOCHECK_
check_result(y, y_serial, orig_n);
#endif
report_results(&timer, orig_n);
printf(">>>> End of record <<<<\n");
#endif // GPU_KERNEL
/* Free resources on host */
free_2d((void **) A);
free(x);
free(y);
free(y_serial);
#ifdef GPU_KERNEL
/* Free resources on GPU */
gpu_free(gpu_A);
gpu_free(gpu_x);
gpu_free(gpu_y);
#endif // GPU_KERNEL
return EXIT_SUCCESS;
} | 2236ee009545ed6377248ab475a8baebf5c79a15.cu | /*
* dmv_main.cu -- DMV front-end program.
*
* Copyright (C) 2010-2012, Computing Systems Laboratory (CSLab)
* Copyright (C) 2010-2012, Vasileios Karakasis
*/
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include "alloc.h"
#include "dmv.h"
#include "error.h"
#include "gpu_util.h"
#include "timer.h"
#ifndef VALUES_MAX
# define VALUES_MAX MAKE_VALUE_CONSTANT(1.)
#endif
#ifndef EPS
# define EPS MAKE_VALUE_CONSTANT(1.e-6)
#endif
#ifndef NR_ITER
# define NR_ITER 100
#endif
/*
 * Compare a computed vector against the serial reference within tolerance
 * EPS.  vec_equals() returns 0 on success or (failing index + 1) on the
 * first mismatch, so the printed index is i_fail - 1.
 */
static void check_result(const value_t *test, const value_t *orig, size_t n)
{
    printf("Checking ... ");
    size_t i_fail = vec_equals(test, orig, n, EPS);
    if (!i_fail) {
        printf("PASSED\n");
    } else {
        /* Fix: %zu is the correct conversion for size_t; the former %ld is
         * undefined behavior on platforms where long and size_t differ. */
        printf("FAILED (index: %zu)\n", i_fail - 1);
        printf("%" VALUE_FORMAT " != " "%" VALUE_FORMAT "\n",
               test[i_fail-1], orig[i_fail-1]);
    }
}
/*
 * Report the elapsed wall-clock time and the achieved Gflop/s rate for
 * NR_ITER dense matrix-vector products of size n x n (2*n*n flops each).
 */
static void report_results(xtimer_t *timer, size_t n)
{
    double seconds = timer_elapsed_time(timer);
    size_t total_flops = 2 * n * n * NR_ITER;
    printf("Elapsed time: %lf s\n", seconds);
    printf("Performance: %lf Gflop/s\n", total_flops * 1.e-9 / seconds);
}
/* Describe the command line, the recognized environment variables, and the
 * available GPU kernels (taken from the global gpu_kernels table). */
static void print_usage()
{
    printf("Usage: [GPU_KERNEL=<kernel_no>] [GPU_BLOCK_SIZE=<size>] "
           "%s <matrix size>\n", program_name);
    printf("GPU_KERNEL defaults to 0\n");
    printf("GPU_BLOCK_SIZE defaults to 256\n");
    printf("Available kernels [id:name]:\n");
    for (size_t i = 0; i < GPU_KERNEL_END; ++i)
        printf("\t%zd:%s\n", i, gpu_kernels[i].name);
}
int main(int argc, char **argv)
{
set_program_name(argv[0]);
if (argc < 2) {
warning(0, "too few arguments");
print_usage();
exit(EXIT_FAILURE);
}
size_t n = atoi(argv[1]);
if (!n)
error(0, "invalid argument: %s", argv[1]);
/* Read block size and kernel to launch from the environment */
const char *env_gpu_kernel = getenv("GPU_KERNEL");
const char *env_gpu_block_size = getenv("GPU_BLOCK_SIZE");
int kernel = (env_gpu_kernel) ? atoi(env_gpu_kernel) : GPU_NAIVE;
int block_size = (env_gpu_block_size) ? atoi(env_gpu_block_size) : 256;
size_t orig_n = n; // original matrix size
n = ((n + block_size - 1)/block_size) * block_size; //Adjusted matrix size
//int grid_size = 1; // FILLME: compute the grid size
/*
* FILLME: you can optionally adjust appropriately (increase
* only) the matrix size here if that helps you with your
* kernel code, e.g., to avoid divergent warps.
*/
printf("Matrix size: %zd\n", orig_n);
printf("Adjusted matrix size: %zd\n", n);
/*
* Allocate the structures.
*
* Initialization to zero is crucial if you adjusted the matrix
* size.
*/
value_t **A = (value_t **) calloc_2d(n, n, sizeof(**A));
if (!A)
error(1, "alloc_2d failed");
value_t *x = (value_t *) calloc(n, sizeof(*x));
if (!x)
error(1, "malloc failed");
value_t *y_serial = (value_t *) calloc(n, sizeof(*y_serial));
if (!y_serial)
error(1, "malloc failed");
value_t *y = (value_t *) calloc(n, sizeof(*y));
if (!y)
error(1, "malloc failed");
/* Initialize */
srand48(0);
mat_init_rand(A, orig_n, VALUES_MAX);
vec_init_rand(x, orig_n, VALUES_MAX);
vec_init(y_serial, orig_n, MAKE_VALUE_CONSTANT(0.0));
vec_init(y, orig_n, MAKE_VALUE_CONSTANT(0.0));
/* Setup timers */
xtimer_t timer;
/* Compute serial */
#ifdef SERIAL_KERNEL
printf(">>>> Begin of record <<<<\n");
printf("Serial version:\n");
timer_clear(&timer);
timer_start(&timer);
for (size_t i = 0; i < NR_ITER; ++i)
dmv_serial(A, x, y_serial, orig_n);
timer_stop(&timer);
report_results(&timer, orig_n);
printf(">>>> End of record <<<<\n");
#endif // SERIAL_KERNEL
#ifdef OPENMP_KERNEL
/* Compute OpenMP */
printf(">>>> Begin of record <<<<\n");
printf("OpenMP version:\n");
timer_clear(&timer);
timer_start(&timer);
for (size_t i = 0; i < NR_ITER; ++i)
dmv_omp(A, x, y, orig_n);
timer_stop(&timer);
#ifndef _NOCHECK_
check_result(y, y_serial, orig_n);
#endif
report_results(&timer, orig_n);
printf(">>>> End of record <<<<\n");
#endif // OPENMP_KERNEL
#ifdef GPU_KERNEL
/*
* FILLME: Set up the blocks, grid and shared memory depending on
* the kernel. Make any transformations to the input
* matrix here.
*/
dim3 gpu_block; // FILLME: set up the block dimensions
dim3 gpu_grid; // FILLME: set up the grid dimensions
size_t shmem_size; // FILLME: set up the shared memory size
switch(kernel) {
case 0:
gpu_block.x = 1;
gpu_block.y = block_size;
gpu_grid.x = 1;
gpu_grid.y = (n + block_size - 1)/block_size;
shmem_size = 0;
break;
case 1:
gpu_block.x = 1;
gpu_block.y = block_size;
gpu_grid.x = 1;
gpu_grid.y = (n + block_size - 1)/block_size;
shmem_size = 0;
mat_transpose(A,n);
break;
case 2:
gpu_block.x = 1;
gpu_block.y = block_size;
gpu_grid.x = 1;
gpu_grid.y = (n + block_size - 1)/block_size;
shmem_size = n * sizeof(value_t);
mat_transpose(A,n);
break;
}
printf(">>>> Begin of record <<<<\n");
printf("Block size: %dx%d\n", gpu_block.x, gpu_block.y);
printf("Grid size : %dx%d\n", gpu_grid.x, gpu_grid.y);
printf("Shared memory size: %ld bytes\n", shmem_size);
/* GPU allocations */
value_t *gpu_A = (value_t *) gpu_alloc(n*n*sizeof(*gpu_A));
if (!gpu_A)
error(0, "gpu_alloc failed: %s", gpu_get_last_errmsg());
value_t *gpu_x = (value_t *) gpu_alloc(n*sizeof(*gpu_x));
if (!gpu_x)
error(0, "gpu_alloc failed: %s", gpu_get_last_errmsg());
value_t *gpu_y = (value_t *) gpu_alloc(n*sizeof(*gpu_y));
if (!gpu_y)
error(0, "gpu_alloc failed: %s", gpu_get_last_errmsg());
/* Copy data to GPU */
if (copy_to_gpu(A[0], gpu_A, n*n*sizeof(*gpu_A)) < 0)
error(0, "copy_to_gpu failed: %s", gpu_get_last_errmsg());
if (copy_to_gpu(x, gpu_x, n*sizeof(*gpu_x)) < 0)
error(0, "copy_to_gpu failed: %s", gpu_get_last_errmsg());
/* Reset y and copy it to GPU */
vec_init(y, n, MAKE_VALUE_CONSTANT(0.0));
if (copy_to_gpu(y, gpu_y, n*sizeof(*gpu_y)) < 0)
error(0, "copy_to_gpu failed: %s", gpu_get_last_errmsg());
if (kernel >= GPU_KERNEL_END)
error(0, "the requested kernel does not exist");
printf("GPU kernel version: %s\n", gpu_kernels[kernel].name);
/* Execute and time the kernel */
timer_clear(&timer);
timer_start(&timer);
for (size_t i = 0; i < NR_ITER; ++i) {
gpu_kernels[kernel].fn<<<gpu_grid,gpu_block,shmem_size>>>
(gpu_A, gpu_x, gpu_y, n);
#ifdef _DEBUG_
cudaError_t err;
if ( (err = cudaGetLastError()) != cudaSuccess)
error(0, "gpu kernel failed to launch: %s", gpu_get_errmsg(err));
#endif
cudaThreadSynchronize();
}
timer_stop(&timer);
/* Copy result back to host and check */
if (copy_from_gpu(y, gpu_y, n*sizeof(*y)) < 0)
error(0, "copy_from_gpu failed: %s", gpu_get_last_errmsg());
#ifndef _NOCHECK_
check_result(y, y_serial, orig_n);
#endif
report_results(&timer, orig_n);
printf(">>>> End of record <<<<\n");
#endif // GPU_KERNEL
/* Free resources on host */
free_2d((void **) A);
free(x);
free(y);
free(y_serial);
#ifdef GPU_KERNEL
/* Free resources on GPU */
gpu_free(gpu_A);
gpu_free(gpu_x);
gpu_free(gpu_y);
#endif // GPU_KERNEL
return EXIT_SUCCESS;
} |
9161f8a180f0bc8ccaddb4f41699a61aee92337f.hip | // !!! This is a file automatically generated by hipify!!!
//
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <time.h>
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
//
// kernel routine
//
/*
 * Per-block partial dot product of a and b, accumulated into *c with one
 * atomicAdd per block.  Expects a 1D launch of THREADS_PER_BLOCK-thread
 * blocks covering N elements; *c must be zero-initialized on the device
 * before the launch.
 */
__global__ void dot_product(const int *a, const int *b, int *c)
{
    // each thread in a block sharing the memory, temp
    __shared__ int temp[THREADS_PER_BLOCK];
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    /* Fix: guard against a partial last block.  The launch uses a ceil-div
     * block count, so any N not divisible by THREADS_PER_BLOCK would read
     * past the end of a/b; out-of-range threads now contribute 0 instead. */
    temp[threadIdx.x] = (idx < N) ? a[idx] * b[idx] : 0;
    __syncthreads();   /* all partial products visible before the reduction */
    if (0 == threadIdx.x) {
        /* serial per-block reduction by thread 0, then a single atomic
         * accumulation into the global result */
        int sum = 0;
        for (int i = 0; i < THREADS_PER_BLOCK; ++i)
            sum += temp[i];
        /* Tricks: only works for sm_11... read the simpleAtomicIntrinsics sample */
        atomicAdd( c, sum );
    }
}
//
// main code
//
//
// Host driver: computes the dot product of two N-element vectors on the
// GPU (per-block shared-memory reduction + atomicAdd in dot_product())
// and verifies the result against a serial CPU sum.
//
int main(int argc, char **argv)
{
    int *a, *b, *c;
    int *dev_a, *dev_b, *dev_c;
    int size = N * sizeof(int);   // bytes per input vector
    int result = 0;               // CPU reference accumulator
    time_t t;

    // initialise card - legacy code
    //cutilDeviceInit(argc, argv);

    srand((unsigned) time(&t));

    printf("DEBUG: Size of 'int' type: %lu\n", sizeof(int));
    printf("DEBUG: Total footprint size: %d bytes\n", size);

    // allocate device copies of a, b, c
    hipMalloc( (void**)&dev_a, size );
    hipMalloc( (void**)&dev_b, size );
    hipMalloc( (void**)&dev_c, sizeof(int) );

    a = (int*)malloc( size );
    b = (int*)malloc( size );
    c = (int*)malloc( sizeof(int) );

    // BUG FIX: *c was uploaded to the device as the accumulator's initial
    // value while still uninitialized (malloc does not zero), so the kernel
    // summed onto garbage.  Zero it before the upload.
    *c = 0;

    for (int i=0; i<N; i++)
    {
#if 0
        a[i] = rand()%N;
        b[i] = rand()%N;
#else
        a[i] = 5;
        b[i] = 5;
#endif
    }
    printf("DEBUG: a[%d]=%d, b[%d]=%d\n",0, a[0], 0, b[0]);
    printf("DEBUG: a[%d]=%d, b[%d]=%d\n",1, a[1], 1, b[1]);

    // copy inputs to device
    hipMemcpy( dev_a, a, size, hipMemcpyHostToDevice );
    hipMemcpy( dev_b, b, size, hipMemcpyHostToDevice );
    // seed the device accumulator with the (now zeroed) host value
    hipMemcpy( dev_c, c, sizeof(int), hipMemcpyHostToDevice );

    int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;  // ceil-div

    // launch dot_product() kernel with N parallel blocks
    printf("INFO: Launching CUDA kernel: dot product with blocks=%d, threads=%d...", blocksPerGrid, THREADS_PER_BLOCK);
    hipLaunchKernelGGL(( dot_product), dim3(blocksPerGrid), dim3(THREADS_PER_BLOCK) , 0, 0, dev_a, dev_b, dev_c );
    printf(" Done\n");
    printf("DEBUG: c2 is: %d @ %p\n", *c, &c);

    // copy device result back to host copy of c (blocking, so it also
    // synchronizes with the kernel)
    hipMemcpy( c, dev_c, sizeof(int), hipMemcpyDeviceToHost );
    printf("DEBUG: c3 is: %d @ %p\n", *c, &c);

#if 1
    // serial CPU reference for verification
    for (int i=0; i<N; i++)
    {
        result += a[i] * b[i];
    }
    // integer operands: the fabs() test passes iff the two sums are equal
    if (fabs(result - *c) < 1e-5)
        printf("INFO: PASS\n");
    else
        printf("ERROR: *** FAILED *** sum=%d\n", result);
#endif
#if 1
    printf("DEBUG: a[0]=%d, b[0]=%d\n", a[0], b[0]);
    printf("DEBUG: a[%d]=%d, b[%d]=%d, c=%d\n", 1, a[1], 1, b[1], *c);
#endif

    hipFree( dev_a );
    hipFree( dev_b );
    hipFree( dev_c );
    free( a );
    free( b );
    free( c );

    hipDeviceReset();
    return 0;
}
| 9161f8a180f0bc8ccaddb4f41699a61aee92337f.cu | //
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include <time.h>
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
//
// kernel routine
//
/*
 * Per-block partial dot product of a and b, accumulated into *c with one
 * atomicAdd per block.  Expects a 1D launch of THREADS_PER_BLOCK-thread
 * blocks covering N elements; *c must be zero-initialized on the device
 * before the launch.
 */
__global__ void dot_product(const int *a, const int *b, int *c)
{
    // each thread in a block sharing the memory, temp
    __shared__ int temp[THREADS_PER_BLOCK];
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    /* Fix: guard against a partial last block.  The launch uses a ceil-div
     * block count, so any N not divisible by THREADS_PER_BLOCK would read
     * past the end of a/b; out-of-range threads now contribute 0 instead. */
    temp[threadIdx.x] = (idx < N) ? a[idx] * b[idx] : 0;
    __syncthreads();   /* all partial products visible before the reduction */
    if (0 == threadIdx.x) {
        /* serial per-block reduction by thread 0, then a single atomic
         * accumulation into the global result */
        int sum = 0;
        for (int i = 0; i < THREADS_PER_BLOCK; ++i)
            sum += temp[i];
        /* Tricks: only works for sm_11... read the simpleAtomicIntrinsics sample */
        atomicAdd( c, sum );
    }
}
//
// main code
//
//
// Host driver: computes the dot product of two N-element vectors on the
// GPU (per-block shared-memory reduction + atomicAdd in dot_product())
// and verifies the result against a serial CPU sum.
//
int main(int argc, char **argv)
{
    int *a, *b, *c;
    int *dev_a, *dev_b, *dev_c;
    int size = N * sizeof(int);   // bytes per input vector
    int result = 0;               // CPU reference accumulator
    time_t t;

    // initialise card - legacy code
    //cutilDeviceInit(argc, argv);

    srand((unsigned) time(&t));

    printf("DEBUG: Size of 'int' type: %lu\n", sizeof(int));
    printf("DEBUG: Total footprint size: %d bytes\n", size);

    // allocate device copies of a, b, c
    cudaMalloc( (void**)&dev_a, size );
    cudaMalloc( (void**)&dev_b, size );
    cudaMalloc( (void**)&dev_c, sizeof(int) );

    a = (int*)malloc( size );
    b = (int*)malloc( size );
    c = (int*)malloc( sizeof(int) );

    // BUG FIX: *c was uploaded to the device as the accumulator's initial
    // value while still uninitialized (malloc does not zero), so the kernel
    // summed onto garbage.  Zero it before the upload.
    *c = 0;

    for (int i=0; i<N; i++)
    {
#if 0
        a[i] = rand()%N;
        b[i] = rand()%N;
#else
        a[i] = 5;
        b[i] = 5;
#endif
    }
    printf("DEBUG: a[%d]=%d, b[%d]=%d\n",0, a[0], 0, b[0]);
    printf("DEBUG: a[%d]=%d, b[%d]=%d\n",1, a[1], 1, b[1]);

    // copy inputs to device
    cudaMemcpy( dev_a, a, size, cudaMemcpyHostToDevice );
    cudaMemcpy( dev_b, b, size, cudaMemcpyHostToDevice );
    // seed the device accumulator with the (now zeroed) host value
    cudaMemcpy( dev_c, c, sizeof(int), cudaMemcpyHostToDevice );

    int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;  // ceil-div

    // launch dot_product() kernel with N parallel blocks
    printf("INFO: Launching CUDA kernel: dot product with blocks=%d, threads=%d...", blocksPerGrid, THREADS_PER_BLOCK);
    dot_product<<< blocksPerGrid, THREADS_PER_BLOCK >>>( dev_a, dev_b, dev_c );
    printf(" Done\n");
    printf("DEBUG: c2 is: %d @ %p\n", *c, &c);

    // copy device result back to host copy of c (blocking, so it also
    // synchronizes with the kernel)
    cudaMemcpy( c, dev_c, sizeof(int), cudaMemcpyDeviceToHost );
    printf("DEBUG: c3 is: %d @ %p\n", *c, &c);

#if 1
    // serial CPU reference for verification
    for (int i=0; i<N; i++)
    {
        result += a[i] * b[i];
    }
    // integer operands: the fabs() test passes iff the two sums are equal
    if (fabs(result - *c) < 1e-5)
        printf("INFO: PASS\n");
    else
        printf("ERROR: *** FAILED *** sum=%d\n", result);
#endif
#if 1
    printf("DEBUG: a[0]=%d, b[0]=%d\n", a[0], b[0]);
    printf("DEBUG: a[%d]=%d, b[%d]=%d, c=%d\n", 1, a[1], 1, b[1], *c);
#endif

    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_c );
    free( a );
    free( b );
    free( c );

    cudaDeviceReset();
    return 0;
}
|
0f0a27002464225c99df8cdc9f4010cb24ef6570.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Radix-sort predicate map: for each element i < n, extract the selected
// bit of d_in[i] and write d_ones[i] = that bit and d_zeros[i] = its
// complement.  Expects a 1D launch of BLOCK_WIDTH-thread blocks covering
// at least n elements (BLOCK_WIDTH is a project macro -- presumably equal
// to blockDim.x at launch; confirm at the call site).
__global__ void mapPredicate(unsigned int *d_zeros, unsigned int *d_ones, unsigned int *d_in, unsigned int bit, size_t n)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
int index = BLOCK_WIDTH * bx + tx;  // global element index
// NOTE(review): int compared against size_t n -- fine while n fits in int,
// but the signed index would overflow first for very large n; confirm limits.
if(index < n) {
unsigned int isOne = (d_in[index] >> bit) & 1;
d_ones[index] = isOne;       // 1 iff the selected bit is set
d_zeros[index] = 1 - isOne;  // complement predicate
}
} | 0f0a27002464225c99df8cdc9f4010cb24ef6570.cu | #include "includes.h"
// Radix-sort predicate map: for each element i < n, extract the selected
// bit of d_in[i] and write d_ones[i] = that bit and d_zeros[i] = its
// complement.  Expects a 1D launch of BLOCK_WIDTH-thread blocks covering
// at least n elements (BLOCK_WIDTH is a project macro -- presumably equal
// to blockDim.x at launch; confirm at the call site).
__global__ void mapPredicate(unsigned int *d_zeros, unsigned int *d_ones, unsigned int *d_in, unsigned int bit, size_t n)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
int index = BLOCK_WIDTH * bx + tx;  // global element index
// NOTE(review): int compared against size_t n -- fine while n fits in int,
// but the signed index would overflow first for very large n; confirm limits.
if(index < n) {
unsigned int isOne = (d_in[index] >> bit) & 1;
d_ones[index] = isOne;       // 1 iff the selected bit is set
d_zeros[index] = 1 - isOne;  // complement predicate
}
} |
a9fa6f80e1941bdde72045f9fa8293dc520346e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
namespace cv { namespace gpu { namespace device
{
namespace match_template
{
// Per-channel arithmetic helpers that let the templated kernels below work
// uniformly on 1- to 4-channel pixels:
//   sum(v)    - horizontal add of all channels into one float
//   first(v)  - extract channel 0
//   mul(a, b) - element-wise product (uchar inputs promoted to float)
//   sub(a, b) - element-wise difference (uchar inputs promoted to float)
__device__ __forceinline__ float sum(float v) { return v; }
__device__ __forceinline__ float sum(float2 v) { return v.x + v.y; }
__device__ __forceinline__ float sum(float3 v) { return v.x + v.y + v.z; }
__device__ __forceinline__ float sum(float4 v) { return v.x + v.y + v.z + v.w; }
__device__ __forceinline__ float first(float v) { return v; }
__device__ __forceinline__ float first(float2 v) { return v.x; }
__device__ __forceinline__ float first(float3 v) { return v.x; }
__device__ __forceinline__ float first(float4 v) { return v.x; }
__device__ __forceinline__ float mul(float a, float b) { return a * b; }
__device__ __forceinline__ float2 mul(float2 a, float2 b) { return make_float2(a.x * b.x, a.y * b.y); }
__device__ __forceinline__ float3 mul(float3 a, float3 b) { return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); }
__device__ __forceinline__ float4 mul(float4 a, float4 b) { return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); }
__device__ __forceinline__ float mul(uchar a, uchar b) { return a * b; }
__device__ __forceinline__ float2 mul(uchar2 a, uchar2 b) { return make_float2(a.x * b.x, a.y * b.y); }
__device__ __forceinline__ float3 mul(uchar3 a, uchar3 b) { return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); }
__device__ __forceinline__ float4 mul(uchar4 a, uchar4 b) { return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); }
__device__ __forceinline__ float sub(float a, float b) { return a - b; }
__device__ __forceinline__ float2 sub(float2 a, float2 b) { return make_float2(a.x - b.x, a.y - b.y); }
__device__ __forceinline__ float3 sub(float3 a, float3 b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); }
__device__ __forceinline__ float4 sub(float4 a, float4 b) { return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); }
__device__ __forceinline__ float sub(uchar a, uchar b) { return a - b; }
__device__ __forceinline__ float2 sub(uchar2 a, uchar2 b) { return make_float2(a.x - b.x, a.y - b.y); }
__device__ __forceinline__ float3 sub(uchar3 a, uchar3 b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); }
__device__ __forceinline__ float4 sub(uchar4 a, uchar4 b) { return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); }
//////////////////////////////////////////////////////////////////////
// Naive_CCORR
// Brute-force cross-correlation: each thread computes one result pixel as
// the dot product of the w x h template with the image window whose
// top-left corner is (x, y).  T is the channel type (uchar or float), cn
// the channel count; per-channel products accumulate in float and are
// summed across channels at the end.
template <typename T, int cn>
__global__ void matchTemplateNaiveKernel_CCORR(int w, int h, const PtrStepb image, const PtrStepb templ, PtrStepSzf result)
{
typedef typename TypeVec<T, cn>::vec_type Type;
typedef typename TypeVec<float, cn>::vec_type Typef;
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
// Guard the grid tail: the grid is rounded up to whole blocks.
if (x < result.cols && y < result.rows)
{
Typef res = VecTraits<Typef>::all(0);
for (int i = 0; i < h; ++i)
{
// Raw byte pointers reinterpreted as rows of cn-channel pixels.
const Type* image_ptr = (const Type*)image.ptr(y + i);
const Type* templ_ptr = (const Type*)templ.ptr(i);
for (int j = 0; j < w; ++j)
res = res + mul(image_ptr[x + j], templ_ptr[j]);
}
result.ptr(y)[x] = sum(res);
}
}
// Host-side launcher for the naive cross-correlation kernel: one thread
// per result pixel, 32x8 thread blocks.  Blocks on the default (NULL)
// stream only; asynchronous otherwise.
template <typename T, int cn>
void matchTemplateNaive_CCORR(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, hipStream_t stream)
{
    const dim3 block(32, 8);
    const dim3 launch_grid(divUp(result.cols, block.x), divUp(result.rows, block.y));

    hipLaunchKernelGGL(( matchTemplateNaiveKernel_CCORR<T, cn>), dim3(launch_grid), dim3(block), 0, stream, templ.cols, templ.rows, image, templ, result);
    cudaSafeCall( hipGetLastError() );

    if (stream == 0)
        cudaSafeCall( hipDeviceSynchronize() );
}
// Type-erased entry point for 32F images: dispatch on runtime channel
// count (expected 1..4; slot 0 is a null placeholder) to the matching
// template instantiation.
void matchTemplateNaive_CCORR_32F(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, int cn, hipStream_t stream)
{
    typedef void (*Caller)(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, hipStream_t stream);

    static const Caller dispatch[] =
    {
        0,
        matchTemplateNaive_CCORR<float, 1>,
        matchTemplateNaive_CCORR<float, 2>,
        matchTemplateNaive_CCORR<float, 3>,
        matchTemplateNaive_CCORR<float, 4>
    };

    dispatch[cn](image, templ, result, stream);
}
// Type-erased entry point for 8U images: dispatch on runtime channel
// count (expected 1..4; slot 0 is a null placeholder) to the matching
// template instantiation.
void matchTemplateNaive_CCORR_8U(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, int cn, hipStream_t stream)
{
    typedef void (*Caller)(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, hipStream_t stream);

    static const Caller dispatch[] =
    {
        0,
        matchTemplateNaive_CCORR<uchar, 1>,
        matchTemplateNaive_CCORR<uchar, 2>,
        matchTemplateNaive_CCORR<uchar, 3>,
        matchTemplateNaive_CCORR<uchar, 4>
    };

    dispatch[cn](image, templ, result, stream);
}
//////////////////////////////////////////////////////////////////////
// Naive_SQDIFF
// Brute-force squared-difference match: each thread computes one result
// pixel as sum over the w x h window of (image - templ)^2, accumulated
// per channel in float and summed across channels at the end.
template <typename T, int cn>
__global__ void matchTemplateNaiveKernel_SQDIFF(int w, int h, const PtrStepb image, const PtrStepb templ, PtrStepSzf result)
{
typedef typename TypeVec<T, cn>::vec_type Type;
typedef typename TypeVec<float, cn>::vec_type Typef;
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
// Guard the grid tail: the grid is rounded up to whole blocks.
if (x < result.cols && y < result.rows)
{
Typef res = VecTraits<Typef>::all(0);
Typef delta;
for (int i = 0; i < h; ++i)
{
const Type* image_ptr = (const Type*)image.ptr(y + i);
const Type* templ_ptr = (const Type*)templ.ptr(i);
for (int j = 0; j < w; ++j)
{
// sub() promotes uchar inputs to float before differencing.
delta = sub(image_ptr[x + j], templ_ptr[j]);
res = res + delta * delta;
}
}
result.ptr(y)[x] = sum(res);
}
}
// Host-side launcher for the naive squared-difference kernel: one thread
// per result pixel, 32x8 thread blocks.  Blocks on the default (NULL)
// stream only; asynchronous otherwise.
template <typename T, int cn>
void matchTemplateNaive_SQDIFF(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, hipStream_t stream)
{
    const dim3 block(32, 8);
    const dim3 launch_grid(divUp(result.cols, block.x), divUp(result.rows, block.y));

    hipLaunchKernelGGL(( matchTemplateNaiveKernel_SQDIFF<T, cn>), dim3(launch_grid), dim3(block), 0, stream, templ.cols, templ.rows, image, templ, result);
    cudaSafeCall( hipGetLastError() );

    if (stream == 0)
        cudaSafeCall( hipDeviceSynchronize() );
}
// Type-erased entry point for 32F images: dispatch on runtime channel
// count (expected 1..4; slot 0 is a null placeholder).
void matchTemplateNaive_SQDIFF_32F(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, int cn, hipStream_t stream)
{
    typedef void (*Caller)(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, hipStream_t stream);

    static const Caller dispatch[] =
    {
        0,
        matchTemplateNaive_SQDIFF<float, 1>,
        matchTemplateNaive_SQDIFF<float, 2>,
        matchTemplateNaive_SQDIFF<float, 3>,
        matchTemplateNaive_SQDIFF<float, 4>
    };

    dispatch[cn](image, templ, result, stream);
}
// Type-erased entry point for 8U images: dispatch on runtime channel
// count (expected 1..4; slot 0 is a null placeholder).
void matchTemplateNaive_SQDIFF_8U(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, int cn, hipStream_t stream)
{
    typedef void (*Caller)(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, hipStream_t stream);

    static const Caller dispatch[] =
    {
        0,
        matchTemplateNaive_SQDIFF<uchar, 1>,
        matchTemplateNaive_SQDIFF<uchar, 2>,
        matchTemplateNaive_SQDIFF<uchar, 3>,
        matchTemplateNaive_SQDIFF<uchar, 4>
    };

    dispatch[cn](image, templ, result, stream);
}
//////////////////////////////////////////////////////////////////////
// Prepared_SQDIFF
// Turns a precomputed cross-correlation into the SQDIFF score using the
// identity sum((I - T)^2) = sum(I^2) - 2*ccorr + sum(T^2).  The windowed
// sum(I^2) comes from the integral image of squared pixel values via the
// standard four-corner lookup; the column index is scaled by cn, which
// matches an interleaved per-channel integral layout (channel handling is
// folded into the sums upstream -- TODO(review): confirm against caller).
template <int cn>
__global__ void matchTemplatePreparedKernel_SQDIFF_8U(int w, int h, const PtrStep<unsigned long long> image_sqsum, unsigned long long templ_sqsum, PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
// Four-corner integral-image lookup of sum(I^2) over the w x h window.
float image_sqsum_ = (float)(
(image_sqsum.ptr(y + h)[(x + w) * cn] - image_sqsum.ptr(y)[(x + w) * cn]) -
(image_sqsum.ptr(y + h)[x * cn] - image_sqsum.ptr(y)[x * cn]));
// result already holds the raw cross-correlation for this pixel.
float ccorr = result.ptr(y)[x];
result.ptr(y)[x] = image_sqsum_ - 2.f * ccorr + templ_sqsum;
}
}
// Launcher: 32x8 blocks, one thread per result pixel; blocks only on the
// default (NULL) stream.
template <int cn>
void matchTemplatePrepared_SQDIFF_8U(int w, int h, const PtrStepSz<unsigned long long> image_sqsum, unsigned long long templ_sqsum, PtrStepSzf result, hipStream_t stream)
{
const dim3 threads(32, 8);
const dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
hipLaunchKernelGGL(( matchTemplatePreparedKernel_SQDIFF_8U<cn>), dim3(grid), dim3(threads), 0, stream, w, h, image_sqsum, templ_sqsum, result);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
// Runtime channel-count dispatch (cn must be 1..4; slot 0 is a null
// placeholder).
void matchTemplatePrepared_SQDIFF_8U(int w, int h, const PtrStepSz<unsigned long long> image_sqsum, unsigned long long templ_sqsum, PtrStepSzf result, int cn,
hipStream_t stream)
{
typedef void (*caller_t)(int w, int h, const PtrStepSz<unsigned long long> image_sqsum, unsigned long long templ_sqsum, PtrStepSzf result, hipStream_t stream);
static const caller_t callers[] =
{
0, matchTemplatePrepared_SQDIFF_8U<1>, matchTemplatePrepared_SQDIFF_8U<2>, matchTemplatePrepared_SQDIFF_8U<3>, matchTemplatePrepared_SQDIFF_8U<4>
};
callers[cn](w, h, image_sqsum, templ_sqsum, result, stream);
}
//////////////////////////////////////////////////////////////////////
// Prepared_SQDIFF_NORMED
// normAcc* are accurate normalization routines which make GPU matchTemplate
// consistent with CPU one
// Safe normalized division num/denum for correlation scores.  When |num|
// exceeds denum by at most 12.5% the ratio is treated as saturated (+/-1,
// absorbing integral-image rounding); beyond that the denominator is
// considered degenerate and the result clamps to 0.
__device__ float normAcc(float num, float denum)
{
if (::fabs(num) < denum)
return num / denum;
if (::fabs(num) < denum * 1.125f)
return num > 0 ? 1 : -1;
return 0;
}
// Same as normAcc, but the degenerate-denominator fallback is 1: for
// SQDIFF_NORMED the worst (most dissimilar) score is 1, not 0.
__device__ float normAcc_SQDIFF(float num, float denum)
{
if (::fabs(num) < denum)
return num / denum;
if (::fabs(num) < denum * 1.125f)
return num > 0 ? 1 : -1;
return 1;
}
// Normalized SQDIFF: computes (sum(I^2) - 2*ccorr + sum(T^2)) divided by
// sqrt(sum(I^2) * sum(T^2)), reading the windowed sum(I^2) from the
// squared-integral image (four-corner lookup, column index scaled by cn)
// and the raw cross-correlation from result.
template <int cn>
__global__ void matchTemplatePreparedKernel_SQDIFF_NORMED_8U(
int w, int h, const PtrStep<unsigned long long> image_sqsum,
unsigned long long templ_sqsum, PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
float image_sqsum_ = (float)(
(image_sqsum.ptr(y + h)[(x + w) * cn] - image_sqsum.ptr(y)[(x + w) * cn]) -
(image_sqsum.ptr(y + h)[x * cn] - image_sqsum.ptr(y)[x * cn]));
float ccorr = result.ptr(y)[x];
// normAcc_SQDIFF clamps near-degenerate denominators to the worst score 1.
result.ptr(y)[x] = normAcc_SQDIFF(image_sqsum_ - 2.f * ccorr + templ_sqsum,
sqrtf(image_sqsum_ * templ_sqsum));
}
}
// Launcher: 32x8 blocks, one thread per result pixel; blocks only on the
// default (NULL) stream.
template <int cn>
void matchTemplatePrepared_SQDIFF_NORMED_8U(int w, int h, const PtrStepSz<unsigned long long> image_sqsum, unsigned long long templ_sqsum,
PtrStepSzf result, hipStream_t stream)
{
const dim3 threads(32, 8);
const dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
hipLaunchKernelGGL(( matchTemplatePreparedKernel_SQDIFF_NORMED_8U<cn>), dim3(grid), dim3(threads), 0, stream, w, h, image_sqsum, templ_sqsum, result);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
// Runtime channel-count dispatch (cn must be 1..4; slot 0 is a null
// placeholder).
void matchTemplatePrepared_SQDIFF_NORMED_8U(int w, int h, const PtrStepSz<unsigned long long> image_sqsum, unsigned long long templ_sqsum,
PtrStepSzf result, int cn, hipStream_t stream)
{
typedef void (*caller_t)(int w, int h, const PtrStepSz<unsigned long long> image_sqsum, unsigned long long templ_sqsum, PtrStepSzf result, hipStream_t stream);
static const caller_t callers[] =
{
0, matchTemplatePrepared_SQDIFF_NORMED_8U<1>, matchTemplatePrepared_SQDIFF_NORMED_8U<2>, matchTemplatePrepared_SQDIFF_NORMED_8U<3>, matchTemplatePrepared_SQDIFF_NORMED_8U<4>
};
callers[cn](w, h, image_sqsum, templ_sqsum, result, stream);
}
//////////////////////////////////////////////////////////////////////
// Prepared_CCOFF
// CCOEFF for single-channel 8U: subtracts mean(T) * windowed sum(I) from
// the precomputed cross-correlation stored in result.  templ_sum_scale is
// sum(T)/(w*h); the windowed sum(I) comes from the integral image via the
// four-corner lookup.
__global__ void matchTemplatePreparedKernel_CCOFF_8U(int w, int h, float templ_sum_scale, const PtrStep<unsigned int> image_sum, PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
float image_sum_ = (float)(
(image_sum.ptr(y + h)[x + w] - image_sum.ptr(y)[x + w]) -
(image_sum.ptr(y + h)[x] - image_sum.ptr(y)[x]));
float ccorr = result.ptr(y)[x];
result.ptr(y)[x] = ccorr - image_sum_ * templ_sum_scale;
}
}
// Launcher: precomputes sum(T)/(w*h) on the host and runs 32x8 blocks,
// one thread per result pixel; blocks only on the default (NULL) stream.
void matchTemplatePrepared_CCOFF_8U(int w, int h, const PtrStepSz<unsigned int> image_sum, unsigned int templ_sum, PtrStepSzf result, hipStream_t stream)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
hipLaunchKernelGGL(( matchTemplatePreparedKernel_CCOFF_8U), dim3(grid), dim3(threads), 0, stream, w, h, (float)templ_sum / (w * h), image_sum, result);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
// CCOEFF for two-channel 8U images: like the single-channel kernel but
// with one integral image and one mean-scale per channel; the mean terms
// of both channels are subtracted from the precomputed cross-correlation.
__global__ void matchTemplatePreparedKernel_CCOFF_8UC2(
int w, int h, float templ_sum_scale_r, float templ_sum_scale_g,
const PtrStep<unsigned int> image_sum_r,
const PtrStep<unsigned int> image_sum_g,
PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
// Four-corner integral lookups of the windowed per-channel sums.
float image_sum_r_ = (float)(
(image_sum_r.ptr(y + h)[x + w] - image_sum_r.ptr(y)[x + w]) -
(image_sum_r.ptr(y + h)[x] - image_sum_r.ptr(y)[x]));
float image_sum_g_ = (float)(
(image_sum_g.ptr(y + h)[x + w] - image_sum_g.ptr(y)[x + w]) -
(image_sum_g.ptr(y + h)[x] - image_sum_g.ptr(y)[x]));
float ccorr = result.ptr(y)[x];
result.ptr(y)[x] = ccorr - image_sum_r_ * templ_sum_scale_r
- image_sum_g_ * templ_sum_scale_g;
}
}
// Launcher: precomputes per-channel template means on the host; 32x8
// blocks, one thread per result pixel; blocks only on the default stream.
void matchTemplatePrepared_CCOFF_8UC2(
int w, int h,
const PtrStepSz<unsigned int> image_sum_r,
const PtrStepSz<unsigned int> image_sum_g,
unsigned int templ_sum_r, unsigned int templ_sum_g,
PtrStepSzf result, hipStream_t stream)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
hipLaunchKernelGGL(( matchTemplatePreparedKernel_CCOFF_8UC2), dim3(grid), dim3(threads), 0, stream,
w, h, (float)templ_sum_r / (w * h), (float)templ_sum_g / (w * h),
image_sum_r, image_sum_g, result);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
// CCOEFF for three-channel 8U images: same scheme as the 8UC2 variant
// extended to a third channel (one integral image + one mean-scale each).
__global__ void matchTemplatePreparedKernel_CCOFF_8UC3(
int w, int h,
float templ_sum_scale_r,
float templ_sum_scale_g,
float templ_sum_scale_b,
const PtrStep<unsigned int> image_sum_r,
const PtrStep<unsigned int> image_sum_g,
const PtrStep<unsigned int> image_sum_b,
PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
// Four-corner integral lookups of the windowed per-channel sums.
float image_sum_r_ = (float)(
(image_sum_r.ptr(y + h)[x + w] - image_sum_r.ptr(y)[x + w]) -
(image_sum_r.ptr(y + h)[x] - image_sum_r.ptr(y)[x]));
float image_sum_g_ = (float)(
(image_sum_g.ptr(y + h)[x + w] - image_sum_g.ptr(y)[x + w]) -
(image_sum_g.ptr(y + h)[x] - image_sum_g.ptr(y)[x]));
float image_sum_b_ = (float)(
(image_sum_b.ptr(y + h)[x + w] - image_sum_b.ptr(y)[x + w]) -
(image_sum_b.ptr(y + h)[x] - image_sum_b.ptr(y)[x]));
float ccorr = result.ptr(y)[x];
result.ptr(y)[x] = ccorr - image_sum_r_ * templ_sum_scale_r
- image_sum_g_ * templ_sum_scale_g
- image_sum_b_ * templ_sum_scale_b;
}
}
// Launcher: precomputes per-channel template means on the host; 32x8
// blocks, one thread per result pixel; blocks only on the default stream.
void matchTemplatePrepared_CCOFF_8UC3(
int w, int h,
const PtrStepSz<unsigned int> image_sum_r,
const PtrStepSz<unsigned int> image_sum_g,
const PtrStepSz<unsigned int> image_sum_b,
unsigned int templ_sum_r,
unsigned int templ_sum_g,
unsigned int templ_sum_b,
PtrStepSzf result, hipStream_t stream)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
hipLaunchKernelGGL(( matchTemplatePreparedKernel_CCOFF_8UC3), dim3(grid), dim3(threads), 0, stream,
w, h,
(float)templ_sum_r / (w * h),
(float)templ_sum_g / (w * h),
(float)templ_sum_b / (w * h),
image_sum_r, image_sum_g, image_sum_b, result);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
// CCOEFF for four-channel 8U images: same scheme as the 8UC2/8UC3
// variants extended to a fourth channel.
__global__ void matchTemplatePreparedKernel_CCOFF_8UC4(
int w, int h,
float templ_sum_scale_r,
float templ_sum_scale_g,
float templ_sum_scale_b,
float templ_sum_scale_a,
const PtrStep<unsigned int> image_sum_r,
const PtrStep<unsigned int> image_sum_g,
const PtrStep<unsigned int> image_sum_b,
const PtrStep<unsigned int> image_sum_a,
PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
// Four-corner integral lookups of the windowed per-channel sums.
float image_sum_r_ = (float)(
(image_sum_r.ptr(y + h)[x + w] - image_sum_r.ptr(y)[x + w]) -
(image_sum_r.ptr(y + h)[x] - image_sum_r.ptr(y)[x]));
float image_sum_g_ = (float)(
(image_sum_g.ptr(y + h)[x + w] - image_sum_g.ptr(y)[x + w]) -
(image_sum_g.ptr(y + h)[x] - image_sum_g.ptr(y)[x]));
float image_sum_b_ = (float)(
(image_sum_b.ptr(y + h)[x + w] - image_sum_b.ptr(y)[x + w]) -
(image_sum_b.ptr(y + h)[x] - image_sum_b.ptr(y)[x]));
float image_sum_a_ = (float)(
(image_sum_a.ptr(y + h)[x + w] - image_sum_a.ptr(y)[x + w]) -
(image_sum_a.ptr(y + h)[x] - image_sum_a.ptr(y)[x]));
float ccorr = result.ptr(y)[x];
result.ptr(y)[x] = ccorr - image_sum_r_ * templ_sum_scale_r
- image_sum_g_ * templ_sum_scale_g
- image_sum_b_ * templ_sum_scale_b
- image_sum_a_ * templ_sum_scale_a;
}
}
// Launcher: precomputes per-channel template means on the host; 32x8
// blocks, one thread per result pixel; blocks only on the default stream.
void matchTemplatePrepared_CCOFF_8UC4(
int w, int h,
const PtrStepSz<unsigned int> image_sum_r,
const PtrStepSz<unsigned int> image_sum_g,
const PtrStepSz<unsigned int> image_sum_b,
const PtrStepSz<unsigned int> image_sum_a,
unsigned int templ_sum_r,
unsigned int templ_sum_g,
unsigned int templ_sum_b,
unsigned int templ_sum_a,
PtrStepSzf result, hipStream_t stream)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
hipLaunchKernelGGL(( matchTemplatePreparedKernel_CCOFF_8UC4), dim3(grid), dim3(threads), 0, stream,
w, h,
(float)templ_sum_r / (w * h),
(float)templ_sum_g / (w * h),
(float)templ_sum_b / (w * h),
(float)templ_sum_a / (w * h),
image_sum_r, image_sum_g, image_sum_b, image_sum_a,
result);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
//////////////////////////////////////////////////////////////////////
// Prepared_CCOFF_NORMED
// Normalized CCOEFF for single-channel 8U: numerator is the mean-removed
// cross-correlation (ccorr - sum(I)*mean(T)); the denominator combines
// the window variance term (sum(I^2) - sum(I)^2/(w*h), via the integral
// images) with the precomputed template term templ_sqsum_scale.
__global__ void matchTemplatePreparedKernel_CCOFF_NORMED_8U(
int w, int h, float weight,
float templ_sum_scale, float templ_sqsum_scale,
const PtrStep<unsigned int> image_sum,
const PtrStep<unsigned long long> image_sqsum,
PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
float ccorr = result.ptr(y)[x];
// Four-corner integral lookups of windowed sum(I) and sum(I^2).
float image_sum_ = (float)(
(image_sum.ptr(y + h)[x + w] - image_sum.ptr(y)[x + w]) -
(image_sum.ptr(y + h)[x] - image_sum.ptr(y)[x]));
float image_sqsum_ = (float)(
(image_sqsum.ptr(y + h)[x + w] - image_sqsum.ptr(y)[x + w]) -
(image_sqsum.ptr(y + h)[x] - image_sqsum.ptr(y)[x]));
// normAcc guards against near-degenerate denominators.
result.ptr(y)[x] = normAcc(ccorr - image_sum_ * templ_sum_scale,
sqrtf(templ_sqsum_scale * (image_sqsum_ - weight * image_sum_ * image_sum_)));
}
}
// Launcher: precomputes weight = 1/(w*h), the template mean and the
// template variance term on the host; 32x8 blocks, one thread per result
// pixel; blocks only on the default (NULL) stream.
void matchTemplatePrepared_CCOFF_NORMED_8U(
int w, int h, const PtrStepSz<unsigned int> image_sum,
const PtrStepSz<unsigned long long> image_sqsum,
unsigned int templ_sum, unsigned long long templ_sqsum,
PtrStepSzf result, hipStream_t stream)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
float weight = 1.f / (w * h);
float templ_sum_scale = templ_sum * weight;
// Template variance term: sum(T^2) - sum(T)^2/(w*h).
float templ_sqsum_scale = templ_sqsum - weight * templ_sum * templ_sum;
hipLaunchKernelGGL(( matchTemplatePreparedKernel_CCOFF_NORMED_8U), dim3(grid), dim3(threads), 0, stream,
w, h, weight, templ_sum_scale, templ_sqsum_scale,
image_sum, image_sqsum, result);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
// Normalized CCOEFF for two-channel 8U images: the numerator removes each
// channel's mean term; the denominator accumulates the per-channel window
// variance terms and multiplies by the combined template variance term
// templ_sqsum_scale before the square root.
__global__ void matchTemplatePreparedKernel_CCOFF_NORMED_8UC2(
int w, int h, float weight,
float templ_sum_scale_r, float templ_sum_scale_g,
float templ_sqsum_scale,
const PtrStep<unsigned int> image_sum_r, const PtrStep<unsigned long long> image_sqsum_r,
const PtrStep<unsigned int> image_sum_g, const PtrStep<unsigned long long> image_sqsum_g,
PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
// Four-corner integral lookups of per-channel sum(I) and sum(I^2).
float image_sum_r_ = (float)(
(image_sum_r.ptr(y + h)[x + w] - image_sum_r.ptr(y)[x + w]) -
(image_sum_r.ptr(y + h)[x] - image_sum_r.ptr(y)[x]));
float image_sqsum_r_ = (float)(
(image_sqsum_r.ptr(y + h)[x + w] - image_sqsum_r.ptr(y)[x + w]) -
(image_sqsum_r.ptr(y + h)[x] - image_sqsum_r.ptr(y)[x]));
float image_sum_g_ = (float)(
(image_sum_g.ptr(y + h)[x + w] - image_sum_g.ptr(y)[x + w]) -
(image_sum_g.ptr(y + h)[x] - image_sum_g.ptr(y)[x]));
float image_sqsum_g_ = (float)(
(image_sqsum_g.ptr(y + h)[x + w] - image_sqsum_g.ptr(y)[x + w]) -
(image_sqsum_g.ptr(y + h)[x] - image_sqsum_g.ptr(y)[x]));
float num = result.ptr(y)[x] - image_sum_r_ * templ_sum_scale_r
- image_sum_g_ * templ_sum_scale_g;
float denum = sqrtf(templ_sqsum_scale * (image_sqsum_r_ - weight * image_sum_r_ * image_sum_r_
+ image_sqsum_g_ - weight * image_sum_g_ * image_sum_g_));
result.ptr(y)[x] = normAcc(num, denum);
}
}
// Launcher: precomputes per-channel template means and the combined
// template variance term on the host; 32x8 blocks; blocks only on the
// default (NULL) stream.
void matchTemplatePrepared_CCOFF_NORMED_8UC2(
int w, int h,
const PtrStepSz<unsigned int> image_sum_r, const PtrStepSz<unsigned long long> image_sqsum_r,
const PtrStepSz<unsigned int> image_sum_g, const PtrStepSz<unsigned long long> image_sqsum_g,
unsigned int templ_sum_r, unsigned long long templ_sqsum_r,
unsigned int templ_sum_g, unsigned long long templ_sqsum_g,
PtrStepSzf result, hipStream_t stream)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
float weight = 1.f / (w * h);
float templ_sum_scale_r = templ_sum_r * weight;
float templ_sum_scale_g = templ_sum_g * weight;
// Combined template variance: sum over channels of sum(T^2) - sum(T)^2/(w*h).
float templ_sqsum_scale = templ_sqsum_r - weight * templ_sum_r * templ_sum_r
+ templ_sqsum_g - weight * templ_sum_g * templ_sum_g;
hipLaunchKernelGGL(( matchTemplatePreparedKernel_CCOFF_NORMED_8UC2), dim3(grid), dim3(threads), 0, stream,
w, h, weight,
templ_sum_scale_r, templ_sum_scale_g,
templ_sqsum_scale,
image_sum_r, image_sqsum_r,
image_sum_g, image_sqsum_g,
result);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
// Normalized CCOEFF for three-channel 8U images: same scheme as the 8UC2
// variant extended to a third channel.
__global__ void matchTemplatePreparedKernel_CCOFF_NORMED_8UC3(
int w, int h, float weight,
float templ_sum_scale_r, float templ_sum_scale_g, float templ_sum_scale_b,
float templ_sqsum_scale,
const PtrStep<unsigned int> image_sum_r, const PtrStep<unsigned long long> image_sqsum_r,
const PtrStep<unsigned int> image_sum_g, const PtrStep<unsigned long long> image_sqsum_g,
const PtrStep<unsigned int> image_sum_b, const PtrStep<unsigned long long> image_sqsum_b,
PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
// Four-corner integral lookups of per-channel sum(I) and sum(I^2).
float image_sum_r_ = (float)(
(image_sum_r.ptr(y + h)[x + w] - image_sum_r.ptr(y)[x + w]) -
(image_sum_r.ptr(y + h)[x] - image_sum_r.ptr(y)[x]));
float image_sqsum_r_ = (float)(
(image_sqsum_r.ptr(y + h)[x + w] - image_sqsum_r.ptr(y)[x + w]) -
(image_sqsum_r.ptr(y + h)[x] - image_sqsum_r.ptr(y)[x]));
float image_sum_g_ = (float)(
(image_sum_g.ptr(y + h)[x + w] - image_sum_g.ptr(y)[x + w]) -
(image_sum_g.ptr(y + h)[x] - image_sum_g.ptr(y)[x]));
float image_sqsum_g_ = (float)(
(image_sqsum_g.ptr(y + h)[x + w] - image_sqsum_g.ptr(y)[x + w]) -
(image_sqsum_g.ptr(y + h)[x] - image_sqsum_g.ptr(y)[x]));
float image_sum_b_ = (float)(
(image_sum_b.ptr(y + h)[x + w] - image_sum_b.ptr(y)[x + w]) -
(image_sum_b.ptr(y + h)[x] - image_sum_b.ptr(y)[x]));
float image_sqsum_b_ = (float)(
(image_sqsum_b.ptr(y + h)[x + w] - image_sqsum_b.ptr(y)[x + w]) -
(image_sqsum_b.ptr(y + h)[x] - image_sqsum_b.ptr(y)[x]));
float num = result.ptr(y)[x] - image_sum_r_ * templ_sum_scale_r
- image_sum_g_ * templ_sum_scale_g
- image_sum_b_ * templ_sum_scale_b;
float denum = sqrtf(templ_sqsum_scale * (image_sqsum_r_ - weight * image_sum_r_ * image_sum_r_
+ image_sqsum_g_ - weight * image_sum_g_ * image_sum_g_
+ image_sqsum_b_ - weight * image_sum_b_ * image_sum_b_));
result.ptr(y)[x] = normAcc(num, denum);
}
}
// Launcher: precomputes per-channel template means and the combined
// template variance term on the host; 32x8 blocks; blocks only on the
// default (NULL) stream.
void matchTemplatePrepared_CCOFF_NORMED_8UC3(
int w, int h,
const PtrStepSz<unsigned int> image_sum_r, const PtrStepSz<unsigned long long> image_sqsum_r,
const PtrStepSz<unsigned int> image_sum_g, const PtrStepSz<unsigned long long> image_sqsum_g,
const PtrStepSz<unsigned int> image_sum_b, const PtrStepSz<unsigned long long> image_sqsum_b,
unsigned int templ_sum_r, unsigned long long templ_sqsum_r,
unsigned int templ_sum_g, unsigned long long templ_sqsum_g,
unsigned int templ_sum_b, unsigned long long templ_sqsum_b,
PtrStepSzf result, hipStream_t stream)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
float weight = 1.f / (w * h);
float templ_sum_scale_r = templ_sum_r * weight;
float templ_sum_scale_g = templ_sum_g * weight;
float templ_sum_scale_b = templ_sum_b * weight;
// Combined template variance: sum over channels of sum(T^2) - sum(T)^2/(w*h).
float templ_sqsum_scale = templ_sqsum_r - weight * templ_sum_r * templ_sum_r
+ templ_sqsum_g - weight * templ_sum_g * templ_sum_g
+ templ_sqsum_b - weight * templ_sum_b * templ_sum_b;
hipLaunchKernelGGL(( matchTemplatePreparedKernel_CCOFF_NORMED_8UC3), dim3(grid), dim3(threads), 0, stream,
w, h, weight,
templ_sum_scale_r, templ_sum_scale_g, templ_sum_scale_b,
templ_sqsum_scale,
image_sum_r, image_sqsum_r,
image_sum_g, image_sqsum_g,
image_sum_b, image_sqsum_b,
result);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
// Normalized CCOEFF for four-channel 8U images: same scheme as the
// 8UC2/8UC3 variants extended to a fourth channel.
__global__ void matchTemplatePreparedKernel_CCOFF_NORMED_8UC4(
int w, int h, float weight,
float templ_sum_scale_r, float templ_sum_scale_g, float templ_sum_scale_b,
float templ_sum_scale_a, float templ_sqsum_scale,
const PtrStep<unsigned int> image_sum_r, const PtrStep<unsigned long long> image_sqsum_r,
const PtrStep<unsigned int> image_sum_g, const PtrStep<unsigned long long> image_sqsum_g,
const PtrStep<unsigned int> image_sum_b, const PtrStep<unsigned long long> image_sqsum_b,
const PtrStep<unsigned int> image_sum_a, const PtrStep<unsigned long long> image_sqsum_a,
PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
// Four-corner integral lookups of per-channel sum(I) and sum(I^2).
float image_sum_r_ = (float)(
(image_sum_r.ptr(y + h)[x + w] - image_sum_r.ptr(y)[x + w]) -
(image_sum_r.ptr(y + h)[x] - image_sum_r.ptr(y)[x]));
float image_sqsum_r_ = (float)(
(image_sqsum_r.ptr(y + h)[x + w] - image_sqsum_r.ptr(y)[x + w]) -
(image_sqsum_r.ptr(y + h)[x] - image_sqsum_r.ptr(y)[x]));
float image_sum_g_ = (float)(
(image_sum_g.ptr(y + h)[x + w] - image_sum_g.ptr(y)[x + w]) -
(image_sum_g.ptr(y + h)[x] - image_sum_g.ptr(y)[x]));
float image_sqsum_g_ = (float)(
(image_sqsum_g.ptr(y + h)[x + w] - image_sqsum_g.ptr(y)[x + w]) -
(image_sqsum_g.ptr(y + h)[x] - image_sqsum_g.ptr(y)[x]));
float image_sum_b_ = (float)(
(image_sum_b.ptr(y + h)[x + w] - image_sum_b.ptr(y)[x + w]) -
(image_sum_b.ptr(y + h)[x] - image_sum_b.ptr(y)[x]));
float image_sqsum_b_ = (float)(
(image_sqsum_b.ptr(y + h)[x + w] - image_sqsum_b.ptr(y)[x + w]) -
(image_sqsum_b.ptr(y + h)[x] - image_sqsum_b.ptr(y)[x]));
float image_sum_a_ = (float)(
(image_sum_a.ptr(y + h)[x + w] - image_sum_a.ptr(y)[x + w]) -
(image_sum_a.ptr(y + h)[x] - image_sum_a.ptr(y)[x]));
float image_sqsum_a_ = (float)(
(image_sqsum_a.ptr(y + h)[x + w] - image_sqsum_a.ptr(y)[x + w]) -
(image_sqsum_a.ptr(y + h)[x] - image_sqsum_a.ptr(y)[x]));
float num = result.ptr(y)[x] - image_sum_r_ * templ_sum_scale_r - image_sum_g_ * templ_sum_scale_g
- image_sum_b_ * templ_sum_scale_b - image_sum_a_ * templ_sum_scale_a;
float denum = sqrtf(templ_sqsum_scale * (image_sqsum_r_ - weight * image_sum_r_ * image_sum_r_
+ image_sqsum_g_ - weight * image_sum_g_ * image_sum_g_
+ image_sqsum_b_ - weight * image_sum_b_ * image_sum_b_
+ image_sqsum_a_ - weight * image_sum_a_ * image_sum_a_));
result.ptr(y)[x] = normAcc(num, denum);
}
}
// Launcher: precomputes per-channel template means and the combined
// template variance term on the host; 32x8 blocks; blocks only on the
// default (NULL) stream.
void matchTemplatePrepared_CCOFF_NORMED_8UC4(
int w, int h,
const PtrStepSz<unsigned int> image_sum_r, const PtrStepSz<unsigned long long> image_sqsum_r,
const PtrStepSz<unsigned int> image_sum_g, const PtrStepSz<unsigned long long> image_sqsum_g,
const PtrStepSz<unsigned int> image_sum_b, const PtrStepSz<unsigned long long> image_sqsum_b,
const PtrStepSz<unsigned int> image_sum_a, const PtrStepSz<unsigned long long> image_sqsum_a,
unsigned int templ_sum_r, unsigned long long templ_sqsum_r,
unsigned int templ_sum_g, unsigned long long templ_sqsum_g,
unsigned int templ_sum_b, unsigned long long templ_sqsum_b,
unsigned int templ_sum_a, unsigned long long templ_sqsum_a,
PtrStepSzf result, hipStream_t stream)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
float weight = 1.f / (w * h);
float templ_sum_scale_r = templ_sum_r * weight;
float templ_sum_scale_g = templ_sum_g * weight;
float templ_sum_scale_b = templ_sum_b * weight;
float templ_sum_scale_a = templ_sum_a * weight;
// Combined template variance: sum over channels of sum(T^2) - sum(T)^2/(w*h).
float templ_sqsum_scale = templ_sqsum_r - weight * templ_sum_r * templ_sum_r
+ templ_sqsum_g - weight * templ_sum_g * templ_sum_g
+ templ_sqsum_b - weight * templ_sum_b * templ_sum_b
+ templ_sqsum_a - weight * templ_sum_a * templ_sum_a;
hipLaunchKernelGGL(( matchTemplatePreparedKernel_CCOFF_NORMED_8UC4), dim3(grid), dim3(threads), 0, stream,
w, h, weight,
templ_sum_scale_r, templ_sum_scale_g, templ_sum_scale_b, templ_sum_scale_a,
templ_sqsum_scale,
image_sum_r, image_sqsum_r,
image_sum_g, image_sqsum_g,
image_sum_b, image_sqsum_b,
image_sum_a, image_sqsum_a,
result);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
//////////////////////////////////////////////////////////////////////
// normalize
// In-place normalization of a raw correlation map: divides each result
// value by sqrt(windowed sum(I^2) * sum(T^2)), reading the windowed term
// from the squared-integral image (four-corner lookup, column index
// scaled by cn) and clamping degenerate denominators via normAcc.
template <int cn>
__global__ void normalizeKernel_8U(
int w, int h, const PtrStep<unsigned long long> image_sqsum,
unsigned long long templ_sqsum, PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
float image_sqsum_ = (float)(
(image_sqsum.ptr(y + h)[(x + w) * cn] - image_sqsum.ptr(y)[(x + w) * cn]) -
(image_sqsum.ptr(y + h)[x * cn] - image_sqsum.ptr(y)[x * cn]));
result.ptr(y)[x] = normAcc(result.ptr(y)[x], sqrtf(image_sqsum_ * templ_sqsum));
}
}
// Host entry point: dispatches on channel count (1..4; other values are
// silently ignored) and launches 32x8 blocks; blocks only on the default
// (NULL) stream.
void normalize_8U(int w, int h, const PtrStepSz<unsigned long long> image_sqsum,
unsigned long long templ_sqsum, PtrStepSzf result, int cn, hipStream_t stream)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
switch (cn)
{
case 1:
hipLaunchKernelGGL(( normalizeKernel_8U<1>), dim3(grid), dim3(threads), 0, stream, w, h, image_sqsum, templ_sqsum, result);
break;
case 2:
hipLaunchKernelGGL(( normalizeKernel_8U<2>), dim3(grid), dim3(threads), 0, stream, w, h, image_sqsum, templ_sqsum, result);
break;
case 3:
hipLaunchKernelGGL(( normalizeKernel_8U<3>), dim3(grid), dim3(threads), 0, stream, w, h, image_sqsum, templ_sqsum, result);
break;
case 4:
hipLaunchKernelGGL(( normalizeKernel_8U<4>), dim3(grid), dim3(threads), 0, stream, w, h, image_sqsum, templ_sqsum, result);
break;
}
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
//////////////////////////////////////////////////////////////////////
// extractFirstChannel
template <int cn>
__global__ void extractFirstChannel_32F(const PtrStepb image, PtrStepSzf result)
{
typedef typename TypeVec<float, cn>::vec_type Typef;
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
Typef val = ((const Typef*)image.ptr(y))[x];
result.ptr(y)[x] = first(val);
}
}
// Host launcher: dispatches on channel count and extracts channel 0 of
// `image` into `result` on `stream`.
// NOTE(review): cn outside 1..4 silently launches nothing -- confirm callers validate.
void extractFirstChannel_32F(const PtrStepSzb image, PtrStepSzf result, int cn, hipStream_t stream)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
switch (cn)
{
case 1:
hipLaunchKernelGGL(( extractFirstChannel_32F<1>), dim3(grid), dim3(threads), 0, stream, image, result);
break;
case 2:
hipLaunchKernelGGL(( extractFirstChannel_32F<2>), dim3(grid), dim3(threads), 0, stream, image, result);
break;
case 3:
hipLaunchKernelGGL(( extractFirstChannel_32F<3>), dim3(grid), dim3(threads), 0, stream, image, result);
break;
case 4:
hipLaunchKernelGGL(( extractFirstChannel_32F<4>), dim3(grid), dim3(threads), 0, stream, image, result);
break;
}
cudaSafeCall( hipGetLastError() );
// Synchronous semantics on the default stream: block until the kernel is done.
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
} //namespace match_template
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
namespace cv { namespace gpu { namespace device
{
namespace match_template
{
// Componentwise vector helpers shared by the naive matchers below.
// sum(): horizontal add of all components to a scalar.
__device__ __forceinline__ float sum(float v) { return v; }
__device__ __forceinline__ float sum(float2 v) { return v.x + v.y; }
__device__ __forceinline__ float sum(float3 v) { return v.x + v.y + v.z; }
__device__ __forceinline__ float sum(float4 v) { return v.x + v.y + v.z + v.w; }
// first(): extracts the leading (x) component.
__device__ __forceinline__ float first(float v) { return v; }
__device__ __forceinline__ float first(float2 v) { return v.x; }
__device__ __forceinline__ float first(float3 v) { return v.x; }
__device__ __forceinline__ float first(float4 v) { return v.x; }
// mul(): componentwise product; uchar inputs are promoted to float results.
__device__ __forceinline__ float mul(float a, float b) { return a * b; }
__device__ __forceinline__ float2 mul(float2 a, float2 b) { return make_float2(a.x * b.x, a.y * b.y); }
__device__ __forceinline__ float3 mul(float3 a, float3 b) { return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); }
__device__ __forceinline__ float4 mul(float4 a, float4 b) { return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); }
__device__ __forceinline__ float mul(uchar a, uchar b) { return a * b; }
__device__ __forceinline__ float2 mul(uchar2 a, uchar2 b) { return make_float2(a.x * b.x, a.y * b.y); }
__device__ __forceinline__ float3 mul(uchar3 a, uchar3 b) { return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); }
__device__ __forceinline__ float4 mul(uchar4 a, uchar4 b) { return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); }
// sub(): componentwise difference; uchar inputs are promoted to float results.
__device__ __forceinline__ float sub(float a, float b) { return a - b; }
__device__ __forceinline__ float2 sub(float2 a, float2 b) { return make_float2(a.x - b.x, a.y - b.y); }
__device__ __forceinline__ float3 sub(float3 a, float3 b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); }
__device__ __forceinline__ float4 sub(float4 a, float4 b) { return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); }
__device__ __forceinline__ float sub(uchar a, uchar b) { return a - b; }
__device__ __forceinline__ float2 sub(uchar2 a, uchar2 b) { return make_float2(a.x - b.x, a.y - b.y); }
__device__ __forceinline__ float3 sub(uchar3 a, uchar3 b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); }
__device__ __forceinline__ float4 sub(uchar4 a, uchar4 b) { return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); }
//////////////////////////////////////////////////////////////////////
// Naive_CCORR
// Brute-force cross-correlation: each thread computes one result pixel by
// accumulating image(x+j, y+i) * templ(j, i) over the whole w x h template,
// summing across all cn channels at the end.
// T is the pixel element type (uchar or float); w, h are the template size.
template <typename T, int cn>
__global__ void matchTemplateNaiveKernel_CCORR(int w, int h, const PtrStepb image, const PtrStepb templ, PtrStepSzf result)
{
typedef typename TypeVec<T, cn>::vec_type Type;
typedef typename TypeVec<float, cn>::vec_type Typef;
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
// Per-channel accumulator, kept in float regardless of T.
Typef res = VecTraits<Typef>::all(0);
for (int i = 0; i < h; ++i)
{
const Type* image_ptr = (const Type*)image.ptr(y + i);
const Type* templ_ptr = (const Type*)templ.ptr(i);
for (int j = 0; j < w; ++j)
res = res + mul(image_ptr[x + j], templ_ptr[j]);
}
// Collapse the channel accumulator into the single-channel response.
result.ptr(y)[x] = sum(res);
}
}
// Host-side launcher for the naive CCORR kernel with a fixed element type
// and channel count. Synchronizes when running on the default stream.
template <typename T, int cn>
void matchTemplateNaive_CCORR(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, cudaStream_t stream)
{
const dim3 threads(32, 8);
const dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
matchTemplateNaiveKernel_CCORR<T, cn><<<grid, threads, 0, stream>>>(templ.cols, templ.rows, image, templ, result);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
// Runtime channel-count dispatch for the 32F naive CCORR matcher.
// Slot 0 of the table is a deliberate empty entry: cn is 1-based.
void matchTemplateNaive_CCORR_32F(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, int cn, cudaStream_t stream)
{
    typedef void (*launcher_t)(const PtrStepSzb, const PtrStepSzb, PtrStepSzf, cudaStream_t);
    static const launcher_t launchers[5] =
    {
        0,
        matchTemplateNaive_CCORR<float, 1>,
        matchTemplateNaive_CCORR<float, 2>,
        matchTemplateNaive_CCORR<float, 3>,
        matchTemplateNaive_CCORR<float, 4>
    };
    launchers[cn](image, templ, result, stream);
}
// Runtime channel-count dispatch for the 8U naive CCORR matcher.
// Slot 0 of the table is a deliberate empty entry: cn is 1-based.
void matchTemplateNaive_CCORR_8U(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, int cn, cudaStream_t stream)
{
    typedef void (*launcher_t)(const PtrStepSzb, const PtrStepSzb, PtrStepSzf, cudaStream_t);
    static const launcher_t launchers[5] =
    {
        0,
        matchTemplateNaive_CCORR<uchar, 1>,
        matchTemplateNaive_CCORR<uchar, 2>,
        matchTemplateNaive_CCORR<uchar, 3>,
        matchTemplateNaive_CCORR<uchar, 4>
    };
    launchers[cn](image, templ, result, stream);
}
//////////////////////////////////////////////////////////////////////
// Naive_SQDIFF
// Brute-force sum of squared differences: each thread computes one result
// pixel by accumulating (image(x+j, y+i) - templ(j, i))^2 over the whole
// w x h template, summing across all cn channels at the end.
// T is the pixel element type (uchar or float).
template <typename T, int cn>
__global__ void matchTemplateNaiveKernel_SQDIFF(int w, int h, const PtrStepb image, const PtrStepb templ, PtrStepSzf result)
{
typedef typename TypeVec<T, cn>::vec_type Type;
typedef typename TypeVec<float, cn>::vec_type Typef;
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
// Per-channel accumulator, kept in float regardless of T.
Typef res = VecTraits<Typef>::all(0);
Typef delta;
for (int i = 0; i < h; ++i)
{
const Type* image_ptr = (const Type*)image.ptr(y + i);
const Type* templ_ptr = (const Type*)templ.ptr(i);
for (int j = 0; j < w; ++j)
{
delta = sub(image_ptr[x + j], templ_ptr[j]);
res = res + delta * delta;
}
}
// Collapse the channel accumulator into the single-channel response.
result.ptr(y)[x] = sum(res);
}
}
// Host-side launcher for the naive SQDIFF kernel with a fixed element type
// and channel count. Synchronizes when running on the default stream.
template <typename T, int cn>
void matchTemplateNaive_SQDIFF(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, cudaStream_t stream)
{
const dim3 threads(32, 8);
const dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
matchTemplateNaiveKernel_SQDIFF<T, cn><<<grid, threads, 0, stream>>>(templ.cols, templ.rows, image, templ, result);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
// Runtime channel-count dispatch for the 32F naive SQDIFF matcher.
// Slot 0 of the table is a deliberate empty entry: cn is 1-based.
void matchTemplateNaive_SQDIFF_32F(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, int cn, cudaStream_t stream)
{
    typedef void (*launcher_t)(const PtrStepSzb, const PtrStepSzb, PtrStepSzf, cudaStream_t);
    static const launcher_t launchers[5] =
    {
        0,
        matchTemplateNaive_SQDIFF<float, 1>,
        matchTemplateNaive_SQDIFF<float, 2>,
        matchTemplateNaive_SQDIFF<float, 3>,
        matchTemplateNaive_SQDIFF<float, 4>
    };
    launchers[cn](image, templ, result, stream);
}
// Runtime channel-count dispatch for the 8U naive SQDIFF matcher.
// Slot 0 of the table is a deliberate empty entry: cn is 1-based.
void matchTemplateNaive_SQDIFF_8U(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, int cn, cudaStream_t stream)
{
    typedef void (*launcher_t)(const PtrStepSzb, const PtrStepSzb, PtrStepSzf, cudaStream_t);
    static const launcher_t launchers[5] =
    {
        0,
        matchTemplateNaive_SQDIFF<uchar, 1>,
        matchTemplateNaive_SQDIFF<uchar, 2>,
        matchTemplateNaive_SQDIFF<uchar, 3>,
        matchTemplateNaive_SQDIFF<uchar, 4>
    };
    launchers[cn](image, templ, result, stream);
}
//////////////////////////////////////////////////////////////////////
// Prepared_SQDIFF
// Converts a precomputed CCORR response (already in `result`) into SQDIFF
// using the expansion sum((I-T)^2) = sum(I^2) - 2*sum(I*T) + sum(T^2).
// image_sqsum is an integral table of squared pixels with cn-interleaved
// columns; templ_sqsum is the template's sum of squares.
template <int cn>
__global__ void matchTemplatePreparedKernel_SQDIFF_8U(int w, int h, const PtrStep<unsigned long long> image_sqsum, unsigned long long templ_sqsum, PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
// Window sum of squares via four integral-table corner lookups.
float image_sqsum_ = (float)(
(image_sqsum.ptr(y + h)[(x + w) * cn] - image_sqsum.ptr(y)[(x + w) * cn]) -
(image_sqsum.ptr(y + h)[x * cn] - image_sqsum.ptr(y)[x * cn]));
float ccorr = result.ptr(y)[x];
result.ptr(y)[x] = image_sqsum_ - 2.f * ccorr + templ_sqsum;
}
}
// Host-side launcher for the prepared SQDIFF kernel with a fixed channel
// count. Synchronizes when running on the default stream.
template <int cn>
void matchTemplatePrepared_SQDIFF_8U(int w, int h, const PtrStepSz<unsigned long long> image_sqsum, unsigned long long templ_sqsum, PtrStepSzf result, cudaStream_t stream)
{
const dim3 threads(32, 8);
const dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
matchTemplatePreparedKernel_SQDIFF_8U<cn><<<grid, threads, 0, stream>>>(w, h, image_sqsum, templ_sqsum, result);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
// Runtime channel-count dispatch for the prepared SQDIFF matcher.
// Slot 0 of the table is a deliberate empty entry: cn is 1-based.
void matchTemplatePrepared_SQDIFF_8U(int w, int h, const PtrStepSz<unsigned long long> image_sqsum, unsigned long long templ_sqsum, PtrStepSzf result, int cn,
cudaStream_t stream)
{
typedef void (*caller_t)(int w, int h, const PtrStepSz<unsigned long long> image_sqsum, unsigned long long templ_sqsum, PtrStepSzf result, cudaStream_t stream);
static const caller_t callers[] =
{
0, matchTemplatePrepared_SQDIFF_8U<1>, matchTemplatePrepared_SQDIFF_8U<2>, matchTemplatePrepared_SQDIFF_8U<3>, matchTemplatePrepared_SQDIFF_8U<4>
};
callers[cn](w, h, image_sqsum, templ_sqsum, result, stream);
}
//////////////////////////////////////////////////////////////////////
// Prepared_SQDIFF_NORMED
// normAcc* are accurate normalization routines which make GPU matchTemplate
// consistent with CPU one
// Accurate normalization used by the *_NORMED matchers; keeps GPU results
// consistent with the CPU matchTemplate implementation.
// Returns num/denum when |num| < denum; saturates to +/-1 when |num| only
// slightly exceeds denum (within a 1.125x tolerance for rounding error);
// treats anything further out as numerical noise and returns 0.
// Marked __forceinline__ to match the other small device helpers in this file.
__device__ __forceinline__ float normAcc(float num, float denum)
{
    if (::fabs(num) < denum)
        return num / denum;
    if (::fabs(num) < denum * 1.125f)
        return num > 0 ? 1 : -1;
    return 0;
}
// SQDIFF_NORMED variant of normAcc: identical tolerance handling, but the
// degenerate fallback is 1 (the worst SQDIFF_NORMED score) rather than 0.
// Marked __forceinline__ to match the other small device helpers in this file.
__device__ __forceinline__ float normAcc_SQDIFF(float num, float denum)
{
    if (::fabs(num) < denum)
        return num / denum;
    if (::fabs(num) < denum * 1.125f)
        return num > 0 ? 1 : -1;
    return 1;
}
// Normalized SQDIFF from a precomputed CCORR response (already in `result`):
// numerator is sum(I^2) - 2*ccorr + sum(T^2); denominator is
// sqrt(sum(I^2) * sum(T^2)). Window sums come from the integral table.
template <int cn>
__global__ void matchTemplatePreparedKernel_SQDIFF_NORMED_8U(
int w, int h, const PtrStep<unsigned long long> image_sqsum,
unsigned long long templ_sqsum, PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
// Window sum of squares via four integral-table corner lookups.
float image_sqsum_ = (float)(
(image_sqsum.ptr(y + h)[(x + w) * cn] - image_sqsum.ptr(y)[(x + w) * cn]) -
(image_sqsum.ptr(y + h)[x * cn] - image_sqsum.ptr(y)[x * cn]));
float ccorr = result.ptr(y)[x];
result.ptr(y)[x] = normAcc_SQDIFF(image_sqsum_ - 2.f * ccorr + templ_sqsum,
sqrtf(image_sqsum_ * templ_sqsum));
}
}
// Host-side launcher for the prepared SQDIFF_NORMED kernel with a fixed
// channel count. Synchronizes when running on the default stream.
template <int cn>
void matchTemplatePrepared_SQDIFF_NORMED_8U(int w, int h, const PtrStepSz<unsigned long long> image_sqsum, unsigned long long templ_sqsum,
PtrStepSzf result, cudaStream_t stream)
{
const dim3 threads(32, 8);
const dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
matchTemplatePreparedKernel_SQDIFF_NORMED_8U<cn><<<grid, threads, 0, stream>>>(w, h, image_sqsum, templ_sqsum, result);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
// Runtime channel-count dispatch for the prepared SQDIFF_NORMED matcher.
// Slot 0 of the table is a deliberate empty entry: cn is 1-based.
void matchTemplatePrepared_SQDIFF_NORMED_8U(int w, int h, const PtrStepSz<unsigned long long> image_sqsum, unsigned long long templ_sqsum,
PtrStepSzf result, int cn, cudaStream_t stream)
{
typedef void (*caller_t)(int w, int h, const PtrStepSz<unsigned long long> image_sqsum, unsigned long long templ_sqsum, PtrStepSzf result, cudaStream_t stream);
static const caller_t callers[] =
{
0, matchTemplatePrepared_SQDIFF_NORMED_8U<1>, matchTemplatePrepared_SQDIFF_NORMED_8U<2>, matchTemplatePrepared_SQDIFF_NORMED_8U<3>, matchTemplatePrepared_SQDIFF_NORMED_8U<4>
};
callers[cn](w, h, image_sqsum, templ_sqsum, result, stream);
}
//////////////////////////////////////////////////////////////////////
// Prepared_CCOFF
// CCOEFF from a precomputed CCORR response (already in `result`):
// subtracts window_sum(I) * mean(T). templ_sum_scale is sum(T)/(w*h);
// the window sum of pixel values comes from the integral table image_sum.
__global__ void matchTemplatePreparedKernel_CCOFF_8U(int w, int h, float templ_sum_scale, const PtrStep<unsigned int> image_sum, PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
// Window sum via four integral-table corner lookups.
float image_sum_ = (float)(
(image_sum.ptr(y + h)[x + w] - image_sum.ptr(y)[x + w]) -
(image_sum.ptr(y + h)[x] - image_sum.ptr(y)[x]));
float ccorr = result.ptr(y)[x];
result.ptr(y)[x] = ccorr - image_sum_ * templ_sum_scale;
}
}
// Host launcher for single-channel prepared CCOEFF: precomputes the
// template mean (templ_sum / area) and runs the kernel on `stream`.
void matchTemplatePrepared_CCOFF_8U(int w, int h, const PtrStepSz<unsigned int> image_sum, unsigned int templ_sum, PtrStepSzf result, cudaStream_t stream)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
matchTemplatePreparedKernel_CCOFF_8U<<<grid, threads, 0, stream>>>(w, h, (float)templ_sum / (w * h), image_sum, result);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
// Two-channel CCOEFF: subtracts each channel's window_sum(I) * mean(T)
// from the precomputed CCORR response. One integral table per channel.
__global__ void matchTemplatePreparedKernel_CCOFF_8UC2(
int w, int h, float templ_sum_scale_r, float templ_sum_scale_g,
const PtrStep<unsigned int> image_sum_r,
const PtrStep<unsigned int> image_sum_g,
PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
// Per-channel window sums via integral-table corner lookups.
float image_sum_r_ = (float)(
(image_sum_r.ptr(y + h)[x + w] - image_sum_r.ptr(y)[x + w]) -
(image_sum_r.ptr(y + h)[x] - image_sum_r.ptr(y)[x]));
float image_sum_g_ = (float)(
(image_sum_g.ptr(y + h)[x + w] - image_sum_g.ptr(y)[x + w]) -
(image_sum_g.ptr(y + h)[x] - image_sum_g.ptr(y)[x]));
float ccorr = result.ptr(y)[x];
result.ptr(y)[x] = ccorr - image_sum_r_ * templ_sum_scale_r
- image_sum_g_ * templ_sum_scale_g;
}
}
// Host launcher for two-channel prepared CCOEFF: precomputes per-channel
// template means and runs the kernel on `stream`.
void matchTemplatePrepared_CCOFF_8UC2(
int w, int h,
const PtrStepSz<unsigned int> image_sum_r,
const PtrStepSz<unsigned int> image_sum_g,
unsigned int templ_sum_r, unsigned int templ_sum_g,
PtrStepSzf result, cudaStream_t stream)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
matchTemplatePreparedKernel_CCOFF_8UC2<<<grid, threads, 0, stream>>>(
w, h, (float)templ_sum_r / (w * h), (float)templ_sum_g / (w * h),
image_sum_r, image_sum_g, result);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
// Three-channel CCOEFF: subtracts each channel's window_sum(I) * mean(T)
// from the precomputed CCORR response. One integral table per channel.
__global__ void matchTemplatePreparedKernel_CCOFF_8UC3(
int w, int h,
float templ_sum_scale_r,
float templ_sum_scale_g,
float templ_sum_scale_b,
const PtrStep<unsigned int> image_sum_r,
const PtrStep<unsigned int> image_sum_g,
const PtrStep<unsigned int> image_sum_b,
PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
// Per-channel window sums via integral-table corner lookups.
float image_sum_r_ = (float)(
(image_sum_r.ptr(y + h)[x + w] - image_sum_r.ptr(y)[x + w]) -
(image_sum_r.ptr(y + h)[x] - image_sum_r.ptr(y)[x]));
float image_sum_g_ = (float)(
(image_sum_g.ptr(y + h)[x + w] - image_sum_g.ptr(y)[x + w]) -
(image_sum_g.ptr(y + h)[x] - image_sum_g.ptr(y)[x]));
float image_sum_b_ = (float)(
(image_sum_b.ptr(y + h)[x + w] - image_sum_b.ptr(y)[x + w]) -
(image_sum_b.ptr(y + h)[x] - image_sum_b.ptr(y)[x]));
float ccorr = result.ptr(y)[x];
result.ptr(y)[x] = ccorr - image_sum_r_ * templ_sum_scale_r
- image_sum_g_ * templ_sum_scale_g
- image_sum_b_ * templ_sum_scale_b;
}
}
// Host launcher for three-channel prepared CCOEFF: precomputes per-channel
// template means and runs the kernel on `stream`.
void matchTemplatePrepared_CCOFF_8UC3(
int w, int h,
const PtrStepSz<unsigned int> image_sum_r,
const PtrStepSz<unsigned int> image_sum_g,
const PtrStepSz<unsigned int> image_sum_b,
unsigned int templ_sum_r,
unsigned int templ_sum_g,
unsigned int templ_sum_b,
PtrStepSzf result, cudaStream_t stream)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
matchTemplatePreparedKernel_CCOFF_8UC3<<<grid, threads, 0, stream>>>(
w, h,
(float)templ_sum_r / (w * h),
(float)templ_sum_g / (w * h),
(float)templ_sum_b / (w * h),
image_sum_r, image_sum_g, image_sum_b, result);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
// Four-channel CCOEFF: subtracts each channel's window_sum(I) * mean(T)
// from the precomputed CCORR response. One integral table per channel.
__global__ void matchTemplatePreparedKernel_CCOFF_8UC4(
int w, int h,
float templ_sum_scale_r,
float templ_sum_scale_g,
float templ_sum_scale_b,
float templ_sum_scale_a,
const PtrStep<unsigned int> image_sum_r,
const PtrStep<unsigned int> image_sum_g,
const PtrStep<unsigned int> image_sum_b,
const PtrStep<unsigned int> image_sum_a,
PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
// Per-channel window sums via integral-table corner lookups.
float image_sum_r_ = (float)(
(image_sum_r.ptr(y + h)[x + w] - image_sum_r.ptr(y)[x + w]) -
(image_sum_r.ptr(y + h)[x] - image_sum_r.ptr(y)[x]));
float image_sum_g_ = (float)(
(image_sum_g.ptr(y + h)[x + w] - image_sum_g.ptr(y)[x + w]) -
(image_sum_g.ptr(y + h)[x] - image_sum_g.ptr(y)[x]));
float image_sum_b_ = (float)(
(image_sum_b.ptr(y + h)[x + w] - image_sum_b.ptr(y)[x + w]) -
(image_sum_b.ptr(y + h)[x] - image_sum_b.ptr(y)[x]));
float image_sum_a_ = (float)(
(image_sum_a.ptr(y + h)[x + w] - image_sum_a.ptr(y)[x + w]) -
(image_sum_a.ptr(y + h)[x] - image_sum_a.ptr(y)[x]));
float ccorr = result.ptr(y)[x];
result.ptr(y)[x] = ccorr - image_sum_r_ * templ_sum_scale_r
- image_sum_g_ * templ_sum_scale_g
- image_sum_b_ * templ_sum_scale_b
- image_sum_a_ * templ_sum_scale_a;
}
}
// Host launcher for four-channel prepared CCOEFF: precomputes per-channel
// template means and runs the kernel on `stream`.
void matchTemplatePrepared_CCOFF_8UC4(
int w, int h,
const PtrStepSz<unsigned int> image_sum_r,
const PtrStepSz<unsigned int> image_sum_g,
const PtrStepSz<unsigned int> image_sum_b,
const PtrStepSz<unsigned int> image_sum_a,
unsigned int templ_sum_r,
unsigned int templ_sum_g,
unsigned int templ_sum_b,
unsigned int templ_sum_a,
PtrStepSzf result, cudaStream_t stream)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
matchTemplatePreparedKernel_CCOFF_8UC4<<<grid, threads, 0, stream>>>(
w, h,
(float)templ_sum_r / (w * h),
(float)templ_sum_g / (w * h),
(float)templ_sum_b / (w * h),
(float)templ_sum_a / (w * h),
image_sum_r, image_sum_g, image_sum_b, image_sum_a,
result);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
//////////////////////////////////////////////////////////////////////
// Prepared_CCOFF_NORMED
// Single-channel CCOEFF_NORMED from a precomputed CCORR response:
// numerator   = ccorr - window_sum(I) * mean(T)
// denominator = sqrt(templ_variance_term * window_variance_term), where the
// window variance term is sum(I^2) - sum(I)^2 / area (weight = 1/area) and
// templ_sqsum_scale is the matching template-side term precomputed on host.
__global__ void matchTemplatePreparedKernel_CCOFF_NORMED_8U(
int w, int h, float weight,
float templ_sum_scale, float templ_sqsum_scale,
const PtrStep<unsigned int> image_sum,
const PtrStep<unsigned long long> image_sqsum,
PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
float ccorr = result.ptr(y)[x];
// Window sum and sum of squares via integral-table corner lookups.
float image_sum_ = (float)(
(image_sum.ptr(y + h)[x + w] - image_sum.ptr(y)[x + w]) -
(image_sum.ptr(y + h)[x] - image_sum.ptr(y)[x]));
float image_sqsum_ = (float)(
(image_sqsum.ptr(y + h)[x + w] - image_sqsum.ptr(y)[x + w]) -
(image_sqsum.ptr(y + h)[x] - image_sqsum.ptr(y)[x]));
result.ptr(y)[x] = normAcc(ccorr - image_sum_ * templ_sum_scale,
sqrtf(templ_sqsum_scale * (image_sqsum_ - weight * image_sum_ * image_sum_)));
}
}
// Host launcher for single-channel CCOEFF_NORMED: precomputes the template
// mean and variance terms, then runs the kernel on `stream`.
void matchTemplatePrepared_CCOFF_NORMED_8U(
int w, int h, const PtrStepSz<unsigned int> image_sum,
const PtrStepSz<unsigned long long> image_sqsum,
unsigned int templ_sum, unsigned long long templ_sqsum,
PtrStepSzf result, cudaStream_t stream)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
// weight = 1/area; templ_sqsum_scale = sum(T^2) - sum(T)^2 / area.
float weight = 1.f / (w * h);
float templ_sum_scale = templ_sum * weight;
float templ_sqsum_scale = templ_sqsum - weight * templ_sum * templ_sum;
matchTemplatePreparedKernel_CCOFF_NORMED_8U<<<grid, threads, 0, stream>>>(
w, h, weight, templ_sum_scale, templ_sqsum_scale,
image_sum, image_sqsum, result);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
// Two-channel CCOEFF_NORMED: like the single-channel kernel, but the
// numerator subtracts each channel's window_sum * mean(T), and the variance
// terms in the denominator are summed over both channels.
__global__ void matchTemplatePreparedKernel_CCOFF_NORMED_8UC2(
int w, int h, float weight,
float templ_sum_scale_r, float templ_sum_scale_g,
float templ_sqsum_scale,
const PtrStep<unsigned int> image_sum_r, const PtrStep<unsigned long long> image_sqsum_r,
const PtrStep<unsigned int> image_sum_g, const PtrStep<unsigned long long> image_sqsum_g,
PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
// Per-channel window sums and sums of squares via integral tables.
float image_sum_r_ = (float)(
(image_sum_r.ptr(y + h)[x + w] - image_sum_r.ptr(y)[x + w]) -
(image_sum_r.ptr(y + h)[x] - image_sum_r.ptr(y)[x]));
float image_sqsum_r_ = (float)(
(image_sqsum_r.ptr(y + h)[x + w] - image_sqsum_r.ptr(y)[x + w]) -
(image_sqsum_r.ptr(y + h)[x] - image_sqsum_r.ptr(y)[x]));
float image_sum_g_ = (float)(
(image_sum_g.ptr(y + h)[x + w] - image_sum_g.ptr(y)[x + w]) -
(image_sum_g.ptr(y + h)[x] - image_sum_g.ptr(y)[x]));
float image_sqsum_g_ = (float)(
(image_sqsum_g.ptr(y + h)[x + w] - image_sqsum_g.ptr(y)[x + w]) -
(image_sqsum_g.ptr(y + h)[x] - image_sqsum_g.ptr(y)[x]));
float num = result.ptr(y)[x] - image_sum_r_ * templ_sum_scale_r
- image_sum_g_ * templ_sum_scale_g;
float denum = sqrtf(templ_sqsum_scale * (image_sqsum_r_ - weight * image_sum_r_ * image_sum_r_
+ image_sqsum_g_ - weight * image_sum_g_ * image_sum_g_));
result.ptr(y)[x] = normAcc(num, denum);
}
}
// Host launcher for two-channel CCOEFF_NORMED: precomputes per-channel
// template means and the combined variance term, then runs the kernel.
void matchTemplatePrepared_CCOFF_NORMED_8UC2(
int w, int h,
const PtrStepSz<unsigned int> image_sum_r, const PtrStepSz<unsigned long long> image_sqsum_r,
const PtrStepSz<unsigned int> image_sum_g, const PtrStepSz<unsigned long long> image_sqsum_g,
unsigned int templ_sum_r, unsigned long long templ_sqsum_r,
unsigned int templ_sum_g, unsigned long long templ_sqsum_g,
PtrStepSzf result, cudaStream_t stream)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
// weight = 1/area; the sqsum scale sums both channels' variance terms.
float weight = 1.f / (w * h);
float templ_sum_scale_r = templ_sum_r * weight;
float templ_sum_scale_g = templ_sum_g * weight;
float templ_sqsum_scale = templ_sqsum_r - weight * templ_sum_r * templ_sum_r
+ templ_sqsum_g - weight * templ_sum_g * templ_sum_g;
matchTemplatePreparedKernel_CCOFF_NORMED_8UC2<<<grid, threads, 0, stream>>>(
w, h, weight,
templ_sum_scale_r, templ_sum_scale_g,
templ_sqsum_scale,
image_sum_r, image_sqsum_r,
image_sum_g, image_sqsum_g,
result);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
// Three-channel CCOEFF_NORMED: numerator subtracts each channel's
// window_sum * mean(T); denominator variance terms are summed over all
// three channels.
__global__ void matchTemplatePreparedKernel_CCOFF_NORMED_8UC3(
int w, int h, float weight,
float templ_sum_scale_r, float templ_sum_scale_g, float templ_sum_scale_b,
float templ_sqsum_scale,
const PtrStep<unsigned int> image_sum_r, const PtrStep<unsigned long long> image_sqsum_r,
const PtrStep<unsigned int> image_sum_g, const PtrStep<unsigned long long> image_sqsum_g,
const PtrStep<unsigned int> image_sum_b, const PtrStep<unsigned long long> image_sqsum_b,
PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
// Per-channel window sums and sums of squares via integral tables.
float image_sum_r_ = (float)(
(image_sum_r.ptr(y + h)[x + w] - image_sum_r.ptr(y)[x + w]) -
(image_sum_r.ptr(y + h)[x] - image_sum_r.ptr(y)[x]));
float image_sqsum_r_ = (float)(
(image_sqsum_r.ptr(y + h)[x + w] - image_sqsum_r.ptr(y)[x + w]) -
(image_sqsum_r.ptr(y + h)[x] - image_sqsum_r.ptr(y)[x]));
float image_sum_g_ = (float)(
(image_sum_g.ptr(y + h)[x + w] - image_sum_g.ptr(y)[x + w]) -
(image_sum_g.ptr(y + h)[x] - image_sum_g.ptr(y)[x]));
float image_sqsum_g_ = (float)(
(image_sqsum_g.ptr(y + h)[x + w] - image_sqsum_g.ptr(y)[x + w]) -
(image_sqsum_g.ptr(y + h)[x] - image_sqsum_g.ptr(y)[x]));
float image_sum_b_ = (float)(
(image_sum_b.ptr(y + h)[x + w] - image_sum_b.ptr(y)[x + w]) -
(image_sum_b.ptr(y + h)[x] - image_sum_b.ptr(y)[x]));
float image_sqsum_b_ = (float)(
(image_sqsum_b.ptr(y + h)[x + w] - image_sqsum_b.ptr(y)[x + w]) -
(image_sqsum_b.ptr(y + h)[x] - image_sqsum_b.ptr(y)[x]));
float num = result.ptr(y)[x] - image_sum_r_ * templ_sum_scale_r
- image_sum_g_ * templ_sum_scale_g
- image_sum_b_ * templ_sum_scale_b;
float denum = sqrtf(templ_sqsum_scale * (image_sqsum_r_ - weight * image_sum_r_ * image_sum_r_
+ image_sqsum_g_ - weight * image_sum_g_ * image_sum_g_
+ image_sqsum_b_ - weight * image_sum_b_ * image_sum_b_));
result.ptr(y)[x] = normAcc(num, denum);
}
}
// Host launcher for three-channel CCOEFF_NORMED: precomputes per-channel
// template means and the combined variance term, then runs the kernel.
void matchTemplatePrepared_CCOFF_NORMED_8UC3(
int w, int h,
const PtrStepSz<unsigned int> image_sum_r, const PtrStepSz<unsigned long long> image_sqsum_r,
const PtrStepSz<unsigned int> image_sum_g, const PtrStepSz<unsigned long long> image_sqsum_g,
const PtrStepSz<unsigned int> image_sum_b, const PtrStepSz<unsigned long long> image_sqsum_b,
unsigned int templ_sum_r, unsigned long long templ_sqsum_r,
unsigned int templ_sum_g, unsigned long long templ_sqsum_g,
unsigned int templ_sum_b, unsigned long long templ_sqsum_b,
PtrStepSzf result, cudaStream_t stream)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
// weight = 1/area; the sqsum scale sums all channels' variance terms.
float weight = 1.f / (w * h);
float templ_sum_scale_r = templ_sum_r * weight;
float templ_sum_scale_g = templ_sum_g * weight;
float templ_sum_scale_b = templ_sum_b * weight;
float templ_sqsum_scale = templ_sqsum_r - weight * templ_sum_r * templ_sum_r
+ templ_sqsum_g - weight * templ_sum_g * templ_sum_g
+ templ_sqsum_b - weight * templ_sum_b * templ_sum_b;
matchTemplatePreparedKernel_CCOFF_NORMED_8UC3<<<grid, threads, 0, stream>>>(
w, h, weight,
templ_sum_scale_r, templ_sum_scale_g, templ_sum_scale_b,
templ_sqsum_scale,
image_sum_r, image_sqsum_r,
image_sum_g, image_sqsum_g,
image_sum_b, image_sqsum_b,
result);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
// Four-channel CCOEFF_NORMED: numerator subtracts each channel's
// window_sum * mean(T); denominator variance terms are summed over all
// four channels.
__global__ void matchTemplatePreparedKernel_CCOFF_NORMED_8UC4(
int w, int h, float weight,
float templ_sum_scale_r, float templ_sum_scale_g, float templ_sum_scale_b,
float templ_sum_scale_a, float templ_sqsum_scale,
const PtrStep<unsigned int> image_sum_r, const PtrStep<unsigned long long> image_sqsum_r,
const PtrStep<unsigned int> image_sum_g, const PtrStep<unsigned long long> image_sqsum_g,
const PtrStep<unsigned int> image_sum_b, const PtrStep<unsigned long long> image_sqsum_b,
const PtrStep<unsigned int> image_sum_a, const PtrStep<unsigned long long> image_sqsum_a,
PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
// Per-channel window sums and sums of squares via integral tables.
float image_sum_r_ = (float)(
(image_sum_r.ptr(y + h)[x + w] - image_sum_r.ptr(y)[x + w]) -
(image_sum_r.ptr(y + h)[x] - image_sum_r.ptr(y)[x]));
float image_sqsum_r_ = (float)(
(image_sqsum_r.ptr(y + h)[x + w] - image_sqsum_r.ptr(y)[x + w]) -
(image_sqsum_r.ptr(y + h)[x] - image_sqsum_r.ptr(y)[x]));
float image_sum_g_ = (float)(
(image_sum_g.ptr(y + h)[x + w] - image_sum_g.ptr(y)[x + w]) -
(image_sum_g.ptr(y + h)[x] - image_sum_g.ptr(y)[x]));
float image_sqsum_g_ = (float)(
(image_sqsum_g.ptr(y + h)[x + w] - image_sqsum_g.ptr(y)[x + w]) -
(image_sqsum_g.ptr(y + h)[x] - image_sqsum_g.ptr(y)[x]));
float image_sum_b_ = (float)(
(image_sum_b.ptr(y + h)[x + w] - image_sum_b.ptr(y)[x + w]) -
(image_sum_b.ptr(y + h)[x] - image_sum_b.ptr(y)[x]));
float image_sqsum_b_ = (float)(
(image_sqsum_b.ptr(y + h)[x + w] - image_sqsum_b.ptr(y)[x + w]) -
(image_sqsum_b.ptr(y + h)[x] - image_sqsum_b.ptr(y)[x]));
float image_sum_a_ = (float)(
(image_sum_a.ptr(y + h)[x + w] - image_sum_a.ptr(y)[x + w]) -
(image_sum_a.ptr(y + h)[x] - image_sum_a.ptr(y)[x]));
float image_sqsum_a_ = (float)(
(image_sqsum_a.ptr(y + h)[x + w] - image_sqsum_a.ptr(y)[x + w]) -
(image_sqsum_a.ptr(y + h)[x] - image_sqsum_a.ptr(y)[x]));
float num = result.ptr(y)[x] - image_sum_r_ * templ_sum_scale_r - image_sum_g_ * templ_sum_scale_g
- image_sum_b_ * templ_sum_scale_b - image_sum_a_ * templ_sum_scale_a;
float denum = sqrtf(templ_sqsum_scale * (image_sqsum_r_ - weight * image_sum_r_ * image_sum_r_
+ image_sqsum_g_ - weight * image_sum_g_ * image_sum_g_
+ image_sqsum_b_ - weight * image_sum_b_ * image_sum_b_
+ image_sqsum_a_ - weight * image_sum_a_ * image_sum_a_));
result.ptr(y)[x] = normAcc(num, denum);
}
}
// Host launcher for four-channel CCOEFF_NORMED: precomputes per-channel
// template means and the combined variance term, then runs the kernel.
void matchTemplatePrepared_CCOFF_NORMED_8UC4(
int w, int h,
const PtrStepSz<unsigned int> image_sum_r, const PtrStepSz<unsigned long long> image_sqsum_r,
const PtrStepSz<unsigned int> image_sum_g, const PtrStepSz<unsigned long long> image_sqsum_g,
const PtrStepSz<unsigned int> image_sum_b, const PtrStepSz<unsigned long long> image_sqsum_b,
const PtrStepSz<unsigned int> image_sum_a, const PtrStepSz<unsigned long long> image_sqsum_a,
unsigned int templ_sum_r, unsigned long long templ_sqsum_r,
unsigned int templ_sum_g, unsigned long long templ_sqsum_g,
unsigned int templ_sum_b, unsigned long long templ_sqsum_b,
unsigned int templ_sum_a, unsigned long long templ_sqsum_a,
PtrStepSzf result, cudaStream_t stream)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
// weight = 1/area; the sqsum scale sums all channels' variance terms.
float weight = 1.f / (w * h);
float templ_sum_scale_r = templ_sum_r * weight;
float templ_sum_scale_g = templ_sum_g * weight;
float templ_sum_scale_b = templ_sum_b * weight;
float templ_sum_scale_a = templ_sum_a * weight;
float templ_sqsum_scale = templ_sqsum_r - weight * templ_sum_r * templ_sum_r
+ templ_sqsum_g - weight * templ_sum_g * templ_sum_g
+ templ_sqsum_b - weight * templ_sum_b * templ_sum_b
+ templ_sqsum_a - weight * templ_sum_a * templ_sum_a;
matchTemplatePreparedKernel_CCOFF_NORMED_8UC4<<<grid, threads, 0, stream>>>(
w, h, weight,
templ_sum_scale_r, templ_sum_scale_g, templ_sum_scale_b, templ_sum_scale_a,
templ_sqsum_scale,
image_sum_r, image_sqsum_r,
image_sum_g, image_sqsum_g,
image_sum_b, image_sqsum_b,
image_sum_a, image_sqsum_a,
result);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
//////////////////////////////////////////////////////////////////////
// normalize
// Final step of CCORR_NORMED matching for 8U images: divides each
// cross-correlation response in `result` by sqrt(image_sqsum * templ_sqsum).
// image_sqsum is an integral (summed-area) table of squared pixel values;
// column indices are scaled by cn to step over interleaved per-channel entries.
// w, h: template size; result is updated in place.
template <int cn>
__global__ void normalizeKernel_8U(
int w, int h, const PtrStep<unsigned long long> image_sqsum,
unsigned long long templ_sqsum, PtrStepSzf result)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
// Sum of squares over the w x h window at (x, y): four integral-table
// corner lookups (bottom-right - top-right - bottom-left + top-left).
float image_sqsum_ = (float)(
(image_sqsum.ptr(y + h)[(x + w) * cn] - image_sqsum.ptr(y)[(x + w) * cn]) -
(image_sqsum.ptr(y + h)[x * cn] - image_sqsum.ptr(y)[x * cn]));
result.ptr(y)[x] = normAcc(result.ptr(y)[x], sqrtf(image_sqsum_ * templ_sqsum));
}
}
// Host dispatcher for normalizeKernel_8U, specialized on channel count.
// NOTE(review): cn outside [1,4] silently launches nothing — confirm callers
// can never pass other values.
void normalize_8U(int w, int h, const PtrStepSz<unsigned long long> image_sqsum,
unsigned long long templ_sqsum, PtrStepSzf result, int cn, cudaStream_t stream)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
switch (cn)
{
case 1:
normalizeKernel_8U<1><<<grid, threads, 0, stream>>>(w, h, image_sqsum, templ_sqsum, result);
break;
case 2:
normalizeKernel_8U<2><<<grid, threads, 0, stream>>>(w, h, image_sqsum, templ_sqsum, result);
break;
case 3:
normalizeKernel_8U<3><<<grid, threads, 0, stream>>>(w, h, image_sqsum, templ_sqsum, result);
break;
case 4:
normalizeKernel_8U<4><<<grid, threads, 0, stream>>>(w, h, image_sqsum, templ_sqsum, result);
break;
}
cudaSafeCall( cudaGetLastError() );
// Block only when running on the default stream.
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
//////////////////////////////////////////////////////////////////////
// extractFirstChannel
// Copies the first component of each cn-channel float pixel of `image` into
// the single-channel `result`. `first()` is a device helper (defined
// elsewhere) that selects component 0 of the vector type.
template <int cn>
__global__ void extractFirstChannel_32F(const PtrStepb image, PtrStepSzf result)
{
typedef typename TypeVec<float, cn>::vec_type Typef;
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < result.cols && y < result.rows)
{
// Reinterpret the byte row as cn-wide float vectors.
Typef val = ((const Typef*)image.ptr(y))[x];
result.ptr(y)[x] = first(val);
}
}
// Host dispatcher for the extractFirstChannel_32F kernel, specialized on the
// channel count. NOTE(review): cn outside [1,4] launches nothing.
void extractFirstChannel_32F(const PtrStepSzb image, PtrStepSzf result, int cn, cudaStream_t stream)
{
dim3 threads(32, 8);
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));
switch (cn)
{
case 1:
extractFirstChannel_32F<1><<<grid, threads, 0, stream>>>(image, result);
break;
case 2:
extractFirstChannel_32F<2><<<grid, threads, 0, stream>>>(image, result);
break;
case 3:
extractFirstChannel_32F<3><<<grid, threads, 0, stream>>>(image, result);
break;
case 4:
extractFirstChannel_32F<4><<<grid, threads, 0, stream>>>(image, result);
break;
}
cudaSafeCall( cudaGetLastError() );
// Block only when running on the default stream.
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
} //namespace match_template
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */
|
ff13baaf2a8dc0897f77fc166568a6b00ba2d42d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "lodepng.h"
#include "lodepng.cpp"
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <chrono>
using namespace std::chrono;
#define THREAD_COUNT_PER_DIM 30
#define INPUT_IMG "input_img.png"
#define PI 3.14159265358979323846
typedef unsigned char byte;
// Lightweight image record: raw pixel buffer plus dimensions.
// NOTE(review): does not own/free `pixels`; callers manage the buffer.
struct Image {
Image(byte* pixels = nullptr, unsigned int width = 0, unsigned int height = 0) : pixels(pixels), width(width), height(height) {
};
byte* pixels; // raw pixel data (grayscale here: width*height bytes)
unsigned int width;
unsigned int height;
};
// Image loading converts to grayscale by default, because colors are not needed in this case.
// Image loading converts to grayscale by default, because colors are not needed in this case.
// Decodes the PNG as RGBA8 via lodepng, then reduces each pixel to one byte
// using fixed luma-like weights (0.3 R + 0.6 G + 0.1 B); the trailing +0.5
// rounds to nearest. Exits the process on decode failure.
Image loadPngImage(char* filename) {
unsigned int width, height;
byte* rgbImage;
unsigned error = lodepng_decode_file(&rgbImage, &width, &height, filename, LCT_RGBA, 8);
if (error) {
printf("Error loading image: %u: %s\n", error, lodepng_error_text(error));
exit(2);
}
byte* grayscale = new byte[width * height];
byte* img = rgbImage;
for (int i = 0; i < width * height; ++i) {
int r = *img++; // red
int g = *img++; // green
int b = *img++; // blue
int a = *img++; // opacity (read only to advance the pointer; otherwise unused)
grayscale[i] = 0.3 * r + 0.6 * g + 0.1 * b + 0.5;
}
free(rgbImage);
// Caller takes ownership of the grayscale buffer.
return Image(grayscale, width, height);
}
// Writes `outputImage` as an 8-bit grayscale PNG named
// "<filename-without-extension>_<appendText>.png". Exits on encode failure.
void writePngImage(char* filename, std::string appendText, Image outputImage) {
std::string newName = filename;
newName = newName.substr(0, newName.rfind("."));
newName.append("_").append(appendText).append(".png");
unsigned error = lodepng_encode_file(newName.c_str(), outputImage.pixels, outputImage.width, outputImage.height, LCT_GREY, 8);
if (error) {
printf("Error writing image: %u: %s\n", error, lodepng_error_text(error));
exit(3);
}
}
// Sobel X
// -1 0 1
// -2 0 2
// -1 0 1
// Sobel Y
// -1 -2 -1
// 0 0 0
// 1 2 1
// arr[x][y] == arr[y*width + x]
// CPU Sobel edge detector on a grayscale image (row-major, index y*width+x).
// Border pixels (outermost row/column) are skipped and left untouched.
// NOTE(review): the magnitude can reach ~1442 (4*255*sqrt(2)); converting an
// out-of-range float to byte is undefined behavior — consider clamping to 255.
void sobelEdgeDetectionCpu(const byte* original, byte* destination, const unsigned int width, const unsigned int height) {
for (int y = 1; y < height - 1; y++) {
for (int x = 1; x < width - 1; x++) {
// Horizontal gradient (Sobel X kernel).
int dx = (-1 * original[(y - 1) * width + (x - 1)]) + (-2 * original[y * width + (x - 1)]) + (-1 * original[(y + 1) * width + (x - 1)]) +
(original[(y - 1) * width + (x + 1)]) + (2 * original[y * width + (x + 1)]) + (original[(y + 1) * width + (x + 1)]);
// Vertical gradient (Sobel Y kernel).
int dy = (original[(y - 1) * width + (x - 1)]) + (2 * original[(y - 1) * width + x]) + (original[(y - 1) * width + (x + 1)]) +
(-1 * original[(y + 1) * width + (x - 1)]) + (-2 * original[(y + 1) * width + x]) + (-1 * original[(y + 1) * width + (x + 1)]);
destination[y * width + x] = sqrt((dx * dx) + (dy * dy));
}
}
}
// GPU Sobel edge detector: one thread per pixel on a 2D grid; interior-only
// guard leaves border pixels untouched.
// NOTE(review): as in the CPU twin, the magnitude can exceed 255 before the
// byte store — consider saturating.
__global__ void sobelEdgeDetectionGpu(const byte* d_sourceImage, byte* d_destinationImage, const unsigned int width, const unsigned int height) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
float sobelX;
float sobelY;
if (x > 0 && y > 0 && x < width - 1 && y < height - 1) {
sobelX = (-1 * d_sourceImage[(y - 1) * width + (x - 1)]) + (-2 * d_sourceImage[y * width + (x - 1)]) + (-1 * d_sourceImage[(y + 1) * width + (x - 1)]) +
(d_sourceImage[(y - 1) * width + (x + 1)]) + (2 * d_sourceImage[y * width + (x + 1)]) + (d_sourceImage[(y + 1) * width + (x + 1)]);
sobelY = (-1 * d_sourceImage[(y - 1) * width + (x - 1)]) + (-2 * d_sourceImage[(y - 1) * width + x]) + (-1 * d_sourceImage[(y - 1) * width + (x + 1)]) +
(d_sourceImage[(y + 1) * width + (x - 1)]) + (2 * d_sourceImage[(y + 1) * width + x]) + (d_sourceImage[(y + 1) * width + (x + 1)]);
d_destinationImage[y * width + x] = sqrt((sobelX * sobelX) + (sobelY * sobelY));
}
}
// Builds a normalized 5x5 Gaussian kernel (sigma = 1.0) in row-major order.
// The 25 weights sum to 1 after normalization.
// BUG FIX: the parameter was declared `double[5]` although 25 elements are
// written; the declared extent now matches the real footprint. Array
// parameters decay to `double*`, so existing callers are unaffected.
void createGaussKernel(double gaussKernel[25])
{
    // intialising standard deviation to 1.0
    const double sigma = 1.0;
    const double s = 2.0 * sigma * sigma;          // 2*sigma^2
    const double pi = 3.14159265358979323846;      // matches the file-wide PI macro
    double sum = 0.0;
    // Sample exp(-r^2 / (2*sigma^2)) / (2*pi*sigma^2) on the grid [-2,2]^2.
    for (int x = -2; x <= 2; x++) {
        for (int y = -2; y <= 2; y++) {
            const double r2 = (double)(x * x + y * y);
            gaussKernel[x + 2 + (y + 2) * 5] = exp(-r2 / s) / (pi * s);
            sum += gaussKernel[x + 2 + (y + 2) * 5];
        }
    }
    // normalising the Kernel so the weights sum to exactly 1
    for (int i = 0; i < 25; ++i) {
        gaussKernel[i] /= sum;
    }
}
// CPU 5x5 Gaussian convolution using the row-major weights in gKernel.
// Border pixels (outermost row/column) are skipped and left untouched.
void gaussianBlurCpu(const byte* original, byte* destination, double* gKernel, const unsigned int width, const unsigned int height) {
for (int y = 0; y < height - 1; y++)
{
for (int x = 0; x < width - 1; x++)
{
// Guard is partly redundant with the loop bounds, but also skips
// x == 0 / y == 0 so only interior pixels are written.
if (x > 0 && y > 0 && x < width - 1 && y < height - 1)
{
double sum = 0;
for (int i = 0; i < 5; i++)
{
for (int j = 0; j < 5; j++)
{
int num;
// Near the top/left edge a fixed value of 20 is substituted
// for the sample. NOTE(review): this clamps whole rows/columns
// (y < 4 || x < 4), not just the taps that would fall outside
// the image — confirm this asymmetric border handling is intended.
if (y < 4 || x < 4) {
num = 20;
}
else {
num = original[(y - 2 + i) * width + (x - 2 + j)];
}
sum += num * gKernel[i * 5 + j];
}
}
destination[y * width + x] = round(sum);
}
}
}
}
__device__ double d_gKernel[25];
// GPU twin of gaussianBlurCpu: one thread per interior pixel. The 5x5
// weights are read from the module-scope __device__ array d_gKernel, which
// the host uploads via hipMemcpyToSymbol before launch.
__global__ void gaussianBlurGpu(const byte* d_sourceImage, byte* d_destinationImage, const unsigned int width, const unsigned int height) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x > 0 && y > 0 && x < width - 1 && y < height - 1)
{
double sum = 0;
for (int i = 0; i < 5; i++)
{
for (int j = 0; j < 5; j++)
{
int num;
// Same top/left border substitution as the CPU version (value 20).
if (y < 4 || x < 4) {
num = 20;
}
else {
num = d_sourceImage[(y - 2 + i) * width + (x - 2 + j)];
}
sum += num * d_gKernel[i * 5 + j];
}
}
d_destinationImage[y * width + x] = round(sum);
}
}
// Benchmark driver: runs Gaussian blur then Sobel edge detection on the
// input PNG, on both CPU and GPU, 100 times, writing timings to metrics.txt
// and the intermediate/final images to disk.
int main() {
    char* filename = INPUT_IMG;
    Image originalImage = loadPngImage(filename);
    auto width = originalImage.width;
    auto height = originalImage.height;
    dim3 threadsPerBlock(THREAD_COUNT_PER_DIM, THREAD_COUNT_PER_DIM);
    // BUG FIX: "ceil(width / THREAD_COUNT_PER_DIM)" truncated in integer
    // division before ceil() ran, so the right/bottom edges of the image were
    // never processed whenever the dimensions are not multiples of the block
    // size. Use integer ceil-division instead.
    dim3 numberOfBlocks((width + THREAD_COUNT_PER_DIM - 1) / THREAD_COUNT_PER_DIM,
                        (height + THREAD_COUNT_PER_DIM - 1) / THREAD_COUNT_PER_DIM);
    double gKernel[25];
    createGaussKernel(gKernel);
    std::ofstream metrics("metrics.txt");
    metrics << "gauss-cpu;gauss-gpu;sobel-cpu;sobel-gpu" << std::endl;
    for (int i = 0; i < 100; i++)
    {
#pragma region Gaussian filter[CPU]
        Image gaussDestinationImageCpu(new byte[width * height], width, height);
        auto gaussStartCpu = high_resolution_clock::now();
        gaussianBlurCpu(originalImage.pixels, gaussDestinationImageCpu.pixels, gKernel, width, height);
        auto gaussStopCpu = high_resolution_clock::now();
        auto gaussElapsedTimeCpu = duration_cast<microseconds>(gaussStopCpu - gaussStartCpu);
        printf("Gaussian blur CPU: %ld ms\n", gaussElapsedTimeCpu.count() / 1000);
        writePngImage(filename, "gauss_cpu", gaussDestinationImageCpu);
#pragma endregion
#pragma region Gaussian filter[GPU]
        Image gaussDestinationImageGpu(new byte[width * height], width, height);
        byte *d_gaussSource;
        byte *d_gaussDestination;
        hipMalloc((void**)&d_gaussSource, (width * height));
        hipMalloc((void**)&d_gaussDestination, (width * height));
        hipMemcpy(d_gaussSource, originalImage.pixels, (width * height), hipMemcpyHostToDevice);
        hipMemset(d_gaussDestination, 0, (width * height));
        // Upload the 5x5 kernel weights into the __device__ symbol d_gKernel.
        hipMemcpyToSymbol(d_gKernel, gKernel, sizeof(double) * 25);
        hipEvent_t gaussStart, gaussEnd;
        hipEventCreate(&gaussStart);
        hipEventCreate(&gaussEnd);
        hipEventRecord(gaussStart, 0);
        gaussianBlurGpu << <numberOfBlocks, threadsPerBlock >> > (d_gaussSource, d_gaussDestination, width, height);
        hipEventRecord(gaussEnd, 0);
        hipMemcpy(gaussDestinationImageGpu.pixels, d_gaussDestination, (width * height), hipMemcpyDeviceToHost);
        hipEventSynchronize(gaussEnd);
        float gaussElapsedTimeGpu = 0.0f;
        hipEventElapsedTime(&gaussElapsedTimeGpu, gaussStart, gaussEnd);
        printf("Gaussian blur GPU: %f ms\n", gaussElapsedTimeGpu);
        writePngImage(filename, "gauss_gpu", gaussDestinationImageGpu);
        // FIX: events were previously leaked on every iteration.
        hipEventDestroy(gaussStart);
        hipEventDestroy(gaussEnd);
        hipFree(d_gaussSource);
        hipFree(d_gaussDestination);
#pragma endregion
#pragma region Sobel edge detection on blurred image[CPU]
        Image sobelDestinationImageCpu(new byte[width * height], width, height);
        auto sobelStartCpu = high_resolution_clock::now();
        sobelEdgeDetectionCpu(gaussDestinationImageCpu.pixels, sobelDestinationImageCpu.pixels, width, height);
        auto sobelEndCpu = high_resolution_clock::now();
        auto sobelElapsedTimeCpu = duration_cast<microseconds>(sobelEndCpu - sobelStartCpu);
        printf("Sobel edge detection CPU: %ld ms\n", sobelElapsedTimeCpu.count() / 1000);
        writePngImage(filename, "sobel_cpu", sobelDestinationImageCpu);
#pragma endregion
#pragma region Sobel edge detection on blurred image[GPU]
        Image sobelDestinationImageGpu(new byte[width * height], width, height);
        byte *d_sobelSource;
        byte *d_sobelDestination;
        hipMalloc((void**)&d_sobelSource, (width * height));
        hipMalloc((void**)&d_sobelDestination, (width * height));
        hipMemcpy(d_sobelSource, gaussDestinationImageGpu.pixels, (width * height), hipMemcpyHostToDevice);
        hipMemset(d_sobelDestination, 0, (width * height));
        hipEvent_t sobelStart, sobelEnd;
        hipEventCreate(&sobelStart);
        hipEventCreate(&sobelEnd);
        hipEventRecord(sobelStart, 0);
        sobelEdgeDetectionGpu << <numberOfBlocks, threadsPerBlock >> > (d_sobelSource, d_sobelDestination, width, height);
        hipEventRecord(sobelEnd, 0);
        hipMemcpy(sobelDestinationImageGpu.pixels, d_sobelDestination, (width * height), hipMemcpyDeviceToHost);
        hipEventSynchronize(sobelEnd);
        float sobelElapsedTimeGpu = 0.0f;
        hipEventElapsedTime(&sobelElapsedTimeGpu, sobelStart, sobelEnd);
        printf("Sobel edge detection GPU: %f ms\n", sobelElapsedTimeGpu);
        writePngImage(filename, "sobel_gpu", sobelDestinationImageGpu);
        hipEventDestroy(sobelStart);
        hipEventDestroy(sobelEnd);
        hipFree(d_sobelSource);
        hipFree(d_sobelDestination);
#pragma endregion
        delete[] gaussDestinationImageCpu.pixels;
        delete[] gaussDestinationImageGpu.pixels;
        delete[] sobelDestinationImageCpu.pixels;
        delete[] sobelDestinationImageGpu.pixels;
        std::string line = std::to_string(gaussElapsedTimeCpu.count() / 1000) + ";" + std::to_string(gaussElapsedTimeGpu) + ";"
            + std::to_string(sobelElapsedTimeCpu.count() / 1000) + ";" + std::to_string(sobelElapsedTimeGpu);
        metrics << line.c_str() << std::endl;
    }
    metrics.close();
    // FIX: the grayscale buffer allocated by loadPngImage was leaked.
    delete[] originalImage.pixels;
    return 0;
}
| ff13baaf2a8dc0897f77fc166568a6b00ba2d42d.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "lodepng.h"
#include "lodepng.cpp"
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <chrono>
using namespace std::chrono;
#define THREAD_COUNT_PER_DIM 30
#define INPUT_IMG "input_img.png"
#define PI 3.14159265358979323846
typedef unsigned char byte;
// Lightweight image record: raw pixel buffer plus dimensions.
// NOTE(review): does not own/free `pixels`; callers manage the buffer.
struct Image {
Image(byte* pixels = nullptr, unsigned int width = 0, unsigned int height = 0) : pixels(pixels), width(width), height(height) {
};
byte* pixels; // raw pixel data (grayscale here: width*height bytes)
unsigned int width;
unsigned int height;
};
// Image loading converts to grayscale by default, because colors are not needed in this case.
// Image loading converts to grayscale by default, because colors are not needed in this case.
// Decodes the PNG as RGBA8 via lodepng, then reduces each pixel to one byte
// using fixed luma-like weights (0.3 R + 0.6 G + 0.1 B); the trailing +0.5
// rounds to nearest. Exits the process on decode failure.
Image loadPngImage(char* filename) {
unsigned int width, height;
byte* rgbImage;
unsigned error = lodepng_decode_file(&rgbImage, &width, &height, filename, LCT_RGBA, 8);
if (error) {
printf("Error loading image: %u: %s\n", error, lodepng_error_text(error));
exit(2);
}
byte* grayscale = new byte[width * height];
byte* img = rgbImage;
for (int i = 0; i < width * height; ++i) {
int r = *img++; // red
int g = *img++; // green
int b = *img++; // blue
int a = *img++; // opacity (read only to advance the pointer; otherwise unused)
grayscale[i] = 0.3 * r + 0.6 * g + 0.1 * b + 0.5;
}
free(rgbImage);
// Caller takes ownership of the grayscale buffer.
return Image(grayscale, width, height);
}
void writePngImage(char* filename, std::string appendText, Image outputImage) {
std::string newName = filename;
newName = newName.substr(0, newName.rfind("."));
newName.append("_").append(appendText).append(".png");
unsigned error = lodepng_encode_file(newName.c_str(), outputImage.pixels, outputImage.width, outputImage.height, LCT_GREY, 8);
if (error) {
printf("Error writing image: %u: %s\n", error, lodepng_error_text(error));
exit(3);
}
}
// Sobel X
// -1 0 1
// -2 0 2
// -1 0 1
// Sobel Y
// -1 -2 -1
// 0 0 0
// 1 2 1
// arr[x][y] == arr[y*width + x]
void sobelEdgeDetectionCpu(const byte* original, byte* destination, const unsigned int width, const unsigned int height) {
for (int y = 1; y < height - 1; y++) {
for (int x = 1; x < width - 1; x++) {
int dx = (-1 * original[(y - 1) * width + (x - 1)]) + (-2 * original[y * width + (x - 1)]) + (-1 * original[(y + 1) * width + (x - 1)]) +
(original[(y - 1) * width + (x + 1)]) + (2 * original[y * width + (x + 1)]) + (original[(y + 1) * width + (x + 1)]);
int dy = (original[(y - 1) * width + (x - 1)]) + (2 * original[(y - 1) * width + x]) + (original[(y - 1) * width + (x + 1)]) +
(-1 * original[(y + 1) * width + (x - 1)]) + (-2 * original[(y + 1) * width + x]) + (-1 * original[(y + 1) * width + (x + 1)]);
destination[y * width + x] = sqrt((dx * dx) + (dy * dy));
}
}
}
// GPU Sobel edge detector: one thread per pixel on a 2D grid; the interior
// guard leaves border pixels untouched.
// FIXES: magnitude is now saturated to 255 before the byte store (it can
// reach ~1442), sqrtf avoids an implicit float->double round trip, and the
// gradients are declared inside the guard instead of left uninitialized.
__global__ void sobelEdgeDetectionGpu(const byte* d_sourceImage, byte* d_destinationImage, const unsigned int width, const unsigned int height) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x > 0 && y > 0 && x < width - 1 && y < height - 1) {
        // Horizontal gradient (Sobel X kernel).
        float sobelX = (-1 * d_sourceImage[(y - 1) * width + (x - 1)]) + (-2 * d_sourceImage[y * width + (x - 1)]) + (-1 * d_sourceImage[(y + 1) * width + (x - 1)]) +
            (d_sourceImage[(y - 1) * width + (x + 1)]) + (2 * d_sourceImage[y * width + (x + 1)]) + (d_sourceImage[(y + 1) * width + (x + 1)]);
        // Vertical gradient (Sobel Y kernel).
        float sobelY = (-1 * d_sourceImage[(y - 1) * width + (x - 1)]) + (-2 * d_sourceImage[(y - 1) * width + x]) + (-1 * d_sourceImage[(y - 1) * width + (x + 1)]) +
            (d_sourceImage[(y + 1) * width + (x - 1)]) + (2 * d_sourceImage[(y + 1) * width + x]) + (d_sourceImage[(y + 1) * width + (x + 1)]);
        const float magnitude = sqrtf((sobelX * sobelX) + (sobelY * sobelY));
        d_destinationImage[y * width + x] = (byte)fminf(magnitude, 255.0f);
    }
}
// Builds a normalized 5x5 Gaussian kernel (sigma = 1.0) in row-major order.
// The 25 weights sum to 1 after normalization.
// BUG FIX: the parameter was declared `double[5]` although 25 elements are
// written; the declared extent now matches the real footprint. Array
// parameters decay to `double*`, so existing callers are unaffected.
void createGaussKernel(double gaussKernel[25])
{
    // intialising standard deviation to 1.0
    const double sigma = 1.0;
    const double s = 2.0 * sigma * sigma;          // 2*sigma^2
    const double pi = 3.14159265358979323846;      // matches the file-wide PI macro
    double sum = 0.0;
    // Sample exp(-r^2 / (2*sigma^2)) / (2*pi*sigma^2) on the grid [-2,2]^2.
    for (int x = -2; x <= 2; x++) {
        for (int y = -2; y <= 2; y++) {
            const double r2 = (double)(x * x + y * y);
            gaussKernel[x + 2 + (y + 2) * 5] = exp(-r2 / s) / (pi * s);
            sum += gaussKernel[x + 2 + (y + 2) * 5];
        }
    }
    // normalising the Kernel so the weights sum to exactly 1
    for (int i = 0; i < 25; ++i) {
        gaussKernel[i] /= sum;
    }
}
// CPU 5x5 Gaussian convolution using the row-major weights in gKernel.
// Border pixels (outermost row/column) are skipped and left untouched.
void gaussianBlurCpu(const byte* original, byte* destination, double* gKernel, const unsigned int width, const unsigned int height) {
for (int y = 0; y < height - 1; y++)
{
for (int x = 0; x < width - 1; x++)
{
// Guard is partly redundant with the loop bounds, but also skips
// x == 0 / y == 0 so only interior pixels are written.
if (x > 0 && y > 0 && x < width - 1 && y < height - 1)
{
double sum = 0;
for (int i = 0; i < 5; i++)
{
for (int j = 0; j < 5; j++)
{
int num;
// Near the top/left edge a fixed value of 20 is substituted
// for the sample. NOTE(review): this clamps whole rows/columns
// (y < 4 || x < 4), not just the taps that would fall outside
// the image — confirm this asymmetric border handling is intended.
if (y < 4 || x < 4) {
num = 20;
}
else {
num = original[(y - 2 + i) * width + (x - 2 + j)];
}
sum += num * gKernel[i * 5 + j];
}
}
destination[y * width + x] = round(sum);
}
}
}
}
__device__ double d_gKernel[25];
// GPU twin of gaussianBlurCpu: one thread per interior pixel. The 5x5
// weights are read from the module-scope __device__ array d_gKernel, which
// the host uploads via cudaMemcpyToSymbol before launch.
__global__ void gaussianBlurGpu(const byte* d_sourceImage, byte* d_destinationImage, const unsigned int width, const unsigned int height) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x > 0 && y > 0 && x < width - 1 && y < height - 1)
{
double sum = 0;
for (int i = 0; i < 5; i++)
{
for (int j = 0; j < 5; j++)
{
int num;
// Same top/left border substitution as the CPU version (value 20).
if (y < 4 || x < 4) {
num = 20;
}
else {
num = d_sourceImage[(y - 2 + i) * width + (x - 2 + j)];
}
sum += num * d_gKernel[i * 5 + j];
}
}
d_destinationImage[y * width + x] = round(sum);
}
}
// Benchmark driver: runs Gaussian blur then Sobel edge detection on the
// input PNG, on both CPU and GPU, 100 times, writing timings to metrics.txt
// and the intermediate/final images to disk.
int main() {
    char* filename = INPUT_IMG;
    Image originalImage = loadPngImage(filename);
    auto width = originalImage.width;
    auto height = originalImage.height;
    dim3 threadsPerBlock(THREAD_COUNT_PER_DIM, THREAD_COUNT_PER_DIM);
    // BUG FIX: "ceil(width / THREAD_COUNT_PER_DIM)" truncated in integer
    // division before ceil() ran, so the right/bottom edges of the image were
    // never processed whenever the dimensions are not multiples of the block
    // size. Use integer ceil-division instead.
    dim3 numberOfBlocks((width + THREAD_COUNT_PER_DIM - 1) / THREAD_COUNT_PER_DIM,
                        (height + THREAD_COUNT_PER_DIM - 1) / THREAD_COUNT_PER_DIM);
    double gKernel[25];
    createGaussKernel(gKernel);
    std::ofstream metrics("metrics.txt");
    metrics << "gauss-cpu;gauss-gpu;sobel-cpu;sobel-gpu" << std::endl;
    for (int i = 0; i < 100; i++)
    {
#pragma region Gaussian filter[CPU]
        Image gaussDestinationImageCpu(new byte[width * height], width, height);
        auto gaussStartCpu = high_resolution_clock::now();
        gaussianBlurCpu(originalImage.pixels, gaussDestinationImageCpu.pixels, gKernel, width, height);
        auto gaussStopCpu = high_resolution_clock::now();
        auto gaussElapsedTimeCpu = duration_cast<microseconds>(gaussStopCpu - gaussStartCpu);
        printf("Gaussian blur CPU: %ld ms\n", gaussElapsedTimeCpu.count() / 1000);
        writePngImage(filename, "gauss_cpu", gaussDestinationImageCpu);
#pragma endregion
#pragma region Gaussian filter[GPU]
        Image gaussDestinationImageGpu(new byte[width * height], width, height);
        byte *d_gaussSource;
        byte *d_gaussDestination;
        cudaMalloc((void**)&d_gaussSource, (width * height));
        cudaMalloc((void**)&d_gaussDestination, (width * height));
        cudaMemcpy(d_gaussSource, originalImage.pixels, (width * height), cudaMemcpyHostToDevice);
        cudaMemset(d_gaussDestination, 0, (width * height));
        // Upload the 5x5 kernel weights into the __device__ symbol d_gKernel.
        cudaMemcpyToSymbol(d_gKernel, gKernel, sizeof(double) * 25);
        cudaEvent_t gaussStart, gaussEnd;
        cudaEventCreate(&gaussStart);
        cudaEventCreate(&gaussEnd);
        cudaEventRecord(gaussStart, 0);
        gaussianBlurGpu << <numberOfBlocks, threadsPerBlock >> > (d_gaussSource, d_gaussDestination, width, height);
        cudaEventRecord(gaussEnd, 0);
        cudaMemcpy(gaussDestinationImageGpu.pixels, d_gaussDestination, (width * height), cudaMemcpyDeviceToHost);
        cudaEventSynchronize(gaussEnd);
        float gaussElapsedTimeGpu = 0.0f;
        cudaEventElapsedTime(&gaussElapsedTimeGpu, gaussStart, gaussEnd);
        printf("Gaussian blur GPU: %f ms\n", gaussElapsedTimeGpu);
        writePngImage(filename, "gauss_gpu", gaussDestinationImageGpu);
        // FIX: events were previously leaked on every iteration.
        cudaEventDestroy(gaussStart);
        cudaEventDestroy(gaussEnd);
        cudaFree(d_gaussSource);
        cudaFree(d_gaussDestination);
#pragma endregion
#pragma region Sobel edge detection on blurred image[CPU]
        Image sobelDestinationImageCpu(new byte[width * height], width, height);
        auto sobelStartCpu = high_resolution_clock::now();
        sobelEdgeDetectionCpu(gaussDestinationImageCpu.pixels, sobelDestinationImageCpu.pixels, width, height);
        auto sobelEndCpu = high_resolution_clock::now();
        auto sobelElapsedTimeCpu = duration_cast<microseconds>(sobelEndCpu - sobelStartCpu);
        printf("Sobel edge detection CPU: %ld ms\n", sobelElapsedTimeCpu.count() / 1000);
        writePngImage(filename, "sobel_cpu", sobelDestinationImageCpu);
#pragma endregion
#pragma region Sobel edge detection on blurred image[GPU]
        Image sobelDestinationImageGpu(new byte[width * height], width, height);
        byte *d_sobelSource;
        byte *d_sobelDestination;
        cudaMalloc((void**)&d_sobelSource, (width * height));
        cudaMalloc((void**)&d_sobelDestination, (width * height));
        cudaMemcpy(d_sobelSource, gaussDestinationImageGpu.pixels, (width * height), cudaMemcpyHostToDevice);
        cudaMemset(d_sobelDestination, 0, (width * height));
        cudaEvent_t sobelStart, sobelEnd;
        cudaEventCreate(&sobelStart);
        cudaEventCreate(&sobelEnd);
        cudaEventRecord(sobelStart, 0);
        sobelEdgeDetectionGpu << <numberOfBlocks, threadsPerBlock >> > (d_sobelSource, d_sobelDestination, width, height);
        cudaEventRecord(sobelEnd, 0);
        cudaMemcpy(sobelDestinationImageGpu.pixels, d_sobelDestination, (width * height), cudaMemcpyDeviceToHost);
        cudaEventSynchronize(sobelEnd);
        float sobelElapsedTimeGpu = 0.0f;
        cudaEventElapsedTime(&sobelElapsedTimeGpu, sobelStart, sobelEnd);
        printf("Sobel edge detection GPU: %f ms\n", sobelElapsedTimeGpu);
        writePngImage(filename, "sobel_gpu", sobelDestinationImageGpu);
        cudaEventDestroy(sobelStart);
        cudaEventDestroy(sobelEnd);
        cudaFree(d_sobelSource);
        cudaFree(d_sobelDestination);
#pragma endregion
        delete[] gaussDestinationImageCpu.pixels;
        delete[] gaussDestinationImageGpu.pixels;
        delete[] sobelDestinationImageCpu.pixels;
        delete[] sobelDestinationImageGpu.pixels;
        std::string line = std::to_string(gaussElapsedTimeCpu.count() / 1000) + ";" + std::to_string(gaussElapsedTimeGpu) + ";"
            + std::to_string(sobelElapsedTimeCpu.count() / 1000) + ";" + std::to_string(sobelElapsedTimeGpu);
        metrics << line.c_str() << std::endl;
    }
    metrics.close();
    // FIX: the grayscale buffer allocated by loadPngImage was leaked.
    delete[] originalImage.pixels;
    return 0;
}
|
e590c9049d00ad8911ef7398b2f5799a14e3c067.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
#include <iostream>
#include <time.h>
#include <hip/hip_vector_types.h>
#include "opencv2/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "image_processing.h"
#include "image_processing.cuh"
#include "image_processing.cpp"
#include "image_processing_kernels.hip"
using namespace cv;
using namespace std;
// Driver: for each input image, runs BGR->YUV444->YUV422 conversion, channel
// extraction and YUV422->BGR round-trip on the GPU, timing each kernel and
// saving the intermediate results to disk.
int main() {
    // Load input file paths
    vector<string> image_paths;
    image_paths.push_back("./images/image_512x384.png");
    image_paths.push_back("./images/image_720x540.png");
    image_paths.push_back("./images/image_1024x768.png");
    // Iterate through the images, call the necessary kernels and perform measurements
    for (int i = 0; i < image_paths.size(); i++) {
        string image_path = image_paths.at(i);
        Mat img = imread(image_path);
        // Check whether the image is imported correctly
        if(img.empty())
        {
            cout << "Could not read the image: " << image_path << endl;
            return 1;
        }
        const unsigned int imgheight = img.rows;
        const unsigned int imgwidth = img.cols;
        cout << "Loaded " << imgwidth << " " << imgheight << " image." << endl;
        // BGR to YUV444 conversion
        uchar3 *d_in;
        unsigned char *d_out_yuv444;
        hipMalloc((void**)&d_in, imgheight*imgwidth*sizeof(uchar3));
        hipMalloc((void**)&d_out_yuv444, imgheight*imgwidth*sizeof(unsigned char) * 3);
        hipMemcpy(d_in, img.data, imgheight*imgwidth*sizeof(uchar3), hipMemcpyHostToDevice);
        double gpu_time = bgr_to_yuv444 (d_in, d_out_yuv444, imgheight, imgwidth);
        cout.precision(12);
        cout << "Execution time of bgr2yuv444 kernel is: "<< gpu_time << " sec." << endl;
        /// YUV444 to YUV422 conversion
        unsigned char *d_out_yuv422;
        hipMalloc((void**)&d_out_yuv422, imgheight*imgwidth*sizeof(unsigned char) * 2);
        gpu_time = yuv444_to_yuv422 (d_out_yuv444, d_out_yuv422, imgheight, imgwidth);
        cout << "Execution time of yuv444toyuv422 kernel is: "<< gpu_time << " sec." << endl;
        // Conversion to BGR using cvtColor in order to display/save the image
        Mat yuv422_image(imgheight, imgwidth, CV_8UC2);
        Mat bgr_image(imgheight, imgwidth, CV_8UC3);
        hipMemcpy(yuv422_image.data, d_out_yuv422, imgheight*imgwidth*sizeof(unsigned char) * 2, hipMemcpyDeviceToHost);
        cvtColor(yuv422_image, bgr_image, CV_YUV2BGR_UYVY);
        // Save YUV422 image
        string filename = "bgr_to_yuv422_" + to_string(i+1) + ".png";
        imwrite(filename, bgr_image);
        /// YUV422 to single separate channels conversion
        unsigned char *d_y_channel;
        hipMalloc((void**)&d_y_channel, imgheight*imgwidth*sizeof(unsigned char));
        unsigned char *d_u_channel;
        hipMalloc((void**)&d_u_channel, imgheight*imgwidth*sizeof(unsigned char));
        unsigned char *d_v_channel;
        hipMalloc((void**)&d_v_channel, imgheight*imgwidth*sizeof(unsigned char));
        gpu_time = extract_separate_channels(d_out_yuv422, d_y_channel, d_u_channel, d_v_channel, imgheight, imgwidth);
        cout << "Execution time of extract_channels kernel is: "<< gpu_time << " sec." << endl;
        // Save all 3 channels separately (U and V are horizontally subsampled 2:1)
        Mat output_y_channel(imgheight, imgwidth, CV_8UC1);
        hipMemcpy(output_y_channel.data, d_y_channel, imgheight*imgwidth*sizeof(unsigned char), hipMemcpyDeviceToHost);
        filename = "y_channel_" + to_string(i+1) + ".png";
        imwrite(filename, output_y_channel);
        Mat output_uv_channels(imgheight, 0.5 * imgwidth, CV_8UC1);
        hipMemcpy(output_uv_channels.data, d_u_channel, 0.5 * imgheight*imgwidth*sizeof(unsigned char), hipMemcpyDeviceToHost);
        filename = "u_channel_" + to_string(i+1) + ".png";
        imwrite(filename, output_uv_channels);
        hipMemcpy(output_uv_channels.data, d_v_channel, 0.5 * imgheight*imgwidth*sizeof(unsigned char), hipMemcpyDeviceToHost);
        filename = "v_channel_" + to_string(i+1) + ".png";
        imwrite(filename, output_uv_channels);
        /// YUV422 to BGR conversion
        uchar3 *d_out_bgr;
        hipMalloc((void**)&d_out_bgr, imgheight*imgwidth*sizeof(uchar3));
        gpu_time = yuv422_to_bgr (d_out_yuv422, d_out_bgr, imgheight, imgwidth);
        cout << "Execution time of yuv422tobgr kernel is: "<< gpu_time << " sec." << endl;
        Mat final_bgr_image(imgheight, imgwidth, CV_8UC3);
        hipMemcpy(final_bgr_image.data, d_out_bgr, imgheight*imgwidth*sizeof(uchar3), hipMemcpyDeviceToHost);
        // Save final BGR image
        // BUG FIX: the round-trip result was previously saved from
        // output_y_channel (the luma plane) instead of final_bgr_image;
        // the filename typo "yuv442" is also corrected to "yuv422".
        filename = "yuv422_to_bgr_" + to_string(i+1) + ".png";
        imwrite(filename, final_bgr_image);
        cout << "-----------------------------------------------------------" << endl;
        hipFree(d_in);
        hipFree(d_out_yuv444);
        hipFree(d_out_yuv422);
        hipFree(d_y_channel);
        hipFree(d_u_channel);
        hipFree(d_v_channel);
        hipFree(d_out_bgr);
    }
    return 0;
} | e590c9049d00ad8911ef7398b2f5799a14e3c067.cu | #include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
#include <iostream>
#include <time.h>
#include <vector_types.h>
#include "opencv2/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "image_processing.h"
#include "image_processing.cuh"
#include "image_processing.cpp"
#include "image_processing_kernels.cu"
using namespace cv;
using namespace std;
// Driver: for each input image, runs BGR->YUV444->YUV422 conversion, channel
// extraction and YUV422->BGR round-trip on the GPU, timing each kernel and
// saving the intermediate results to disk.
int main() {
    // Load input file paths
    vector<string> image_paths;
    image_paths.push_back("./images/image_512x384.png");
    image_paths.push_back("./images/image_720x540.png");
    image_paths.push_back("./images/image_1024x768.png");
    // Iterate through the images, call the necessary kernels and perform measurements
    for (int i = 0; i < image_paths.size(); i++) {
        string image_path = image_paths.at(i);
        Mat img = imread(image_path);
        // Check whether the image is imported correctly
        if(img.empty())
        {
            cout << "Could not read the image: " << image_path << endl;
            return 1;
        }
        const unsigned int imgheight = img.rows;
        const unsigned int imgwidth = img.cols;
        cout << "Loaded " << imgwidth << " × " << imgheight << " image." << endl;
        // BGR to YUV444 conversion
        uchar3 *d_in;
        unsigned char *d_out_yuv444;
        cudaMalloc((void**)&d_in, imgheight*imgwidth*sizeof(uchar3));
        cudaMalloc((void**)&d_out_yuv444, imgheight*imgwidth*sizeof(unsigned char) * 3);
        cudaMemcpy(d_in, img.data, imgheight*imgwidth*sizeof(uchar3), cudaMemcpyHostToDevice);
        double gpu_time = bgr_to_yuv444 (d_in, d_out_yuv444, imgheight, imgwidth);
        cout.precision(12);
        cout << "Execution time of bgr2yuv444 kernel is: "<< gpu_time << " sec." << endl;
        /// YUV444 to YUV422 conversion
        unsigned char *d_out_yuv422;
        cudaMalloc((void**)&d_out_yuv422, imgheight*imgwidth*sizeof(unsigned char) * 2);
        gpu_time = yuv444_to_yuv422 (d_out_yuv444, d_out_yuv422, imgheight, imgwidth);
        cout << "Execution time of yuv444toyuv422 kernel is: "<< gpu_time << " sec." << endl;
        // Conversion to BGR using cvtColor in order to display/save the image
        Mat yuv422_image(imgheight, imgwidth, CV_8UC2);
        Mat bgr_image(imgheight, imgwidth, CV_8UC3);
        cudaMemcpy(yuv422_image.data, d_out_yuv422, imgheight*imgwidth*sizeof(unsigned char) * 2, cudaMemcpyDeviceToHost);
        cvtColor(yuv422_image, bgr_image, CV_YUV2BGR_UYVY);
        // Save YUV422 image
        string filename = "bgr_to_yuv422_" + to_string(i+1) + ".png";
        imwrite(filename, bgr_image);
        /// YUV422 to single separate channels conversion
        unsigned char *d_y_channel;
        cudaMalloc((void**)&d_y_channel, imgheight*imgwidth*sizeof(unsigned char));
        unsigned char *d_u_channel;
        cudaMalloc((void**)&d_u_channel, imgheight*imgwidth*sizeof(unsigned char));
        unsigned char *d_v_channel;
        cudaMalloc((void**)&d_v_channel, imgheight*imgwidth*sizeof(unsigned char));
        gpu_time = extract_separate_channels(d_out_yuv422, d_y_channel, d_u_channel, d_v_channel, imgheight, imgwidth);
        cout << "Execution time of extract_channels kernel is: "<< gpu_time << " sec." << endl;
        // Save all 3 channels separately (U and V are horizontally subsampled 2:1)
        Mat output_y_channel(imgheight, imgwidth, CV_8UC1);
        cudaMemcpy(output_y_channel.data, d_y_channel, imgheight*imgwidth*sizeof(unsigned char), cudaMemcpyDeviceToHost);
        filename = "y_channel_" + to_string(i+1) + ".png";
        imwrite(filename, output_y_channel);
        Mat output_uv_channels(imgheight, 0.5 * imgwidth, CV_8UC1);
        cudaMemcpy(output_uv_channels.data, d_u_channel, 0.5 * imgheight*imgwidth*sizeof(unsigned char), cudaMemcpyDeviceToHost);
        filename = "u_channel_" + to_string(i+1) + ".png";
        imwrite(filename, output_uv_channels);
        cudaMemcpy(output_uv_channels.data, d_v_channel, 0.5 * imgheight*imgwidth*sizeof(unsigned char), cudaMemcpyDeviceToHost);
        filename = "v_channel_" + to_string(i+1) + ".png";
        imwrite(filename, output_uv_channels);
        /// YUV422 to BGR conversion
        uchar3 *d_out_bgr;
        cudaMalloc((void**)&d_out_bgr, imgheight*imgwidth*sizeof(uchar3));
        gpu_time = yuv422_to_bgr (d_out_yuv422, d_out_bgr, imgheight, imgwidth);
        cout << "Execution time of yuv422tobgr kernel is: "<< gpu_time << " sec." << endl;
        Mat final_bgr_image(imgheight, imgwidth, CV_8UC3);
        cudaMemcpy(final_bgr_image.data, d_out_bgr, imgheight*imgwidth*sizeof(uchar3), cudaMemcpyDeviceToHost);
        // Save final BGR image
        // BUG FIX: the round-trip result was previously saved from
        // output_y_channel (the luma plane) instead of final_bgr_image;
        // the filename typo "yuv442" is also corrected to "yuv422".
        filename = "yuv422_to_bgr_" + to_string(i+1) + ".png";
        imwrite(filename, final_bgr_image);
        cout << "-----------------------------------------------------------" << endl;
        cudaFree(d_in);
        cudaFree(d_out_yuv444);
        cudaFree(d_out_yuv422);
        cudaFree(d_y_channel);
        cudaFree(d_u_channel);
        cudaFree(d_v_channel);
        cudaFree(d_out_bgr);
    }
    return 0;
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.